repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
NgeliMrasi/healthstash-prototype | 41,448 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.13/crypto/curve25519/asm/x25519-asm-arm.S | // Copyright 2015 The BoringSSL Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* This file is taken from crypto_scalarmult/curve25519/neon2/scalarmult.s in
* SUPERCOP 20141124 (http://bench.cr.yp.to/supercop.html). That code is public
* domain licensed but the standard Apache 2.0 license is included above to keep
* licensing simple. */
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.fpu neon
.text
.align 4
.global x25519_NEON
.hidden x25519_NEON
.type x25519_NEON, %function
x25519_NEON:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#736
and sp,sp,#0xffffffe0
strd r4,[sp,#0]
strd r6,[sp,#8]
strd r8,[sp,#16]
strd r10,[sp,#24]
str r12,[sp,#480]
str r14,[sp,#484]
mov r0,r0
mov r1,r1
mov r2,r2
add r3,sp,#32
ldr r4,=0
ldr r5,=254
vmov.i32 q0,#1
vshr.u64 q1,q0,#7
vshr.u64 q0,q0,#8
vmov.i32 d4,#19
vmov.i32 d5,#38
add r6,sp,#512
vst1.8 {d2-d3},[r6,: 128]
add r6,sp,#528
vst1.8 {d0-d1},[r6,: 128]
add r6,sp,#544
vst1.8 {d4-d5},[r6,: 128]
add r6,r3,#0
vmov.i32 q2,#0
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 d4,[r6,: 64]
add r6,r3,#0
ldr r7,=960
sub r7,r7,#2
neg r7,r7
sub r7,r7,r7,LSL #7
str r7,[r6]
add r6,sp,#704
vld1.8 {d4-d5},[r1]!
vld1.8 {d6-d7},[r1]
vst1.8 {d4-d5},[r6,: 128]!
vst1.8 {d6-d7},[r6,: 128]
sub r1,r6,#16
ldrb r6,[r1]
and r6,r6,#248
strb r6,[r1]
ldrb r6,[r1,#31]
and r6,r6,#127
orr r6,r6,#64
strb r6,[r1,#31]
vmov.i64 q2,#0xffffffff
vshr.u64 q3,q2,#7
vshr.u64 q2,q2,#6
vld1.8 {d8},[r2]
vld1.8 {d10},[r2]
add r2,r2,#6
vld1.8 {d12},[r2]
vld1.8 {d14},[r2]
add r2,r2,#6
vld1.8 {d16},[r2]
add r2,r2,#4
vld1.8 {d18},[r2]
vld1.8 {d20},[r2]
add r2,r2,#6
vld1.8 {d22},[r2]
add r2,r2,#2
vld1.8 {d24},[r2]
vld1.8 {d26},[r2]
vshr.u64 q5,q5,#26
vshr.u64 q6,q6,#3
vshr.u64 q7,q7,#29
vshr.u64 q8,q8,#6
vshr.u64 q10,q10,#25
vshr.u64 q11,q11,#3
vshr.u64 q12,q12,#12
vshr.u64 q13,q13,#38
vand q4,q4,q2
vand q6,q6,q2
vand q8,q8,q2
vand q10,q10,q2
vand q2,q12,q2
vand q5,q5,q3
vand q7,q7,q3
vand q9,q9,q3
vand q11,q11,q3
vand q3,q13,q3
add r2,r3,#48
vadd.i64 q12,q4,q1
vadd.i64 q13,q10,q1
vshr.s64 q12,q12,#26
vshr.s64 q13,q13,#26
vadd.i64 q5,q5,q12
vshl.i64 q12,q12,#26
vadd.i64 q14,q5,q0
vadd.i64 q11,q11,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q11,q0
vsub.i64 q4,q4,q12
vshr.s64 q12,q14,#25
vsub.i64 q10,q10,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q12
vshl.i64 q12,q12,#25
vadd.i64 q14,q6,q1
vadd.i64 q2,q2,q13
vsub.i64 q5,q5,q12
vshr.s64 q12,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q1
vadd.i64 q7,q7,q12
vshl.i64 q12,q12,#26
vadd.i64 q15,q7,q0
vsub.i64 q11,q11,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q12
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q3,q0
vadd.i64 q8,q8,q12
vshl.i64 q12,q12,#25
vadd.i64 q15,q8,q1
add r2,r2,#8
vsub.i64 q2,q2,q13
vshr.s64 q13,q14,#25
vsub.i64 q7,q7,q12
vshr.s64 q12,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q9,q9,q12
vtrn.32 d12,d14
vshl.i64 q12,q12,#26
vtrn.32 d13,d15
vadd.i64 q0,q9,q0
vadd.i64 q4,q4,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q6,q13,#4
vsub.i64 q7,q8,q12
vshr.s64 q0,q0,#25
vadd.i64 q4,q4,q6
vadd.i64 q6,q10,q0
vshl.i64 q0,q0,#25
vadd.i64 q8,q6,q1
vadd.i64 q4,q4,q13
vshl.i64 q10,q13,#25
vadd.i64 q1,q4,q1
vsub.i64 q0,q9,q0
vshr.s64 q8,q8,#26
vsub.i64 q3,q3,q10
vtrn.32 d14,d0
vshr.s64 q1,q1,#26
vtrn.32 d15,d1
vadd.i64 q0,q11,q8
vst1.8 d14,[r2,: 64]
vshl.i64 q7,q8,#26
vadd.i64 q5,q5,q1
vtrn.32 d4,d6
vshl.i64 q1,q1,#26
vtrn.32 d5,d7
vsub.i64 q3,q6,q7
add r2,r2,#16
vsub.i64 q1,q4,q1
vst1.8 d4,[r2,: 64]
vtrn.32 d6,d0
vtrn.32 d7,d1
sub r2,r2,#8
vtrn.32 d2,d10
vtrn.32 d3,d11
vst1.8 d6,[r2,: 64]
sub r2,r2,#24
vst1.8 d2,[r2,: 64]
add r2,r3,#96
vmov.i32 q0,#0
vmov.i64 d2,#0xff
vmov.i64 d3,#0
vshr.u32 q1,q1,#7
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#144
vmov.i32 q0,#0
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#240
vmov.i32 q0,#0
vmov.i64 d2,#0xff
vmov.i64 d3,#0
vshr.u32 q1,q1,#7
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 d0,[r2,: 64]
add r2,r3,#48
add r6,r3,#192
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r6,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
vst1.8 d4,[r6,: 64]
._mainloop:
mov r2,r5,LSR #3
and r6,r5,#7
ldrb r2,[r1,r2]
mov r2,r2,LSR r6
and r2,r2,#1
str r5,[sp,#488]
eor r4,r4,r2
str r2,[sp,#492]
neg r2,r4
add r4,r3,#96
add r5,r3,#192
add r6,r3,#144
vld1.8 {d8-d9},[r4,: 128]!
add r7,r3,#240
vld1.8 {d10-d11},[r5,: 128]!
veor q6,q4,q5
vld1.8 {d14-d15},[r6,: 128]!
vdup.i32 q8,r2
vld1.8 {d18-d19},[r7,: 128]!
veor q10,q7,q9
vld1.8 {d22-d23},[r4,: 128]!
vand q6,q6,q8
vld1.8 {d24-d25},[r5,: 128]!
vand q10,q10,q8
vld1.8 {d26-d27},[r6,: 128]!
veor q4,q4,q6
vld1.8 {d28-d29},[r7,: 128]!
veor q5,q5,q6
vld1.8 {d0},[r4,: 64]
veor q6,q7,q10
vld1.8 {d2},[r5,: 64]
veor q7,q9,q10
vld1.8 {d4},[r6,: 64]
veor q9,q11,q12
vld1.8 {d6},[r7,: 64]
veor q10,q0,q1
sub r2,r4,#32
vand q9,q9,q8
sub r4,r5,#32
vand q10,q10,q8
sub r5,r6,#32
veor q11,q11,q9
sub r6,r7,#32
veor q0,q0,q10
veor q9,q12,q9
veor q1,q1,q10
veor q10,q13,q14
veor q12,q2,q3
vand q10,q10,q8
vand q8,q12,q8
veor q12,q13,q10
veor q2,q2,q8
veor q10,q14,q10
veor q3,q3,q8
vadd.i32 q8,q4,q6
vsub.i32 q4,q4,q6
vst1.8 {d16-d17},[r2,: 128]!
vadd.i32 q6,q11,q12
vst1.8 {d8-d9},[r5,: 128]!
vsub.i32 q4,q11,q12
vst1.8 {d12-d13},[r2,: 128]!
vadd.i32 q6,q0,q2
vst1.8 {d8-d9},[r5,: 128]!
vsub.i32 q0,q0,q2
vst1.8 d12,[r2,: 64]
vadd.i32 q2,q5,q7
vst1.8 d0,[r5,: 64]
vsub.i32 q0,q5,q7
vst1.8 {d4-d5},[r4,: 128]!
vadd.i32 q2,q9,q10
vst1.8 {d0-d1},[r6,: 128]!
vsub.i32 q0,q9,q10
vst1.8 {d4-d5},[r4,: 128]!
vadd.i32 q2,q1,q3
vst1.8 {d0-d1},[r6,: 128]!
vsub.i32 q0,q1,q3
vst1.8 d4,[r4,: 64]
vst1.8 d0,[r6,: 64]
add r2,sp,#544
add r4,r3,#96
add r5,r3,#144
vld1.8 {d0-d1},[r2,: 128]
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
vzip.i32 q1,q2
vld1.8 {d6-d7},[r4,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
vshl.i32 q5,q1,#1
vzip.i32 q3,q4
vshl.i32 q6,q2,#1
vld1.8 {d14},[r4,: 64]
vshl.i32 q8,q3,#1
vld1.8 {d15},[r5,: 64]
vshl.i32 q9,q4,#1
vmul.i32 d21,d7,d1
vtrn.32 d14,d15
vmul.i32 q11,q4,q0
vmul.i32 q0,q7,q0
vmull.s32 q12,d2,d2
vmlal.s32 q12,d11,d1
vmlal.s32 q12,d12,d0
vmlal.s32 q12,d13,d23
vmlal.s32 q12,d16,d22
vmlal.s32 q12,d7,d21
vmull.s32 q10,d2,d11
vmlal.s32 q10,d4,d1
vmlal.s32 q10,d13,d0
vmlal.s32 q10,d6,d23
vmlal.s32 q10,d17,d22
vmull.s32 q13,d10,d4
vmlal.s32 q13,d11,d3
vmlal.s32 q13,d13,d1
vmlal.s32 q13,d16,d0
vmlal.s32 q13,d17,d23
vmlal.s32 q13,d8,d22
vmull.s32 q1,d10,d5
vmlal.s32 q1,d11,d4
vmlal.s32 q1,d6,d1
vmlal.s32 q1,d17,d0
vmlal.s32 q1,d8,d23
vmull.s32 q14,d10,d6
vmlal.s32 q14,d11,d13
vmlal.s32 q14,d4,d4
vmlal.s32 q14,d17,d1
vmlal.s32 q14,d18,d0
vmlal.s32 q14,d9,d23
vmull.s32 q11,d10,d7
vmlal.s32 q11,d11,d6
vmlal.s32 q11,d12,d5
vmlal.s32 q11,d8,d1
vmlal.s32 q11,d19,d0
vmull.s32 q15,d10,d8
vmlal.s32 q15,d11,d17
vmlal.s32 q15,d12,d6
vmlal.s32 q15,d13,d5
vmlal.s32 q15,d19,d1
vmlal.s32 q15,d14,d0
vmull.s32 q2,d10,d9
vmlal.s32 q2,d11,d8
vmlal.s32 q2,d12,d7
vmlal.s32 q2,d13,d6
vmlal.s32 q2,d14,d1
vmull.s32 q0,d15,d1
vmlal.s32 q0,d10,d14
vmlal.s32 q0,d11,d19
vmlal.s32 q0,d12,d8
vmlal.s32 q0,d13,d17
vmlal.s32 q0,d6,d6
add r2,sp,#512
vld1.8 {d18-d19},[r2,: 128]
vmull.s32 q3,d16,d7
vmlal.s32 q3,d10,d15
vmlal.s32 q3,d11,d14
vmlal.s32 q3,d12,d9
vmlal.s32 q3,d13,d8
add r2,sp,#528
vld1.8 {d8-d9},[r2,: 128]
vadd.i64 q5,q12,q9
vadd.i64 q6,q15,q9
vshr.s64 q5,q5,#26
vshr.s64 q6,q6,#26
vadd.i64 q7,q10,q5
vshl.i64 q5,q5,#26
vadd.i64 q8,q7,q4
vadd.i64 q2,q2,q6
vshl.i64 q6,q6,#26
vadd.i64 q10,q2,q4
vsub.i64 q5,q12,q5
vshr.s64 q8,q8,#25
vsub.i64 q6,q15,q6
vshr.s64 q10,q10,#25
vadd.i64 q12,q13,q8
vshl.i64 q8,q8,#25
vadd.i64 q13,q12,q9
vadd.i64 q0,q0,q10
vsub.i64 q7,q7,q8
vshr.s64 q8,q13,#26
vshl.i64 q10,q10,#25
vadd.i64 q13,q0,q9
vadd.i64 q1,q1,q8
vshl.i64 q8,q8,#26
vadd.i64 q15,q1,q4
vsub.i64 q2,q2,q10
vshr.s64 q10,q13,#26
vsub.i64 q8,q12,q8
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q10
vshl.i64 q10,q10,#26
vadd.i64 q13,q3,q4
vadd.i64 q14,q14,q12
add r2,r3,#288
vshl.i64 q12,q12,#25
add r4,r3,#336
vadd.i64 q15,q14,q9
add r2,r2,#8
vsub.i64 q0,q0,q10
add r4,r4,#8
vshr.s64 q10,q13,#25
vsub.i64 q1,q1,q12
vshr.s64 q12,q15,#26
vadd.i64 q13,q10,q10
vadd.i64 q11,q11,q12
vtrn.32 d16,d2
vshl.i64 q12,q12,#26
vtrn.32 d17,d3
vadd.i64 q1,q11,q4
vadd.i64 q4,q5,q13
vst1.8 d16,[r2,: 64]!
vshl.i64 q5,q10,#4
vst1.8 d17,[r4,: 64]!
vsub.i64 q8,q14,q12
vshr.s64 q1,q1,#25
vadd.i64 q4,q4,q5
vadd.i64 q5,q6,q1
vshl.i64 q1,q1,#25
vadd.i64 q6,q5,q9
vadd.i64 q4,q4,q10
vshl.i64 q10,q10,#25
vadd.i64 q9,q4,q9
vsub.i64 q1,q11,q1
vshr.s64 q6,q6,#26
vsub.i64 q3,q3,q10
vtrn.32 d16,d2
vshr.s64 q9,q9,#26
vtrn.32 d17,d3
vadd.i64 q1,q2,q6
vst1.8 d16,[r2,: 64]
vshl.i64 q2,q6,#26
vst1.8 d17,[r4,: 64]
vadd.i64 q6,q7,q9
vtrn.32 d0,d6
vshl.i64 q7,q9,#26
vtrn.32 d1,d7
vsub.i64 q2,q5,q2
add r2,r2,#16
vsub.i64 q3,q4,q7
vst1.8 d0,[r2,: 64]
add r4,r4,#16
vst1.8 d1,[r4,: 64]
vtrn.32 d4,d2
vtrn.32 d5,d3
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d6,d12
vtrn.32 d7,d13
vst1.8 d4,[r2,: 64]
vst1.8 d5,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d6,[r2,: 64]
vst1.8 d7,[r4,: 64]
add r2,r3,#240
add r4,r3,#96
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#144
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#192
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#144
vshl.i64 q7,q7,#25
add r4,r3,#96
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
add r2,r3,#288
add r4,r3,#336
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vsub.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
vsub.i32 q1,q1,q2
add r5,r3,#240
vld1.8 {d4},[r2,: 64]
vld1.8 {d6},[r4,: 64]
vsub.i32 q2,q2,q3
vst1.8 {d0-d1},[r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
vst1.8 d4,[r5,: 64]
add r2,r3,#144
add r4,r3,#96
add r5,r3,#144
add r6,r3,#192
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vsub.i32 q2,q0,q1
vadd.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d6-d7},[r4,: 128]!
vsub.i32 q4,q1,q3
vadd.i32 q1,q1,q3
vld1.8 {d6},[r2,: 64]
vld1.8 {d10},[r4,: 64]
vsub.i32 q6,q3,q5
vadd.i32 q3,q3,q5
vst1.8 {d4-d5},[r5,: 128]!
vst1.8 {d0-d1},[r6,: 128]!
vst1.8 {d8-d9},[r5,: 128]!
vst1.8 {d2-d3},[r6,: 128]!
vst1.8 d12,[r5,: 64]
vst1.8 d6,[r6,: 64]
add r2,r3,#0
add r4,r3,#240
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#336
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#288
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#288
vshl.i64 q7,q7,#25
add r4,r3,#96
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
add r2,sp,#544
add r4,r3,#144
add r5,r3,#192
vld1.8 {d0-d1},[r2,: 128]
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4-d5},[r5,: 128]!
vzip.i32 q1,q2
vld1.8 {d6-d7},[r4,: 128]!
vld1.8 {d8-d9},[r5,: 128]!
vshl.i32 q5,q1,#1
vzip.i32 q3,q4
vshl.i32 q6,q2,#1
vld1.8 {d14},[r4,: 64]
vshl.i32 q8,q3,#1
vld1.8 {d15},[r5,: 64]
vshl.i32 q9,q4,#1
vmul.i32 d21,d7,d1
vtrn.32 d14,d15
vmul.i32 q11,q4,q0
vmul.i32 q0,q7,q0
vmull.s32 q12,d2,d2
vmlal.s32 q12,d11,d1
vmlal.s32 q12,d12,d0
vmlal.s32 q12,d13,d23
vmlal.s32 q12,d16,d22
vmlal.s32 q12,d7,d21
vmull.s32 q10,d2,d11
vmlal.s32 q10,d4,d1
vmlal.s32 q10,d13,d0
vmlal.s32 q10,d6,d23
vmlal.s32 q10,d17,d22
vmull.s32 q13,d10,d4
vmlal.s32 q13,d11,d3
vmlal.s32 q13,d13,d1
vmlal.s32 q13,d16,d0
vmlal.s32 q13,d17,d23
vmlal.s32 q13,d8,d22
vmull.s32 q1,d10,d5
vmlal.s32 q1,d11,d4
vmlal.s32 q1,d6,d1
vmlal.s32 q1,d17,d0
vmlal.s32 q1,d8,d23
vmull.s32 q14,d10,d6
vmlal.s32 q14,d11,d13
vmlal.s32 q14,d4,d4
vmlal.s32 q14,d17,d1
vmlal.s32 q14,d18,d0
vmlal.s32 q14,d9,d23
vmull.s32 q11,d10,d7
vmlal.s32 q11,d11,d6
vmlal.s32 q11,d12,d5
vmlal.s32 q11,d8,d1
vmlal.s32 q11,d19,d0
vmull.s32 q15,d10,d8
vmlal.s32 q15,d11,d17
vmlal.s32 q15,d12,d6
vmlal.s32 q15,d13,d5
vmlal.s32 q15,d19,d1
vmlal.s32 q15,d14,d0
vmull.s32 q2,d10,d9
vmlal.s32 q2,d11,d8
vmlal.s32 q2,d12,d7
vmlal.s32 q2,d13,d6
vmlal.s32 q2,d14,d1
vmull.s32 q0,d15,d1
vmlal.s32 q0,d10,d14
vmlal.s32 q0,d11,d19
vmlal.s32 q0,d12,d8
vmlal.s32 q0,d13,d17
vmlal.s32 q0,d6,d6
add r2,sp,#512
vld1.8 {d18-d19},[r2,: 128]
vmull.s32 q3,d16,d7
vmlal.s32 q3,d10,d15
vmlal.s32 q3,d11,d14
vmlal.s32 q3,d12,d9
vmlal.s32 q3,d13,d8
add r2,sp,#528
vld1.8 {d8-d9},[r2,: 128]
vadd.i64 q5,q12,q9
vadd.i64 q6,q15,q9
vshr.s64 q5,q5,#26
vshr.s64 q6,q6,#26
vadd.i64 q7,q10,q5
vshl.i64 q5,q5,#26
vadd.i64 q8,q7,q4
vadd.i64 q2,q2,q6
vshl.i64 q6,q6,#26
vadd.i64 q10,q2,q4
vsub.i64 q5,q12,q5
vshr.s64 q8,q8,#25
vsub.i64 q6,q15,q6
vshr.s64 q10,q10,#25
vadd.i64 q12,q13,q8
vshl.i64 q8,q8,#25
vadd.i64 q13,q12,q9
vadd.i64 q0,q0,q10
vsub.i64 q7,q7,q8
vshr.s64 q8,q13,#26
vshl.i64 q10,q10,#25
vadd.i64 q13,q0,q9
vadd.i64 q1,q1,q8
vshl.i64 q8,q8,#26
vadd.i64 q15,q1,q4
vsub.i64 q2,q2,q10
vshr.s64 q10,q13,#26
vsub.i64 q8,q12,q8
vshr.s64 q12,q15,#25
vadd.i64 q3,q3,q10
vshl.i64 q10,q10,#26
vadd.i64 q13,q3,q4
vadd.i64 q14,q14,q12
add r2,r3,#144
vshl.i64 q12,q12,#25
add r4,r3,#192
vadd.i64 q15,q14,q9
add r2,r2,#8
vsub.i64 q0,q0,q10
add r4,r4,#8
vshr.s64 q10,q13,#25
vsub.i64 q1,q1,q12
vshr.s64 q12,q15,#26
vadd.i64 q13,q10,q10
vadd.i64 q11,q11,q12
vtrn.32 d16,d2
vshl.i64 q12,q12,#26
vtrn.32 d17,d3
vadd.i64 q1,q11,q4
vadd.i64 q4,q5,q13
vst1.8 d16,[r2,: 64]!
vshl.i64 q5,q10,#4
vst1.8 d17,[r4,: 64]!
vsub.i64 q8,q14,q12
vshr.s64 q1,q1,#25
vadd.i64 q4,q4,q5
vadd.i64 q5,q6,q1
vshl.i64 q1,q1,#25
vadd.i64 q6,q5,q9
vadd.i64 q4,q4,q10
vshl.i64 q10,q10,#25
vadd.i64 q9,q4,q9
vsub.i64 q1,q11,q1
vshr.s64 q6,q6,#26
vsub.i64 q3,q3,q10
vtrn.32 d16,d2
vshr.s64 q9,q9,#26
vtrn.32 d17,d3
vadd.i64 q1,q2,q6
vst1.8 d16,[r2,: 64]
vshl.i64 q2,q6,#26
vst1.8 d17,[r4,: 64]
vadd.i64 q6,q7,q9
vtrn.32 d0,d6
vshl.i64 q7,q9,#26
vtrn.32 d1,d7
vsub.i64 q2,q5,q2
add r2,r2,#16
vsub.i64 q3,q4,q7
vst1.8 d0,[r2,: 64]
add r4,r4,#16
vst1.8 d1,[r4,: 64]
vtrn.32 d4,d2
vtrn.32 d5,d3
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d6,d12
vtrn.32 d7,d13
vst1.8 d4,[r2,: 64]
vst1.8 d5,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d6,[r2,: 64]
vst1.8 d7,[r4,: 64]
add r2,r3,#336
add r4,r3,#288
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vadd.i32 q0,q0,q1
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4-d5},[r4,: 128]!
vadd.i32 q1,q1,q2
add r5,r3,#288
vld1.8 {d4},[r2,: 64]
vld1.8 {d6},[r4,: 64]
vadd.i32 q2,q2,q3
vst1.8 {d0-d1},[r5,: 128]!
vst1.8 {d2-d3},[r5,: 128]!
vst1.8 d4,[r5,: 64]
add r2,r3,#48
add r4,r3,#144
vld1.8 {d0-d1},[r4,: 128]!
vld1.8 {d2-d3},[r4,: 128]!
vld1.8 {d4},[r4,: 64]
add r4,r3,#288
vld1.8 {d6-d7},[r4,: 128]!
vtrn.32 q0,q3
vld1.8 {d8-d9},[r4,: 128]!
vshl.i32 q5,q0,#4
vtrn.32 q1,q4
vshl.i32 q6,q3,#4
vadd.i32 q5,q5,q0
vadd.i32 q6,q6,q3
vshl.i32 q7,q1,#4
vld1.8 {d5},[r4,: 64]
vshl.i32 q8,q4,#4
vtrn.32 d4,d5
vadd.i32 q7,q7,q1
vadd.i32 q8,q8,q4
vld1.8 {d18-d19},[r2,: 128]!
vshl.i32 q10,q2,#4
vld1.8 {d22-d23},[r2,: 128]!
vadd.i32 q10,q10,q2
vld1.8 {d24},[r2,: 64]
vadd.i32 q5,q5,q0
add r2,r3,#240
vld1.8 {d26-d27},[r2,: 128]!
vadd.i32 q6,q6,q3
vld1.8 {d28-d29},[r2,: 128]!
vadd.i32 q8,q8,q4
vld1.8 {d25},[r2,: 64]
vadd.i32 q10,q10,q2
vtrn.32 q9,q13
vadd.i32 q7,q7,q1
vadd.i32 q5,q5,q0
vtrn.32 q11,q14
vadd.i32 q6,q6,q3
add r2,sp,#560
vadd.i32 q10,q10,q2
vtrn.32 d24,d25
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q6,q13,#1
add r2,sp,#576
vst1.8 {d20-d21},[r2,: 128]
vshl.i32 q10,q14,#1
add r2,sp,#592
vst1.8 {d12-d13},[r2,: 128]
vshl.i32 q15,q12,#1
vadd.i32 q8,q8,q4
vext.32 d10,d31,d30,#0
vadd.i32 q7,q7,q1
add r2,sp,#608
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q8,d18,d5
vmlal.s32 q8,d26,d4
vmlal.s32 q8,d19,d9
vmlal.s32 q8,d27,d3
vmlal.s32 q8,d22,d8
vmlal.s32 q8,d28,d2
vmlal.s32 q8,d23,d7
vmlal.s32 q8,d29,d1
vmlal.s32 q8,d24,d6
vmlal.s32 q8,d25,d0
add r2,sp,#624
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q2,d18,d4
vmlal.s32 q2,d12,d9
vmlal.s32 q2,d13,d8
vmlal.s32 q2,d19,d3
vmlal.s32 q2,d22,d2
vmlal.s32 q2,d23,d1
vmlal.s32 q2,d24,d0
add r2,sp,#640
vst1.8 {d20-d21},[r2,: 128]
vmull.s32 q7,d18,d9
vmlal.s32 q7,d26,d3
vmlal.s32 q7,d19,d8
vmlal.s32 q7,d27,d2
vmlal.s32 q7,d22,d7
vmlal.s32 q7,d28,d1
vmlal.s32 q7,d23,d6
vmlal.s32 q7,d29,d0
add r2,sp,#656
vst1.8 {d10-d11},[r2,: 128]
vmull.s32 q5,d18,d3
vmlal.s32 q5,d19,d2
vmlal.s32 q5,d22,d1
vmlal.s32 q5,d23,d0
vmlal.s32 q5,d12,d8
add r2,sp,#672
vst1.8 {d16-d17},[r2,: 128]
vmull.s32 q4,d18,d8
vmlal.s32 q4,d26,d2
vmlal.s32 q4,d19,d7
vmlal.s32 q4,d27,d1
vmlal.s32 q4,d22,d6
vmlal.s32 q4,d28,d0
vmull.s32 q8,d18,d7
vmlal.s32 q8,d26,d1
vmlal.s32 q8,d19,d6
vmlal.s32 q8,d27,d0
add r2,sp,#576
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q7,d24,d21
vmlal.s32 q7,d25,d20
vmlal.s32 q4,d23,d21
vmlal.s32 q4,d29,d20
vmlal.s32 q8,d22,d21
vmlal.s32 q8,d28,d20
vmlal.s32 q5,d24,d20
add r2,sp,#576
vst1.8 {d14-d15},[r2,: 128]
vmull.s32 q7,d18,d6
vmlal.s32 q7,d26,d0
add r2,sp,#656
vld1.8 {d30-d31},[r2,: 128]
vmlal.s32 q2,d30,d21
vmlal.s32 q7,d19,d21
vmlal.s32 q7,d27,d20
add r2,sp,#624
vld1.8 {d26-d27},[r2,: 128]
vmlal.s32 q4,d25,d27
vmlal.s32 q8,d29,d27
vmlal.s32 q8,d25,d26
vmlal.s32 q7,d28,d27
vmlal.s32 q7,d29,d26
add r2,sp,#608
vld1.8 {d28-d29},[r2,: 128]
vmlal.s32 q4,d24,d29
vmlal.s32 q8,d23,d29
vmlal.s32 q8,d24,d28
vmlal.s32 q7,d22,d29
vmlal.s32 q7,d23,d28
add r2,sp,#608
vst1.8 {d8-d9},[r2,: 128]
add r2,sp,#560
vld1.8 {d8-d9},[r2,: 128]
vmlal.s32 q7,d24,d9
vmlal.s32 q7,d25,d31
vmull.s32 q1,d18,d2
vmlal.s32 q1,d19,d1
vmlal.s32 q1,d22,d0
vmlal.s32 q1,d24,d27
vmlal.s32 q1,d23,d20
vmlal.s32 q1,d12,d7
vmlal.s32 q1,d13,d6
vmull.s32 q6,d18,d1
vmlal.s32 q6,d19,d0
vmlal.s32 q6,d23,d27
vmlal.s32 q6,d22,d20
vmlal.s32 q6,d24,d26
vmull.s32 q0,d18,d0
vmlal.s32 q0,d22,d27
vmlal.s32 q0,d23,d26
vmlal.s32 q0,d24,d31
vmlal.s32 q0,d19,d20
add r2,sp,#640
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q2,d18,d7
vmlal.s32 q2,d19,d6
vmlal.s32 q5,d18,d6
vmlal.s32 q5,d19,d21
vmlal.s32 q1,d18,d21
vmlal.s32 q1,d19,d29
vmlal.s32 q0,d18,d28
vmlal.s32 q0,d19,d9
vmlal.s32 q6,d18,d29
vmlal.s32 q6,d19,d28
add r2,sp,#592
vld1.8 {d18-d19},[r2,: 128]
add r2,sp,#512
vld1.8 {d22-d23},[r2,: 128]
vmlal.s32 q5,d19,d7
vmlal.s32 q0,d18,d21
vmlal.s32 q0,d19,d29
vmlal.s32 q6,d18,d6
add r2,sp,#528
vld1.8 {d6-d7},[r2,: 128]
vmlal.s32 q6,d19,d21
add r2,sp,#576
vld1.8 {d18-d19},[r2,: 128]
vmlal.s32 q0,d30,d8
add r2,sp,#672
vld1.8 {d20-d21},[r2,: 128]
vmlal.s32 q5,d30,d29
add r2,sp,#608
vld1.8 {d24-d25},[r2,: 128]
vmlal.s32 q1,d30,d28
vadd.i64 q13,q0,q11
vadd.i64 q14,q5,q11
vmlal.s32 q6,d30,d9
vshr.s64 q4,q13,#26
vshr.s64 q13,q14,#26
vadd.i64 q7,q7,q4
vshl.i64 q4,q4,#26
vadd.i64 q14,q7,q3
vadd.i64 q9,q9,q13
vshl.i64 q13,q13,#26
vadd.i64 q15,q9,q3
vsub.i64 q0,q0,q4
vshr.s64 q4,q14,#25
vsub.i64 q5,q5,q13
vshr.s64 q13,q15,#25
vadd.i64 q6,q6,q4
vshl.i64 q4,q4,#25
vadd.i64 q14,q6,q11
vadd.i64 q2,q2,q13
vsub.i64 q4,q7,q4
vshr.s64 q7,q14,#26
vshl.i64 q13,q13,#25
vadd.i64 q14,q2,q11
vadd.i64 q8,q8,q7
vshl.i64 q7,q7,#26
vadd.i64 q15,q8,q3
vsub.i64 q9,q9,q13
vshr.s64 q13,q14,#26
vsub.i64 q6,q6,q7
vshr.s64 q7,q15,#25
vadd.i64 q10,q10,q13
vshl.i64 q13,q13,#26
vadd.i64 q14,q10,q3
vadd.i64 q1,q1,q7
add r2,r3,#240
vshl.i64 q7,q7,#25
add r4,r3,#144
vadd.i64 q15,q1,q11
add r2,r2,#8
vsub.i64 q2,q2,q13
add r4,r4,#8
vshr.s64 q13,q14,#25
vsub.i64 q7,q8,q7
vshr.s64 q8,q15,#26
vadd.i64 q14,q13,q13
vadd.i64 q12,q12,q8
vtrn.32 d12,d14
vshl.i64 q8,q8,#26
vtrn.32 d13,d15
vadd.i64 q3,q12,q3
vadd.i64 q0,q0,q14
vst1.8 d12,[r2,: 64]!
vshl.i64 q7,q13,#4
vst1.8 d13,[r4,: 64]!
vsub.i64 q1,q1,q8
vshr.s64 q3,q3,#25
vadd.i64 q0,q0,q7
vadd.i64 q5,q5,q3
vshl.i64 q3,q3,#25
vadd.i64 q6,q5,q11
vadd.i64 q0,q0,q13
vshl.i64 q7,q13,#25
vadd.i64 q8,q0,q11
vsub.i64 q3,q12,q3
vshr.s64 q6,q6,#26
vsub.i64 q7,q10,q7
vtrn.32 d2,d6
vshr.s64 q8,q8,#26
vtrn.32 d3,d7
vadd.i64 q3,q9,q6
vst1.8 d2,[r2,: 64]
vshl.i64 q6,q6,#26
vst1.8 d3,[r4,: 64]
vadd.i64 q1,q4,q8
vtrn.32 d4,d14
vshl.i64 q4,q8,#26
vtrn.32 d5,d15
vsub.i64 q5,q5,q6
add r2,r2,#16
vsub.i64 q0,q0,q4
vst1.8 d4,[r2,: 64]
add r4,r4,#16
vst1.8 d5,[r4,: 64]
vtrn.32 d10,d6
vtrn.32 d11,d7
sub r2,r2,#8
sub r4,r4,#8
vtrn.32 d0,d2
vtrn.32 d1,d3
vst1.8 d10,[r2,: 64]
vst1.8 d11,[r4,: 64]
sub r2,r2,#24
sub r4,r4,#24
vst1.8 d0,[r2,: 64]
vst1.8 d1,[r4,: 64]
ldr r2,[sp,#488]
ldr r4,[sp,#492]
subs r5,r2,#1
bge ._mainloop
add r1,r3,#144
add r2,r3,#336
vld1.8 {d0-d1},[r1,: 128]!
vld1.8 {d2-d3},[r1,: 128]!
vld1.8 {d4},[r1,: 64]
vst1.8 {d0-d1},[r2,: 128]!
vst1.8 {d2-d3},[r2,: 128]!
vst1.8 d4,[r2,: 64]
ldr r1,=0
._invertloop:
add r2,r3,#144
ldr r4,=0
ldr r5,=2
cmp r1,#1
ldreq r5,=1
addeq r2,r3,#336
addeq r4,r3,#48
cmp r1,#2
ldreq r5,=1
addeq r2,r3,#48
cmp r1,#3
ldreq r5,=5
addeq r4,r3,#336
cmp r1,#4
ldreq r5,=10
cmp r1,#5
ldreq r5,=20
cmp r1,#6
ldreq r5,=10
addeq r2,r3,#336
addeq r4,r3,#336
cmp r1,#7
ldreq r5,=50
cmp r1,#8
ldreq r5,=100
cmp r1,#9
ldreq r5,=50
addeq r2,r3,#336
cmp r1,#10
ldreq r5,=5
addeq r2,r3,#48
cmp r1,#11
ldreq r5,=0
addeq r2,r3,#96
add r6,r3,#144
add r7,r3,#288
vld1.8 {d0-d1},[r6,: 128]!
vld1.8 {d2-d3},[r6,: 128]!
vld1.8 {d4},[r6,: 64]
vst1.8 {d0-d1},[r7,: 128]!
vst1.8 {d2-d3},[r7,: 128]!
vst1.8 d4,[r7,: 64]
cmp r5,#0
beq ._skipsquaringloop
._squaringloop:
add r6,r3,#288
add r7,r3,#288
add r8,r3,#288
vmov.i32 q0,#19
vmov.i32 q1,#0
vmov.i32 q2,#1
vzip.i32 q1,q2
vld1.8 {d4-d5},[r7,: 128]!
vld1.8 {d6-d7},[r7,: 128]!
vld1.8 {d9},[r7,: 64]
vld1.8 {d10-d11},[r6,: 128]!
add r7,sp,#416
vld1.8 {d12-d13},[r6,: 128]!
vmul.i32 q7,q2,q0
vld1.8 {d8},[r6,: 64]
vext.32 d17,d11,d10,#1
vmul.i32 q9,q3,q0
vext.32 d16,d10,d8,#1
vshl.u32 q10,q5,q1
vext.32 d22,d14,d4,#1
vext.32 d24,d18,d6,#1
vshl.u32 q13,q6,q1
vshl.u32 d28,d8,d2
vrev64.i32 d22,d22
vmul.i32 d1,d9,d1
vrev64.i32 d24,d24
vext.32 d29,d8,d13,#1
vext.32 d0,d1,d9,#1
vrev64.i32 d0,d0
vext.32 d2,d9,d1,#1
vext.32 d23,d15,d5,#1
vmull.s32 q4,d20,d4
vrev64.i32 d23,d23
vmlal.s32 q4,d21,d1
vrev64.i32 d2,d2
vmlal.s32 q4,d26,d19
vext.32 d3,d5,d15,#1
vmlal.s32 q4,d27,d18
vrev64.i32 d3,d3
vmlal.s32 q4,d28,d15
vext.32 d14,d12,d11,#1
vmull.s32 q5,d16,d23
vext.32 d15,d13,d12,#1
vmlal.s32 q5,d17,d4
vst1.8 d8,[r7,: 64]!
vmlal.s32 q5,d14,d1
vext.32 d12,d9,d8,#0
vmlal.s32 q5,d15,d19
vmov.i64 d13,#0
vmlal.s32 q5,d29,d18
vext.32 d25,d19,d7,#1
vmlal.s32 q6,d20,d5
vrev64.i32 d25,d25
vmlal.s32 q6,d21,d4
vst1.8 d11,[r7,: 64]!
vmlal.s32 q6,d26,d1
vext.32 d9,d10,d10,#0
vmlal.s32 q6,d27,d19
vmov.i64 d8,#0
vmlal.s32 q6,d28,d18
vmlal.s32 q4,d16,d24
vmlal.s32 q4,d17,d5
vmlal.s32 q4,d14,d4
vst1.8 d12,[r7,: 64]!
vmlal.s32 q4,d15,d1
vext.32 d10,d13,d12,#0
vmlal.s32 q4,d29,d19
vmov.i64 d11,#0
vmlal.s32 q5,d20,d6
vmlal.s32 q5,d21,d5
vmlal.s32 q5,d26,d4
vext.32 d13,d8,d8,#0
vmlal.s32 q5,d27,d1
vmov.i64 d12,#0
vmlal.s32 q5,d28,d19
vst1.8 d9,[r7,: 64]!
vmlal.s32 q6,d16,d25
vmlal.s32 q6,d17,d6
vst1.8 d10,[r7,: 64]
vmlal.s32 q6,d14,d5
vext.32 d8,d11,d10,#0
vmlal.s32 q6,d15,d4
vmov.i64 d9,#0
vmlal.s32 q6,d29,d1
vmlal.s32 q4,d20,d7
vmlal.s32 q4,d21,d6
vmlal.s32 q4,d26,d5
vext.32 d11,d12,d12,#0
vmlal.s32 q4,d27,d4
vmov.i64 d10,#0
vmlal.s32 q4,d28,d1
vmlal.s32 q5,d16,d0
sub r6,r7,#32
vmlal.s32 q5,d17,d7
vmlal.s32 q5,d14,d6
vext.32 d30,d9,d8,#0
vmlal.s32 q5,d15,d5
vld1.8 {d31},[r6,: 64]!
vmlal.s32 q5,d29,d4
vmlal.s32 q15,d20,d0
vext.32 d0,d6,d18,#1
vmlal.s32 q15,d21,d25
vrev64.i32 d0,d0
vmlal.s32 q15,d26,d24
vext.32 d1,d7,d19,#1
vext.32 d7,d10,d10,#0
vmlal.s32 q15,d27,d23
vrev64.i32 d1,d1
vld1.8 {d6},[r6,: 64]
vmlal.s32 q15,d28,d22
vmlal.s32 q3,d16,d4
add r6,r6,#24
vmlal.s32 q3,d17,d2
vext.32 d4,d31,d30,#0
vmov d17,d11
vmlal.s32 q3,d14,d1
vext.32 d11,d13,d13,#0
vext.32 d13,d30,d30,#0
vmlal.s32 q3,d15,d0
vext.32 d1,d8,d8,#0
vmlal.s32 q3,d29,d3
vld1.8 {d5},[r6,: 64]
sub r6,r6,#16
vext.32 d10,d6,d6,#0
vmov.i32 q1,#0xffffffff
vshl.i64 q4,q1,#25
add r7,sp,#512
vld1.8 {d14-d15},[r7,: 128]
vadd.i64 q9,q2,q7
vshl.i64 q1,q1,#26
vshr.s64 q10,q9,#26
vld1.8 {d0},[r6,: 64]!
vadd.i64 q5,q5,q10
vand q9,q9,q1
vld1.8 {d16},[r6,: 64]!
add r6,sp,#528
vld1.8 {d20-d21},[r6,: 128]
vadd.i64 q11,q5,q10
vsub.i64 q2,q2,q9
vshr.s64 q9,q11,#25
vext.32 d12,d5,d4,#0
vand q11,q11,q4
vadd.i64 q0,q0,q9
vmov d19,d7
vadd.i64 q3,q0,q7
vsub.i64 q5,q5,q11
vshr.s64 q11,q3,#26
vext.32 d18,d11,d10,#0
vand q3,q3,q1
vadd.i64 q8,q8,q11
vadd.i64 q11,q8,q10
vsub.i64 q0,q0,q3
vshr.s64 q3,q11,#25
vand q11,q11,q4
vadd.i64 q3,q6,q3
vadd.i64 q6,q3,q7
vsub.i64 q8,q8,q11
vshr.s64 q11,q6,#26
vand q6,q6,q1
vadd.i64 q9,q9,q11
vadd.i64 d25,d19,d21
vsub.i64 q3,q3,q6
vshr.s64 d23,d25,#25
vand q4,q12,q4
vadd.i64 d21,d23,d23
vshl.i64 d25,d23,#4
vadd.i64 d21,d21,d23
vadd.i64 d25,d25,d21
vadd.i64 d4,d4,d25
vzip.i32 q0,q8
vadd.i64 d12,d4,d14
add r6,r8,#8
vst1.8 d0,[r6,: 64]
vsub.i64 d19,d19,d9
add r6,r6,#16
vst1.8 d16,[r6,: 64]
vshr.s64 d22,d12,#26
vand q0,q6,q1
vadd.i64 d10,d10,d22
vzip.i32 q3,q9
vsub.i64 d4,d4,d0
sub r6,r6,#8
vst1.8 d6,[r6,: 64]
add r6,r6,#16
vst1.8 d18,[r6,: 64]
vzip.i32 q2,q5
sub r6,r6,#32
vst1.8 d4,[r6,: 64]
subs r5,r5,#1
bhi ._squaringloop
._skipsquaringloop:
mov r2,r2
add r5,r3,#288
add r6,r3,#144
vmov.i32 q0,#19
vmov.i32 q1,#0
vmov.i32 q2,#1
vzip.i32 q1,q2
vld1.8 {d4-d5},[r5,: 128]!
vld1.8 {d6-d7},[r5,: 128]!
vld1.8 {d9},[r5,: 64]
vld1.8 {d10-d11},[r2,: 128]!
add r5,sp,#416
vld1.8 {d12-d13},[r2,: 128]!
vmul.i32 q7,q2,q0
vld1.8 {d8},[r2,: 64]
vext.32 d17,d11,d10,#1
vmul.i32 q9,q3,q0
vext.32 d16,d10,d8,#1
vshl.u32 q10,q5,q1
vext.32 d22,d14,d4,#1
vext.32 d24,d18,d6,#1
vshl.u32 q13,q6,q1
vshl.u32 d28,d8,d2
vrev64.i32 d22,d22
vmul.i32 d1,d9,d1
vrev64.i32 d24,d24
vext.32 d29,d8,d13,#1
vext.32 d0,d1,d9,#1
vrev64.i32 d0,d0
vext.32 d2,d9,d1,#1
vext.32 d23,d15,d5,#1
vmull.s32 q4,d20,d4
vrev64.i32 d23,d23
vmlal.s32 q4,d21,d1
vrev64.i32 d2,d2
vmlal.s32 q4,d26,d19
vext.32 d3,d5,d15,#1
vmlal.s32 q4,d27,d18
vrev64.i32 d3,d3
vmlal.s32 q4,d28,d15
vext.32 d14,d12,d11,#1
vmull.s32 q5,d16,d23
vext.32 d15,d13,d12,#1
vmlal.s32 q5,d17,d4
vst1.8 d8,[r5,: 64]!
vmlal.s32 q5,d14,d1
vext.32 d12,d9,d8,#0
vmlal.s32 q5,d15,d19
vmov.i64 d13,#0
vmlal.s32 q5,d29,d18
vext.32 d25,d19,d7,#1
vmlal.s32 q6,d20,d5
vrev64.i32 d25,d25
vmlal.s32 q6,d21,d4
vst1.8 d11,[r5,: 64]!
vmlal.s32 q6,d26,d1
vext.32 d9,d10,d10,#0
vmlal.s32 q6,d27,d19
vmov.i64 d8,#0
vmlal.s32 q6,d28,d18
vmlal.s32 q4,d16,d24
vmlal.s32 q4,d17,d5
vmlal.s32 q4,d14,d4
vst1.8 d12,[r5,: 64]!
vmlal.s32 q4,d15,d1
vext.32 d10,d13,d12,#0
vmlal.s32 q4,d29,d19
vmov.i64 d11,#0
vmlal.s32 q5,d20,d6
vmlal.s32 q5,d21,d5
vmlal.s32 q5,d26,d4
vext.32 d13,d8,d8,#0
vmlal.s32 q5,d27,d1
vmov.i64 d12,#0
vmlal.s32 q5,d28,d19
vst1.8 d9,[r5,: 64]!
vmlal.s32 q6,d16,d25
vmlal.s32 q6,d17,d6
vst1.8 d10,[r5,: 64]
vmlal.s32 q6,d14,d5
vext.32 d8,d11,d10,#0
vmlal.s32 q6,d15,d4
vmov.i64 d9,#0
vmlal.s32 q6,d29,d1
vmlal.s32 q4,d20,d7
vmlal.s32 q4,d21,d6
vmlal.s32 q4,d26,d5
vext.32 d11,d12,d12,#0
vmlal.s32 q4,d27,d4
vmov.i64 d10,#0
vmlal.s32 q4,d28,d1
vmlal.s32 q5,d16,d0
sub r2,r5,#32
vmlal.s32 q5,d17,d7
vmlal.s32 q5,d14,d6
vext.32 d30,d9,d8,#0
vmlal.s32 q5,d15,d5
vld1.8 {d31},[r2,: 64]!
vmlal.s32 q5,d29,d4
vmlal.s32 q15,d20,d0
vext.32 d0,d6,d18,#1
vmlal.s32 q15,d21,d25
vrev64.i32 d0,d0
vmlal.s32 q15,d26,d24
vext.32 d1,d7,d19,#1
vext.32 d7,d10,d10,#0
vmlal.s32 q15,d27,d23
vrev64.i32 d1,d1
vld1.8 {d6},[r2,: 64]
vmlal.s32 q15,d28,d22
vmlal.s32 q3,d16,d4
add r2,r2,#24
vmlal.s32 q3,d17,d2
vext.32 d4,d31,d30,#0
vmov d17,d11
vmlal.s32 q3,d14,d1
vext.32 d11,d13,d13,#0
vext.32 d13,d30,d30,#0
vmlal.s32 q3,d15,d0
vext.32 d1,d8,d8,#0
vmlal.s32 q3,d29,d3
vld1.8 {d5},[r2,: 64]
sub r2,r2,#16
vext.32 d10,d6,d6,#0
vmov.i32 q1,#0xffffffff
vshl.i64 q4,q1,#25
add r5,sp,#512
vld1.8 {d14-d15},[r5,: 128]
vadd.i64 q9,q2,q7
vshl.i64 q1,q1,#26
vshr.s64 q10,q9,#26
vld1.8 {d0},[r2,: 64]!
vadd.i64 q5,q5,q10
vand q9,q9,q1
vld1.8 {d16},[r2,: 64]!
add r2,sp,#528
vld1.8 {d20-d21},[r2,: 128]
vadd.i64 q11,q5,q10
vsub.i64 q2,q2,q9
vshr.s64 q9,q11,#25
vext.32 d12,d5,d4,#0
vand q11,q11,q4
vadd.i64 q0,q0,q9
vmov d19,d7
vadd.i64 q3,q0,q7
vsub.i64 q5,q5,q11
vshr.s64 q11,q3,#26
vext.32 d18,d11,d10,#0
vand q3,q3,q1
vadd.i64 q8,q8,q11
vadd.i64 q11,q8,q10
vsub.i64 q0,q0,q3
vshr.s64 q3,q11,#25
vand q11,q11,q4
vadd.i64 q3,q6,q3
vadd.i64 q6,q3,q7
vsub.i64 q8,q8,q11
vshr.s64 q11,q6,#26
vand q6,q6,q1
vadd.i64 q9,q9,q11
vadd.i64 d25,d19,d21
vsub.i64 q3,q3,q6
vshr.s64 d23,d25,#25
vand q4,q12,q4
vadd.i64 d21,d23,d23
vshl.i64 d25,d23,#4
vadd.i64 d21,d21,d23
vadd.i64 d25,d25,d21
vadd.i64 d4,d4,d25
vzip.i32 q0,q8
vadd.i64 d12,d4,d14
add r2,r6,#8
vst1.8 d0,[r2,: 64]
vsub.i64 d19,d19,d9
add r2,r2,#16
vst1.8 d16,[r2,: 64]
vshr.s64 d22,d12,#26
vand q0,q6,q1
vadd.i64 d10,d10,d22
vzip.i32 q3,q9
vsub.i64 d4,d4,d0
sub r2,r2,#8
vst1.8 d6,[r2,: 64]
add r2,r2,#16
vst1.8 d18,[r2,: 64]
vzip.i32 q2,q5
sub r2,r2,#32
vst1.8 d4,[r2,: 64]
cmp r4,#0
beq ._skippostcopy
add r2,r3,#144
mov r4,r4
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
vst1.8 d4,[r4,: 64]
._skippostcopy:
cmp r1,#1
bne ._skipfinalcopy
add r2,r3,#288
add r4,r3,#144
vld1.8 {d0-d1},[r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
vld1.8 {d4},[r2,: 64]
vst1.8 {d0-d1},[r4,: 128]!
vst1.8 {d2-d3},[r4,: 128]!
vst1.8 d4,[r4,: 64]
._skipfinalcopy:
add r1,r1,#1
cmp r1,#12
blo ._invertloop
add r1,r3,#144
ldr r2,[r1],#4
ldr r3,[r1],#4
ldr r4,[r1],#4
ldr r5,[r1],#4
ldr r6,[r1],#4
ldr r7,[r1],#4
ldr r8,[r1],#4
ldr r9,[r1],#4
ldr r10,[r1],#4
ldr r1,[r1]
add r11,r1,r1,LSL #4
add r11,r11,r1,LSL #1
add r11,r11,#16777216
mov r11,r11,ASR #25
add r11,r11,r2
mov r11,r11,ASR #26
add r11,r11,r3
mov r11,r11,ASR #25
add r11,r11,r4
mov r11,r11,ASR #26
add r11,r11,r5
mov r11,r11,ASR #25
add r11,r11,r6
mov r11,r11,ASR #26
add r11,r11,r7
mov r11,r11,ASR #25
add r11,r11,r8
mov r11,r11,ASR #26
add r11,r11,r9
mov r11,r11,ASR #25
add r11,r11,r10
mov r11,r11,ASR #26
add r11,r11,r1
mov r11,r11,ASR #25
add r2,r2,r11
add r2,r2,r11,LSL #1
add r2,r2,r11,LSL #4
mov r11,r2,ASR #26
add r3,r3,r11
sub r2,r2,r11,LSL #26
mov r11,r3,ASR #25
add r4,r4,r11
sub r3,r3,r11,LSL #25
mov r11,r4,ASR #26
add r5,r5,r11
sub r4,r4,r11,LSL #26
mov r11,r5,ASR #25
add r6,r6,r11
sub r5,r5,r11,LSL #25
mov r11,r6,ASR #26
add r7,r7,r11
sub r6,r6,r11,LSL #26
mov r11,r7,ASR #25
add r8,r8,r11
sub r7,r7,r11,LSL #25
mov r11,r8,ASR #26
add r9,r9,r11
sub r8,r8,r11,LSL #26
mov r11,r9,ASR #25
add r10,r10,r11
sub r9,r9,r11,LSL #25
mov r11,r10,ASR #26
add r1,r1,r11
sub r10,r10,r11,LSL #26
mov r11,r1,ASR #25
sub r1,r1,r11,LSL #25
add r2,r2,r3,LSL #26
mov r3,r3,LSR #6
add r3,r3,r4,LSL #19
mov r4,r4,LSR #13
add r4,r4,r5,LSL #13
mov r5,r5,LSR #19
add r5,r5,r6,LSL #6
add r6,r7,r8,LSL #25
mov r7,r8,LSR #7
add r7,r7,r9,LSL #19
mov r8,r9,LSR #13
add r8,r8,r10,LSL #12
mov r9,r10,LSR #20
add r1,r9,r1,LSL #6
str r2,[r0],#4
str r3,[r0],#4
str r4,[r0],#4
str r5,[r0],#4
str r6,[r0],#4
str r7,[r0],#4
str r8,[r0],#4
str r1,[r0]
ldrd r4,[sp,#0]
ldrd r6,[sp,#8]
ldrd r8,[sp,#16]
ldrd r10,[sp,#24]
ldr r12,[sp,#480]
ldr r14,[sp,#484]
ldr r0,=0
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
#endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
|
NgeliMrasi/healthstash-prototype | 6,761 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/lzma-sys-0.1.20/xz-5.2/src/liblzma/check/crc64_x86.S | /*
* Speed-optimized CRC64 using slicing-by-four algorithm
*
* This uses only i386 instructions, but it is optimized for i686 and later
* (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
*
* Authors: Igor Pavlov (original CRC32 assembly code)
* Lasse Collin (CRC64 adaptation of the modified CRC32 code)
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* This code needs lzma_crc64_table, which can be created using the
* following C code:
uint64_t lzma_crc64_table[4][256];
void
init_table(void)
{
// ECMA-182
static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);
for (size_t s = 0; s < 4; ++s) {
for (size_t b = 0; b < 256; ++b) {
uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];
for (size_t i = 0; i < 8; ++i) {
if (r & 1)
r = (r >> 1) ^ poly64;
else
r >>= 1;
}
lzma_crc64_table[s][b] = r;
}
}
}
* The prototype of the CRC64 function:
* extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
*/
/*
* On some systems, the functions need to be prefixed. The prefix is
* usually an underscore.
*/
#ifndef __USER_LABEL_PREFIX__
# define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)
/*
* Solaris assembler doesn't have .p2align, and Darwin uses .align
* differently than GNU/Linux and Solaris.
*/
#if defined(__APPLE__) || defined(__MSDOS__)
# define ALIGN(pow2, abs) .align pow2
#else
# define ALIGN(pow2, abs) .align abs
#endif
.text
.globl LZMA_CRC64
#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
&& !defined(__MSDOS__)
.type LZMA_CRC64, @function
#endif
ALIGN(4, 16)
LZMA_CRC64:
	/*
	 * cdecl: uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
	 *
	 * Register usage:
	 * %eax crc LSB
	 * %edx crc MSB
	 * %esi buf
	 * %edi size or buf + size
	 * %ebx lzma_crc64_table
	 * %ebp Table index
	 * %ecx Temporary
	 */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp

	/* Four registers were pushed above plus the return address, so the
	 * first stack argument lives at 0x14(%esp). */
	movl	0x14(%esp), %esi /* buf */
	movl	0x18(%esp), %edi /* size */
	movl	0x1C(%esp), %eax /* crc LSB */
	movl	0x20(%esp), %edx /* crc MSB */

	/*
	 * Store the address of lzma_crc64_table to %ebx. This is needed to
	 * get position-independent code (PIC).
	 *
	 * The PIC macro is defined by libtool, while __PIC__ is defined
	 * by GCC but only on some systems. Testing for both makes it simpler
	 * to test this code without libtool, and keeps the code working also
	 * when built with libtool but using something else than GCC.
	 *
	 * I understood that libtool may define PIC on Windows even though
	 * the code in Windows DLLs is not PIC in sense that it is in ELF
	 * binaries, so we need a separate check to always use the non-PIC
	 * code on Windows.
	 */
#if (!defined(PIC) && !defined(__PIC__)) \
		|| (defined(_WIN32) || defined(__CYGWIN__))
	/* Not PIC */
	movl	$ LZMA_CRC64_TABLE, %ebx
#elif defined(__APPLE__)
	/* Mach-O */
	call	.L_get_pc
.L_pic:
	leal	.L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
	movl	(%ebx), %ebx
#else
	/* ELF */
	call	.L_get_pc
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	movl	LZMA_CRC64_TABLE@GOT(%ebx), %ebx
#endif

	/* Complement the initial value. */
	notl	%eax
	notl	%edx

.L_align:
	/*
	 * Check if there is enough input to use slicing-by-four.
	 * We need eight bytes, because the loop pre-reads four bytes.
	 */
	cmpl	$8, %edi
	jb	.L_rest

	/* Check if we have reached alignment of four bytes. */
	testl	$3, %esi
	jz	.L_slice

	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	/* shrdl shifts the 64-bit CRC held in %edx:%eax right by 8 bits
	 * (the high bits of %edx are cleared separately below). */
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_align

.L_slice:
	/*
	 * If we get here, there's at least eight bytes of aligned input
	 * available. Make %edi multiple of four bytes. Store the possible
	 * remainder over the "size" variable in the argument stack.
	 */
	movl	%edi, 0x18(%esp)
	andl	$-4, %edi
	subl	%edi, 0x18(%esp)

	/*
	 * Let %edi be buf + size - 4 while running the main loop. This way
	 * we can compare for equality to determine when to exit the loop.
	 */
	addl	%esi, %edi
	subl	$4, %edi

	/* Read in the first four aligned bytes. */
	movl	(%esi), %ecx

.L_loop:
	/*
	 * Slicing-by-four: each of the four bytes indexes its own 256-entry
	 * table of uint64_t (8-byte entries, hence the *8 scale and the
	 * 0x800-byte distance between consecutive tables).
	 */
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Check for end of aligned input. */
	cmpl	%edi, %esi

	/*
	 * Copy the next input byte to %ecx. It is slightly faster to
	 * read it here than at the top of the loop.
	 */
	movl	(%esi), %ecx
	jb	.L_loop

	/*
	 * Process the remaining four bytes, which we have already
	 * copied to %ecx.
	 */
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Copy the number of remaining bytes to %edi. */
	movl	0x18(%esp), %edi

.L_rest:
	/* Check for end of input. */
	testl	%edi, %edi
	jz	.L_return

	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_rest

.L_return:
	/* Complement the final value. */
	notl	%eax
	notl	%edx

	/* Restore callee-saved registers (reverse push order) and return
	 * the 64-bit CRC in %edx:%eax per the i386 cdecl convention. */
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret
#if defined(PIC) || defined(__PIC__)
	ALIGN(4, 16)
/* PIC helper: returns in %ebx the address of the instruction following
 * the `call .L_get_pc` (i.e. the caller's %eip), read from the return
 * address slot on the stack. */
.L_get_pc:
	movl	(%esp), %ebx
	ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
/* Mach-O PIC */
.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc64_table$non_lazy_ptr:
.indirect_symbol LZMA_CRC64_TABLE
.long 0
#elif defined(_WIN32) || defined(__CYGWIN__)
# ifdef DLL_EXPORT
/* This is equivalent of __declspec(dllexport). */
.section .drectve
.ascii " -export:lzma_crc64"
# endif
#elif !defined(__MSDOS__)
/* ELF */
.size LZMA_CRC64, .-LZMA_CRC64
#endif
/*
* This is needed to support non-executable stack. It's ugly to
* use __linux__ here, but I don't know a way to detect when
* we are using GNU assembler.
*/
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
#endif
|
NgeliMrasi/healthstash-prototype | 7,228 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/lzma-sys-0.1.20/xz-5.2/src/liblzma/check/crc32_x86.S | /*
* Speed-optimized CRC32 using slicing-by-eight algorithm
*
* This uses only i386 instructions, but it is optimized for i686 and later
* (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). For i586
* (e.g. Pentium), slicing-by-four would be better, and even the C version
* of slicing-by-eight built with gcc -march=i586 tends to be a little bit
* better than this. Very few probably run this code on i586 or older x86
* so this shouldn't be a problem in practice.
*
* Authors: Igor Pavlov (original version)
* Lasse Collin (AT&T syntax, PIC support, better portability)
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* This code needs lzma_crc32_table, which can be created using the
* following C code:
uint32_t lzma_crc32_table[8][256];
void
init_table(void)
{
// IEEE-802.3
static const uint32_t poly32 = UINT32_C(0xEDB88320);
// Castagnoli
// static const uint32_t poly32 = UINT32_C(0x82F63B78);
// Koopman
// static const uint32_t poly32 = UINT32_C(0xEB31D82E);
for (size_t s = 0; s < 8; ++s) {
for (size_t b = 0; b < 256; ++b) {
uint32_t r = s == 0 ? b : lzma_crc32_table[s - 1][b];
for (size_t i = 0; i < 8; ++i) {
if (r & 1)
r = (r >> 1) ^ poly32;
else
r >>= 1;
}
lzma_crc32_table[s][b] = r;
}
}
}
* The prototype of the CRC32 function:
* extern uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc);
*/
/*
* On some systems, the functions need to be prefixed. The prefix is
* usually an underscore.
*/
#ifndef __USER_LABEL_PREFIX__
# define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC32 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32)
#define LZMA_CRC32_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_table)
/*
* Solaris assembler doesn't have .p2align, and Darwin uses .align
* differently than GNU/Linux and Solaris.
*/
#if defined(__APPLE__) || defined(__MSDOS__)
# define ALIGN(pow2, abs) .align pow2
#else
# define ALIGN(pow2, abs) .align abs
#endif
.text
.globl LZMA_CRC32
#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
&& !defined(__MSDOS__)
.type LZMA_CRC32, @function
#endif
ALIGN(4, 16)
LZMA_CRC32:
	/*
	 * cdecl: uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
	 *
	 * Register usage:
	 * %eax crc
	 * %esi buf
	 * %edi size or buf + size
	 * %ebx lzma_crc32_table
	 * %ebp Table index
	 * %ecx Temporary
	 * %edx Temporary
	 */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp

	/* Four registers were pushed above plus the return address, so the
	 * first stack argument lives at 0x14(%esp). */
	movl	0x14(%esp), %esi /* buf */
	movl	0x18(%esp), %edi /* size */
	movl	0x1C(%esp), %eax /* crc */

	/*
	 * Store the address of lzma_crc32_table to %ebx. This is needed to
	 * get position-independent code (PIC).
	 *
	 * The PIC macro is defined by libtool, while __PIC__ is defined
	 * by GCC but only on some systems. Testing for both makes it simpler
	 * to test this code without libtool, and keeps the code working also
	 * when built with libtool but using something else than GCC.
	 *
	 * I understood that libtool may define PIC on Windows even though
	 * the code in Windows DLLs is not PIC in sense that it is in ELF
	 * binaries, so we need a separate check to always use the non-PIC
	 * code on Windows.
	 */
#if (!defined(PIC) && !defined(__PIC__)) \
		|| (defined(_WIN32) || defined(__CYGWIN__))
	/* Not PIC */
	movl	$ LZMA_CRC32_TABLE, %ebx
#elif defined(__APPLE__)
	/* Mach-O */
	call	.L_get_pc
.L_pic:
	leal	.L_lzma_crc32_table$non_lazy_ptr-.L_pic(%ebx), %ebx
	movl	(%ebx), %ebx
#else
	/* ELF */
	call	.L_get_pc
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	movl	LZMA_CRC32_TABLE@GOT(%ebx), %ebx
#endif

	/* Complement the initial value. */
	notl	%eax

	ALIGN(4, 16)
.L_align:
	/*
	 * Check if there is enough input to use slicing-by-eight.
	 * We need 16 bytes, because the loop pre-reads eight bytes.
	 */
	cmpl	$16, %edi
	jb	.L_rest

	/* Check if we have reached alignment of eight bytes. */
	testl	$7, %esi
	jz	.L_slice

	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrl	$8, %eax
	xorl	(%ebx, %ebp, 4), %eax
	decl	%edi
	jmp	.L_align

	ALIGN(2, 4)
.L_slice:
	/*
	 * If we get here, there's at least 16 bytes of aligned input
	 * available. Make %edi multiple of eight bytes. Store the possible
	 * remainder over the "size" variable in the argument stack.
	 */
	movl	%edi, 0x18(%esp)
	andl	$-8, %edi
	subl	%edi, 0x18(%esp)

	/*
	 * Let %edi be buf + size - 8 while running the main loop. This way
	 * we can compare for equality to determine when to exit the loop.
	 */
	addl	%esi, %edi
	subl	$8, %edi

	/* Read in the first eight aligned bytes. */
	xorl	(%esi), %eax
	movl	4(%esi), %ecx
	movzbl	%cl, %ebp

.L_loop:
	/*
	 * Slicing-by-eight: each of the eight bytes indexes its own
	 * 256-entry uint32_t table (0x400 bytes apart).
	 */
	movl	0x0C00(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	0x0800(%ebx, %ebp, 4), %edx
	shrl	$16, %ecx
	xorl	8(%esi), %edx
	movzbl	%cl, %ebp
	xorl	0x0400(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	(%ebx, %ebp, 4), %edx
	movzbl	%al, %ebp

	/*
	 * Read the next four bytes, for which the CRC is calculated
	 * on the next iteration of the loop.
	 */
	movl	12(%esi), %ecx

	xorl	0x1C00(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	shrl	$16, %eax
	xorl	0x1800(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	movzbl	%al, %eax
	movl	0x1400(%ebx, %eax, 4), %eax
	addl	$8, %esi
	xorl	%edx, %eax
	xorl	0x1000(%ebx, %ebp, 4), %eax

	/* Check for end of aligned input. */
	cmpl	%edi, %esi
	movzbl	%cl, %ebp
	jne	.L_loop

	/*
	 * Process the remaining eight bytes, which we have already
	 * copied to %ecx and %edx.
	 */
	movl	0x0C00(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	0x0800(%ebx, %ebp, 4), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0400(%ebx, %ebp, 4), %edx
	movzbl	%ch, %ebp
	xorl	(%ebx, %ebp, 4), %edx
	movzbl	%al, %ebp
	xorl	0x1C00(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	shrl	$16, %eax
	xorl	0x1800(%ebx, %ebp, 4), %edx
	movzbl	%ah, %ebp
	movzbl	%al, %eax
	movl	0x1400(%ebx, %eax, 4), %eax
	addl	$8, %esi
	xorl	%edx, %eax
	xorl	0x1000(%ebx, %ebp, 4), %eax

	/* Copy the number of remaining bytes to %edi. */
	movl	0x18(%esp), %edi

.L_rest:
	/* Check for end of input. */
	testl	%edi, %edi
	jz	.L_return

	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrl	$8, %eax
	xorl	(%ebx, %ebp, 4), %eax
	decl	%edi
	jmp	.L_rest

.L_return:
	/* Complement the final value. */
	notl	%eax

	/* Restore callee-saved registers (reverse push order); result is
	 * returned in %eax per the i386 cdecl convention. */
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret
#if defined(PIC) || defined(__PIC__)
	ALIGN(4, 16)
/* PIC helper: returns in %ebx the address of the instruction following
 * the `call .L_get_pc` (i.e. the caller's %eip), read from the return
 * address slot on the stack. */
.L_get_pc:
	movl	(%esp), %ebx
	ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
/* Mach-O PIC */
.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc32_table$non_lazy_ptr:
.indirect_symbol LZMA_CRC32_TABLE
.long 0
#elif defined(_WIN32) || defined(__CYGWIN__)
# ifdef DLL_EXPORT
/* This is equivalent of __declspec(dllexport). */
.section .drectve
.ascii " -export:lzma_crc32"
# endif
#elif !defined(__MSDOS__)
/* ELF */
.size LZMA_CRC32, .-LZMA_CRC32
#endif
/*
* This is needed to support non-executable stack. It's ugly to
* use __linux__ here, but I don't know a way to detect when
* we are using GNU assembler.
*/
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
#endif
|
NIBRSYK16/wasmtime-as-lib | 4,052 | src/runtime/vm/arch/s390x.S | // Currently `global_asm!` isn't stable on s390x, so this is an external
// assembler file built with the `build.rs`.
.machine z13
.text

// NOTE(review): host_to_wasm_trampoline is declared here (.hidden /
// .globl / .type / .p2align) but no definition follows anywhere in
// this file — confirm whether these directives are vestigial or the
// definition was removed by mistake.
.hidden host_to_wasm_trampoline
.globl host_to_wasm_trampoline
.type host_to_wasm_trampoline,@function
.p2align 2

// Token-pasting helpers: VERSIONED_SYMBOL(x) expands to
// x ## VERSIONED_SUFFIX, where VERSIONED_SUFFIX is supplied externally
// (presumably by the build script — not visible in this file).
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a , b)
#define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
/*
 * Emits one hidden, exported trampoline function for a libcall. The
 * trampoline records the caller's frame pointer and return address into
 * the VMRuntimeLimits structure, then tail-calls the Rust-side
 * implementation (libcall_impl) with all argument registers untouched.
 */
#define LIBCALL_TRAMPOLINE(libcall, libcall_impl) \
    .hidden VERSIONED_SYMBOL(libcall) ; \
    .globl VERSIONED_SYMBOL(libcall) ; \
    .type VERSIONED_SYMBOL(libcall),@function ; \
    .p2align 2 ; \
VERSIONED_SYMBOL(libcall): ; \
    .cfi_startproc ; \
    \
    /* Load the pointer to `VMRuntimeLimits` in `%r1`. */ \
    /* (%r2 is the first argument register in the s390x ELF ABI; */ \
    /* presumably it carries the vmctx pointer — confirm offset 8.) */ \
    lg %r1, 8(%r2) ; \
    \
    /* Store the last Wasm FP into the `last_wasm_exit_fp` in the limits. */ \
    /* (Loaded from 0(%r15), presumably the saved back chain.) */ \
    lg %r0, 0(%r15) ; \
    stg %r0, 24(%r1) ; \
    \
    /* Store the last Wasm PC into the `last_wasm_exit_pc` in the limits. */ \
    /* (%r14 holds this call's return address per the s390x ABI.) */ \
    stg %r14, 32(%r1) ; \
    \
    /* Tail call to the actual implementation of this libcall. */ \
    jg VERSIONED_SYMBOL(libcall_impl) ; \
    \
    .cfi_endproc ; \
    .size VERSIONED_SYMBOL(libcall),.-VERSIONED_SYMBOL(libcall)
// One trampoline per libcall. The second macro argument is the symbol
// of the Rust-side implementation that the trampoline tail-calls.
LIBCALL_TRAMPOLINE(memory32_grow, impl_memory32_grow)
LIBCALL_TRAMPOLINE(table_grow_func_ref, impl_table_grow_func_ref)
LIBCALL_TRAMPOLINE(table_grow_externref, impl_table_grow_externref)
LIBCALL_TRAMPOLINE(table_fill_func_ref, impl_table_fill_func_ref)
LIBCALL_TRAMPOLINE(table_fill_externref, impl_table_fill_externref)
LIBCALL_TRAMPOLINE(table_copy, impl_table_copy)
LIBCALL_TRAMPOLINE(table_init, impl_table_init)
LIBCALL_TRAMPOLINE(elem_drop, impl_elem_drop)
LIBCALL_TRAMPOLINE(memory_copy, impl_memory_copy)
LIBCALL_TRAMPOLINE(memory_fill, impl_memory_fill)
LIBCALL_TRAMPOLINE(memory_init, impl_memory_init)
LIBCALL_TRAMPOLINE(ref_func, impl_ref_func)
LIBCALL_TRAMPOLINE(data_drop, impl_data_drop)
LIBCALL_TRAMPOLINE(table_get_lazy_init_func_ref, impl_table_get_lazy_init_func_ref)
LIBCALL_TRAMPOLINE(drop_gc_ref, impl_drop_gc_ref)
// Fix: this invocation was `LIBCALL_TRAMPOLINE(gc, gc)`, which made the
// trampoline's `jg VERSIONED_SYMBOL(gc)` jump back to its own label
// (an infinite loop) instead of tail-calling the implementation. Every
// other libcall follows the `impl_*` naming convention.
LIBCALL_TRAMPOLINE(gc, impl_gc)
LIBCALL_TRAMPOLINE(gc_ref_global_get, impl_gc_ref_global_get)
LIBCALL_TRAMPOLINE(gc_ref_global_set, impl_gc_ref_global_set)
LIBCALL_TRAMPOLINE(memory_atomic_notify, impl_memory_atomic_notify)
LIBCALL_TRAMPOLINE(memory_atomic_wait32, impl_memory_atomic_wait32)
LIBCALL_TRAMPOLINE(memory_atomic_wait64, impl_memory_atomic_wait64)
LIBCALL_TRAMPOLINE(out_of_gas, impl_out_of_gas)
LIBCALL_TRAMPOLINE(new_epoch, impl_new_epoch)
LIBCALL_TRAMPOLINE(check_malloc, impl_check_malloc)
LIBCALL_TRAMPOLINE(check_free, impl_check_free)
LIBCALL_TRAMPOLINE(check_load, impl_check_load)
LIBCALL_TRAMPOLINE(check_store, impl_check_store)
LIBCALL_TRAMPOLINE(malloc_start, impl_malloc_start)
LIBCALL_TRAMPOLINE(free_start, impl_free_start)
LIBCALL_TRAMPOLINE(update_stack_pointer, impl_update_stack_pointer)
LIBCALL_TRAMPOLINE(update_mem_size, impl_update_mem_size)
|
NickWentworth/os | 5,159 | src/vec.s | .globl _vector_table
.globl _load_exception_frame
/*
Exception Frame Layout
All entries are 8-byte values
0x000 : x0, regs[0]
0x008 : x1, regs[1]
...
0x0F0 : x30, regs[30]
0x0F8 : sp
0x100 : elr
0x108 : spsr
0x110 : esr
0x118 : far
0x120 : (kind)
*/
// Byte offsets into the exception frame (see layout diagram above):
// x0..x30 at 0x000..0x0F0, then sp, elr, spsr, esr, far, kind.
.equ REG_OFFSET, 0x000
.equ SP_OFFSET, 0x0F8
.equ ELR_OFFSET, 0x100
.equ SPSR_OFFSET, 0x108
.equ ESR_OFFSET, 0x110
.equ FAR_OFFSET, 0x118
.equ KIND_OFFSET, 0x120
// Total frame size: 296 = 0x128 = KIND_OFFSET + 8.
// NOTE(review): 296 is a multiple of 8 but not of 16; `sub sp, sp, #296`
// leaves sp 16-byte-misaligned, which faults on sp-relative accesses if
// SCTLR_EL1.SA is set — confirm, or round up to 304.
.equ FRAME_SIZE, 296
// Emits one vector-table entry: allocates an exception frame, stashes
// x0, loads the entry's kind constant into x0, and branches to the
// shared register-saving code.
.macro vec_item kind
    // Each AArch64 vector table entry gets 0x80 bytes; align to that.
    .balign 0x80
    // Allocate memory for the exception frame.
    sub sp, sp, #FRAME_SIZE
    // Save x0 first so the register is free to carry the exception kind.
    str x0, [sp, #REG_OFFSET]
    mov x0, #\kind
    // Branch to the remaining store_exception_frame logic (x0 = kind).
    b store_exception_frame
.endm
store_exception_frame:
    // On entry (from a vec_item stub): the frame is already allocated
    // on the stack, the original x0 is saved at REG_OFFSET, and x0 now
    // holds the exception kind.
    //
    // (A disabled block of `mov xN, #N` dummy stores used to sit here,
    // kept as a test aid for verifying the ExceptionFrame struct layout;
    // it was commented out because x0 could no longer take part — the
    // register already carries the kind at this point.)

    // Store the remaining x1-x30 registers into the frame.
    stp x1, x2, [sp, #REG_OFFSET + 0x08]
    stp x3, x4, [sp, #REG_OFFSET + 0x18]
    stp x5, x6, [sp, #REG_OFFSET + 0x28]
    stp x7, x8, [sp, #REG_OFFSET + 0x38]
    stp x9, x10, [sp, #REG_OFFSET + 0x48]
    stp x11, x12, [sp, #REG_OFFSET + 0x58]
    stp x13, x14, [sp, #REG_OFFSET + 0x68]
    stp x15, x16, [sp, #REG_OFFSET + 0x78]
    stp x17, x18, [sp, #REG_OFFSET + 0x88]
    stp x19, x20, [sp, #REG_OFFSET + 0x98]
    stp x21, x22, [sp, #REG_OFFSET + 0xA8]
    stp x23, x24, [sp, #REG_OFFSET + 0xB8]
    stp x25, x26, [sp, #REG_OFFSET + 0xC8]
    stp x27, x28, [sp, #REG_OFFSET + 0xD8]
    stp x29, x30, [sp, #REG_OFFSET + 0xE8]

    // Store the exception kind value (currently in x0).
    str x0, [sp, #KIND_OFFSET]

    // Store the remaining system registers, using x0 as scratch.
    // NOTE(review): this records the post-subtraction sp (the frame
    // address itself); the pre-exception sp would be sp + FRAME_SIZE —
    // confirm which value the handler expects.
    mov x0, sp
    str x0, [sp, #SP_OFFSET]
    mrs x0, ELR_EL1
    str x0, [sp, #ELR_OFFSET]
    mrs x0, SPSR_EL1
    str x0, [sp, #SPSR_OFFSET]
    mrs x0, ESR_EL1
    str x0, [sp, #ESR_OFFSET]
    mrs x0, FAR_EL1
    str x0, [sp, #FAR_OFFSET]

    // Pass a pointer to the exception frame in x0 (first argument per
    // the AAPCS64 C calling convention) and call the Rust handler;
    // on return, execution falls through into _load_exception_frame.
    mov x0, sp
    bl _handle_exception
// Restores machine state from the exception frame at sp and performs
// the exception return. Exported (declared .globl above), so it can
// also be entered directly with sp pointing at a valid frame.
_load_exception_frame:
    // Restore exception-return system registers from the frame.
    ldr x0, [sp, #ELR_OFFSET]
    msr ELR_EL1, x0
    ldr x0, [sp, #SPSR_OFFSET]
    msr SPSR_EL1, x0
    // NOTE(review): ESR_EL1/FAR_EL1 are syndrome registers that eret
    // does not consume — confirm writing them back is intentional.
    ldr x0, [sp, #ESR_OFFSET]
    msr ESR_EL1, x0
    ldr x0, [sp, #FAR_OFFSET]
    msr FAR_EL1, x0
    // Restore general-purpose registers x0-x30.
    // (The saved sp/kind fields are not reloaded; kind doesn't matter here.)
    ldp x0, x1, [sp, #REG_OFFSET + 0x00]
    ldp x2, x3, [sp, #REG_OFFSET + 0x10]
    ldp x4, x5, [sp, #REG_OFFSET + 0x20]
    ldp x6, x7, [sp, #REG_OFFSET + 0x30]
    ldp x8, x9, [sp, #REG_OFFSET + 0x40]
    ldp x10, x11, [sp, #REG_OFFSET + 0x50]
    ldp x12, x13, [sp, #REG_OFFSET + 0x60]
    ldp x14, x15, [sp, #REG_OFFSET + 0x70]
    ldp x16, x17, [sp, #REG_OFFSET + 0x80]
    ldp x18, x19, [sp, #REG_OFFSET + 0x90]
    ldp x20, x21, [sp, #REG_OFFSET + 0xA0]
    ldp x22, x23, [sp, #REG_OFFSET + 0xB0]
    ldp x24, x25, [sp, #REG_OFFSET + 0xC0]
    ldp x26, x27, [sp, #REG_OFFSET + 0xD0]
    ldp x28, x29, [sp, #REG_OFFSET + 0xE0]
    ldr x30, [sp, #REG_OFFSET + 0xF0]
    // Deallocate frame from stack and return from the exception.
    add sp, sp, #FRAME_SIZE
    eret
.balign 2048 // VBAR_EL1 requires the vector table base to be 2 KB aligned
_vector_table:
    // Each entry passes a kind value to the kernel:
    //   bits 3:2 describe where the exception originated
    //   bits 1:0 denote the exception type (sync/irq/fiq/serror)

    // Current EL with SP_EL0
    vec_item 0 // synchronous
    vec_item 1 // irq
    vec_item 2 // fiq
    vec_item 3 // serror

    // Current EL with SP_ELx
    vec_item 4 // synchronous
    vec_item 5 // irq
    vec_item 6 // fiq
    vec_item 7 // serror

    // Lower EL using aarch64
    vec_item 8 // synchronous
    vec_item 9 // irq
    vec_item 10 // fiq
    vec_item 11 // serror

    // Lower EL using aarch32
    vec_item 12 // synchronous
    vec_item 13 // irq
    vec_item 14 // fiq
    vec_item 15 // serror
|
NickWentworth/os | 2,908 | src/boot.s | .section ".text.boot"
.globl _start
_start:
    // Halt all but the main processor: MPIDR_EL1 bits [1:0] (Aff0)
    // give the core number; only core 0 continues booting.
    mrs x5, MPIDR_EL1
    and x5, x5, #0b11
    cbnz x5, halt
el3_entry:
    // Verify we are currently in EL3 (CurrentEL bits [3:2] hold the
    // exception level); if already lower, skip EL3 configuration.
    mrs x5, CurrentEL
    lsr x5, x5, #2
    cmp x5, #3
    blt el2_entry
    // Configure EL3 system registers (__SCR_EL3 is a literal-pool
    // constant defined elsewhere — not visible in this file).
    ldr x5, __SCR_EL3
    msr SCR_EL3, x5
    // Drop to EL2 via a synthesized exception return.
    ldr x5, __SPSR_EL3
    msr SPSR_EL3, x5
    adr x5, el2_entry
    msr ELR_EL3, x5
    eret
el2_entry:
    // Verify we are currently in EL2; if already lower, skip EL2 setup.
    mrs x5, CurrentEL
    lsr x5, x5, #2
    cmp x5, #2
    blt el1_entry
    // Configure EL2 system registers (__HCR_EL2 is a literal-pool
    // constant defined elsewhere).
    ldr x5, __HCR_EL2
    msr HCR_EL2, x5
    // Drop to EL1 via a synthesized exception return.
    ldr x5, __SPSR_EL2
    msr SPSR_EL2, x5
    adr x5, el1_entry
    msr ELR_EL2, x5
    eret
el1_entry:
    // Configure EL1 system registers; the MMU stays disabled until the
    // translation tables below have been written.
    ldr x5, __SCTLR_EL1_MMU_DISABLED
    msr SCTLR_EL1, x5
    ldr x5, __CPACR_EL1
    msr CPACR_EL1, x5
    ldr x5, __CNTP_CTL_EL0
    msr CNTP_CTL_EL0, x5
    msr DAIFCLR, #0b1111 // enable all interrupts (clear D, A, I, F masks)
    // Install the exception vector table.
    // NOTE(review): interrupts are unmasked before VBAR_EL1 is written;
    // an interrupt taken in that window would use a stale vector base —
    // consider setting VBAR_EL1 first.
    ldr x5, =_vector_table
    msr VBAR_EL1, x5
    // Initialize the stack to grow downward from the kernel load address.
    ldr x5, =_start
    mov sp, x5
    // Clear .bss, 8 bytes per iteration.
    // NOTE(review): assumes __bss_size counts 8-byte doublewords, not
    // bytes — confirm against the linker script.
    ldr x5, =__bss_start
    ldr w6, =__bss_size
1:  cbz w6, 2f
    str xzr, [x5], #8
    sub w6, w6, #1
    cbnz w6, 1b
2:  // Set up virtual-address-related system registers.
    ldr x5, __MAIR_EL1
    msr MAIR_EL1, x5
    ldr x5, __TCR_EL1
    msr TCR_EL1, x5
    // Point both translation-table base registers at the L0 table, then
    // link its first entry to the L1 table.
    adr x5, __L0_TABLE
    msr TTBR0_EL1, x5
    msr TTBR1_EL1, x5
    adr x6, __L1_TABLE
    orr x7, x6, #0b11 // mark L0 entry as table descriptor (valid + table)
    str x7, [x5]
    // Identity-map the full physical memory space (0GB - 4GB) with four
    // 1 GB L1 entries; attributes come from __KERNEL_IDENTITY_MAP_ATTR.
    ldr x5, __KERNEL_IDENTITY_MAP_ATTR
    mov x7, #0x00000000
    orr x8, x5, x7 // 0GB - 1GB
    str x8, [x6, #0]
    mov x7, #0x40000000
    orr x8, x5, x7 // 1GB - 2GB
    str x8, [x6, #8]
    mov x7, #0x80000000
    orr x8, x5, x7 // 2GB - 3GB
    str x8, [x6, #16]
    mov x7, #0xC0000000
    orr x8, x5, x7 // 3GB - 4GB
    str x8, [x6, #24]
    // Enable the MMU.
    ldr x5, __SCTLR_EL1_MMU_ENABLED
    msr SCTLR_EL1, x5
    isb
    // Branch to an absolute (link-time, i.e. virtual) address.
    ldr x5, =virtual_addr_jump
    br x5
virtual_addr_jump:
// pc is now going off of virtual addresses, safe to reset TTBR0 identity map
msr TTBR0_EL1, xzr
isb
// and jump to rust code
b _kernel_main
// loop forever, send extra cpus here
// TODO: figure out what to do with them
halt:
wfe
b halt
|
Night-commits/comprehensive-rust | 4,676 | src/bare-metal/aps/examples/src/exceptions.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Saves the volatile registers onto the stack. This currently takes
* 14 instructions, so it can be used in exception handlers with 18
* instructions left.
*
* On return, x0 and x1 are initialised to elr_el2 and spsr_el2
* respectively, which can be used as the first and second arguments
* of a subsequent call.
*/
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
* Save elr_el1 & spsr_el1. This such that we can take nested
* exception and still be able to unwind.
*/
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
/**
* Restores the volatile registers from the stack. This currently
* takes 14 instructions, so it can be used in exception handlers
* while still leaving 18 instructions left; if paired with
* save_volatile_to_stack, there are 4 instructions to spare.
*/
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/*
* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch.
*/
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
* This is a generic handler for exceptions taken at the current EL
* while using SP0. It behaves similarly to the SPx case by first
* switching to SPx, doing the work, then switching back to SP0 before
* returning.
*
* Switching to SPx and calling the Rust handler takes 16
* instructions. To restore and return we need an additional 16
* instructions, so we can implement the whole handler within the
* allotted 32 instructions.
*
*/
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
* This is a generic handler for exceptions taken at the current EL
* while using SPx. It saves volatile registers, calls the Rust
* handler, restores volatile registers, then returns.
*
* This also works for exceptions taken from EL0, if we don't care
* about non-volatile registers.
*
* Saving state and jumping to the Rust handler takes 15 instructions,
* and restoring and returning also takes 15 instructions, so we can
* fit the whole handler in 30 instructions, under the limit of 32.
*/
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
Night-commits/comprehensive-rust | 1,445 | src/bare-metal/aps/examples/src/idmap.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad .L_BLOCK_MEM | 0x40000000 // 1 GiB of DRAM
.fill 254, 8, 0x0 // 254 GiB of unmapped VA space
.quad .L_BLOCK_DEV | 0x4000000000 // 1 GiB of device mappings
.fill 255, 8, 0x0 // 255 GiB of remaining VA space
|
Night-commits/comprehensive-rust | 4,768 | src/bare-metal/aps/examples/src/entry.S | /*
* Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.macro adr_l, reg:req, sym:req
adrp \reg, \sym
add \reg, \reg, :lo12:\sym
.endm
.macro mov_i, reg:req, imm:req
movz \reg, :abs_g3:\imm
movk \reg, :abs_g2_nc:\imm
movk \reg, :abs_g1_nc:\imm
movk \reg, :abs_g0_nc:\imm
.endm
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/*
* Disable translation table walk for TTBR1_EL1, generating a
* translation fault instead.
*/
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
* Translation table walks for TTBR0_EL1 are outer write-back
* read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
* Translation table walks for TTBR0_EL1 are inner write-back
* read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/*
* Privileged Access Never is unchanged on taking an exception to EL1.
*/
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1
// ANCHOR: entry
/**
* This is a generic entry point for an image. It carries out the
* operations required to prepare the loaded image to be run.
* Specifically, it
*
* - sets up the MMU with an identity map of virtual to physical
* addresses, and enables caching
* - enables floating point
* - zeroes the bss section using registers x25 and above
* - prepares the stack, pointing to a section within the image
* - sets up the exception vector
* - branches to the Rust `main` function
*
* It preserves x0-x3 for the Rust entry point, as these may contain
* boot parameters.
*/
.section .init.entry, "ax"
.global entry
entry:
/*
* Load and apply the memory management configuration, ready to
* enable MMU and caches.
*/
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
* Ensure everything before this point has completed, then
* invalidate any potentially stale local TLB entries before they
* start being used.
*/
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
* Configure sctlr_el1 to enable MMU and cache and don't proceed
* until this has completed.
*/
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1. */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Prepare the stack. */
adr_l x30, boot_stack_end
mov sp, x30
/* Set up exception vector. */
adr x30, vector_table_el1
msr vbar_el1, x30
/* Call into Rust code. */
bl main
/* Loop forever waiting for interrupts. */
2: wfi
b 2b
|
NightWatcher314/zos | 1,494 | os/src/link_app.S |
.align 3
.section .data
.global _num_app
_num_app:
.quad 7
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_6_end
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/00power_3"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/01power_5"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/02power_7"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/03sleep"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/04load_fault"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/05store_fault"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sbrk_test"
app_6_end:
|
NightWatcher314/zos | 1,640 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
NitroA/mtkclient | 7,195 | src/da_x/common/start.S | /* Copyright 2024 (c) B.Kerler */
/* Use of this source code is governed by a GPLv3 license, see LICENSE.txt. */
.syntax unified
.code 32
.global start
.section .text.start
start:
add r3, pc, #1
bx r3
.global apmcu_dcache_clean_invalidate
.section .text
.type apmcu_dcache_clean_invalidate,%function
apmcu_dcache_clean_invalidate:
push {r4,r5,r7,r9,r10,r11}
dmb /* ensure ordering with previous memory accesses */
mrc p15, 1, r0, c0, c0, 1 /* read clidr */
ands r3, r0, #0x7000000 /* extract loc from clidr */
mov r3, r3, lsr #23 /* left align loc bit field */
beq ci_finished /* if loc is 0, then no need to clean */
mov r10, #0 /* start clean at cache level 0 */
ci_loop1:
add r2, r10, r10, lsr #1 /* work out 3x current cache level */
mov r1, r0, lsr r2 /* extract cache type bits from clidr */
and r1, r1, #7 /* mask of the bits for current cache only */
cmp r1, #2 /* see what cache we have at this level */
blt ci_skip /* skip if no cache, or just i-cache */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
isb /* isb to sych the new cssr&csidr */
mrc p15, 1, r1, c0, c0, 0 /* read the new csidr */
and r2, r1, #7 /* extract the length of the cache lines */
add r2, r2, #4 /* add 4 (line length offset) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* find maximum number on the way size */
clz r5, r4 /* find bit position of way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* extract max number of the index size */
ci_loop2:
mov r9, r4 /* create working copy of max way size */
ci_loop3:
orr r11, r10, r9, lsl r5 /* factor way and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor index number into r11 */
mcr p15, 0, r11, c7, c14, 2 /* clean & invalidate by set/way */
subs r9, r9, #1 /* decrement the way */
bge ci_loop3
subs r7, r7, #1 /* decrement the index */
bge ci_loop2
ci_skip:
add r10, r10, #2 /* increment cache number */
cmp r3, r10
bgt ci_loop1
ci_finished:
mov r10, #0 /* swith back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
pop {r4,r5,r7,r9,r10,r11}
bx lr
.global apmcu_dcache_invalidate
.section .text
.type apmcu_dcache_invalidate,%function
apmcu_dcache_invalidate:
push {r4,r5,r7,r9,r10,r11}
dmb /* ensure ordering with previous memory accesses */
mrc p15, 1, r0, c0, c0, 1 /* read clidr */
ands r3, r0, #0x7000000 /* extract loc from clidr */
mov r3, r3, lsr #23 /* left align loc bit field */
beq cii_finished /* if loc is 0, then no need to clean */
mov r10, #0 /* start clean at cache level 0 */
cii_loop1:
add r2, r10, r10, lsr #1 /* work out 3x current cache level */
mov r1, r0, lsr r2 /* extract cache type bits from clidr */
and r1, r1, #7 /* mask of the bits for current cache only */
cmp r1, #2 /* see what cache we have at this level */
blt cii_skip /* skip if no cache, or just i-cache */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
isb /* isb to sych the new cssr&csidr */
mrc p15, 1, r1, c0, c0, 0 /* read the new csidr */
and r2, r1, #7 /* extract the length of the cache lines */
add r2, r2, #4 /* add 4 (line length offset) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* find maximum number on the way size */
clz r5, r4 /* find bit position of way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* extract max number of the index size */
cii_loop2:
mov r9, r4 /* create working copy of max way size */
cii_loop3:
orr r11, r10, r9, lsl r5 /* factor way and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor index number into r11 */
mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
subs r9, r9, #1 /* decrement the way */
bge cii_loop3
subs r7, r7, #1 /* decrement the index */
bge cii_loop2
cii_skip:
add r10, r10, #2 /* increment cache number */
cmp r3, r10
bgt cii_loop1
cii_finished:
mov r10, #0 /* swith back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
pop {r4,r5,r7,r9,r10,r11}
bx lr
.global cache_init
.section .text
.type cache_init,%function
cache_init:
PUSH {R4-R11,LR}
MOV R7, R0
MRS R12, CPSR
CPSID AIF
TST R7, #2
BEQ mmt
MRC p15, 0, R0,c1,c0, 0
TST R0, #4
BEQ mma
BIC R0, R0, #4
MCR p15, 0, R0,c1,c0, 0
BL apmcu_dcache_clean_invalidate
B mmt
mma:
BL apmcu_dcache_invalidate
mmt:
TST R7, #1
BEQ mml
MRC p15, 0, R0,c1,c0, 0
BIC R0, R0, #0x1000
MCR p15, 0, R0,c1,c0, 0
mml:
MOV R0, #0
MCR p15, 0, R0,c7,c5, 0
MSR CPSR_cf, R12
POP {R4-R11,PC}
.global cache_close
.section .text
.type cache_close,%function
cache_close:
PUSH {R4-R11,LR}
MOV R7, R0
MRS R12, CPSR
CPSID AIF
TST R7, #2
BEQ cci
MRC p15, 0, R0,c1,c0, 0
TST R0, #4
BNE cci
BL apmcu_dcache_invalidate
MRC p15, 0, R0,c1,c0, 0
ORR R0, R0, #4
MCR p15, 0, R0,c1,c0, 0
cci:
TST R7, #1
BEQ cct
MOV R0, #0
MCR p15, 0, R0,c7,c5, 0
MRC p15, 0, R0,c1,c0, 0
ORR R0, R0, #0x1000
MCR p15, 0, R0,c1,c0, 0
cct:
MSR CPSR_cf, R12
POP {R4-R11,PC}
|
NitroA/mtkclient | 7,083 | src/da_xml/common/start.S | .syntax unified
.code 32
.global start
.section .text.start
start:
#add r3, pc, #1
#bx r3
.global apmcu_dcache_clean_invalidate
.section .text
.type apmcu_dcache_clean_invalidate,%function
apmcu_dcache_clean_invalidate:
push {r4,r5,r7,r9,r10,r11}
dmb /* ensure ordering with previous memory accesses */
mrc p15, 1, r0, c0, c0, 1 /* read clidr */
ands r3, r0, #0x7000000 /* extract loc from clidr */
mov r3, r3, lsr #23 /* left align loc bit field */
beq ci_finished /* if loc is 0, then no need to clean */
mov r10, #0 /* start clean at cache level 0 */
ci_loop1:
add r2, r10, r10, lsr #1 /* work out 3x current cache level */
mov r1, r0, lsr r2 /* extract cache type bits from clidr */
and r1, r1, #7 /* mask of the bits for current cache only */
cmp r1, #2 /* see what cache we have at this level */
blt ci_skip /* skip if no cache, or just i-cache */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
isb /* isb to sych the new cssr&csidr */
mrc p15, 1, r1, c0, c0, 0 /* read the new csidr */
and r2, r1, #7 /* extract the length of the cache lines */
add r2, r2, #4 /* add 4 (line length offset) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* find maximum number on the way size */
clz r5, r4 /* find bit position of way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* extract max number of the index size */
ci_loop2:
mov r9, r4 /* create working copy of max way size */
ci_loop3:
orr r11, r10, r9, lsl r5 /* factor way and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor index number into r11 */
mcr p15, 0, r11, c7, c14, 2 /* clean & invalidate by set/way */
subs r9, r9, #1 /* decrement the way */
bge ci_loop3
subs r7, r7, #1 /* decrement the index */
bge ci_loop2
ci_skip:
add r10, r10, #2 /* increment cache number */
cmp r3, r10
bgt ci_loop1
ci_finished:
mov r10, #0 /* swith back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
pop {r4,r5,r7,r9,r10,r11}
bx lr
.global apmcu_dcache_invalidate
.section .text
.type apmcu_dcache_invalidate,%function
apmcu_dcache_invalidate:
push {r4,r5,r7,r9,r10,r11}
dmb /* ensure ordering with previous memory accesses */
mrc p15, 1, r0, c0, c0, 1 /* read clidr */
ands r3, r0, #0x7000000 /* extract loc from clidr */
mov r3, r3, lsr #23 /* left align loc bit field */
beq cii_finished /* if loc is 0, then no need to clean */
mov r10, #0 /* start clean at cache level 0 */
cii_loop1:
add r2, r10, r10, lsr #1 /* work out 3x current cache level */
mov r1, r0, lsr r2 /* extract cache type bits from clidr */
and r1, r1, #7 /* mask of the bits for current cache only */
cmp r1, #2 /* see what cache we have at this level */
blt cii_skip /* skip if no cache, or just i-cache */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
isb /* isb to sych the new cssr&csidr */
mrc p15, 1, r1, c0, c0, 0 /* read the new csidr */
and r2, r1, #7 /* extract the length of the cache lines */
add r2, r2, #4 /* add 4 (line length offset) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* find maximum number on the way size */
clz r5, r4 /* find bit position of way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* extract max number of the index size */
cii_loop2:
mov r9, r4 /* create working copy of max way size */
cii_loop3:
orr r11, r10, r9, lsl r5 /* factor way and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor index number into r11 */
mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
subs r9, r9, #1 /* decrement the way */
bge cii_loop3
subs r7, r7, #1 /* decrement the index */
bge cii_loop2
cii_skip:
add r10, r10, #2 /* increment cache number */
cmp r3, r10
bgt cii_loop1
cii_finished:
mov r10, #0 /* swith back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
pop {r4,r5,r7,r9,r10,r11}
bx lr
.global cache_init
.section .text
.type cache_init,%function
cache_init:
PUSH {R4-R11,LR}
MOV R7, R0
MRS R12, CPSR
CPSID AIF
TST R7, #2
BEQ mmt
MRC p15, 0, R0,c1,c0, 0
TST R0, #4
BEQ mma
BIC R0, R0, #4
MCR p15, 0, R0,c1,c0, 0
BL apmcu_dcache_clean_invalidate
B mmt
mma:
BL apmcu_dcache_invalidate
mmt:
TST R7, #1
BEQ mml
MRC p15, 0, R0,c1,c0, 0
BIC R0, R0, #0x1000
MCR p15, 0, R0,c1,c0, 0
mml:
MOV R0, #0
MCR p15, 0, R0,c7,c5, 0
MSR CPSR_cf, R12
POP {R4-R11,PC}
.global cache_close
.section .text
.type cache_close,%function
cache_close:
PUSH {R4-R11,LR}
MOV R7, R0
MRS R12, CPSR
CPSID AIF
TST R7, #2
BEQ cci
MRC p15, 0, R0,c1,c0, 0
TST R0, #4
BNE cci
BL apmcu_dcache_invalidate
MRC p15, 0, R0,c1,c0, 0
ORR R0, R0, #4
MCR p15, 0, R0,c1,c0, 0
cci:
TST R7, #1
BEQ cct
MOV R0, #0
MCR p15, 0, R0,c7,c5, 0
MRC p15, 0, R0,c1,c0, 0
ORR R0, R0, #0x1000
MCR p15, 0, R0,c1,c0, 0
cct:
MSR CPSR_cf, R12
POP {R4-R11,PC}
|
NjrSea/ycc | 1,143 | test/main_return_int.s | .section __TEXT,__text,regular,pure_instructions
; __TEXT段名, __text子段名, regular表明是常规段, pure_instructions表明该段只有指令不含数据
.build_version macos, 15, 0 sdk_version 15, 2
; 指定程序支持的操作系统版本和SDK版本
.globl _main ; -- Begin function main
; 声明全局符号
.p2align 2
; 将下一条指令的地址按2的幂次方对齐(2表示按4字节对齐),下一条指令地址是4的倍数
_main: ; @main
.cfi_startproc
; DWARF(Debugging With Attributed Record Fromats)调试信息相关指令,标记函数开始,此指令能让汇编器正确识别函数的起始点
; %bb.0:
sub sp, sp, #16
; 为当前函数在栈上分配16字节空间
.cfi_def_cfa_offset 16
; DWRAF信息,定义当前帧指针(Canonical Frame Address,CFA)偏移量。这里表明当前函数栈帧大小是16字节
str wzr, [sp, #12]
; wzr是零寄存器,其值始终为0。此指令将wzr的值(0)存储到sp+12的位置
mov w0, #42 ; =0x2a
; 将立即数42移动到w0寄存器,意味着返回值是42
add sp, sp, #16
; 释放申请的16字节栈空间
ret
; 返回指令,程序会跳转到调用该函数的下一条指令继续执行
.cfi_endproc
; DWARF调试指令,标记函数结束
; -- End function
.subsections_via_symbols
; Macho-O中的一个特殊伪指令(assembler directive),主要用于控制符号和段之间的关联方式
; 传统上,符号的地址直接对应某个字段的具体位置。但是某些情况下(如动态链接、符号重定位),需要灵活的管理符号与字段的映射关系。
; .subsections_via_symbols的作用:
; 告诉汇编器:符号的地址解析通过符号表(symbol table)中的信息来确定,而不是直接关联某个固定的字段.
|
NKU-EmbeddedSystem/Prism | 2,741 | asm2vec/asm2vec/examples/training.s | my_strlen_train:
push rbp
mov rbp, rsp
mov QWORD PTR [rbp-24], rdi
mov rax, QWORD PTR [rbp-24]
mov QWORD PTR [rbp-8], rax
jmp .L2
.L3:
add QWORD PTR [rbp-8], 1
.L2:
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
test al, al
jne .L3
mov rax, QWORD PTR [rbp-8]
sub rax, QWORD PTR [rbp-24]
pop rbp
ret
my_strcmp_train:
push rbp
mov rbp, rsp
mov QWORD PTR [rbp-8], rdi
mov QWORD PTR [rbp-16], rsi
jmp .L6
.L10:
mov rax, QWORD PTR [rbp-8]
movzx edx, BYTE PTR [rax]
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
cmp dl, al
je .L7
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
movsx edx, al
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
movsx eax, al
sub edx, eax
mov eax, edx
jmp .L8
.L7:
add QWORD PTR [rbp-8], 1
add QWORD PTR [rbp-16], 1
.L6:
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
test al, al
je .L9
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
test al, al
jne .L10
.L9:
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
test al, al
je .L11
mov eax, 1
jmp .L8
.L11:
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
test al, al
je .L12
mov eax, -1
jmp .L8
.L12:
mov eax, 0
.L8:
pop rbp
ret
.LC0:
.string "%s"
.LC1:
.string "%d\n"
main:
push rbp
mov rbp, rsp
sub rsp, 256
lea rax, [rbp-128]
mov rsi, rax
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
lea rax, [rbp-256]
mov rsi, rax
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
lea rax, [rbp-128]
mov rdi, rax
call my_strlen_train
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
lea rdx, [rbp-256]
lea rax, [rbp-128]
mov rsi, rdx
mov rdi, rax
call my_strcmp_train
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
mov eax, 0
leave
ret |
NKU-EmbeddedSystem/Prism | 1,712 | asm2vec/asm2vec/examples/estimating.s | my_strlen_est:
cmp BYTE PTR [rdi], 0
je .L4
mov rax, rdi
.L3:
add rax, 1
cmp BYTE PTR [rax], 0
jne .L3
.L2:
sub rax, rdi
ret
.L4:
mov rax, rdi
jmp .L2
my_strcmp_est:
movzx eax, BYTE PTR [rdi]
test al, al
je .L12
.L7:
movzx edx, BYTE PTR [rsi]
test dl, dl
je .L15
cmp dl, al
jne .L16
add rdi, 1
add rsi, 1
movzx eax, BYTE PTR [rdi]
test al, al
jne .L7
.L12:
cmp BYTE PTR [rsi], 0
setne dl
movzx edx, dl
neg edx
.L6:
mov eax, edx
ret
.L16:
movsx eax, al
movsx edx, dl
sub eax, edx
mov edx, eax
jmp .L6
.L15:
mov edx, 1
test al, al
jne .L6
jmp .L12
.LC0:
.string "%s"
.LC1:
.string "%d\n"
main:
sub rsp, 264
lea rsi, [rsp+128]
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
mov rsi, rsp
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
lea rdi, [rsp+128]
call my_strlen_est
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
mov rsi, rsp
lea rdi, [rsp+128]
call my_strcmp_est
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
mov eax, 0
add rsp, 264
ret |
NjrSea/ycc | 1,143 | test/main_return_int.s | .section __TEXT,__text,regular,pure_instructions
; __TEXT段名, __text子段名, regular表明是常规段, pure_instructions表明该段只有指令不含数据
.build_version macos, 15, 0 sdk_version 15, 2
; 指定程序支持的操作系统版本和SDK版本
.globl _main ; -- Begin function main
; 声明全局符号
.p2align 2
; 将下一条指令的地址按2的幂次方对齐(2表示按4字节对齐),下一条指令地址是4的倍数
_main: ; @main
.cfi_startproc
; DWARF(Debugging With Attributed Record Fromats)调试信息相关指令,标记函数开始,此指令能让汇编器正确识别函数的起始点
; %bb.0:
sub sp, sp, #16
; 为当前函数在栈上分配16字节空间
.cfi_def_cfa_offset 16
; DWRAF信息,定义当前帧指针(Canonical Frame Address,CFA)偏移量。这里表明当前函数栈帧大小是16字节
str wzr, [sp, #12]
; wzr是零寄存器,其值始终为0。此指令将wzr的值(0)存储到sp+12的位置
mov w0, #42 ; =0x2a
; 将立即数42移动到w0寄存器,意味着返回值是42
add sp, sp, #16
; 释放申请的16字节栈空间
ret
; 返回指令,程序会跳转到调用该函数的下一条指令继续执行
.cfi_endproc
; DWARF调试指令,标记函数结束
; -- End function
.subsections_via_symbols
; Macho-O中的一个特殊伪指令(assembler directive),主要用于控制符号和段之间的关联方式
; 传统上,符号的地址直接对应某个字段的具体位置。但是某些情况下(如动态链接、符号重定位),需要灵活的管理符号与字段的映射关系。
; .subsections_via_symbols的作用:
; 告诉汇编器:符号的地址解析通过符号表(symbol table)中的信息来确定,而不是直接关联某个固定的字段.
|
NKU-EmbeddedSystem/Prism | 2,741 | asm2vec/asm2vec/examples/training.s | my_strlen_train:
push rbp
mov rbp, rsp
mov QWORD PTR [rbp-24], rdi
mov rax, QWORD PTR [rbp-24]
mov QWORD PTR [rbp-8], rax
jmp .L2
.L3:
add QWORD PTR [rbp-8], 1
.L2:
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
test al, al
jne .L3
mov rax, QWORD PTR [rbp-8]
sub rax, QWORD PTR [rbp-24]
pop rbp
ret
my_strcmp_train:
push rbp
mov rbp, rsp
mov QWORD PTR [rbp-8], rdi
mov QWORD PTR [rbp-16], rsi
jmp .L6
.L10:
mov rax, QWORD PTR [rbp-8]
movzx edx, BYTE PTR [rax]
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
cmp dl, al
je .L7
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
movsx edx, al
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
movsx eax, al
sub edx, eax
mov eax, edx
jmp .L8
.L7:
add QWORD PTR [rbp-8], 1
add QWORD PTR [rbp-16], 1
.L6:
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
test al, al
je .L9
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
test al, al
jne .L10
.L9:
mov rax, QWORD PTR [rbp-8]
movzx eax, BYTE PTR [rax]
test al, al
je .L11
mov eax, 1
jmp .L8
.L11:
mov rax, QWORD PTR [rbp-16]
movzx eax, BYTE PTR [rax]
test al, al
je .L12
mov eax, -1
jmp .L8
.L12:
mov eax, 0
.L8:
pop rbp
ret
.LC0:
.string "%s"
.LC1:
.string "%d\n"
main:
push rbp
mov rbp, rsp
sub rsp, 256
lea rax, [rbp-128]
mov rsi, rax
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
lea rax, [rbp-256]
mov rsi, rax
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
lea rax, [rbp-128]
mov rdi, rax
call my_strlen_train
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
lea rdx, [rbp-256]
lea rax, [rbp-128]
mov rsi, rdx
mov rdi, rax
call my_strcmp_train
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
mov eax, 0
leave
ret |
NKU-EmbeddedSystem/Prism | 1,712 | asm2vec/asm2vec/examples/estimating.s | my_strlen_est:
cmp BYTE PTR [rdi], 0
je .L4
mov rax, rdi
.L3:
add rax, 1
cmp BYTE PTR [rax], 0
jne .L3
.L2:
sub rax, rdi
ret
.L4:
mov rax, rdi
jmp .L2
my_strcmp_est:
movzx eax, BYTE PTR [rdi]
test al, al
je .L12
.L7:
movzx edx, BYTE PTR [rsi]
test dl, dl
je .L15
cmp dl, al
jne .L16
add rdi, 1
add rsi, 1
movzx eax, BYTE PTR [rdi]
test al, al
jne .L7
.L12:
cmp BYTE PTR [rsi], 0
setne dl
movzx edx, dl
neg edx
.L6:
mov eax, edx
ret
.L16:
movsx eax, al
movsx edx, dl
sub eax, edx
mov edx, eax
jmp .L6
.L15:
mov edx, 1
test al, al
jne .L6
jmp .L12
.LC0:
.string "%s"
.LC1:
.string "%d\n"
main:
sub rsp, 264
lea rsi, [rsp+128]
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
mov rsi, rsp
mov edi, OFFSET FLAT:.LC0
mov eax, 0
call scanf
lea rdi, [rsp+128]
call my_strlen_est
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
mov rsi, rsp
lea rdi, [rsp+128]
call my_strcmp_est
mov esi, eax
mov edi, OFFSET FLAT:.LC1
mov eax, 0
call printf
mov eax, 0
add rsp, 264
ret |
Noratrieb/rustv32i | 1,030 | tests/helper.S | .macro START_TEST
.section .text
.globl _start
_start:
.endm
.macro ASSERT_EQ actual expected
li t6, \expected
bne \actual, t6, fail
.endm
.macro PASS
li a7, -1
li a0, 1
ecall
.endm
.macro FAIL
j fail
.endm
.macro WITH_SINGLE_TEST_NUMBERS macro
\macro a, 0
\macro c, 1
\macro d, 2
\macro u, 3
\macro e, 4
\macro v, 5
\macro f, 8
\macro t, 10
\macro g, 16
\macro h, 32
\macro i, 64
\macro s, 100
\macro j, 128
\macro k, 256
\macro l, 512
\macro w, 1000
\macro m, 1024
\macro n, 2047
\macro b, -1
\macro o, -2
\macro p, -16
\macro q, -1024
\macro r, -1000
.endm
.macro WITH_TWO_TEST_NUMBERS macro
.macro \macro\()_TMP namea:req a:req
.macro \macro\()_TMP_\namea nameb:req b:req
\macro \a, \b
.endm
WITH_SINGLE_TEST_NUMBERS \macro\()_TMP_\namea
.endm
WITH_SINGLE_TEST_NUMBERS \macro\()_TMP
.endm
fail:
li a7, -1
li a0, 0
ecall
|
Noratrieb/rustv32i | 1,616 | tests/check/zaamo.S | # Atomic Memory Operations
#include "../helper.S"
# Run one atomic-memory-operation test: store \mem at address 0, execute
# "\inst t2, \reg, (addr)", then check that the old memory value was returned
# in t2 and that memory now holds \expected_mem.
.macro CASE_BASE inst reg mem expected_mem
li t0, 0
li t1, \mem
sw t1, (t0)
li t3, \reg
\inst t2, t3, (t0)
ASSERT_EQ t2, \mem
lw t3, (t0)
ASSERT_EQ t3, \expected_mem
.endm
# Run CASE_BASE for the plain instruction and for all three memory-ordering
# variants (.aq, .rl, .aqrl); orderings must not change single-hart results.
.macro CASE inst reg mem expected_mem
CASE_BASE \inst, \reg, \mem, \expected_mem
CASE_BASE \inst\().aq, \reg, \mem, \expected_mem
CASE_BASE \inst\().rl, \reg, \mem, \expected_mem
CASE_BASE \inst\().aqrl, \reg, \mem, \expected_mem
.endm
START_TEST
.macro CASE_AMOSWAP a:req b:req
CASE amoswap.w, \a, \b, \a
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOSWAP
.macro CASE_AMOADD a:req b:req
CASE amoadd.w, \a, \b, \a + \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOADD
.macro CASE_AMOAND a:req b:req
CASE amoand.w, \a, \b, \a & \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOAND
.macro CASE_AMOOR a:req b:req
CASE amoor.w, \a, \b, \a | \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOOR
.macro CASE_AMOXOR a:req b:req
CASE amoxor.w, \a, \b, \a ^ \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOXOR
CASE amomax.w, 0, 0, 0
CASE amomax.w, 0, 1, 1
CASE amomax.w, -1, 0, 0
CASE amomax.w 100, -100, 100
CASE amomaxu.w, 0, 0, 0
CASE amomaxu.w, 0, 1, 1
CASE amomaxu.w, -1, 0, -1
CASE amomaxu.w 100, -100, -100
CASE amomin.w, 0, 0, 0
CASE amomin.w, 0, 1, 0
CASE amomin.w, -1, 0, -1
CASE amomin.w 100, -100, -100
CASE amominu.w, 0, 0, 0
CASE amominu.w, 0, 1, 0
CASE amominu.w, -1, 0, 0
CASE amominu.w 100, -100, 100
PASS
|
Noratrieb/rustv32i | 4,858 | tests/check/int_comp.S | # Integer computational register-register instruction.
#include "../helper.S"
# Register-register test: t2 = \inst(\a, \b), then assert t2 == \expected.
.macro CASER inst:req a:req b:req expected:req
li t0, \a
li t1, \b
\inst t2, t0, t1
ASSERT_EQ t2, \expected
.endm
# Register-immediate test: \b is encoded as the instruction's immediate.
.macro CASE_IMM inst:req a:req b:req expected:req
li t0, \a
\inst t2, t0, \b
ASSERT_EQ t2, \expected
.endm
# Test the register form \inst and its immediate counterpart \insti.
.macro CASE_BOTH inst:req insti:req a:req b:req expected:req
CASER \inst, \a, \b, \expected
CASE_IMM \insti, \a, \b, \expected
.endm
# Common case: the immediate mnemonic is the register one plus an "i" suffix.
.macro CASE inst:req a:req b:req expected:req
CASE_BOTH \inst, \inst\()i, \a, \b, \expected
.endm
START_TEST
# Base instructions
.macro CASE_ADD a:req, b:req
CASE add, \a, \b, \a + \b
.endm
WITH_TWO_TEST_NUMBERS CASE_ADD
CASE slt 10 20 1
CASE slt 20 10 0
CASE slt, -1 0 1
CASE slt 0, -1 0
CASE slt, -1, -1, 0
CASE slt, -100, -1, 1
CASE_BOTH sltu sltiu 10 20 1
CASE_BOTH sltu sltiu 20 10 0
CASE_BOTH sltu sltiu, -1, 0, 0
CASE_BOTH sltu sltiu, -100, -1, 1
CASE_BOTH sltu sltiu, 100, -1, 1
CASE and 0b11, 0b11, 0b11
CASE and, -1, -1, -1
CASE and, -1, 0, 0
CASE and, -1, 40, 40
CASE and, 0b101, 0b100, 0b100
.macro CASE_AND a:req, b:req
CASE and, \a, \b, \a & \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AND
CASE or, -1, 0, -1
CASE or, -1, 40, -1
CASE or, 0, 0, 0
CASE or, 0b101, 0b110, 0b111
.macro CASE_OR a:req, b:req
CASE or, \a, \b, \a | \b
.endm
WITH_TWO_TEST_NUMBERS CASE_OR
CASE xor, -1, 0, -1
CASE xor, -1, -1, 0
CASE xor 0b101, 0b100, 0b001
.macro CASE_XOR a:req, b:req
CASE xor, \a, \b, \a ^ \b
.endm
WITH_TWO_TEST_NUMBERS CASE_XOR
CASE sll, 2, 1, 4
CASE sll, 2, 20, 2097152
CASE sll, 2, 30, 2147483648
CASE sll, 0, 10, 0
CASE sll, 10, 0, 10
#ifdef RV32
CASE sll, 2, 31, 0
CASE sll, -1, 31, -2147483648
CASER sll, -1, 32, -1 # error for immediate
CASER sll, 2, 32, 2 # error for immediate
#elif RV64
#CASE_BOTH sllw, slliw, 2, 31, 0
#CASE_BOTH sllw, slliw, -1, 31, -2147483648
#CASER sllw, -1, 32, -1 # error for immediate
#CASER sllw, 2, 32, 2 # error for immediate
CASE sll, -1, 31, 18446744071562067968
CASER sll, 2, 63, 0 # error for immediate
CASE sll, -1, 32, 18446744069414584320 # test with immediate as well
CASER sll, -1, 63, 9223372036854775808 # error for immediate
CASER sll, -1, 64, -1 # error for immediate
CASER sll, 2, 64, 2 # error for immediate
#endif
CASE srl, 4, 1, 2
CASE srl, 0, 10, 0
CASE srl, 10, 0, 10
CASE srl, 0b111, 2, 0b001
#ifdef RV32
CASE srl, -1, 1, 2147483647
CASER srl, -1, 32, -1 # error for immediate
#elif RV64
CASE srl, -1, 1, 9223372036854775807
CASE srl, -1, 32, 4294967295
CASER srl, -1, 64, -1 # error for immediate
#endif
CASER sub, 10, 5, 5
CASER sub, -1, 1, -2
CASER sub, 1, 2, -1
CASER sub, -1, -2, 1
#ifdef RV32
CASER sub, 0, 4294967295, 1
#elif RV64
CASER sub, 0, 18446744073709551615, 1
#endif
.macro CASE_SUB a:req, b:req
CASER sub, \a, \b, \a - \b
.endm
WITH_TWO_TEST_NUMBERS CASE_SUB
CASE sra, 4, 1, 2
CASE sra, 0, 10, 0
CASE sra, 10, 0, 10
CASE sra, -1, 1, -1
CASE sra, -1, 31, -1
CASE sra, 0b111, 2, 0b001
#ifdef RV32
CASER sra, 10, 32, 10 # error for immediate
#elif RV64
CASE sra, 10, 32, 0
CASER sra, 10, 64, 10 # error for immediate
#endif
# M extension
CASER mul, 4, 4, 16
CASER mul, 10, 0, 0
CASER mul, 10, 1, 10
CASER mul, -1, -1, 1
#ifdef RV32
CASER mul, 25252566, 5225225, 353909638
#elif RV64
// TODO
#endif
.macro CASE_MUL a:req, b:req
CASER mul, \a, \b, \a * \b
.endm
WITH_TWO_TEST_NUMBERS CASE_MUL
CASER mulh 4, 4, 0
CASER mulh, -1, -1, 0
#ifdef RV32
CASER mulh, 25252566, 5225225, 30722
#elif RV64
// TODO
#endif
CASER mulhu 4, 4, 0
#ifdef RV32
CASER mulhu, -1, -1, 4294967294
CASER mulhu, 25252566, 5225225, 30722
#elif RV64
// TODO
#endif
# mulhsu hasn't been implemented yet.
CASER div, 4, 2, 2
CASER div, -1, 1, -1
CASER div, 1, 1, 1
CASER div, 1, 0, -1
CASER div, -10, 2, -5
CASER div, 5, 2, 2
CASER div, 5, -1, -5
#ifdef RV32
CASER div, -2147483648, -1, -1
#elif RV64
// TODO
#endif
CASER divu, 4, 2, 2
CASER divu, -1, 1, -1
CASER divu, 1, 1, 1
CASER divu, 1, 0, -1
CASER divu, 5, 2, 2
#ifdef RV32
CASER divu, -10, 2, 2147483643
#elif RV64
// TODO
#endif
CASER rem, 4, 2, 0
CASER rem, 5, 2, 1
CASER rem, 5, 0, 5
CASER rem, -10, 3, -1
CASER rem, 5, -1, 0
#ifdef RV32
CASER rem, -2147483648, -1, 0
#elif RV64
// TODO
#endif
CASER remu, 4, 2, 0
CASER remu, 5, 2, 1
CASER remu, 5, 0, 5
CASER remu, -10, 3, 0
PASS
|
Noratrieb/rustv32i | 1,292 | tests/check/branch.S | # Control transfer instructions
#include "../helper.S"
START_TEST
j unconditional
FAIL
unconditional:
# Test branching instructions
li t0, 10
li t1, 10
beq t0, t1, branch2
FAIL
branch2:
bge t0, t1, branch3
FAIL
branch3:
bne t0, t1, fail
blt t0, t1, fail
bltu t0, t1, fail
li t0, -1
li t1, 1
blt t1, t0, fail
bltu t0, t1, fail
bge t0, t1, fail
bgeu t1, t0, fail
blt t0, t1, branch4
FAIL
branch4:
bltu t1, t0, branch5
FAIL
branch5:
bge t1, t0, branch6
FAIL
branch6:
bgeu t0, t1, branch7
FAIL
branch7:
# Backwards jal
li t0, 0
backwards:
li t1, 0
bne t0, t1, end_backwards
li t0, 1
jal backwards
end_backwards:
# Test link registers being set correctly:
auipc t1, 0
jal t0, link2
jal t5, fail # force uncompressed
link2:
addi t1, t1, 8 # the instruction following the jump
bne t1, t0, fail
auipc t1, 0
jalr t0, 12(t1) # 12 is the three instructions, so to the addi
jal t5, fail # force uncompressed
addi t1, t1, 8 # the instruction following the jump
bne t0, t1, fail
# Test a loop (t0=counter, t1=expected)
li t0, 0
li t1, 100
loop1:
addi t0, t0, 1
blt t0, t1, loop1
ASSERT_EQ t0, 100
# End
PASS
|
Noratrieb/rustv32i | 1,790 | tests/check/mem.S | # Load and Store Instructions
#include "../helper.S"
# Store \value with \s at 0(t0), load it back with \l, and assert the loaded
# value equals \result (which differs from \value for sign-extending loads).
.macro CASE_NO_OFFSET_DIFF_RESULT s l value result
li t1, \value
\s t1, 0(t0)
\l t2, 0(t0)
ASSERT_EQ t2, \result
.endm
# Round-trip store/load where the value is expected back unchanged.
.macro CASE_NO_OFFSET s l value
CASE_NO_OFFSET_DIFF_RESULT \s, \l, \value, \value
.endm
# Store and load through the same immediate \offset from base t0.
.macro CASE_SAME_OFFSET s l value offset
li t1, \value
\s t1, \offset(t0)
\l t2, \offset(t0)
ASSERT_EQ t2, \value
.endm
# Store via immediate offset, load via a pre-computed address (t0 + \offset)
# with a zero offset — checks that both addressing forms hit the same byte.
.macro CASE_ADDED_OFFSET s l value offset
addi t3, t0, \offset
li t1, \value
\s t1, \offset(t0)
\l t2, 0(t3)
ASSERT_EQ t2, \value
.endm
START_TEST
li t0, 0
##### word
CASE_NO_OFFSET sw, lw, -4
CASE_SAME_OFFSET sw, lw, -5, 4
CASE_SAME_OFFSET sw, lw, -6, 1000
CASE_ADDED_OFFSET sw, lw, -7, 4
CASE_ADDED_OFFSET sw, lw, -8, 1000
##### half
CASE_NO_OFFSET_DIFF_RESULT sh, lh, 65535, -1
CASE_NO_OFFSET sh, lhu, 65535
CASE_NO_OFFSET sh, lh, 21450
CASE_SAME_OFFSET sh, lh, 21451, 4
CASE_SAME_OFFSET sh, lh, 21452, 1000
CASE_ADDED_OFFSET sh, lh, 21453, 4
CASE_ADDED_OFFSET sh, lh, 21454, 1000
CASE_NO_OFFSET sh, lhu, 20420
CASE_SAME_OFFSET sh, lhu, 20421, 4
CASE_SAME_OFFSET sh, lhu, 20422, 1000
CASE_ADDED_OFFSET sh, lhu, 20423, 4
CASE_ADDED_OFFSET sh, lhu, 20424, 1000
##### byte
CASE_NO_OFFSET_DIFF_RESULT sb, lb, 255, -1
CASE_NO_OFFSET sb, lbu, 255
CASE_NO_OFFSET sb, lb, 90
CASE_SAME_OFFSET sb, lb, 90, 4
CASE_SAME_OFFSET sb, lb, 91, 1000
CASE_ADDED_OFFSET sb, lb, 92, 4
CASE_ADDED_OFFSET sb, lb, 93, 1000
CASE_NO_OFFSET sb, lbu, 110
CASE_SAME_OFFSET sb, lbu, 110, 4
CASE_SAME_OFFSET sb, lbu, 111, 1000
CASE_ADDED_OFFSET sb, lbu, 112, 4
CASE_ADDED_OFFSET sb, lbu, 113, 1000
PASS
|
Not-Buddy/OmniBuild | 2,290 | src/ASM_Code/bitwise_ops_arm64.s | // src/ASM_Code/bitwise_ops_arm64.s
// Fast bitwise operations in ARM64 assembly (AArch64) for Linux
.text
.global asm_fast_and
.global asm_fast_or
.global asm_fast_xor
.global asm_fast_shift_left
.global asm_fast_shift_right
.global asm_count_bits
.global asm_reverse_bits
// Fast bitwise AND: asm_fast_and(uint64_t a, uint64_t b) -> uint64_t
asm_fast_and:
and x0, x0, x1
ret
// Fast bitwise OR: asm_fast_or(uint64_t a, uint64_t b) -> uint64_t
asm_fast_or:
orr x0, x0, x1
ret
// Fast bitwise XOR: asm_fast_xor(uint64_t a, uint64_t b) -> uint64_t
asm_fast_xor:
eor x0, x0, x1
ret
// Fast left shift: asm_fast_shift_left(uint64_t value, uint64_t shift) -> uint64_t
asm_fast_shift_left:
// shift amount in x1 (only lower 6 bits used)
lsl x0, x0, x1
ret
// Fast right shift: asm_fast_shift_right(uint64_t value, uint64_t shift) -> uint64_t
asm_fast_shift_right:
// Logical shift right
lsr x0, x0, x1
ret
// Count set bits (population count): asm_count_bits(uint64_t value) -> uint64_t
asm_count_bits:
    // uint64_t asm_count_bits(uint64_t value)
    // AAPCS64. In: x0 = value. Out: x0 = number of set bits. Clobbers x1, x2.
    // Brian Kernighan's algorithm: value &= (value - 1) clears exactly the
    // lowest set bit, so the loop runs popcount(value) times.
    // BUG FIX: the previous body computed (value-1) & ((value-1) << 1),
    // which does not clear the lowest set bit and returned wrong counts.
    mov x1, #0                  // counter = 0
count_loop:
    cbz x0, count_done          // no bits remain -> done
    add x1, x1, #1              // counter++
    sub x2, x0, #1              // x2 = value - 1
    and x0, x0, x2              // value &= value - 1 (clear lowest set bit)
    b count_loop
count_done:
    mov x0, x1                  // return counter
    ret
// Reverse bits in a 64-bit integer: asm_reverse_bits(uint64_t value) -> uint64_t
asm_reverse_bits:
    // uint64_t asm_reverse_bits(uint64_t value)
    // AAPCS64. In: x0 = value. Out: x0 = value with its 64 bits reversed.
    // RBIT is a base AArch64 (ARMv8-A) instruction, so the former
    // 64-iteration shift loop is unnecessary; no other registers touched.
    rbit x0, x0                 // reverse all 64 bits in one instruction
    ret
|
Not-Buddy/OmniBuild | 2,689 | src/ASM_Code/bitwise_ops.s | # src/ASM_Code/bitwise_ops.s
# Fast bitwise operations in x86-64 assembly
.section .text
.globl asm_fast_and
.globl asm_fast_or
.globl asm_fast_xor
.globl asm_fast_shift_left
.globl asm_fast_shift_right
.globl asm_count_bits
.globl asm_reverse_bits
# Fast bitwise AND: asm_fast_and(a: u64, b: u64) -> u64
asm_fast_and:
movq %rdi, %rax # Move first argument (a) to return register
andq %rsi, %rax # Bitwise AND with second argument (b)
ret # Return result in %rax
# Fast bitwise OR: asm_fast_or(a: u64, b: u64) -> u64
asm_fast_or:
movq %rdi, %rax # Move first argument to return register
orq %rsi, %rax # Bitwise OR with second argument
ret
# Fast bitwise XOR: asm_fast_xor(a: u64, b: u64) -> u64
asm_fast_xor:
movq %rdi, %rax # Move first argument to return register
xorq %rsi, %rax # Bitwise XOR with second argument
ret
# Fast left shift: asm_fast_shift_left(value: u64, shift: u64) -> u64
asm_fast_shift_left:
movq %rdi, %rax # Move value to return register
movq %rsi, %rcx # Move shift amount to %rcx (required for shift ops)
salq %cl, %rax # Shift left by %cl bits (lower 8 bits of %rcx)
ret
# Fast right shift: asm_fast_shift_right(value: u64, shift: u64) -> u64
asm_fast_shift_right:
movq %rdi, %rax # Move value to return register
movq %rsi, %rcx # Move shift amount to %rcx
shrq %cl, %rax # Logical right shift by %cl bits
ret
# Count set bits (population count): asm_count_bits(value: u64) -> u64
# Count set bits (population count): asm_count_bits(value: u64) -> u64
# SysV AMD64. In: rdi = value. Out: rax = popcount. Clobbers rcx, rdx, flags.
# Kernighan's algorithm: value &= (value - 1) clears the lowest set bit, so
# the loop iterates once per set bit. (The previous body decremented an
# uninitialized %rcx before overwriting it — dead instruction, removed.)
asm_count_bits:
        movq    %rdi, %rax          # working copy of value
        xorq    %rdx, %rdx          # counter = 0
count_loop:
        testq   %rax, %rax          # any bits left?
        jz      count_done
        incq    %rdx                # counter++
        leaq    -1(%rax), %rcx      # rcx = value - 1 (lea preserves flags)
        andq    %rcx, %rax          # value &= value - 1 (clear lowest set bit)
        jmp     count_loop
count_done:
        movq    %rdx, %rax          # return counter
        ret
# Reverse bits in a 64-bit integer: asm_reverse_bits(value: u64) -> u64
# Reverse bits in a 64-bit integer: asm_reverse_bits(value: u64) -> u64
# SysV AMD64. In: rdi = value. Out: rax = bit-reversed value.
# Clobbers rcx, rdx, flags.
# Each iteration moves the lowest bit of the input into CF (shr) and shifts
# it into the result from the right (rcl), so 64 iterations produce the full
# reversal. BUG FIX: the old body ran at most 63 iterations ($63 + `loop`)
# and bailed out early once the remaining input was zero, so e.g.
# reverse(1) returned 1 instead of 1<<63. It also used the microcoded
# legacy `loop` instruction; dec/jnz is the modern idiom.
asm_reverse_bits:
        movq    %rdi, %rax          # working copy of input
        movq    $64, %rcx           # process all 64 bit positions
        xorq    %rdx, %rdx          # result = 0
reverse_loop:
        shrq    $1, %rax            # lowest input bit -> CF
        rclq    $1, %rdx            # result = (result << 1) | CF
        decq    %rcx
        jnz     reverse_loop        # always run the full 64 iterations
        movq    %rdx, %rax          # return reversed value
        ret
|
Noratrieb/rustv32i | 1,030 | tests/helper.S | .macro START_TEST
.section .text
.globl _start
_start:
.endm
.macro ASSERT_EQ actual expected
li t6, \expected
bne \actual, t6, fail
.endm
.macro PASS
li a7, -1
li a0, 1
ecall
.endm
.macro FAIL
j fail
.endm
.macro WITH_SINGLE_TEST_NUMBERS macro
\macro a, 0
\macro c, 1
\macro d, 2
\macro u, 3
\macro e, 4
\macro v, 5
\macro f, 8
\macro t, 10
\macro g, 16
\macro h, 32
\macro i, 64
\macro s, 100
\macro j, 128
\macro k, 256
\macro l, 512
\macro w, 1000
\macro m, 1024
\macro n, 2047
\macro b, -1
\macro o, -2
\macro p, -16
\macro q, -1024
\macro r, -1000
.endm
.macro WITH_TWO_TEST_NUMBERS macro
.macro \macro\()_TMP namea:req a:req
.macro \macro\()_TMP_\namea nameb:req b:req
\macro \a, \b
.endm
WITH_SINGLE_TEST_NUMBERS \macro\()_TMP_\namea
.endm
WITH_SINGLE_TEST_NUMBERS \macro\()_TMP
.endm
fail:
li a7, -1
li a0, 0
ecall
|
Noratrieb/rustv32i | 1,616 | tests/check/zaamo.S | # Atomic Memory Operations
#include "../helper.S"
.macro CASE_BASE inst reg mem expected_mem
li t0, 0
li t1, \mem
sw t1, (t0)
li t3, \reg
\inst t2, t3, (t0)
ASSERT_EQ t2, \mem
lw t3, (t0)
ASSERT_EQ t3, \expected_mem
.endm
.macro CASE inst reg mem expected_mem
CASE_BASE \inst, \reg, \mem, \expected_mem
CASE_BASE \inst\().aq, \reg, \mem, \expected_mem
CASE_BASE \inst\().rl, \reg, \mem, \expected_mem
CASE_BASE \inst\().aqrl, \reg, \mem, \expected_mem
.endm
START_TEST
.macro CASE_AMOSWAP a:req b:req
CASE amoswap.w, \a, \b, \a
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOSWAP
.macro CASE_AMOADD a:req b:req
CASE amoadd.w, \a, \b, \a + \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOADD
.macro CASE_AMOAND a:req b:req
CASE amoand.w, \a, \b, \a & \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOAND
.macro CASE_AMOOR a:req b:req
CASE amoor.w, \a, \b, \a | \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOOR
.macro CASE_AMOXOR a:req b:req
CASE amoxor.w, \a, \b, \a ^ \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AMOXOR
CASE amomax.w, 0, 0, 0
CASE amomax.w, 0, 1, 1
CASE amomax.w, -1, 0, 0
CASE amomax.w 100, -100, 100
CASE amomaxu.w, 0, 0, 0
CASE amomaxu.w, 0, 1, 1
CASE amomaxu.w, -1, 0, -1
CASE amomaxu.w 100, -100, -100
CASE amomin.w, 0, 0, 0
CASE amomin.w, 0, 1, 0
CASE amomin.w, -1, 0, -1
CASE amomin.w 100, -100, -100
CASE amominu.w, 0, 0, 0
CASE amominu.w, 0, 1, 0
CASE amominu.w, -1, 0, 0
CASE amominu.w 100, -100, 100
PASS
|
Noratrieb/rustv32i | 4,858 | tests/check/int_comp.S | # Integer computational register-register instruction.
#include "../helper.S"
.macro CASER inst:req a:req b:req expected:req
li t0, \a
li t1, \b
\inst t2, t0, t1
ASSERT_EQ t2, \expected
.endm
.macro CASE_IMM inst:req a:req b:req expected:req
li t0, \a
\inst t2, t0, \b
ASSERT_EQ t2, \expected
.endm
.macro CASE_BOTH inst:req insti:req a:req b:req expected:req
CASER \inst, \a, \b, \expected
CASE_IMM \insti, \a, \b, \expected
.endm
.macro CASE inst:req a:req b:req expected:req
CASE_BOTH \inst, \inst\()i, \a, \b, \expected
.endm
START_TEST
# Base instructions
.macro CASE_ADD a:req, b:req
CASE add, \a, \b, \a + \b
.endm
WITH_TWO_TEST_NUMBERS CASE_ADD
CASE slt 10 20 1
CASE slt 20 10 0
CASE slt, -1 0 1
CASE slt 0, -1 0
CASE slt, -1, -1, 0
CASE slt, -100, -1, 1
CASE_BOTH sltu sltiu 10 20 1
CASE_BOTH sltu sltiu 20 10 0
CASE_BOTH sltu sltiu, -1, 0, 0
CASE_BOTH sltu sltiu, -100, -1, 1
CASE_BOTH sltu sltiu, 100, -1, 1
CASE and 0b11, 0b11, 0b11
CASE and, -1, -1, -1
CASE and, -1, 0, 0
CASE and, -1, 40, 40
CASE and, 0b101, 0b100, 0b100
.macro CASE_AND a:req, b:req
CASE and, \a, \b, \a & \b
.endm
WITH_TWO_TEST_NUMBERS CASE_AND
CASE or, -1, 0, -1
CASE or, -1, 40, -1
CASE or, 0, 0, 0
CASE or, 0b101, 0b110, 0b111
.macro CASE_OR a:req, b:req
CASE or, \a, \b, \a | \b
.endm
WITH_TWO_TEST_NUMBERS CASE_OR
CASE xor, -1, 0, -1
CASE xor, -1, -1, 0
CASE xor 0b101, 0b100, 0b001
.macro CASE_XOR a:req, b:req
CASE xor, \a, \b, \a ^ \b
.endm
WITH_TWO_TEST_NUMBERS CASE_XOR
CASE sll, 2, 1, 4
CASE sll, 2, 20, 2097152
CASE sll, 2, 30, 2147483648
CASE sll, 0, 10, 0
CASE sll, 10, 0, 10
#ifdef RV32
CASE sll, 2, 31, 0
CASE sll, -1, 31, -2147483648
CASER sll, -1, 32, -1 # error for immediate
CASER sll, 2, 32, 2 # error for immediate
#elif RV64
#CASE_BOTH sllw, slliw, 2, 31, 0
#CASE_BOTH sllw, slliw, -1, 31, -2147483648
#CASER sllw, -1, 32, -1 # error for immediate
#CASER sllw, 2, 32, 2 # error for immediate
CASE sll, -1, 31, 18446744071562067968
CASER sll, 2, 63, 0 # error for immediate
CASE sll, -1, 32, 18446744069414584320 # test with immediate as well
CASER sll, -1, 63, 9223372036854775808 # error for immediate
CASER sll, -1, 64, -1 # error for immediate
CASER sll, 2, 64, 2 # error for immediate
#endif
CASE srl, 4, 1, 2
CASE srl, 0, 10, 0
CASE srl, 10, 0, 10
CASE srl, 0b111, 2, 0b001
#ifdef RV32
CASE srl, -1, 1, 2147483647
CASER srl, -1, 32, -1 # error for immediate
#elif RV64
CASE srl, -1, 1, 9223372036854775807
CASE srl, -1, 32, 4294967295
CASER srl, -1, 64, -1 # error for immediate
#endif
CASER sub, 10, 5, 5
CASER sub, -1, 1, -2
CASER sub, 1, 2, -1
CASER sub, -1, -2, 1
#ifdef RV32
CASER sub, 0, 4294967295, 1
#elif RV64
CASER sub, 0, 18446744073709551615, 1
#endif
.macro CASE_SUB a:req, b:req
CASER sub, \a, \b, \a - \b
.endm
WITH_TWO_TEST_NUMBERS CASE_SUB
CASE sra, 4, 1, 2
CASE sra, 0, 10, 0
CASE sra, 10, 0, 10
CASE sra, -1, 1, -1
CASE sra, -1, 31, -1
CASE sra, 0b111, 2, 0b001
#ifdef RV32
CASER sra, 10, 32, 10 # error for immediate
#elif RV64
CASE sra, 10, 32, 0
CASER sra, 10, 64, 10 # error for immediate
#endif
# M extension
CASER mul, 4, 4, 16
CASER mul, 10, 0, 0
CASER mul, 10, 1, 10
CASER mul, -1, -1, 1
#ifdef RV32
CASER mul, 25252566, 5225225, 353909638
#elif RV64
// TODO
#endif
.macro CASE_MUL a:req, b:req
CASER mul, \a, \b, \a * \b
.endm
WITH_TWO_TEST_NUMBERS CASE_MUL
CASER mulh 4, 4, 0
CASER mulh, -1, -1, 0
#ifdef RV32
CASER mulh, 25252566, 5225225, 30722
#elif RV64
// TODO
#endif
CASER mulhu 4, 4, 0
#ifdef RV32
CASER mulhu, -1, -1, 4294967294
CASER mulhu, 25252566, 5225225, 30722
#elif RV64
// TODO
#endif
# mulhsu hasn't been implemented yet.
CASER div, 4, 2, 2
CASER div, -1, 1, -1
CASER div, 1, 1, 1
CASER div, 1, 0, -1
CASER div, -10, 2, -5
CASER div, 5, 2, 2
CASER div, 5, -1, -5
#ifdef RV32
CASER div, -2147483648, -1, -1
#elif RV64
// TODO
#endif
CASER divu, 4, 2, 2
CASER divu, -1, 1, -1
CASER divu, 1, 1, 1
CASER divu, 1, 0, -1
CASER divu, 5, 2, 2
#ifdef RV32
CASER divu, -10, 2, 2147483643
#elif RV64
// TODO
#endif
CASER rem, 4, 2, 0
CASER rem, 5, 2, 1
CASER rem, 5, 0, 5
CASER rem, -10, 3, -1
CASER rem, 5, -1, 0
#ifdef RV32
CASER rem, -2147483648, -1, 0
#elif RV64
// TODO
#endif
CASER remu, 4, 2, 0
CASER remu, 5, 2, 1
CASER remu, 5, 0, 5
CASER remu, -10, 3, 0
PASS
|
Noratrieb/rustv32i | 1,292 | tests/check/branch.S | # Control transfer instructions
#include "../helper.S"
START_TEST
j unconditional
FAIL
unconditional:
# Test branching instructions
li t0, 10
li t1, 10
beq t0, t1, branch2
FAIL
branch2:
bge t0, t1, branch3
FAIL
branch3:
bne t0, t1, fail
blt t0, t1, fail
bltu t0, t1, fail
li t0, -1
li t1, 1
blt t1, t0, fail
bltu t0, t1, fail
bge t0, t1, fail
bgeu t1, t0, fail
blt t0, t1, branch4
FAIL
branch4:
bltu t1, t0, branch5
FAIL
branch5:
bge t1, t0, branch6
FAIL
branch6:
bgeu t0, t1, branch7
FAIL
branch7:
# Backwards jal
li t0, 0
backwards:
li t1, 0
bne t0, t1, end_backwards
li t0, 1
jal backwards
end_backwards:
# Test link registers being set correctly:
auipc t1, 0
jal t0, link2
jal t5, fail # force uncompressed
link2:
addi t1, t1, 8 # the instruction following the jump
bne t1, t0, fail
auipc t1, 0
jalr t0, 12(t1) # 12 is the three instructions, so to the addi
jal t5, fail # force uncompressed
addi t1, t1, 8 # the instruction following the jump
bne t0, t1, fail
# Test a loop (t0=counter, t1=expected)
li t0, 0
li t1, 100
loop1:
addi t0, t0, 1
blt t0, t1, loop1
ASSERT_EQ t0, 100
# End
PASS
|
Noratrieb/rustv32i | 1,790 | tests/check/mem.S | # Load and Store Instructions
#include "../helper.S"
.macro CASE_NO_OFFSET_DIFF_RESULT s l value result
li t1, \value
\s t1, 0(t0)
\l t2, 0(t0)
ASSERT_EQ t2, \result
.endm
.macro CASE_NO_OFFSET s l value
CASE_NO_OFFSET_DIFF_RESULT \s, \l, \value, \value
.endm
.macro CASE_SAME_OFFSET s l value offset
li t1, \value
\s t1, \offset(t0)
\l t2, \offset(t0)
ASSERT_EQ t2, \value
.endm
.macro CASE_ADDED_OFFSET s l value offset
addi t3, t0, \offset
li t1, \value
\s t1, \offset(t0)
\l t2, 0(t3)
ASSERT_EQ t2, \value
.endm
START_TEST
li t0, 0
##### word
CASE_NO_OFFSET sw, lw, -4
CASE_SAME_OFFSET sw, lw, -5, 4
CASE_SAME_OFFSET sw, lw, -6, 1000
CASE_ADDED_OFFSET sw, lw, -7, 4
CASE_ADDED_OFFSET sw, lw, -8, 1000
##### half
CASE_NO_OFFSET_DIFF_RESULT sh, lh, 65535, -1
CASE_NO_OFFSET sh, lhu, 65535
CASE_NO_OFFSET sh, lh, 21450
CASE_SAME_OFFSET sh, lh, 21451, 4
CASE_SAME_OFFSET sh, lh, 21452, 1000
CASE_ADDED_OFFSET sh, lh, 21453, 4
CASE_ADDED_OFFSET sh, lh, 21454, 1000
CASE_NO_OFFSET sh, lhu, 20420
CASE_SAME_OFFSET sh, lhu, 20421, 4
CASE_SAME_OFFSET sh, lhu, 20422, 1000
CASE_ADDED_OFFSET sh, lhu, 20423, 4
CASE_ADDED_OFFSET sh, lhu, 20424, 1000
##### byte
CASE_NO_OFFSET_DIFF_RESULT sb, lb, 255, -1
CASE_NO_OFFSET sb, lbu, 255
CASE_NO_OFFSET sb, lb, 90
CASE_SAME_OFFSET sb, lb, 90, 4
CASE_SAME_OFFSET sb, lb, 91, 1000
CASE_ADDED_OFFSET sb, lb, 92, 4
CASE_ADDED_OFFSET sb, lb, 93, 1000
CASE_NO_OFFSET sb, lbu, 110
CASE_SAME_OFFSET sb, lbu, 110, 4
CASE_SAME_OFFSET sb, lbu, 111, 1000
CASE_ADDED_OFFSET sb, lbu, 112, 4
CASE_ADDED_OFFSET sb, lbu, 113, 1000
PASS
|
Not-Buddy/OmniBuild | 2,290 | src/ASM_Code/bitwise_ops_arm64.s | // src/ASM_Code/bitwise_ops_arm64.s
// Fast bitwise operations in ARM64 assembly (AArch64) for Linux
.text
.global asm_fast_and
.global asm_fast_or
.global asm_fast_xor
.global asm_fast_shift_left
.global asm_fast_shift_right
.global asm_count_bits
.global asm_reverse_bits
// Fast bitwise AND: asm_fast_and(uint64_t a, uint64_t b) -> uint64_t
asm_fast_and:
and x0, x0, x1
ret
// Fast bitwise OR: asm_fast_or(uint64_t a, uint64_t b) -> uint64_t
asm_fast_or:
orr x0, x0, x1
ret
// Fast bitwise XOR: asm_fast_xor(uint64_t a, uint64_t b) -> uint64_t
asm_fast_xor:
eor x0, x0, x1
ret
// Fast left shift: asm_fast_shift_left(uint64_t value, uint64_t shift) -> uint64_t
asm_fast_shift_left:
// shift amount in x1 (only lower 6 bits used)
lsl x0, x0, x1
ret
// Fast right shift: asm_fast_shift_right(uint64_t value, uint64_t shift) -> uint64_t
asm_fast_shift_right:
// Logical shift right
lsr x0, x0, x1
ret
// Count set bits (population count): asm_count_bits(uint64_t value) -> uint64_t
asm_count_bits:
    // uint64_t asm_count_bits(uint64_t value)
    // AAPCS64. In: x0 = value. Out: x0 = number of set bits. Clobbers x1, x2.
    // Brian Kernighan's algorithm: value &= (value - 1) clears exactly the
    // lowest set bit, so the loop runs popcount(value) times.
    // BUG FIX: the previous body computed (value-1) & ((value-1) << 1),
    // which does not clear the lowest set bit and returned wrong counts.
    mov x1, #0                  // counter = 0
count_loop:
    cbz x0, count_done          // no bits remain -> done
    add x1, x1, #1              // counter++
    sub x2, x0, #1              // x2 = value - 1
    and x0, x0, x2              // value &= value - 1 (clear lowest set bit)
    b count_loop
count_done:
    mov x0, x1                  // return counter
    ret
// Reverse bits in a 64-bit integer: asm_reverse_bits(uint64_t value) -> uint64_t
asm_reverse_bits:
    // uint64_t asm_reverse_bits(uint64_t value)
    // AAPCS64. In: x0 = value. Out: x0 = value with its 64 bits reversed.
    // RBIT is a base AArch64 (ARMv8-A) instruction, so the former
    // 64-iteration shift loop is unnecessary; no other registers touched.
    rbit x0, x0                 // reverse all 64 bits in one instruction
    ret
|
Not-Buddy/OmniBuild | 2,689 | src/ASM_Code/bitwise_ops.s | # src/ASM_Code/bitwise_ops.s
# Fast bitwise operations in x86-64 assembly
.section .text
.globl asm_fast_and
.globl asm_fast_or
.globl asm_fast_xor
.globl asm_fast_shift_left
.globl asm_fast_shift_right
.globl asm_count_bits
.globl asm_reverse_bits
# Fast bitwise AND: asm_fast_and(a: u64, b: u64) -> u64
asm_fast_and:
movq %rdi, %rax # Move first argument (a) to return register
andq %rsi, %rax # Bitwise AND with second argument (b)
ret # Return result in %rax
# Fast bitwise OR: asm_fast_or(a: u64, b: u64) -> u64
asm_fast_or:
movq %rdi, %rax # Move first argument to return register
orq %rsi, %rax # Bitwise OR with second argument
ret
# Fast bitwise XOR: asm_fast_xor(a: u64, b: u64) -> u64
asm_fast_xor:
movq %rdi, %rax # Move first argument to return register
xorq %rsi, %rax # Bitwise XOR with second argument
ret
# Fast left shift: asm_fast_shift_left(value: u64, shift: u64) -> u64
asm_fast_shift_left:
movq %rdi, %rax # Move value to return register
movq %rsi, %rcx # Move shift amount to %rcx (required for shift ops)
salq %cl, %rax # Shift left by %cl bits (lower 8 bits of %rcx)
ret
# Fast right shift: asm_fast_shift_right(value: u64, shift: u64) -> u64
asm_fast_shift_right:
movq %rdi, %rax # Move value to return register
movq %rsi, %rcx # Move shift amount to %rcx
shrq %cl, %rax # Logical right shift by %cl bits
ret
# Count set bits (population count): asm_count_bits(value: u64) -> u64
# Count set bits (population count): asm_count_bits(value: u64) -> u64
# SysV AMD64. In: rdi = value. Out: rax = popcount. Clobbers rcx, rdx, flags.
# Kernighan's algorithm: value &= (value - 1) clears the lowest set bit, so
# the loop iterates once per set bit. (The previous body decremented an
# uninitialized %rcx before overwriting it — dead instruction, removed.)
asm_count_bits:
        movq    %rdi, %rax          # working copy of value
        xorq    %rdx, %rdx          # counter = 0
count_loop:
        testq   %rax, %rax          # any bits left?
        jz      count_done
        incq    %rdx                # counter++
        leaq    -1(%rax), %rcx      # rcx = value - 1 (lea preserves flags)
        andq    %rcx, %rax          # value &= value - 1 (clear lowest set bit)
        jmp     count_loop
count_done:
        movq    %rdx, %rax          # return counter
        ret
# Reverse bits in a 64-bit integer: asm_reverse_bits(value: u64) -> u64
# Reverse bits in a 64-bit integer: asm_reverse_bits(value: u64) -> u64
# SysV AMD64. In: rdi = value. Out: rax = bit-reversed value.
# Clobbers rcx, rdx, flags.
# Each iteration moves the lowest bit of the input into CF (shr) and shifts
# it into the result from the right (rcl), so 64 iterations produce the full
# reversal. BUG FIX: the old body ran at most 63 iterations ($63 + `loop`)
# and bailed out early once the remaining input was zero, so e.g.
# reverse(1) returned 1 instead of 1<<63. It also used the microcoded
# legacy `loop` instruction; dec/jnz is the modern idiom.
asm_reverse_bits:
        movq    %rdi, %rax          # working copy of input
        movq    $64, %rcx           # process all 64 bit positions
        xorq    %rdx, %rdx          # result = 0
reverse_loop:
        shrq    $1, %rax            # lowest input bit -> CF
        rclq    $1, %rdx            # result = (result << 1) | CF
        decq    %rcx
        jnz     reverse_loop        # always run the full 64 iterations
        movq    %rdx, %rax          # return reversed value
        ret
|
o1-labs/proof-systems-vendors | 28,374 | secp256k1-sys/depend/secp256k1/src/asm/field_10x26_arm.s | @ vim: set tabstop=8 softtabstop=8 shiftwidth=8 noexpandtab syntax=armasm:
/***********************************************************************
* Copyright (c) 2014 Wladimir J. van der Laan *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
/*
ARM implementation of field_10x26 inner loops.
Note:
- To avoid unnecessary loads and make use of available registers, two
'passes' have every time been interleaved, with the odd passes accumulating c' and d'
which will be added to c and d respectively in the even passes
*/
.syntax unified
@ eabi attributes - see readelf -A
.eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP
.text
@ Field constants
.set field_R0, 0x3d10
.set field_R1, 0x400
.set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff
.align 2
.global rustsecp256k1_v0_9_2_fe_mul_inner
.type rustsecp256k1_v0_9_2_fe_mul_inner, %function
.hidden rustsecp256k1_v0_9_2_fe_mul_inner
@ Arguments:
@ r0 r Restrict: can overlap with a, not with b
@ r1 a
@ r2 b
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
rustsecp256k1_v0_9_2_fe_mul_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
/******************************************
* Main computation code.
******************************************
Allocation:
r0,r14,r7,r8 scratch
r1 a (pointer)
r2 b (pointer)
r3:r4 c
r5:r6 d
r11:r12 c'
r9:r10 d'
Note: do not write to r[] here, it may overlap with a[]
*/
/* A - interleaved with B */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #9*4] @ b[9]
ldr r0, [r1, #1*4] @ a[1]
umull r5, r6, r7, r8 @ d = a[0] * b[9]
ldr r14, [r2, #8*4] @ b[8]
umull r9, r10, r0, r8 @ d' = a[1] * b[9]
ldr r7, [r1, #2*4] @ a[2]
umlal r5, r6, r0, r14 @ d += a[1] * b[8]
ldr r8, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r14 @ d' += a[2] * b[8]
ldr r0, [r1, #3*4] @ a[3]
umlal r5, r6, r7, r8 @ d += a[2] * b[7]
ldr r14, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r8 @ d' += a[3] * b[7]
ldr r7, [r1, #4*4] @ a[4]
umlal r5, r6, r0, r14 @ d += a[3] * b[6]
ldr r8, [r2, #5*4] @ b[5]
umlal r9, r10, r7, r14 @ d' += a[4] * b[6]
ldr r0, [r1, #5*4] @ a[5]
umlal r5, r6, r7, r8 @ d += a[4] * b[5]
ldr r14, [r2, #4*4] @ b[4]
umlal r9, r10, r0, r8 @ d' += a[5] * b[5]
ldr r7, [r1, #6*4] @ a[6]
umlal r5, r6, r0, r14 @ d += a[5] * b[4]
ldr r8, [r2, #3*4] @ b[3]
umlal r9, r10, r7, r14 @ d' += a[6] * b[4]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r8 @ d += a[6] * b[3]
ldr r14, [r2, #2*4] @ b[2]
umlal r9, r10, r0, r8 @ d' += a[7] * b[3]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r14 @ d += a[7] * b[2]
ldr r8, [r2, #1*4] @ b[1]
umlal r9, r10, r7, r14 @ d' += a[8] * b[2]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r8 @ d += a[8] * b[1]
ldr r14, [r2, #0*4] @ b[0]
umlal r9, r10, r0, r8 @ d' += a[9] * b[1]
ldr r7, [r1, #0*4] @ a[0]
umlal r5, r6, r0, r14 @ d += a[9] * b[0]
@ r7,r14 used in B
bic r0, r5, field_not_M @ t9 = d & M
str r0, [sp, #4 + 4*9]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
/* B */
umull r3, r4, r7, r14 @ c = a[0] * b[0]
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u0 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u0 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t0 = c & M
str r14, [sp, #4 + 0*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u0 * R1
umlal r3, r4, r0, r14
/* C - interleaved with D */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #2*4] @ b[2]
ldr r14, [r2, #1*4] @ b[1]
umull r11, r12, r7, r8 @ c' = a[0] * b[2]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[1] * b[1]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[2] * b[0]
ldr r0, [r1, #3*4] @ a[3]
umlal r5, r6, r7, r14 @ d += a[2] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[3] * b[9]
ldr r7, [r1, #4*4] @ a[4]
umlal r5, r6, r0, r8 @ d += a[3] * b[8]
ldr r14, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r8 @ d' += a[4] * b[8]
ldr r0, [r1, #5*4] @ a[5]
umlal r5, r6, r7, r14 @ d += a[4] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r14 @ d' += a[5] * b[7]
ldr r7, [r1, #6*4] @ a[6]
umlal r5, r6, r0, r8 @ d += a[5] * b[6]
ldr r14, [r2, #5*4] @ b[5]
umlal r9, r10, r7, r8 @ d' += a[6] * b[6]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r14 @ d += a[6] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r9, r10, r0, r14 @ d' += a[7] * b[5]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r8 @ d += a[7] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r9, r10, r7, r8 @ d' += a[8] * b[4]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r9, r10, r0, r14 @ d' += a[9] * b[3]
umlal r5, r6, r0, r8 @ d += a[9] * b[2]
bic r0, r5, field_not_M @ u1 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u1 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t1 = c & M
str r14, [sp, #4 + 1*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u1 * R1
umlal r3, r4, r0, r14
/* D */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u2 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u2 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t2 = c & M
str r14, [sp, #4 + 2*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u2 * R1
umlal r3, r4, r0, r14
/* E - interleaved with F */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #4*4] @ b[4]
umull r11, r12, r7, r8 @ c' = a[0] * b[4]
ldr r8, [r2, #3*4] @ b[3]
umlal r3, r4, r7, r8 @ c += a[0] * b[3]
ldr r7, [r1, #1*4] @ a[1]
umlal r11, r12, r7, r8 @ c' += a[1] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r3, r4, r7, r8 @ c += a[1] * b[2]
ldr r7, [r1, #2*4] @ a[2]
umlal r11, r12, r7, r8 @ c' += a[2] * b[2]
ldr r8, [r2, #1*4] @ b[1]
umlal r3, r4, r7, r8 @ c += a[2] * b[1]
ldr r7, [r1, #3*4] @ a[3]
umlal r11, r12, r7, r8 @ c' += a[3] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r3, r4, r7, r8 @ c += a[3] * b[0]
ldr r7, [r1, #4*4] @ a[4]
umlal r11, r12, r7, r8 @ c' += a[4] * b[0]
ldr r8, [r2, #9*4] @ b[9]
umlal r5, r6, r7, r8 @ d += a[4] * b[9]
ldr r7, [r1, #5*4] @ a[5]
umull r9, r10, r7, r8 @ d' = a[5] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umlal r5, r6, r7, r8 @ d += a[5] * b[8]
ldr r7, [r1, #6*4] @ a[6]
umlal r9, r10, r7, r8 @ d' += a[6] * b[8]
ldr r8, [r2, #7*4] @ b[7]
umlal r5, r6, r7, r8 @ d += a[6] * b[7]
ldr r7, [r1, #7*4] @ a[7]
umlal r9, r10, r7, r8 @ d' += a[7] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r5, r6, r7, r8 @ d += a[7] * b[6]
ldr r7, [r1, #8*4] @ a[8]
umlal r9, r10, r7, r8 @ d' += a[8] * b[6]
ldr r8, [r2, #5*4] @ b[5]
umlal r5, r6, r7, r8 @ d += a[8] * b[5]
ldr r7, [r1, #9*4] @ a[9]
umlal r9, r10, r7, r8 @ d' += a[9] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r5, r6, r7, r8 @ d += a[9] * b[4]
bic r0, r5, field_not_M @ u3 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u3 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t3 = c & M
str r14, [sp, #4 + 3*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u3 * R1
umlal r3, r4, r0, r14
/* F */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u4 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u4 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t4 = c & M
str r14, [sp, #4 + 4*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u4 * R1
umlal r3, r4, r0, r14
/* G - interleaved with H */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #6*4] @ b[6]
ldr r14, [r2, #5*4] @ b[5]
umull r11, r12, r7, r8 @ c' = a[0] * b[6]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r11, r12, r0, r14 @ c' += a[1] * b[5]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r11, r12, r7, r8 @ c' += a[2] * b[4]
ldr r0, [r1, #3*4] @ a[3]
umlal r3, r4, r7, r14 @ c += a[2] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r11, r12, r0, r14 @ c' += a[3] * b[3]
ldr r7, [r1, #4*4] @ a[4]
umlal r3, r4, r0, r8 @ c += a[3] * b[2]
ldr r14, [r2, #1*4] @ b[1]
umlal r11, r12, r7, r8 @ c' += a[4] * b[2]
ldr r0, [r1, #5*4] @ a[5]
umlal r3, r4, r7, r14 @ c += a[4] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[5] * b[1]
ldr r7, [r1, #6*4] @ a[6]
umlal r3, r4, r0, r8 @ c += a[5] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[6] * b[0]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r14 @ d += a[6] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[7] * b[9]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r8 @ d += a[7] * b[8]
ldr r14, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r8 @ d' += a[8] * b[8]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r14 @ d' += a[9] * b[7]
umlal r5, r6, r0, r8 @ d += a[9] * b[6]
bic r0, r5, field_not_M @ u5 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u5 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t5 = c & M
str r14, [sp, #4 + 5*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u5 * R1
umlal r3, r4, r0, r14
/* H */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u6 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u6 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t6 = c & M
str r14, [sp, #4 + 6*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u6 * R1
umlal r3, r4, r0, r14
/* I - interleaved with J */
ldr r8, [r2, #8*4] @ b[8]
ldr r7, [r1, #0*4] @ a[0]
ldr r14, [r2, #7*4] @ b[7]
umull r11, r12, r7, r8 @ c' = a[0] * b[8]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r11, r12, r0, r14 @ c' += a[1] * b[7]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[6]
ldr r14, [r2, #5*4] @ b[5]
umlal r11, r12, r7, r8 @ c' += a[2] * b[6]
ldr r0, [r1, #3*4] @ a[3]
umlal r3, r4, r7, r14 @ c += a[2] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r11, r12, r0, r14 @ c' += a[3] * b[5]
ldr r7, [r1, #4*4] @ a[4]
umlal r3, r4, r0, r8 @ c += a[3] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r11, r12, r7, r8 @ c' += a[4] * b[4]
ldr r0, [r1, #5*4] @ a[5]
umlal r3, r4, r7, r14 @ c += a[4] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r11, r12, r0, r14 @ c' += a[5] * b[3]
ldr r7, [r1, #6*4] @ a[6]
umlal r3, r4, r0, r8 @ c += a[5] * b[2]
ldr r14, [r2, #1*4] @ b[1]
umlal r11, r12, r7, r8 @ c' += a[6] * b[2]
ldr r0, [r1, #7*4] @ a[7]
umlal r3, r4, r7, r14 @ c += a[6] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[7] * b[1]
ldr r7, [r1, #8*4] @ a[8]
umlal r3, r4, r0, r8 @ c += a[7] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[8] * b[0]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[9] * b[9]
umlal r5, r6, r0, r8 @ d += a[9] * b[8]
bic r0, r5, field_not_M @ u7 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u7 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t7 = c & M
str r14, [sp, #4 + 7*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u7 * R1
umlal r3, r4, r0, r14
/* J */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u8 = d & M
str r0, [sp, #4 + 8*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u8 * R0
umlal r3, r4, r0, r14
/******************************************
* compute and write back result
******************************************
Allocation:
r0 r
r3:r4 c
r5:r6 d
r7 t0
r8 t1
r9 t2
r11 u8
r12 t9
r1,r2,r10,r14 scratch
Note: do not read from a[] after here, it may overlap with r[]
*/
ldr r0, [sp, #0]
add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9
ldmia r1, {r2,r7,r8,r9,r10,r11,r12}
add r1, r0, #3*4
stmia r1, {r2,r7,r8,r9,r10}
bic r2, r3, field_not_M @ r[8] = c & M
str r2, [r0, #8*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u8 * R1
umlal r3, r4, r11, r14
movw r14, field_R0 @ c += d * R0
umlal r3, r4, r5, r14
adds r3, r3, r12 @ c += t9
adc r4, r4, #0
add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2
ldmia r1, {r7,r8,r9}
ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4)
str r2, [r0, #9*4]
mov r3, r3, lsr #22 @ c >>= 22
orr r3, r3, r4, asl #10
mov r4, r4, lsr #22
movw r14, field_R1 << 4 @ c += d * (R1 << 4)
umlal r3, r4, r5, r14
movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add)
umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4)
adds r5, r5, r7 @ d.lo += t0
mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4)
adc r6, r6, 0 @ d.hi += carry
bic r2, r5, field_not_M @ r[0] = d & M
str r2, [r0, #0*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add)
umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4)
adds r5, r5, r8 @ d.lo += t1
adc r6, r6, #0 @ d.hi += carry
adds r5, r5, r1 @ d.lo += tmp.lo
mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4)
adc r6, r6, r2 @ d.hi += carry + tmp.hi
bic r2, r5, field_not_M @ r[1] = d & M
str r2, [r0, #1*4]
mov r5, r5, lsr #26 @ d >>= 26 (ignore hi)
orr r5, r5, r6, asl #6
add r5, r5, r9 @ d += t2
str r5, [r0, #2*4] @ r[2] = d
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size rustsecp256k1_v0_9_2_fe_mul_inner, .-rustsecp256k1_v0_9_2_fe_mul_inner
.align 2
.global rustsecp256k1_v0_9_2_fe_sqr_inner
.type rustsecp256k1_v0_9_2_fe_sqr_inner, %function
.hidden rustsecp256k1_v0_9_2_fe_sqr_inner
@ Arguments:
@ r0 r Can overlap with a
@ r1 a
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
rustsecp256k1_v0_9_2_fe_sqr_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
/******************************************
* Main computation code.
******************************************
Allocation:
r0,r14,r2,r7,r8 scratch
r1 a (pointer)
r3:r4 c
r5:r6 d
r11:r12 c'
r9:r10 d'
Note: do not write to r[] here, it may overlap with a[]
*/
/* A interleaved with B */
ldr r0, [r1, #1*4] @ a[1]*2
ldr r7, [r1, #0*4] @ a[0]
mov r0, r0, asl #1
ldr r14, [r1, #9*4] @ a[9]
umull r3, r4, r7, r7 @ c = a[0] * a[0]
ldr r8, [r1, #8*4] @ a[8]
mov r7, r7, asl #1
umull r5, r6, r7, r14 @ d = a[0]*2 * a[9]
ldr r7, [r1, #2*4] @ a[2]*2
umull r9, r10, r0, r14 @ d' = a[1]*2 * a[9]
ldr r14, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r8 @ d += a[1]*2 * a[8]
mov r7, r7, asl #1
ldr r0, [r1, #3*4] @ a[3]*2
umlal r9, r10, r7, r8 @ d' += a[2]*2 * a[8]
ldr r8, [r1, #6*4] @ a[6]
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[7]
mov r0, r0, asl #1
ldr r7, [r1, #4*4] @ a[4]*2
umlal r9, r10, r0, r14 @ d' += a[3]*2 * a[7]
ldr r14, [r1, #5*4] @ a[5]
mov r7, r7, asl #1
umlal r5, r6, r0, r8 @ d += a[3]*2 * a[6]
umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[6]
umlal r5, r6, r7, r14 @ d += a[4]*2 * a[5]
umlal r9, r10, r14, r14 @ d' += a[5] * a[5]
bic r0, r5, field_not_M @ t9 = d & M
str r0, [sp, #4 + 9*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
/* B */
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u0 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u0 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t0 = c & M
str r14, [sp, #4 + 0*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u0 * R1
umlal r3, r4, r0, r14
/* C interleaved with D */
ldr r0, [r1, #0*4] @ a[0]*2
ldr r14, [r1, #1*4] @ a[1]
mov r0, r0, asl #1
ldr r8, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r14 @ c += a[0]*2 * a[1]
mov r7, r8, asl #1 @ a[2]*2
umull r11, r12, r14, r14 @ c' = a[1] * a[1]
ldr r14, [r1, #9*4] @ a[9]
umlal r11, r12, r0, r8 @ c' += a[0]*2 * a[2]
ldr r0, [r1, #3*4] @ a[3]*2
ldr r8, [r1, #8*4] @ a[8]
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[9]
mov r0, r0, asl #1
ldr r7, [r1, #4*4] @ a[4]*2
umull r9, r10, r0, r14 @ d' = a[3]*2 * a[9]
ldr r14, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r8 @ d += a[3]*2 * a[8]
mov r7, r7, asl #1
ldr r0, [r1, #5*4] @ a[5]*2
umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[8]
ldr r8, [r1, #6*4] @ a[6]
mov r0, r0, asl #1
umlal r5, r6, r7, r14 @ d += a[4]*2 * a[7]
umlal r9, r10, r0, r14 @ d' += a[5]*2 * a[7]
umlal r5, r6, r0, r8 @ d += a[5]*2 * a[6]
umlal r9, r10, r8, r8 @ d' += a[6] * a[6]
bic r0, r5, field_not_M @ u1 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u1 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t1 = c & M
str r14, [sp, #4 + 1*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u1 * R1
umlal r3, r4, r0, r14
/* D */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u2 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u2 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t2 = c & M
str r14, [sp, #4 + 2*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u2 * R1
umlal r3, r4, r0, r14
/* E interleaved with F */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
ldr r14, [r1, #2*4] @ a[2]
mov r7, r7, asl #1
ldr r8, [r1, #3*4] @ a[3]
ldr r2, [r1, #4*4]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[3]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[4]
mov r2, r2, asl #1 @ a[4]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[3]
ldr r8, [r1, #9*4] @ a[9]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[2]
ldr r0, [r1, #5*4] @ a[5]*2
umlal r11, r12, r14, r14 @ c' += a[2] * a[2]
ldr r14, [r1, #8*4] @ a[8]
mov r0, r0, asl #1
umlal r5, r6, r2, r8 @ d += a[4]*2 * a[9]
ldr r7, [r1, #6*4] @ a[6]*2
umull r9, r10, r0, r8 @ d' = a[5]*2 * a[9]
mov r7, r7, asl #1
ldr r8, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r14 @ d += a[5]*2 * a[8]
umlal r9, r10, r7, r14 @ d' += a[6]*2 * a[8]
umlal r5, r6, r7, r8 @ d += a[6]*2 * a[7]
umlal r9, r10, r8, r8 @ d' += a[7] * a[7]
bic r0, r5, field_not_M @ u3 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u3 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t3 = c & M
str r14, [sp, #4 + 3*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u3 * R1
umlal r3, r4, r0, r14
/* F */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u4 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u4 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t4 = c & M
str r14, [sp, #4 + 4*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u4 * R1
umlal r3, r4, r0, r14
/* G interleaved with H */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
mov r7, r7, asl #1
ldr r8, [r1, #5*4] @ a[5]
ldr r2, [r1, #6*4] @ a[6]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[5]
ldr r14, [r1, #4*4] @ a[4]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[6]
ldr r7, [r1, #2*4] @ a[2]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[5]
mov r7, r7, asl #1
ldr r8, [r1, #3*4] @ a[3]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[4]
mov r0, r2, asl #1 @ a[6]*2
umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[4]
ldr r14, [r1, #9*4] @ a[9]
umlal r3, r4, r7, r8 @ c += a[2]*2 * a[3]
ldr r7, [r1, #7*4] @ a[7]*2
umlal r11, r12, r8, r8 @ c' += a[3] * a[3]
mov r7, r7, asl #1
ldr r8, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r14 @ d += a[6]*2 * a[9]
umull r9, r10, r7, r14 @ d' = a[7]*2 * a[9]
umlal r5, r6, r7, r8 @ d += a[7]*2 * a[8]
umlal r9, r10, r8, r8 @ d' += a[8] * a[8]
bic r0, r5, field_not_M @ u5 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u5 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t5 = c & M
str r14, [sp, #4 + 5*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u5 * R1
umlal r3, r4, r0, r14
/* H */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u6 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u6 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t6 = c & M
str r14, [sp, #4 + 6*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u6 * R1
umlal r3, r4, r0, r14
/* I interleaved with J */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
mov r7, r7, asl #1
ldr r8, [r1, #7*4] @ a[7]
ldr r2, [r1, #8*4] @ a[8]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[7]
ldr r14, [r1, #6*4] @ a[6]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[8]
ldr r7, [r1, #2*4] @ a[2]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[7]
ldr r8, [r1, #5*4] @ a[5]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[6]
ldr r0, [r1, #3*4] @ a[3]*2
mov r7, r7, asl #1
umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[6]
ldr r14, [r1, #4*4] @ a[4]
mov r0, r0, asl #1
umlal r3, r4, r7, r8 @ c += a[2]*2 * a[5]
mov r2, r2, asl #1 @ a[8]*2
umlal r11, r12, r0, r8 @ c' += a[3]*2 * a[5]
umlal r3, r4, r0, r14 @ c += a[3]*2 * a[4]
umlal r11, r12, r14, r14 @ c' += a[4] * a[4]
ldr r8, [r1, #9*4] @ a[9]
umlal r5, r6, r2, r8 @ d += a[8]*2 * a[9]
@ r8 will be used in J
bic r0, r5, field_not_M @ u7 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u7 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t7 = c & M
str r14, [sp, #4 + 7*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u7 * R1
umlal r3, r4, r0, r14
/* J */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
umlal r5, r6, r8, r8 @ d += a[9] * a[9]
bic r0, r5, field_not_M @ u8 = d & M
str r0, [sp, #4 + 8*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u8 * R0
umlal r3, r4, r0, r14
/******************************************
* compute and write back result
******************************************
Allocation:
r0 r
r3:r4 c
r5:r6 d
r7 t0
r8 t1
r9 t2
r11 u8
r12 t9
r1,r2,r10,r14 scratch
Note: do not read from a[] after here, it may overlap with r[]
*/
ldr r0, [sp, #0]
add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9
ldmia r1, {r2,r7,r8,r9,r10,r11,r12}
add r1, r0, #3*4
stmia r1, {r2,r7,r8,r9,r10}
bic r2, r3, field_not_M @ r[8] = c & M
str r2, [r0, #8*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u8 * R1
umlal r3, r4, r11, r14
movw r14, field_R0 @ c += d * R0
umlal r3, r4, r5, r14
adds r3, r3, r12 @ c += t9
adc r4, r4, #0
add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2
ldmia r1, {r7,r8,r9}
ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4)
str r2, [r0, #9*4]
mov r3, r3, lsr #22 @ c >>= 22
orr r3, r3, r4, asl #10
mov r4, r4, lsr #22
movw r14, field_R1 << 4 @ c += d * (R1 << 4)
umlal r3, r4, r5, r14
movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add)
umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4)
adds r5, r5, r7 @ d.lo += t0
mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4)
adc r6, r6, 0 @ d.hi += carry
bic r2, r5, field_not_M @ r[0] = d & M
str r2, [r0, #0*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add)
umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4)
adds r5, r5, r8 @ d.lo += t1
adc r6, r6, #0 @ d.hi += carry
adds r5, r5, r1 @ d.lo += tmp.lo
mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4)
adc r6, r6, r2 @ d.hi += carry + tmp.hi
bic r2, r5, field_not_M @ r[1] = d & M
str r2, [r0, #1*4]
mov r5, r5, lsr #26 @ d >>= 26 (ignore hi)
orr r5, r5, r6, asl #6
add r5, r5, r9 @ d += t2
str r5, [r0, #2*4] @ r[2] = d
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size rustsecp256k1_v0_9_2_fe_sqr_inner, .-rustsecp256k1_v0_9_2_fe_sqr_inner
// Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd1__TBB_full_fence#
.global __TBB_machine_fetchadd1__TBB_full_fence#
__TBB_machine_fetchadd1__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd1acquire
}
.endp __TBB_machine_fetchadd1__TBB_full_fence#
.proc __TBB_machine_fetchadd1acquire#
.global __TBB_machine_fetchadd1acquire#
__TBB_machine_fetchadd1acquire:
ld1 r9=[r32]
;;
Retry_1acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1__TBB_full_fence#
.global __TBB_machine_fetchstore1__TBB_full_fence#
__TBB_machine_fetchstore1__TBB_full_fence:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1__TBB_full_fence#
.proc __TBB_machine_fetchstore1acquire#
.global __TBB_machine_fetchstore1acquire#
__TBB_machine_fetchstore1acquire:
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp1__TBB_full_fence#
.global __TBB_machine_cmpswp1__TBB_full_fence#
__TBB_machine_cmpswp1__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp1acquire
}
.endp __TBB_machine_cmpswp1__TBB_full_fence#
.proc __TBB_machine_cmpswp1acquire#
.global __TBB_machine_cmpswp1acquire#
__TBB_machine_cmpswp1acquire:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd2__TBB_full_fence#
.global __TBB_machine_fetchadd2__TBB_full_fence#
__TBB_machine_fetchadd2__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd2acquire
}
.endp __TBB_machine_fetchadd2__TBB_full_fence#
.proc __TBB_machine_fetchadd2acquire#
.global __TBB_machine_fetchadd2acquire#
__TBB_machine_fetchadd2acquire:
ld2 r9=[r32]
;;
Retry_2acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2acquire
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2__TBB_full_fence#
.global __TBB_machine_fetchstore2__TBB_full_fence#
__TBB_machine_fetchstore2__TBB_full_fence:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2__TBB_full_fence#
.proc __TBB_machine_fetchstore2acquire#
.global __TBB_machine_fetchstore2acquire#
__TBB_machine_fetchstore2acquire:
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp2__TBB_full_fence#
.global __TBB_machine_cmpswp2__TBB_full_fence#
__TBB_machine_cmpswp2__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp2acquire
}
.endp __TBB_machine_cmpswp2__TBB_full_fence#
.proc __TBB_machine_cmpswp2acquire#
.global __TBB_machine_cmpswp2acquire#
__TBB_machine_cmpswp2acquire:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd4__TBB_full_fence#
.global __TBB_machine_fetchadd4__TBB_full_fence#
__TBB_machine_fetchadd4__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd4acquire
}
.endp __TBB_machine_fetchadd4__TBB_full_fence#
.proc __TBB_machine_fetchadd4acquire#
.global __TBB_machine_fetchadd4acquire#
__TBB_machine_fetchadd4acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_4acquire
(p8) br.cond.dpnt Dec_4acquire
;;
ld4 r9=[r32]
;;
Retry_4acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg4.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_4acquire
br.ret.sptk.many b0
Inc_4acquire:
fetchadd4.acq r8=[r32],1
br.ret.sptk.many b0
Dec_4acquire:
fetchadd4.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore4__TBB_full_fence#
.global __TBB_machine_fetchstore4__TBB_full_fence#
__TBB_machine_fetchstore4__TBB_full_fence:
mf
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4__TBB_full_fence#
.proc __TBB_machine_fetchstore4acquire#
.global __TBB_machine_fetchstore4acquire#
__TBB_machine_fetchstore4acquire:
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp4__TBB_full_fence#
.global __TBB_machine_cmpswp4__TBB_full_fence#
__TBB_machine_cmpswp4__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp4acquire
}
.endp __TBB_machine_cmpswp4__TBB_full_fence#
.proc __TBB_machine_cmpswp4acquire#
.global __TBB_machine_cmpswp4acquire#
__TBB_machine_cmpswp4acquire:
zxt4 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg4.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchadd8__TBB_full_fence#
.global __TBB_machine_fetchadd8__TBB_full_fence#
__TBB_machine_fetchadd8__TBB_full_fence:
{
mf
br __TBB_machine_fetchadd8acquire
}
.endp __TBB_machine_fetchadd8__TBB_full_fence#
.proc __TBB_machine_fetchadd8acquire#
.global __TBB_machine_fetchadd8acquire#
__TBB_machine_fetchadd8acquire:
cmp.eq p6,p0=1,r33
cmp.eq p8,p0=-1,r33
(p6) br.cond.dptk Inc_8acquire
(p8) br.cond.dpnt Dec_8acquire
;;
ld8 r9=[r32]
;;
Retry_8acquire:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg8.acq r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_8acquire
br.ret.sptk.many b0
Inc_8acquire:
fetchadd8.acq r8=[r32],1
br.ret.sptk.many b0
Dec_8acquire:
fetchadd8.acq r8=[r32],-1
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8acquire#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore8__TBB_full_fence#
.global __TBB_machine_fetchstore8__TBB_full_fence#
__TBB_machine_fetchstore8__TBB_full_fence:
mf
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8__TBB_full_fence#
.proc __TBB_machine_fetchstore8acquire#
.global __TBB_machine_fetchstore8acquire#
__TBB_machine_fetchstore8acquire:
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8acquire#
# 88 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_cmpswp8__TBB_full_fence#
.global __TBB_machine_cmpswp8__TBB_full_fence#
__TBB_machine_cmpswp8__TBB_full_fence:
{
mf
br __TBB_machine_cmpswp8acquire
}
.endp __TBB_machine_cmpswp8__TBB_full_fence#
.proc __TBB_machine_cmpswp8acquire#
.global __TBB_machine_cmpswp8acquire#
__TBB_machine_cmpswp8acquire:
mov ar.ccv=r34
;;
cmpxchg8.acq r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8acquire#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd1release#
.global __TBB_machine_fetchadd1release#
__TBB_machine_fetchadd1release:
ld1 r9=[r32]
;;
Retry_1release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg1.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_1release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd1release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore1release#
.global __TBB_machine_fetchstore1release#
__TBB_machine_fetchstore1release:
mf
;;
xchg1 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore1release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp1release#
.global __TBB_machine_cmpswp1release#
__TBB_machine_cmpswp1release:
zxt1 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg1.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp1release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
.proc __TBB_machine_fetchadd2release#
.global __TBB_machine_fetchadd2release#
__TBB_machine_fetchadd2release:
ld2 r9=[r32]
;;
Retry_2release:
mov ar.ccv=r9
mov r8=r9;
add r10=r9,r33
;;
cmpxchg2.rel r9=[r32],r10,ar.ccv
;;
cmp.ne p7,p0=r8,r9
(p7) br.cond.dpnt Retry_2release
br.ret.sptk.many b0
# 49 "<stdin>"
.endp __TBB_machine_fetchadd2release#
# 62 "<stdin>"
.section .text
.align 16
.proc __TBB_machine_fetchstore2release#
.global __TBB_machine_fetchstore2release#
__TBB_machine_fetchstore2release:
mf
;;
xchg2 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore2release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
.proc __TBB_machine_cmpswp2release#
.global __TBB_machine_cmpswp2release#
__TBB_machine_cmpswp2release:
zxt2 r34=r34
;;
mov ar.ccv=r34
;;
cmpxchg2.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp2release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// uint32_t __TBB_machine_fetchadd4release(volatile void* ptr /*r32*/, intptr_t addend /*r33*/)
// Atomic 32-bit fetch-and-add with release ordering; returns the old value in r8.
// Fast path: addend == +/-1 can use the native fetchadd4.rel instruction
// (which only accepts small immediate increments); otherwise fall back to
// a cmpxchg retry loop.
.proc __TBB_machine_fetchadd4release#
.global __TBB_machine_fetchadd4release#
__TBB_machine_fetchadd4release:
cmp.eq p6,p0=1,r33 // p6 = (addend == 1)
cmp.eq p8,p0=-1,r33 // p8 = (addend == -1)
(p6) br.cond.dptk Inc_4release // increment: likely, use native fetchadd
(p8) br.cond.dpnt Dec_4release // decrement: use native fetchadd
;;
ld4 r9=[r32] // general case: r9 = current value
;;
Retry_4release:
mov ar.ccv=r9 // comparand = observed value
mov r8=r9; // r8 = candidate return value (old value)
add r10=r9,r33 // r10 = old + addend
;;
cmpxchg4.rel r9=[r32],r10,ar.ccv // store iff unchanged; r9 = witnessed value
;;
cmp.ne p7,p0=r8,r9 // lost the race?
(p7) br.cond.dpnt Retry_4release
br.ret.sptk.many b0
Inc_4release:
fetchadd4.rel r8=[r32],1 // native atomic increment; r8 = old value
br.ret.sptk.many b0
Dec_4release:
fetchadd4.rel r8=[r32],-1 // native atomic decrement; r8 = old value
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd4release#
# 62 "<stdin>"
.section .text
.align 16
// uint32_t __TBB_machine_fetchstore4release(volatile void* ptr /*r32*/, uint32_t value /*r33*/)
// Atomic 32-bit exchange; returns the old value in r8.
.proc __TBB_machine_fetchstore4release#
.global __TBB_machine_fetchstore4release#
__TBB_machine_fetchstore4release:
mf // xchg has acquire-only semantics; mf supplies the release fence
;;
xchg4 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore4release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// uint32_t __TBB_machine_cmpswp4release(volatile void* ptr /*r32*/, uint32_t new /*r33*/, uint32_t comparand /*r34*/)
// 32-bit compare-and-swap with release ordering; returns the old value in r8.
.proc __TBB_machine_cmpswp4release#
.global __TBB_machine_cmpswp4release#
__TBB_machine_cmpswp4release:
zxt4 r34=r34 // zero-extend comparand for the full-width ar.ccv compare
;;
mov ar.ccv=r34
;;
cmpxchg4.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp4release#
// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
# 1 "<stdin>"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "<stdin>"
.section .text
.align 16
# 19 "<stdin>"
// uint64_t __TBB_machine_fetchadd8release(volatile void* ptr /*r32*/, intptr_t addend /*r33*/)
// Atomic 64-bit fetch-and-add with release ordering; returns the old value in r8.
// Same structure as the 4-byte variant: native fetchadd8.rel fast path for +/-1,
// cmpxchg retry loop for arbitrary addends.
.proc __TBB_machine_fetchadd8release#
.global __TBB_machine_fetchadd8release#
__TBB_machine_fetchadd8release:
cmp.eq p6,p0=1,r33 // p6 = (addend == 1)
cmp.eq p8,p0=-1,r33 // p8 = (addend == -1)
(p6) br.cond.dptk Inc_8release
(p8) br.cond.dpnt Dec_8release
;;
ld8 r9=[r32] // general case: r9 = current value
;;
Retry_8release:
mov ar.ccv=r9 // comparand = observed value
mov r8=r9; // r8 = candidate return value (old value)
add r10=r9,r33 // r10 = old + addend
;;
cmpxchg8.rel r9=[r32],r10,ar.ccv // store iff unchanged; r9 = witnessed value
;;
cmp.ne p7,p0=r8,r9 // lost the race?
(p7) br.cond.dpnt Retry_8release
br.ret.sptk.many b0
Inc_8release:
fetchadd8.rel r8=[r32],1 // native atomic increment; r8 = old value
br.ret.sptk.many b0
Dec_8release:
fetchadd8.rel r8=[r32],-1 // native atomic decrement; r8 = old value
br.ret.sptk.many b0
.endp __TBB_machine_fetchadd8release#
# 62 "<stdin>"
.section .text
.align 16
// uint64_t __TBB_machine_fetchstore8release(volatile void* ptr /*r32*/, uint64_t value /*r33*/)
// Atomic 64-bit exchange; returns the old value in r8.
.proc __TBB_machine_fetchstore8release#
.global __TBB_machine_fetchstore8release#
__TBB_machine_fetchstore8release:
mf // xchg has acquire-only semantics; mf supplies the release fence
;;
xchg8 r8=[r32],r33
br.ret.sptk.many b0
.endp __TBB_machine_fetchstore8release#
# 88 "<stdin>"
.section .text
.align 16
# 101 "<stdin>"
// uint64_t __TBB_machine_cmpswp8release(volatile void* ptr /*r32*/, uint64_t new /*r33*/, uint64_t comparand /*r34*/)
// 64-bit compare-and-swap with release ordering; returns the old value in r8.
// Full register width, so no zero-extension of the comparand is needed here.
.proc __TBB_machine_cmpswp8release#
.global __TBB_machine_cmpswp8release#
__TBB_machine_cmpswp8release:
mov ar.ccv=r34
;;
cmpxchg8.rel r8=[r32],r33,ar.ccv
br.ret.sptk.many b0
.endp __TBB_machine_cmpswp8release#
|
Obed-Ojingwa/Retail_Sales_Analysis | 1,304 | sales_env/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/log2.s | // Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
.section .text
.align 16
// unsigned long __TBB_machine_lg( unsigned long x );
// r32 = x
// Computes floor(log2(x)) by first "smearing" the most significant set bit
// into every lower bit position (shift amounts 1,2,3,5,8,13,21,34,55 whose
// running sums cover all 64 bits), then counting the set bits and
// subtracting one.
// NOTE(review): for x == 0 this returns popcnt(0)-1 == -1; callers are
// presumably required to pass x != 0 — confirm against the C declaration.
.proc __TBB_machine_lg#
.global __TBB_machine_lg#
__TBB_machine_lg:
shr r16=r32,1 // .x
;;
shr r17=r32,2 // ..x
or r32=r32,r16 // xx
;;
shr r16=r32,3 // ...xx
or r32=r32,r17 // xxx
;;
shr r17=r32,5 // .....xxx
or r32=r32,r16 // xxxxx
;;
shr r16=r32,8 // ........xxxxx
or r32=r32,r17 // xxxxxxxx
;;
shr r17=r32,13
or r32=r32,r16 // 13x
;;
shr r16=r32,21
or r32=r32,r17 // 21x
;;
shr r17=r32,34
or r32=r32,r16 // 34x
;;
shr r16=r32,55
or r32=r32,r17 // 55x
;;
or r32=r32,r16 // 64x: all bits at or below the MSB are now set
;;
popcnt r8=r32 // number of set bits == MSB index + 1
;;
add r8=-1,r8 // r8 = floor(log2(x))
br.ret.sptk.many b0
.endp __TBB_machine_lg#
|
Obed-Ojingwa/Retail_Sales_Analysis | 1,270 | sales_env/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/lock_byte.s | // Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Support for class TinyLock
.section .text
.align 16
// unsigned int __TBB_machine_trylockbyte( byte& flag );
// r32 = address of flag
// Attempts a single acquisition of a byte-sized spin-lock flag.
// Returns 1 in r8 if the lock was taken, 0 if it was already held.
// The initial plain read avoids the more expensive cmpxchg when the
// lock is visibly busy.
.proc __TBB_machine_trylockbyte#
.global __TBB_machine_trylockbyte#
ADDRESS_OF_FLAG=r32
RETCODE=r8
FLAG=r9
BUSY=r10
SCRATCH=r11
__TBB_machine_trylockbyte:
ld1.acq FLAG=[ADDRESS_OF_FLAG] // acquire-load current flag value
mov BUSY=1
mov RETCODE=0 // assume failure
;;
cmp.ne p6,p0=0,FLAG // lock already held?
mov ar.ccv=r0 // comparand 0 = "unlocked"
(p6) br.ret.sptk.many b0 // busy: return 0 without attempting cmpxchg
;;
cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock
;;
cmp.eq p6,p0=0,SCRATCH // old value was 0 => we took the lock
;;
(p6) mov RETCODE=1
br.ret.sptk.many b0
.endp __TBB_machine_trylockbyte#
|
Obed-Ojingwa/Retail_Sales_Analysis | 2,687 | sales_env/lib/python3.12/site-packages/prophet/stan_model/cmdstan-2.33.1/stan/lib/stan_math/lib/tbb_2020.3/src/tbb/ia64-gas/ia64_misc.s | // Copyright (c) 2005-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// RSE backing store pointer retrieval
.section .text
.align 16
// void* __TBB_get_bsp(void) -- returns the RSE backing store pointer (ar.bsp).
.proc __TBB_get_bsp#
.global __TBB_get_bsp#
__TBB_get_bsp:
mov r8=ar.bsp
br.ret.sptk.many b0
.endp __TBB_get_bsp#
.section .text
.align 16
// uint64_t __TBB_machine_load8_relaxed(const volatile void* ptr /*r32*/)
// Plain (relaxed-ordering) 64-bit load; atomicity comes from the single ld8.
.proc __TBB_machine_load8_relaxed#
.global __TBB_machine_load8_relaxed#
__TBB_machine_load8_relaxed:
ld8 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load8_relaxed#
.section .text
.align 16
// void __TBB_machine_store8_relaxed(volatile void* ptr /*r32*/, uint64_t value /*r33*/)
.proc __TBB_machine_store8_relaxed#
.global __TBB_machine_store8_relaxed#
__TBB_machine_store8_relaxed:
st8 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store8_relaxed#
.section .text
.align 16
// uint32_t __TBB_machine_load4_relaxed(const volatile void* ptr /*r32*/)
.proc __TBB_machine_load4_relaxed#
.global __TBB_machine_load4_relaxed#
__TBB_machine_load4_relaxed:
ld4 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load4_relaxed#
.section .text
.align 16
// void __TBB_machine_store4_relaxed(volatile void* ptr /*r32*/, uint32_t value /*r33*/)
.proc __TBB_machine_store4_relaxed#
.global __TBB_machine_store4_relaxed#
__TBB_machine_store4_relaxed:
st4 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store4_relaxed#
.section .text
.align 16
// uint16_t __TBB_machine_load2_relaxed(const volatile void* ptr /*r32*/)
.proc __TBB_machine_load2_relaxed#
.global __TBB_machine_load2_relaxed#
__TBB_machine_load2_relaxed:
ld2 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load2_relaxed#
.section .text
.align 16
// void __TBB_machine_store2_relaxed(volatile void* ptr /*r32*/, uint16_t value /*r33*/)
.proc __TBB_machine_store2_relaxed#
.global __TBB_machine_store2_relaxed#
__TBB_machine_store2_relaxed:
st2 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store2_relaxed#
.section .text
.align 16
// uint8_t __TBB_machine_load1_relaxed(const volatile void* ptr /*r32*/)
.proc __TBB_machine_load1_relaxed#
.global __TBB_machine_load1_relaxed#
__TBB_machine_load1_relaxed:
ld1 r8=[r32]
br.ret.sptk.many b0
.endp __TBB_machine_load1_relaxed#
.section .text
.align 16
// void __TBB_machine_store1_relaxed(volatile void* ptr /*r32*/, uint8_t value /*r33*/)
.proc __TBB_machine_store1_relaxed#
.global __TBB_machine_store1_relaxed#
__TBB_machine_store1_relaxed:
st1 [r32]=r33
br.ret.sptk.many b0
.endp __TBB_machine_store1_relaxed#
|
oknotokcomputer/libcrosvm | 1,406 | kernel_loader/src/test_elf.S | # Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build instructions:
# x86_64-linux-gnu-as test_elf.S -o test_elf.o
# x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld
.intel_syntax noprefix
.section .rodata
# Minimal freestanding x86-64 test payload: writes "Hello world!\n" to the
# COM1 serial port byte-by-byte, polling the Line Status Register (COM1+5,
# bit 5 = transmit-holding-register empty) before each byte, then executes
# int3 so the hosting VMM sees a vcpu exit.
hello_world:
.string "Hello world!\n"
.set hello_size, .-hello_world
.text
.globl _start
_start:
lea rsi, [rip + hello_world] # rsi -> message string
mov rcx, hello_size # rcx = length of message
mov dx, 0x3F8 # dx = COM1 port
.print_loop:
# Wait for the transmit buffer to be empty by polling the line status.
add dx, 5 # dx = line status register
.wait_empty:
in al, dx # read line status
test al, 0x20 # check buffer empty flag
jz .wait_empty # keep waiting if flag is not set
.wait_done: # NOTE(review): unused fall-through label; kept for readability
sub dx, 5 # dx = data register
# Load a byte of the message and send it to the serial port.
lodsb # load message byte from RSI to AL
out dx, al # send byte to serial port
dec rcx # rcx--
jnz .print_loop # repeat if rcx != 0
.done:
int3 # cause vcpu to exit
|
OluwamayowaMusa/kratos | 4,912 | src/boot.s | /* Declare constants for the multiboot header. */
.set ALIGN, 1<<0 /* align loaded modules on page boundaries */
.set MEMINFO, 1<<1 /* provide memory map */
.set FLAGS, ALIGN | MEMINFO /* this is the Multiboot 'flag' field */
.set MAGIC, 0x1BADB002 /* 'magic number' lets bootloader find the header */
.set CHECKSUM, -(MAGIC + FLAGS) /* checksum of above, to prove we are multiboot */
/*
Declare a multiboot header that marks the program as a kernel. These are magic
values that are documented in the multiboot standard. The bootloader will
search for this signature in the first 8 KiB of the kernel file, aligned at a
32-bit boundary. The signature is in its own section so the header can be
forced to be within the first 8 KiB of the kernel file.
*/
.section .multiboot
.align 4
.long MAGIC
.long FLAGS
.long CHECKSUM /* MAGIC + FLAGS + CHECKSUM == 0 (mod 2^32), as the spec requires */
/*
The multiboot standard does not define the value of the stack pointer register
(esp) and it is up to the kernel to provide a stack. This allocates room for a
small stack by creating a symbol at the bottom of it, then allocating 16384
bytes for it, and finally creating a symbol at the top. The stack grows
downwards on x86. The stack is in its own section so it can be marked nobits,
which means the kernel file is smaller because it does not contain an
uninitialized stack. The stack on x86 must be 16-byte aligned according to the
System V ABI standard and de-facto extensions. The compiler will assume the
stack is properly aligned and failure to align the stack will result in
undefined behavior.
*/
.section .bss
.align 16
stack_bottom:
/*
Reserve a full 16 KiB. The size must be a multiple of 16 so that stack_top
(the initial ESP) keeps the 16-byte alignment established by .align 16 above;
the previous value of 16380 left stack_top at offset 16380 (== 12 mod 16),
contradicting the alignment requirement documented in this file.
*/
.skip 16384 # 16 KiB
stack_top:
.skip 4 # Allocated 4 bytes of space to prevent stack_top being the last element
/*
The linker script specifies _start as the entry point to the kernel and the
bootloader will jump to this position once the kernel has been loaded. It
doesn't make sense to return from this function as the bootloader is gone.
*/
.section .text
.global _start
.type _start, @function
_start:
	/*
	The bootloader has loaded us into 32-bit protected mode on a x86
	machine. Interrupts are disabled. Paging is disabled. The processor
	state is as defined in the multiboot standard. The kernel has full
	control of the CPU. The kernel can only make use of hardware features
	and any code it provides as part of itself. There's no printf
	function, unless the kernel provides its own <stdio.h> header and a
	printf implementation. There are no security restrictions, no
	safeguards, no debugging mechanisms, only what the kernel provides
	itself. It has absolute and complete power over the
	machine.
	*/
	/*
	To set up a stack, we set the esp register to point to the top of the
	stack (as it grows downwards on x86 systems). This is necessarily done
	in assembly as languages such as C cannot function without a stack.
	The explicit AND forces 16-byte alignment regardless of how the stack
	symbol happens to be laid out in .bss.
	*/
	mov $stack_top, %esp
	and $-16, %esp
	/*
	This is a good place to initialize crucial processor state before the
	high-level kernel is entered. It's best to minimize the early
	environment where crucial features are offline. Note that the
	processor is not fully initialized yet: Features such as floating
	point instructions and instruction set extensions are not initialized
	yet. The GDT should be loaded here. Paging should be enabled here.
	C++ features such as global constructors and exceptions will require
	runtime support to work as well.
	*/
	/*
	Push the EBX register to the top of the stack cause it contains the
	multiboot information data structure, EAX register causes it contains
	the magic number.
	*/
	/*
	The System V i386 ABI (as assumed by modern GCC) requires ESP to be
	16-byte aligned at the call instruction. We are about to push 8 bytes
	of arguments, so reserve 8 bytes of padding first: 16 - 8 - 8 = 0,
	leaving ESP 16-byte aligned when `call` executes. The argument layout
	seen by kernel_main (magic at 4(%esp), multiboot info at 8(%esp)) is
	unchanged.
	*/
	sub $8, %esp
	push %ebx
	push %eax
	call kernel_main
	/*
	If the system has nothing more to do, put the computer into an
	infinite loop. To do that:
	1) Disable interrupts with cli (clear interrupt enable in eflags).
	   They are already disabled by the bootloader, so this is not needed.
	   Mind that you might later enable interrupts and return from
	   kernel_main (which is sort of nonsensical to do).
	2) Wait for the next interrupt to arrive with hlt (halt instruction).
	   Since they are disabled, this will lock up the computer.
	3) Jump to the hlt instruction if it ever wakes up due to a
	   non-maskable interrupt occurring or due to system management mode.
	*/
	cli
1:	hlt
	jmp 1b
/*
Get the Stack Pointer.
NOTE(review): the value returned is ESP *inside* this function, i.e. it
includes the 4-byte return address pushed by the call; the caller's ESP
before the call is the returned value + 4. Confirm callers account for this.
*/
.global get_esp
.type get_esp, @function
get_esp:
mov %esp, %eax
ret
/*
Set the size of the _start symbol to the current location '.' minus its start.
This is useful when debugging or when you implement call tracing.
*/
.size _start, . - _start
|
Ollie-Pearce/rust | 11,809 | library/std/src/sys/pal/sgx/abi/entry.S | /* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
/* IMAGE_BASE lives in an "absolute" section at address 0; taking its
   RIP-relative address at runtime therefore yields the enclave load base. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:
/* ELF note identifying the toolchain version for the SGX post-linker. */
.section ".note.x86_64-fortanix-unknown-sgx", "", @note
.align 4
.long 1f - 0f /* name length (not including padding) */
.long 3f - 2f /* desc length (not including padding) */
.long 1 /* type = NT_VERSION */
0: .asciz "toolchain-version" /* name */
1: .align 4
2: .long 1 /* desc - toolchain version number, 32-bit LE */
3: .align 4
.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/* RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24 /* skip to the MXCSR slot (offset 24) in the legacy XSAVE region */
.Lxsave_mxcsr:
.short 0x1fbf /* MXCSR value loaded by xrstor; NOTE(review): 0x1f80 is the
                 power-on default (all exceptions masked) — confirm the
                 additional low bits here are intentional */
/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */
/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */
/* When using this macro, don't forget to adjust the linker version script! */
/* globvar: declares an exported, protected, size-aligned placeholder of
   \size bytes whose contents are patched in by the post-linker. */
.macro globvar name:req size:req
.global \name
.protected \name
.align \size
.size \name , \size
\name :
.org .+\size
.endm
/* The base address (relative to enclave start) of the heap area */
globvar HEAP_BASE 8
/* The heap size in bytes */
globvar HEAP_SIZE 8
/* Value of the RELA entry in the dynamic table */
globvar RELA 8
/* Value of the RELACOUNT entry in the dynamic table */
globvar RELACOUNT 8
/* The enclave size in bytes */
globvar ENCLAVE_SIZE 8
/* The base address (relative to enclave start) of the enclave configuration area */
globvar CFGDATA_BASE 8
/* Non-zero if debugging is enabled, zero otherwise */
globvar DEBUG 1
/* The base address (relative to enclave start) of the enclave text section */
globvar TEXT_BASE 8
/* The size in bytes of enclave text section */
globvar TEXT_SIZE 8
/* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_OFFSET 8
/* The size in bytes of enclave .eh_frame_hdr section */
globvar EH_FRM_HDR_LEN 8
/* The base address (relative to enclave start) of the enclave .eh_frame section */
globvar EH_FRM_OFFSET 8
/* The size in bytes of enclave .eh_frame section */
globvar EH_FRM_LEN 8
/* XSAVE header starts at offset 512 from the area base. */
.org .Lxsave_clear+512
.Lxsave_header:
.int 0, 0 /* XSTATE_BV */
.int 0, 0 /* XCOMP_BV */
.org .+48 /* reserved bits */
.data
/* Sticky abort flag: set once by an aborting usercall, checked on every entry. */
.Laborted:
.byte 0
/* TCS local storage section */
/* Byte offsets into the per-TCS storage block addressed through %gs. */
.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags, 0x08 /* initialized by loader */
.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw, 0x0a
.equ tcsls_user_mxcsr, 0x0c
.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp, 0x28
.equ tcsls_user_retip, 0x30
.equ tcsls_user_rbp, 0x38
.equ tcsls_user_r12, 0x40
.equ tcsls_user_r13, 0x48
.equ tcsls_user_r14, 0x50
.equ tcsls_user_r15, 0x58
.equ tcsls_tls_ptr, 0x60
.equ tcsls_tcs_addr, 0x68
/* Loads the "secondary TCS" flag as a 0/1 bool into register \reg.
   Build-time .abort guards the assumption that the flag is bit 0. */
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
.ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
.abort
.endif
mov $(1<<tcsls_flag_secondary),%e\reg
and %gs:tcsls_flags,%\reg
.endm
/* We place the ELF entry point in a separate section so it can be removed by
elf2sgxs */
/* Fallback entry used only when the enclave binary is (incorrectly) executed
   as a plain Linux ELF: print an explanatory message to stderr and exit(1). */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
.ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:
.global elf_entry
.type elf_entry,function
elf_entry:
/* print error message */
movq $2,%rdi /* write to stderr (fd 2) */
lea .Lelf_entry_error_msg(%rip),%rsi
movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
movq $1,%rax /* write() syscall */
syscall
test %rax,%rax
jle .Lelf_exit /* exit on error */
add %rax,%rsi /* advance past the bytes already written */
sub %rax,%rdx /* all chars written? */
jnz .Lelf_entry_call
.Lelf_exit:
movq $60,%rax /* exit() syscall */
movq $1,%rdi /* exit code 1 */
syscall
ud2 /* should not be reached */
/* end elf_entry */
/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp) /* clear AC (bit 18) and DF (bit 10) in the saved RFLAGS */
popfq
/* check for abort */
bt $0,.Laborted(%rip)
jc .Lreentry_panic
.endm
.text
/* Enclave entry point reached via ENCLU[EENTER]. Saves untrusted caller
   state into TCS-local storage, resets CPU state, dispatches either to the
   usercall-return path, first-time TCS init, or the main Rust entry(); on
   the way out scrubs register state and leaves via ENCLU[EEXIT]. */
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
mov %rcx,%gs:tcsls_user_retip /* RCX = untrusted AEP/return RIP provided by EENTER */
mov %rsp,%gs:tcsls_user_rsp
mov %rbp,%gs:tcsls_user_rbp
mov %r12,%gs:tcsls_user_r12
mov %r13,%gs:tcsls_user_r13
mov %r14,%gs:tcsls_user_r14
mov %r15,%gs:tcsls_user_r15
mov %rbx,%gs:tcsls_tcs_addr /* RBX = TCS address (set by EENTER) */
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
mov %rdx, %r10 /* preserve RDX (a user argument) across xrstor */
mov $-1, %rax
mov $-1, %rdx /* RFBM = EDX:EAX = all-ones for xrstor */
xrstor .Lxsave_clear(%rip) /* restore extended state to a clean baseline */
lfence
mov %r10, %rdx
/* check if returning from usercall */
mov %gs:tcsls_last_rsp,%r11
test %r11,%r11
jnz .Lusercall_ret
/* setup stack */
mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
/* here. This is fixed below under "adjust stack". */
/* check for thread init */
bts $tcsls_flag_init_once,%gs:tcsls_flags
jc .Lskip_init
/* adjust stack */
lea IMAGE_BASE(%rip),%rax /* first entry: rebase TOS from image-relative to absolute */
add %rax,%rsp
mov %rsp,%gs:tcsls_tos
entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
mov %rdi,%rbx
mov %rsi,%r12
mov %rdx,%r13
mov %r8,%r14
mov %r9,%r15
load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
call tcs_init
/* reload caller-saved registers */
mov %rbx,%rdi
mov %r12,%rsi
mov %r13,%rdx
mov %r14,%r8
mov %r15,%r9
jmp .Lafter_init
.Lskip_init:
entry_sanitize_final
.Lafter_init:
/* call into main entry point */
load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
mov %rax,%rsi /* RSI = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
xor %rdi,%rdi /* RDI = normal exit */
.Lexit:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set later */
/* RCX overwritten by ENCLU */
/* RDX contains return value */
/* RSP set later */
/* RBP set later */
/* RDI contains exit mode */
/* RSI contains return value */
xor %r8,%r8
xor %r9,%r9
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
mov %rdx, %rcx /* save RDX */
mov $-1, %rax
mov %rax, %rdx
xrstor .Lxsave_clear(%rip)
mov %rcx, %rdx /* restore RDX */
/* clear flags */
pushq $0
popfq
/* restore user registers */
mov %gs:tcsls_user_r12,%r12
mov %gs:tcsls_user_r13,%r13
mov %gs:tcsls_user_r14,%r14
mov %gs:tcsls_user_r15,%r15
mov %gs:tcsls_user_retip,%rbx /* RBX = target RIP for EEXIT */
mov %gs:tcsls_user_rsp,%rsp
mov %gs:tcsls_user_rbp,%rbp
fldcw %gs:tcsls_user_fcw
ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
mov $0x4,%eax /* EEXIT */
enclu
/* end sgx_entry */
.Lreentry_panic:
orq $8,%rsp /* force a known stack misalignment pattern before aborting */
jmp abort_reentry
/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
/* Exits the enclave to perform a usercall in untrusted userspace. For a
   non-aborting usercall, callee-saved state and the x87/SSE control words
   are parked on the enclave stack and tcsls_last_rsp records where, so the
   next sgx_entry can resume at .Lusercall_ret. */
.global usercall
usercall:
test %rcx,%rcx /* check `abort` function argument */
jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */
jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
testb $0xff,DEBUG(%rip)
jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rbx
sub $8, %rsp /* room for FCW (2 bytes at +4) and MXCSR (4 bytes at +0) */
fstcw 4(%rsp)
stmxcsr (%rsp)
movq %rsp,%gs:tcsls_last_rsp /* non-zero marker: "a usercall is in flight" */
.Lusercall_noreturn:
/* clear general purpose register state */
/* RAX overwritten by ENCLU */
/* RBX set by sgx_exit */
/* RCX overwritten by ENCLU */
/* RDX contains parameter */
/* RSP set by sgx_exit */
/* RBP set by sgx_exit */
/* RDI contains parameter */
/* RSI contains parameter */
/* R8 contains parameter */
/* R9 contains parameter */
xor %r10,%r10
xor %r11,%r11
/* R12 ~ R15 set by sgx_exit */
/* extended registers/flags cleared by sgx_exit */
/* exit */
jmp .Lsgx_exit
.Lusercall_ret:
movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
ldmxcsr (%rsp)
fldcw 4(%rsp)
add $8, %rsp
entry_sanitize_final
pop %rbx
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
/* return */
mov %rsi,%rax /* RAX = return value */
/* NOP: mov %rdx,%rdx */ /* RDX = return value */
pop %r11
lfence
jmp *%r11 /* retpoline-style return to the saved return address */
/*
The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;
// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);
// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/
/* Returns the TCS address saved at enclave entry. All accessors below use
   the pop/lfence/indirect-jmp return sequence instead of `ret` as a
   speculation barrier. */
.global get_tcs_addr
get_tcs_addr:
mov %gs:tcsls_tcs_addr,%rax
pop %r11
lfence
jmp *%r11
/* Returns the thread's TLS pointer from TCS-local storage. */
.global get_tls_ptr
get_tls_ptr:
mov %gs:tcsls_tls_ptr,%rax
pop %r11
lfence
jmp *%r11
/* Stores RDI as the thread's TLS pointer in TCS-local storage. */
.global set_tls_ptr
set_tls_ptr:
mov %rdi,%gs:tcsls_tls_ptr
pop %r11
lfence
jmp *%r11
/* Atomically takes (reads and clears) the debug panic buffer pointer. */
.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
xor %rax,%rax
xchg %gs:tcsls_debug_panic_buf_ptr,%rax
pop %r11
lfence
jmp *%r11
|
omnirom/android_packages_modules_Virtualization | 4,679 | libs/libvmbase/exceptions.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Saves the volatile registers onto the stack. This currently takes 14
 * instructions, so it can be used in exception handlers with 18 instructions
 * left.
 *
 * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
 * which can be used as the first and second arguments of a subsequent call.
 */
.macro save_volatile_to_stack
	/* Reserve stack space and save registers x0-x18, x29 & x30. */
	stp x0, x1, [sp, #-(8 * 24)]!
	stp x2, x3, [sp, #8 * 2]
	stp x4, x5, [sp, #8 * 4]
	stp x6, x7, [sp, #8 * 6]
	stp x8, x9, [sp, #8 * 8]
	stp x10, x11, [sp, #8 * 10]
	stp x12, x13, [sp, #8 * 12]
	stp x14, x15, [sp, #8 * 14]
	stp x16, x17, [sp, #8 * 16]
	str x18, [sp, #8 * 18]
	stp x29, x30, [sp, #8 * 20]
	/*
	 * Save elr_el1 & spsr_el1. This such that we can take nested exception
	 * and still be able to unwind.
	 */
	mrs x0, elr_el1
	mrs x1, spsr_el1
	stp x0, x1, [sp, #8 * 22]
.endm
/**
 * Restores the volatile registers from the stack. This currently takes 14
 * instructions, so it can be used in exception handlers while still leaving 18
 * instructions left; if paired with save_volatile_to_stack, there are 4
 * instructions to spare.
 */
.macro restore_volatile_from_stack
	/* Restore registers x2-x18, x29 & x30. */
	ldp x2, x3, [sp, #8 * 2]
	ldp x4, x5, [sp, #8 * 4]
	ldp x6, x7, [sp, #8 * 6]
	ldp x8, x9, [sp, #8 * 8]
	ldp x10, x11, [sp, #8 * 10]
	ldp x12, x13, [sp, #8 * 12]
	ldp x14, x15, [sp, #8 * 14]
	ldp x16, x17, [sp, #8 * 16]
	ldr x18, [sp, #8 * 18]
	ldp x29, x30, [sp, #8 * 20]
	/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
	ldp x0, x1, [sp, #8 * 22]
	msr elr_el1, x0
	msr spsr_el1, x1
	/* Restore x0 & x1, and release stack space. */
	ldp x0, x1, [sp], #8 * 24
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
 * the work, then switching back to SP0 before returning.
 *
 * Switching to SPx and calling the Rust handler takes 16 instructions. To
 * restore and return we need an additional 16 instructions, so we can implement
 * the whole handler within the allotted 32 instructions.
 */
.macro current_exception_sp0 handler:req
	msr spsel, #1          /* switch to SP_EL1 so the handler has a real stack */
	save_volatile_to_stack
	bl \handler
	restore_volatile_from_stack
	msr spsel, #0          /* back to SP_EL0 before returning */
	eret
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SPx. It saves volatile registers, calls the Rust handler, restores volatile
 * registers, then returns.
 *
 * This also works for exceptions taken from EL0, if we don't care about
 * non-volatile registers.
 *
 * Saving state and jumping to the Rust handler takes 15 instructions, and
 * restoring and returning also takes 15 instructions, so we can fit the whole
 * handler in 30 instructions, under the limit of 32.
 */
.macro current_exception_spx handler:req
	save_volatile_to_stack
	bl \handler
	restore_volatile_from_stack
	eret
.endm
/* EL1 exception vector table: 16 entries of at most 32 instructions (0x80
   bytes) each, in the architecturally mandated order of four groups of four
   (Synchronous, IRQ, FIQ, SError). */
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
/* Group 1: current EL, using SP_EL0. */
sync_cur_sp0:
	current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
	current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
	current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
	current_exception_sp0 serr_current
/* Group 2: current EL, using SP_EL1. */
.balign 0x80
sync_cur_spx:
	current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
	current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
	current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
	current_exception_spx serr_current
/* Group 3: lower EL, AArch64. */
.balign 0x80
sync_lower_64:
	current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
	current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
	current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
	current_exception_spx serr_lower
/* Group 4: lower EL, AArch32. */
.balign 0x80
sync_lower_32:
	current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
	current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
	current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
	current_exception_spx serr_lower
|
omnirom/android_packages_modules_Virtualization | 1,788 | libs/libvmbase/exceptions_panic.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/**
 * The following table is intended to trap any fault resulting from the very
 * first memory accesses. They assume that PSCI v0.2 is available and provides
 * the PSCI_SYSTEM_RESET call in an attempt to gracefully exit but otherwise
 * results in the core busy-looping.
 */
/* Every one of the 16 architectural vector slots expands the reset_or_hang
   macro (from common.h); this table is only installed during very early boot
   before the real vector_table_el1 can be used. */
.section .text.vector_table_panic, "ax"
.global vector_table_panic
.balign 0x800
vector_table_panic:
sync_cur_sp0_panic:
	reset_or_hang
.balign 0x80
irq_cur_sp0_panic:
	reset_or_hang
.balign 0x80
fiq_cur_sp0_panic:
	reset_or_hang
.balign 0x80
serr_cur_sp0_panic:
	reset_or_hang
.balign 0x80
sync_cur_spx_panic:
	reset_or_hang
.balign 0x80
irq_cur_spx_panic:
	reset_or_hang
.balign 0x80
fiq_cur_spx_panic:
	reset_or_hang
.balign 0x80
serr_cur_spx_panic:
	reset_or_hang
.balign 0x80
sync_lower_64_panic:
	reset_or_hang
.balign 0x80
irq_lower_64_panic:
	reset_or_hang
.balign 0x80
fiq_lower_64_panic:
	reset_or_hang
.balign 0x80
serr_lower_64_panic:
	reset_or_hang
.balign 0x80
sync_lower_32_panic:
	reset_or_hang
.balign 0x80
irq_lower_32_panic:
	reset_or_hang
.balign 0x80
fiq_lower_32_panic:
	reset_or_hang
.balign 0x80
serr_lower_32_panic:
	reset_or_hang
|
omnirom/android_packages_modules_Virtualization | 5,154 | libs/libvmbase/entry.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/*
 * MAIR_EL1 attribute encodings: index 0 = device nGnRE, index 1 = normal
 * write-back write-allocate memory. These indices are what the idmap
 * descriptors reference via their AttrIndx field.
 */
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
 * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
 * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/* Privileged Access Never is unchanged on taking an exception to EL1. */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* All writable memory regions are treated as XN. */
.set .L_SCTLR_EL1_WXN, 0x1 << 19
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
/**
 * This is a generic entry point for an image. It carries out the operations required to prepare the
 * loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
 * prepares the stack, enables floating point, and sets up the exception vector. It preserves x0-x3
 * for the Rust entry point, as these may contain boot parameters.
 *
 * Only x28-x30 are used as scratch below so that x0-x3 survive until the
 * `bl rust_entry` call.
 */
.section .init.entry, "ax"
.global entry
entry:
	/* Load and apply the memory management configuration, ready to enable MMU and caches. */
	/* Install the panic vector table first so any early fault resets or hangs. */
	adr x30, vector_table_panic
	msr vbar_el1, x30
	/*
	 * Our load address is set by the host so validate it before proceeding.
	 */
	adr x30, entry
	mov_i x29, entry
	cmp x29, x30
	b.eq 1f
	reset_or_hang
1:
	adrp x30, idmap
	msr ttbr0_el1, x30
	mov_i x30, .Lmairval
	msr mair_el1, x30
	mov_i x30, .Ltcrval
	/* Copy the supported PA range into TCR_EL1.IPS. */
	mrs x29, id_aa64mmfr0_el1
	bfi x30, x29, #32, #4
	msr tcr_el1, x30
	mov_i x30, .Lsctlrval
	/*
	 * Ensure everything before this point has completed, then invalidate any potentially stale
	 * local TLB entries before they start being used.
	 */
	isb
	tlbi vmalle1
	ic iallu
	dsb nsh
	isb
	/*
	 * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
	 */
	msr sctlr_el1, x30
	isb
	/* Disable trapping floating point access in EL1. */
	mrs x30, cpacr_el1
	orr x30, x30, #(0x3 << 20)
	msr cpacr_el1, x30
	isb
	/* Zero out the bss section. */
	/* NOTE(review): stores 16 bytes per iteration — assumes bss_begin is
	 * 16-byte aligned and the section size is a multiple of 16 (enforced
	 * by the linker script, presumably; confirm there). */
	adr_l x29, bss_begin
	adr_l x30, bss_end
0:	cmp x29, x30
	b.hs 1f
	stp xzr, xzr, [x29], #16
	b 0b
1:	/* Copy the data section. */
	/* Copies 32 bytes per iteration from the load address (LMA) to the
	 * runtime address (VMA); assumes 32-byte-padded, 16-byte-aligned
	 * section bounds — TODO confirm against the linker script. */
	adr_l x28, data_begin
	adr_l x29, data_end
	adr_l x30, data_lma
2:	cmp x28, x29
	b.ge 3f
	ldp q0, q1, [x30], #32
	stp q0, q1, [x28], #32
	b 2b
3:	/* Prepare the exception handler stack (SP_EL1). */
	adr_l x30, init_eh_stack_pointer
	msr spsel, #1
	mov sp, x30
	/* Prepare the main thread stack (SP_EL0). */
	adr_l x30, init_stack_pointer
	msr spsel, #0
	mov sp, x30
	/* Set up exception vector. */
	adr x30, vector_table_el1
	msr vbar_el1, x30
	/*
	 * Set up Bionic-compatible thread-local storage.
	 *
	 * Note that TPIDR_EL0 can't be configured from rust_entry because the
	 * compiler will dereference it during function entry to access
	 * __stack_chk_guard and Rust doesn't support LLVM's
	 * __attribute__((no_stack_protector)).
	 */
	adr_l x30, __bionic_tls
	msr tpidr_el0, x30
	/* Call into Rust code. */
	bl rust_entry
	/* Loop forever waiting for interrupts. */
4:	wfi
	b 4b
|
omnirom/android_packages_modules_Virtualization | 2,102 | guest/vmbase_example/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Static identity-mapped translation table, referenced by TTBR0_EL1 from
 * the entry code. Descriptor attribute bits below follow the VMSAv8-64
 * stage 1 format; AttrIndx values refer to the MAIR_EL1 layout programmed
 * in entry.S (#0 device, #1 normal memory).
 */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
/* Read-only. */
.set .L_TT_RO, 0x2 << 6
/* Execute-never (both UXN and PXN bits). */
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
/* 4 KiB alignment, as required for a translation table base. */
.align 12
idmap:
	/* level 1 */
	.quad		.L_BLOCK_DEV | 0x0		// 1 GiB of device mappings
	.quad		0x0				// 1 GiB unmapped
	.quad		.L_TT_TYPE_TABLE + 0f		// up to 1 GiB of DRAM
	.fill		509, 8, 0x0			// 509 GiB of remaining VA space
0:	/* level 2 */
#if defined(VMBASE_EXAMPLE_IS_BIOS)
	.quad		0				// 2 MiB not mapped (DT)
	.quad		.L_BLOCK_MEM_XIP | 0x80200000	// 2 MiB of DRAM containing image
	.quad		.L_BLOCK_MEM | 0x80400000	// 2 MiB of writable DRAM
	.fill		509, 8, 0x0
#elif defined(VMBASE_EXAMPLE_IS_KERNEL)
	.quad		.L_BLOCK_MEM_XIP | 0x80000000	// 2 MiB of DRAM containing image
	.quad		.L_BLOCK_MEM | 0x80200000	// 2 MiB of writable DRAM
	.fill		510, 8, 0x0
#else
#error "Unexpected vmbase_example mode: failed to generate idmap"
#endif
|
omnirom/android_packages_modules_Virtualization | 2,161 | guest/rialto/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Initial TTBR0 idmap activated before first memory write.
// Remains active until a new page table is created by early Rust.
//
// Descriptor bits follow the VMSAv8-64 stage 1 format; AttrIndx values
// match the MAIR_EL1 layout programmed by the vmbase entry code
// (#0 device, #1 normal memory).
//
.set .SZ_1K,	1024
.set .SZ_4K,	4 * .SZ_1K
.set .SZ_1M,	1024 * .SZ_1K
.set .SZ_2M,	2 * .SZ_1M
.set .SZ_1G,	1024 * .SZ_1M
.set .PAGE_SIZE, .SZ_4K
// Image is loaded at 2 GiB; text occupies the first 2 MiB, data the next.
.set .ORIGIN_ADDR,	2 * .SZ_1G
.set .TEXT_ADDR,	.ORIGIN_ADDR + (0 * .SZ_2M)
.set .DATA_ADDR,	.ORIGIN_ADDR + (1 * .SZ_2M)
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
.set .L_TT_AF, 0x1 << 10	// Access flag
.set .L_TT_NG, 0x1 << 11	// Not global
.set .L_TT_RO, 0x2 << 6		// Read-only
.set .L_TT_XN, 0x3 << 53	// Execute-never (UXN | PXN)
.set .L_TT_MT_DEV, 0x0 << 2		// MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8)	// MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.balign .PAGE_SIZE
idmap:
	/* level 1 */
	.quad		.L_BLOCK_DEV | 0x0		// 1 GiB of device mappings
	.quad		0x0				// 1 GiB unmapped
	.quad		.L_TT_TYPE_TABLE + 0f		// up to 1 GiB of DRAM
	.balign		.PAGE_SIZE, 0			// unmapped
	/* level 2 */
0:
	.quad		.L_BLOCK_MEM_XIP | .TEXT_ADDR	// 2 MiB of DRAM containing image
	.quad		.L_BLOCK_MEM | .DATA_ADDR	// 2 MiB of writable DRAM
	.balign		.PAGE_SIZE, 0			// unmapped
|
omnirom/android_packages_modules_Virtualization | 1,745 | guest/pvmfw/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Initial identity-mapped translation table for pVM firmware, loaded into
 * TTBR0_EL1 by the vmbase entry code. Descriptor bits follow the
 * VMSAv8-64 stage 1 format; AttrIndx values match the MAIR_EL1 layout
 * programmed in entry.S (#0 device, #1 normal memory).
 */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
/* Read-only. */
.set .L_TT_RO, 0x2 << 6
/* Execute-never (both UXN and PXN bits). */
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
/* 4 KiB alignment, as required for a translation table base. */
.align 12
idmap:
	/* level 1 */
	.quad		.L_BLOCK_DEV | 0x0	// 1 GB of device mappings
	.quad		.L_TT_TYPE_TABLE + 0f	// Unmapped device memory, and pVM firmware
	.fill		510, 8, 0x0		// 510 GB of remaining VA space
	/* level 2 */
0:	.fill		510, 8, 0x0
	.quad		.L_BLOCK_MEM_XIP | 0x7fc00000	// pVM firmware image
	.quad		.L_BLOCK_MEM | 0x7fe00000	// Writable memory for stack, heap &c.
|
oooobserver/rcore | 1,245 | os/src/link_app.S |
# Table of user application binaries embedded into the kernel image.
# _num_app holds the count (6), followed by the start address of each app
# and finally app_5_end, so the size of app i is addr[i+1] - addr[i].
# The kernel reads this table to locate and load each application.
    .align 3
    .section .data
    .global _num_app
_num_app:
    .quad 6
    .quad app_0_start
    .quad app_1_start
    .quad app_2_start
    .quad app_3_start
    .quad app_4_start
    .quad app_5_start
    .quad app_5_end

# Each app's raw binary is included verbatim between its start/end labels.
    .section .data
    .global app_0_start
    .global app_0_end
app_0_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/00hello_world.bin"
app_0_end:

    .section .data
    .global app_1_start
    .global app_1_end
app_1_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/01store_fault.bin"
app_1_end:

    .section .data
    .global app_2_start
    .global app_2_end
app_2_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/02power.bin"
app_2_end:

    .section .data
    .global app_3_start
    .global app_3_end
app_3_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/03priv_inst.bin"
app_3_end:

    .section .data
    .global app_4_start
    .global app_4_end
app_4_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/04priv_csr.bin"
app_4_end:

    .section .data
    .global app_5_start
    .global app_5_end
app_5_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/05my_test.bin"
app_5_end:
|
oooobserver/rcore | 1,588 | os/src/trap/trap.S | .altmacro
# SAVE_GP n: store general register x<n> into its TrapContext slot.
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
# LOAD_GP n: restore general register x<n> from its TrapContext slot.
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text
    .globl __alltraps
    .globl __restore
    # stvec requires a 4-byte-aligned trap entry address.
    .align 2
# Trap entry from U mode. Saves a full TrapContext (34 * 8 bytes:
# x0-x31 slots, sstatus, sepc) on the kernel stack, then calls the Rust
# trap_handler with a pointer to it in a0.
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->kernel stack, sscratch->user stack
    # allocate a TrapContext on kernel stack
    addi sp, sp, -34*8
    # save general-purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they were saved on kernel stack
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it on the kernel stack
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # set input argument of trap_handler(cx: &mut TrapContext)
    mv a0, sp
    call trap_handler

# Return to U mode from the TrapContext pointed to by a0. Falls through
# from __alltraps after trap_handler returns (a0 = &TrapContext).
__restore:
    # case1: start running app by __restore
    # case2: back to U after handling trap
    mv sp, a0
    # now sp->kernel stack(after allocated), sscratch->user stack
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    ld t2, 2*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    csrw sscratch, t2
    # restore general-purpose registers except sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # release TrapContext on kernel stack
    addi sp, sp, 34*8
    # now sp->kernel stack, sscratch->user stack
    csrrw sp, sscratch, sp
    sret
ornfelt/my_rust | 5,156 | repos/spha/sphaerophoria_stream-os/src/boot.s | /* Declare constants for the multiboot header. */
/* Declare constants for the multiboot header. */
.set MULTIBOOT2_MAGIC,        0xE85250D6 /* multiboot 2 magic */
.set MULTIBOOT2_ARCHITECTURE, 0          /*i386 */

/*
Declare a multiboot2 header that marks the program as a kernel. These are magic
values that are documented in the multiboot2 standard. The bootloader will
search for this signature in the first 8 KiB of the kernel file, aligned at a
32-bit boundary. The signature is in its own section so the header can be
forced to be within the first 8 KiB of the kernel file.
*/
.section .multiboot
.align 4
.long MULTIBOOT2_MAGIC
.long MULTIBOOT2_ARCHITECTURE
/* Set size to 0, because alignment makes size calculation tricky, and grub
 * doesn't seem to care that it's wrong anyways*/
.long 0
/* Checksum: header fields must sum to 0 mod 2^32 (header_length above is 0). */
.long 1<<32 - MULTIBOOT2_MAGIC - MULTIBOOT2_ARCHITECTURE
/*Framebuffer tag*/
.align 8
.short 5 /* type 5 */
.short 0 /* Don't ignore me */
.long 20 /* size 20 */
.long 640 /* 640 width */
.long 480 /* 480 height */
.long 32 /* 8 bits per channel */
/*Terminator tag */
.align 8
.short 0
.short 0
.long 8

.set MAX_NUM_CPUS, 8
.set STACK_SIZE, 16384

/* One STACK_SIZE region per CPU, indexed by APIC ID in set_cpu_stack. */
.section .bss
.align 16
stack_bottom:
.skip STACK_SIZE * MAX_NUM_CPUS
stack_top:
.skip 4 # We define stack top as the last element in our stack, but this is after all allocated space. Add another 4 bytes for one more element

/* Point esp at this CPU's private stack: APIC ID is read from
 * cpuid(1).ebx[31:24], and CPU n gets the region ending at
 * stack_bottom + (n+1)*STACK_SIZE. */
/* clobbers eax, ebx, ecx, edx, and esp */
.macro set_cpu_stack
    mov $1, %eax
    cpuid
    shrl $24, %ebx
    add $1, %ebx
    mov $STACK_SIZE, %eax
    mul %ebx
    add $stack_bottom, %eax
    mov %eax, %esp
.endmacro

/*
The linker script specifies _start as the entry point to the kernel and the
bootloader will jump to this position once the kernel has been loaded. It
doesn't make sense to return from this function as the bootloader is gone.
*/
.section .text
.global _start
.type _start, @function
_start:
	/* Stash multiboot info before clobbering registers when setting up our
	 * stack. Note that while we do not know which section of our stack we
	 * want to use for this CPU, we are still writing to valid memory. Our
	 * other CPUs haven't booted yet, so if we're wrong we don't care */
	mov %eax,stack_top
	mov %ebx,stack_top - 4

	set_cpu_stack

	/* Pull multiboot info back from where we put it, and push it to the
	 * stack where we wanted it*/
	mov (stack_top), %eax
	mov (stack_top - 4), %ebx
	push %ebx
	push %eax

	/*
	Enter the high-level kernel. The ABI requires the stack is 16-byte
	aligned at the time of the call instruction (which afterwards pushes
	the return pointer of size 4 bytes). The stack was originally 16-byte
	aligned above and we've pushed a multiple of 16 bytes to the
	stack since (pushed 0 bytes so far), so the alignment has thus been
	preserved and the call is well defined.
	*/
	call kernel_main

	/*
	If the system has nothing more to do, put the computer into an
	infinite loop. To do that:
	1) Disable interrupts with cli (clear interrupt enable in eflags).
	   They are already disabled by the bootloader, so this is not needed.
	   Mind that you might later enable interrupts and return from
	   kernel_main (which is sort of nonsensical to do).
	2) Wait for the next interrupt to arrive with hlt (halt instruction).
	   Since they are disabled, this will lock up the computer.
	3) Jump to the hlt instruction if it ever wakes up due to a
	   non-maskable interrupt occurring or due to system management mode.
	*/
	cli
1:	hlt
	jmp 1b

/*
Set the size of the _start symbol to the current location '.' minus its start.
This is useful when debugging or when you implement call tracing.
*/
.size _start, . - _start

.global ap_trampoline
.type ap_trampoline, @function

/* Loaded to 0x8000 at runtime, so all absolute addresses inside the
 * trampoline are rebased relative to that load address. */
.set LGDT_ADDR, load_gdt - ap_trampoline + 0x8000
.set GDT_ADDR, GDT_value - ap_trampoline + 0x8000
.set AP_PROTECTED_CODE_ADDR, ap_trampoline_protected - ap_trampoline + 0x8000

/* Trampoline starts in real mode */
.code16
ap_trampoline:
	cli
	cld
	ljmp $0, $LGDT_ADDR

.align 16
GDT_table:
	/* Values for GDT table were stolen from our calculated GDT in gdt::init()
	 * in rust code. I assume _this_ table has to be in low memory, as we only
	 * have 16 bits to work with. It might make more sense for us to initialize
	 * the GDT in rust code and then copy it to 0x8000 - GDT_size or something,
	 * but this is good enough for now */
	.long 0x0, 0x0
	.long 0xffff, 0xcf9900
	.long 0xffff, 0xcf9300
GDT_value:
	.word GDT_value - GDT_table - 1
	.long GDT_table - ap_trampoline + 0x8000
	.long 0, 0
.align 64
load_gdt:
	/* Load gdt */
	xorw %ax, %ax
	movw %ax, %ds
	lgdtl GDT_ADDR

	/* Move into protected mode */
	movl %cr0, %eax
	orl $1, %eax
	movl %eax, %cr0

	ljmp $8, $AP_PROTECTED_CODE_ADDR

.align 32
.code32
ap_trampoline_protected:
	/* Selector 16 = flat data segment; selector 8 = flat code segment. */
	movw $16, %ax
	movw %ax, %ds
	movw %ax, %ss
	set_cpu_stack
	ljmp $8, $ap_startup
ap_trampoline_end:

.data
.global ap_trampoline_size
.align 4
ap_trampoline_size:
	.long ap_trampoline_end - ap_trampoline
.global max_num_cpus
max_num_cpus:
	.long MAX_NUM_CPUS
oscomp/test-la-load-arceos | 2,095 | modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
/*
 * Kernel linker script template: %ARCH%, %KERNEL_BASE% and %SMP% are
 * substituted by the build system before linking.
 */
BASE_ADDRESS = %KERNEL_BASE%;

ENTRY(_start)
SECTIONS
{
    . = BASE_ADDRESS;
    _skernel = .;

    .text : ALIGN(4K) {
        _stext = .;
        /* Boot code must come first so _start sits at the load address. */
        *(.text.boot)
        *(.text .text.*)
        . = ALIGN(4K);
        _etext = .;
    }

    _srodata = .;
    .rodata : ALIGN(4K) {
        *(.rodata .rodata.*)
        *(.srodata .srodata.*)
        *(.sdata2 .sdata2.*)
    }

    .init_array : ALIGN(0x10) {
        __init_array_start = .;
        *(.init_array .init_array.*)
        __init_array_end = .;
    }

    . = ALIGN(4K);
    _erodata = .;
    .data : ALIGN(4K) {
        _sdata = .;
        *(.data.boot_page_table)
        . = ALIGN(4K);
        *(.data .data.*)
        *(.sdata .sdata.*)
        *(.got .got.*)
    }

    .tdata : ALIGN(0x10) {
        _stdata = .;
        *(.tdata .tdata.*)
        _etdata = .;
    }

    .tbss : ALIGN(0x10) {
        _stbss = .;
        *(.tbss .tbss.*)
        *(.tcommon)
        _etbss = .;
    }

    /* Per-CPU data: one ALIGN(64) copy of the load image per core (%SMP%). */
    . = ALIGN(4K);
    _percpu_start = .;
    _percpu_end = _percpu_start + SIZEOF(.percpu);
    .percpu 0x0 : AT(_percpu_start) {
        _percpu_load_start = .;
        *(.percpu .percpu.*)
        _percpu_load_end = .;
        . = _percpu_load_start + ALIGN(64) * %SMP%;
    }
    . = _percpu_end;

    . = ALIGN(4K);
    _edata = .;

    .bss : AT(.) ALIGN(4K) {
        boot_stack = .;
        *(.bss.stack)
        . = ALIGN(4K);
        boot_stack_top = .;

        _sbss = .;
        *(.bss .bss.*)
        *(.sbss .sbss.*)
        *(COMMON)
        . = ALIGN(4K);
        _ebss = .;
    }

    _ekernel = .;

    /DISCARD/ : {
        *(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
    }
}

/* Collector sections used by the linkme crate and axns; appended after
 * .tbss so they do not disturb the main layout. */
SECTIONS {
    linkme_IRQ : { *(linkme_IRQ) }
    linkm2_IRQ : { *(linkm2_IRQ) }
    linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
    linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
    linkme_SYSCALL : { *(linkme_SYSCALL) }
    linkm2_SYSCALL : { *(linkm2_SYSCALL) }
    linkme_POST_TRAP : { *(linkme_POST_TRAP) }
    linkm2_POST_TRAP : { *(linkm2_POST_TRAP) }
    axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
oscomp/test-la-load-arceos | 4,325 | modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
# Intel-syntax GAS. Placeholders in {braces} are filled in by the Rust
# build via global_asm!/include-time substitution.

.section .text.boot
.code32
.global _start
_start:
    mov     edi, eax        # arg1: magic: 0x2BADB002
    mov     esi, ebx        # arg2: multiboot info
    jmp     bsp_entry32

.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
    .int    {mb_hdr_magic}                      # magic: 0x1BADB002
    .int    {mb_hdr_flags}                      # flags
    .int    -({mb_hdr_magic} + {mb_hdr_flags})  # checksum
    .int    multiboot_header - {offset}         # header_addr
    .int    _skernel - {offset}                 # load_addr
    .int    _edata - {offset}                   # load_end
    .int    _ebss - {offset}                    # bss_end_addr
    .int    _start - {offset}                   # entry_addr

# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
    # set data segment selectors
    mov     ax, 0x18
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax

    # set PAE, PGE bit in CR4
    mov     eax, {cr4}
    mov     cr4, eax

    # load the temporary page table
    lea     eax, [.Ltmp_pml4 - {offset}]
    mov     cr3, eax

    # set LME, NXE bit in IA32_EFER
    mov     ecx, {efer_msr}
    mov     edx, 0
    mov     eax, {efer}
    wrmsr

    # set protected mode, write protect, paging bit in CR0
    mov     eax, {cr0}
    mov     cr0, eax
.endm

# Common code in 64-bit
.macro ENTRY64_COMMON
    # clear segment selectors
    xor     ax, ax
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
.endm

.code32
bsp_entry32:
    lgdt    [.Ltmp_gdt_desc - {offset}]             # load the temporary GDT
    ENTRY32_COMMON
    ljmp    0x10, offset bsp_entry64 - {offset}     # 0x10 is code64 segment

.code32
.global ap_entry32
ap_entry32:
    ENTRY32_COMMON
    ljmp    0x10, offset ap_entry64 - {offset}      # 0x10 is code64 segment

.code64
bsp_entry64:
    ENTRY64_COMMON

    # set RSP to boot stack
    movabs  rsp, offset {boot_stack}
    add     rsp, {boot_stack_size}

    # call rust_entry(magic, mbi)
    movabs  rax, offset {entry}
    call    rax
    jmp     .Lhlt

.code64
ap_entry64:
    ENTRY64_COMMON

    # set RSP to high address (already set in ap_start.S)
    mov     rax, {offset}
    add     rsp, rax

    # call rust_entry_secondary(magic)
    mov     rdi, {mb_magic}
    movabs  rax, offset {entry_secondary}
    call    rax
    jmp     .Lhlt

.Lhlt:
    hlt
    jmp     .Lhlt

.section .rodata
.balign 8
.Ltmp_gdt_desc:
    .short  .Ltmp_gdt_end - .Ltmp_gdt - 1   # limit
    .long   .Ltmp_gdt - {offset}            # base

.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:

# Temporary identity + high-half page tables: both map the first 4 GiB of
# physical memory with 1 GiB huge pages.
.balign 4096
.Ltmp_pml4:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad .Ltmp_pdpt_low - {offset} + 0x3   # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 255
    # 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
    .quad .Ltmp_pdpt_high - {offset} + 0x3  # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 255

# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508

.Ltmp_pdpt_high:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
|
oscomp/test-la-load-arceos | 1,965 | modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
#
# The page is copied to {start_page_paddr} at runtime, so every absolute
# address below is rebased relative to that. The BSP writes the AP's
# stack pointer and 64-bit entry address at fixed offsets 0xff0/0xff8
# within the page before sending the STARTUP IPI.

.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8

# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
    cli
    wbinvd

    # real mode: zero all segment registers
    xor     ax, ax
    mov     ds, ax
    mov     es, ax
    mov     ss, ax
    mov     fs, ax
    mov     gs, ax

    # load the 64-bit GDT
    lgdt    [pa_ap_gdt_desc]

    # switch to protected-mode
    mov     eax, cr0
    or      eax, (1 << 0)
    mov     cr0, eax

    # far jump to 32-bit code. 0x8 is code32 segment selector
    ljmp    0x8, offset pa_ap_start32

.code32
ap_start32:
    # pick up the stack and entry point prepared by the BSP
    mov     esp, [stack_ptr]
    mov     eax, [entry_ptr]
    jmp     eax

.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
    .short  .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
    .long   pa_ap_gdt                           # base

.balign 16
.Lap_tmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:

# 0x7000
.p2align 12
.global ap_end
ap_end:
|
oscomp/test-la-load-arceos | 1,791 | modules/axhal/src/arch/loongarch64/trap.S | .macro SAVE_REGS, from_user
move $t0, $sp
.if \from_user == 1
csrrd $sp, KSAVE_KSP // restore kernel sp
addi.d $sp, $sp, -{trapframe_size}
STD $tp, $sp, 2
STD $r21, $sp, 21
csrrd $tp, KSAVE_TP
csrrd $r21, KSAVE_R21
.else
addi.d $sp, $sp, -{trapframe_size}
.endif
STD $t0, $sp, 3
csrrd $t0, KSAVE_TEMP
PUSH_GENERAL_REGS
csrrd $t1, LA_CSR_PRMD
csrrd $t2, LA_CSR_ERA
STD $t1, $sp, 32 // prmd
STD $t2, $sp, 33 // era
.endm
// RESTORE_REGS: undo SAVE_REGS. When returning to user mode, the kernel's
// $tp/$r21 are stashed in scratch CSRs and the kernel SP (just above the
// popped trap frame) is saved in KSAVE_KSP for the next trap entry.
.macro RESTORE_REGS, from_user
    .if \from_user == 1
    csrwr   $tp, KSAVE_TP
    csrwr   $r21, KSAVE_R21
    LDD     $tp, $sp, 2
    LDD     $r21, $sp, 21

    addi.d  $t1, $sp, {trapframe_size}
    csrwr   $t1, KSAVE_KSP          // save kernel sp
    .endif

    LDD     $t1, $sp, 33            // era
    LDD     $t2, $sp, 32            // prmd
    csrwr   $t1, LA_CSR_ERA
    csrwr   $t2, LA_CSR_PRMD

    POP_GENERAL_REGS
    LDD     $sp, $sp, 3
.endm

// Common exception entry: the privilege level recorded in PRMD.PPLV
// decides whether the trap came from user or kernel mode, and the Rust
// handler is called as loongarch64_trap_handler(tf, from_user).
.section .text
.balign 4096
.global exception_entry_base
exception_entry_base:
    csrwr   $t0, KSAVE_TEMP
    csrrd   $t0, LA_CSR_PRMD
    andi    $t0, $t0, 0x3
    bnez    $t0, .Lfrom_userspace

.Lfrom_kernel:
    SAVE_REGS 0
    move    $a0, $sp
    addi.d  $a1, $zero, 0
    bl      loongarch64_trap_handler
    RESTORE_REGS 0
    ertn

.Lfrom_userspace:
    SAVE_REGS 1
    move    $a0, $sp
    addi.d  $a1, $zero, 1
    bl      loongarch64_trap_handler
    RESTORE_REGS 1
    ertn

// TLB refill handler: walks the 3-level page table rooted at LA_CSR_PGD
// in hardware-assisted steps (lddir/ldpte) and refills both entries of
// the TLB pair, preserving $t0 via the dedicated TLBRSAVE scratch CSR.
.section .text
.balign 4096
.global handle_tlb_refill
handle_tlb_refill:
    csrwr   $t0, LA_CSR_TLBRSAVE
    csrrd   $t0, LA_CSR_PGD
    lddir   $t0, $t0, 3
    lddir   $t0, $t0, 2
    lddir   $t0, $t0, 1
    ldpte   $t0, 0
    ldpte   $t0, 1
    tlbfill
    csrrd   $t0, LA_CSR_TLBRSAVE
    ertn
|
oscomp/test-la-load-arceos | 2,358 | modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 2 // load supervisor gp
LDR t1, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t0
mv tp, t1
.endif
.endm
# RESTORE_REGS: undo SAVE_REGS. When returning to user mode, swap the
# user gp/tp back in (keeping the supervisor ones in the trap frame) and
# park the supervisor SP in sscratch so the next trap entry can find it.
.macro RESTORE_REGS, from_user
.if \from_user == 1
    LDR     t1, sp, 2               // load user gp and tp
    LDR     t0, sp, 3
    STR     gp, sp, 2               // save supervisor gp
    STR     tp, sp, 3               // save supervisor gp and tp
    mv      gp, t1
    mv      tp, t0
    addi    t0, sp, {trapframe_size} // put supervisor sp to scratch
    csrw    sscratch, t0
.endif

    # restore sepc
    LDR     t0, sp, 31
    csrw    sepc, t0

    # restore sstatus, but don't change FS
    # (FS tracks the live FPU dirty state and must not be rolled back
    # to the value captured at trap entry)
    LDR     t0, sp, 32              // t0 = sstatus to restore
    csrr    t1, sstatus             // t1 = current sstatus
    li      t2, 0x6000              // t2 = mask for FS
    and     t1, t1, t2              // t1 = current FS
    not     t2, t2                  // t2 = ~(mask for FS)
    and     t0, t0, t2              // t0 = sstatus to restore(cleared FS)
    or      t0, t0, t1              // t0 = sstatus to restore with current FS
    csrw    sstatus, t0             // restore sstatus

    POP_GENERAL_REGS
    LDR     sp, sp, 1               // load sp from tf.regs.sp
.endm

# Trap entry point (stvec). Dispatches on trap origin and calls the Rust
# handler as riscv_trap_handler(tf, from_user).
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
    // sscratch == 0: trap from S mode
    // sscratch != 0: trap from U mode
    csrrw   sp, sscratch, sp        // swap sscratch and sp
    bnez    sp, .Ltrap_entry_u

    csrr    sp, sscratch            // put supervisor sp back
    j       .Ltrap_entry_s

.Ltrap_entry_s:
    SAVE_REGS 0
    mv      a0, sp
    li      a1, 0
    call    riscv_trap_handler
    RESTORE_REGS 0
    sret

.Ltrap_entry_u:
    SAVE_REGS 1
    mv      a0, sp
    li      a1, 1
    call    riscv_trap_handler
    RESTORE_REGS 1
    sret
|
oscomp/test-la-load-arceos | 1,397 | modules/axhal/src/arch/x86_64/syscall.S | .section .text
.code64
# Entry point for the `syscall` instruction (reached via the LSTAR MSR).
# On entry the CPU has put the return RIP in rcx and RFLAGS in r11, and we
# are still on the user stack with the user GS base active. This builds a
# full trap frame on the kernel stack (TSS.rsp0), calls
# x86_syscall_handler(&frame), then returns with sysretq.
syscall_entry:
    swapgs                                                  # switch to kernel gs
    mov     gs:[offset __PERCPU_USER_RSP_OFFSET], rsp       # save user rsp
    mov     rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] # switch to kernel stack

    sub     rsp, 8                                  # skip user ss
    push    gs:[offset __PERCPU_USER_RSP_OFFSET]    # user rsp
    push    r11                                     # rflags
    push    {ucode64}                               # cs
    push    rcx                                     # rip
    sub     rsp, 4 * 8                              # skip until general registers

    # push the general-purpose registers (trap-frame order)
    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax

    mov     rdi, rsp
    call    x86_syscall_handler

    # restore general-purpose registers (rax now holds the syscall return)
    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    add     rsp, 9 * 8

    # reload the sysret state from the (now-popped) frame below rsp
    mov     rcx, [rsp - 5 * 8]      # rip
    mov     r11, [rsp - 3 * 8]      # rflags
    mov     rsp, [rsp - 2 * 8]      # user rsp
    swapgs
    sysretq
|
oscomp/test-la-load-arceos | 1,627 | modules/axhal/src/arch/x86_64/trap.S | .equ NUM_INT, 256
.altmacro
# DEF_HANDLER i: emit the stub for interrupt vector i. Vectors 8, 10-14
# and 17 already have an error code pushed by the CPU; for all others a
# dummy 0 is pushed so the TrapFrame layout is uniform.
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
    # error code pushed by CPU
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.else
    push    0           # fill in error code in TrapFrame
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.endif
.endm

# DEF_TABLE_ENTRY i: emit the address of stub i into trap_handler_table.
.macro DEF_TABLE_ENTRY, i
    .quad .Ltrap_handler_\i
.endm

.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
    DEF_HANDLER %i
    .set i, i + 1
.endr

# Common trap tail: finish building the TrapFrame, call the Rust handler
# as x86_trap_handler(&frame), then unwind and iretq. The CS selector at
# [rsp + 3*8] has RPL != 0 iff the trap came from user space, which
# decides whether GS needs swapping on the way in and out.
.Ltrap_common:
    test    byte ptr [rsp + 3 * 8], 3   # swap GS if it comes from user space
    jz      1f
    swapgs
1:  sub     rsp, 16                     # reserve space for fs_base
    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax
    mov     rdi, rsp
    call    x86_trap_handler
    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    add     rsp, 16                     # pop fs_base
    test    byte ptr [rsp + 3 * 8], 3   # swap GS back if return to user space
    jz      2f
    swapgs
2:  add     rsp, 16                     # pop vector, error_code
    iretq

# trap_handler_table[i] = address of the stub for vector i; installed
# into the IDT by the Rust side.
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
    DEF_TABLE_ENTRY %i
    .set i, i + 1
.endr
|
oscomp/test-la-load-arceos | 2,989 | modules/axhal/src/arch/aarch64/trap.S | .macro SAVE_REGS
sub sp, sp, {trapframe_size}
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
str x30, [sp, 30 * 8]
mrs x9, sp_el0
mrs x10, tpidr_el0
mrs x11, elr_el1
mrs x12, spsr_el1
stp x9, x10, [sp, 31 * 8]
stp x11, x12, [sp, 33 * 8]
# restore kernel tpidr_el0
mrs x1, tpidrro_el0
msr tpidr_el0, x1
# We may have interrupted userspace, or a guest, or exit-from or
# return-to either of those. So we can't trust sp_el0, and need to
# restore it.
bl {cache_current_task_ptr}
.endm
// RESTORE_REGS: undo SAVE_REGS — reload sp_el0/tpidr_el0/elr_el1/spsr_el1
// and all general registers from the trap frame, then pop it. The kernel's
// tpidr_el0 is parked in tpidrro_el0 while user state is live.
.macro RESTORE_REGS
    // backup kernel tpidr_el0
    mrs     x1, tpidr_el0
    msr     tpidrro_el0, x1

    ldp     x11, x12, [sp, 33 * 8]
    ldp     x9, x10, [sp, 31 * 8]
    msr     sp_el0, x9
    msr     tpidr_el0, x10
    msr     elr_el1, x11
    msr     spsr_el1, x12

    ldr     x30, [sp, 30 * 8]
    ldp     x28, x29, [sp, 28 * 8]
    ldp     x26, x27, [sp, 26 * 8]
    ldp     x24, x25, [sp, 24 * 8]
    ldp     x22, x23, [sp, 22 * 8]
    ldp     x20, x21, [sp, 20 * 8]
    ldp     x18, x19, [sp, 18 * 8]
    ldp     x16, x17, [sp, 16 * 8]
    ldp     x14, x15, [sp, 14 * 8]
    ldp     x12, x13, [sp, 12 * 8]
    ldp     x10, x11, [sp, 10 * 8]
    ldp     x8, x9, [sp, 8 * 8]
    ldp     x6, x7, [sp, 6 * 8]
    ldp     x4, x5, [sp, 4 * 8]
    ldp     x2, x3, [sp, 2 * 8]
    ldp     x0, x1, [sp]
    add     sp, sp, {trapframe_size}
.endm

// Entry for exception types we do not handle: report and return.
// \kind = vector row (0=sync 1=irq 2=fiq 3=serror), \source = origin
// (0=cur SP_EL0, 1=cur SP_ELx, 2=lower aarch64, 3=lower aarch32).
.macro INVALID_EXCP, kind, source
.p2align 7
    SAVE_REGS
    mov     x0, sp
    mov     x1, \kind
    mov     x2, \source
    bl      invalid_exception
    b       .Lexception_return
.endm

// Synchronous exception entry: handle_sync_exception(tf, source).
.macro HANDLE_SYNC, source
.p2align 7
    SAVE_REGS
    mov     x0, sp
    mov     x1, \source
    bl      handle_sync_exception
    b       .Lexception_return
.endm

// IRQ entry: handle_irq_exception(tf, source).
.macro HANDLE_IRQ, source
.p2align 7
    SAVE_REGS
    mov     x0, sp
    mov     x1, \source
    bl      handle_irq_exception
    b       .Lexception_return
.endm

// EL1 exception vector table: 2 KiB aligned (VBAR_EL1 requirement),
// 16 entries of 0x80 bytes each (enforced by .p2align 7 in the macros).
.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
    // current EL, with SP_EL0
    INVALID_EXCP 0 0
    INVALID_EXCP 1 0
    INVALID_EXCP 2 0
    INVALID_EXCP 3 0

    // current EL, with SP_ELx
    HANDLE_SYNC 1
    HANDLE_IRQ 1
    INVALID_EXCP 2 1
    INVALID_EXCP 3 1

    // lower EL, aarch64
    HANDLE_SYNC 2
    HANDLE_IRQ 2
    INVALID_EXCP 2 2
    INVALID_EXCP 3 2

    // lower EL, aarch32
    INVALID_EXCP 0 3
    INVALID_EXCP 1 3
    INVALID_EXCP 2 3
    INVALID_EXCP 3 3

.Lexception_return:
    RESTORE_REGS
    eret
|
oscomp/test-la-load-arceos | 2,544 | tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
# \register = runtime address of \symbol: page base via adrp plus the
# low 12 bits of the symbol's offset.
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
# \register = link-time (absolute) address of \symbol, assembled from
# three 16-bit chunks (covers 48-bit addresses).
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
// Chainloader entry: park all but the boot core, zero .bss, copy the
// binary from its load address to its link address, then jump to the
// relocated _start_rust with the stack set up.
_start:
	// Only proceed on the boot core. Park it otherwise.
	mrs	x0, MPIDR_EL1
	and	x0, x0, {CONST_CORE_ID_MASK}
	ldr	x1, BOOT_CORE_ID      // provided by bsp/__board_name__/cpu.rs
	cmp	x0, x1
	b.ne	.L_parking_loop
	// If execution reaches here, it is the boot core.
	// Initialize DRAM.
	// (.bss is zeroed at its link address, where the relocated binary
	// will expect it.)
	ADR_ABS	x0, __bss_start
	ADR_ABS	x1, __bss_end_exclusive
.L_bss_init_loop:
	cmp	x0, x1
	b.eq	.L_relocate_binary
	stp	xzr, xzr, [x0], #16
	b	.L_bss_init_loop
	// Next, relocate the binary.
.L_relocate_binary:
	ADR_REL	x0, __binary_nonzero_start  // The address the binary got loaded to.
	ADR_ABS	x1, __binary_nonzero_start  // The address the binary was linked to.
	ADR_ABS	x2, __binary_nonzero_end_exclusive
	// Copy one doubleword per iteration until the destination cursor
	// reaches the exclusive end (checked after each store).
.L_copy_loop:
	ldr	x3, [x0], #8
	str	x3, [x1], #8
	cmp	x1, x2
	b.lo	.L_copy_loop
	// Prepare the jump to Rust code.
	// Set the stack pointer.
	ADR_ABS	x0, __boot_core_stack_end_exclusive
	mov	sp, x0
	// Jump to the relocated Rust code.
	ADR_ABS	x1, _start_rust
	br	x1
	// Infinitely wait for events (aka "park the core").
.L_parking_loop:
	wfe
	b	.L_parking_loop
.size	_start, . - _start
.type	_start, function
.global	_start
|
OshinoShinobu-Chan/my_rCore_kernel | 7,666 | src/link_app.S |
# Generated table that embeds 33 user programs in the kernel image.
# _num_app holds the count, followed by the start address of each app and
# finally the end address of the last app, so app i occupies
# [entry[i], entry[i+1]).
    .align 3
    .section .data
    .global _num_app
_num_app:
    .quad 33
    .quad app_0_start
    .quad app_1_start
    .quad app_2_start
    .quad app_3_start
    .quad app_4_start
    .quad app_5_start
    .quad app_6_start
    .quad app_7_start
    .quad app_8_start
    .quad app_9_start
    .quad app_10_start
    .quad app_11_start
    .quad app_12_start
    .quad app_13_start
    .quad app_14_start
    .quad app_15_start
    .quad app_16_start
    .quad app_17_start
    .quad app_18_start
    .quad app_19_start
    .quad app_20_start
    .quad app_21_start
    .quad app_22_start
    .quad app_23_start
    .quad app_24_start
    .quad app_25_start
    .quad app_26_start
    .quad app_27_start
    .quad app_28_start
    .quad app_29_start
    .quad app_30_start
    .quad app_31_start
    .quad app_32_start
    .quad app_32_end
# NUL-terminated program names, in the same order as the address table.
    .global _app_names
_app_names:
    .string "cat"
    .string "cmdline_args"
    .string "count_lines"
    .string "exit"
    .string "fantastic_text"
    .string "filetest_simple"
    .string "forktest"
    .string "forktest2"
    .string "forktest_simple"
    .string "forktree"
    .string "getchar"
    .string "hello_world"
    .string "huge_write"
    .string "infloop"
    .string "initproc"
    .string "matrix"
    .string "pipe_large_test"
    .string "pipetest"
    .string "priv_csr"
    .string "priv_inst"
    .string "run_pipe_test"
    .string "sig_simple"
    .string "sig_simple2"
    .string "sig_tests"
    .string "sleep"
    .string "sleep_simple"
    .string "stack_overflow"
    .string "store_fault"
    .string "until_timeout"
    .string "user_shell"
    .string "usertests"
    .string "usertests_simple"
    .string "yield"
# Each app_N_start/app_N_end pair brackets a raw user ELF image pulled in
# with .incbin; .align 3 keeps every image 8-byte aligned.
    .section .data
    .global app_0_start
    .global app_0_end
    .align 3
app_0_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/cat"
app_0_end:
    .section .data
    .global app_1_start
    .global app_1_end
    .align 3
app_1_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/cmdline_args"
app_1_end:
    .section .data
    .global app_2_start
    .global app_2_end
    .align 3
app_2_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/count_lines"
app_2_end:
    .section .data
    .global app_3_start
    .global app_3_end
    .align 3
app_3_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/exit"
app_3_end:
    .section .data
    .global app_4_start
    .global app_4_end
    .align 3
app_4_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/fantastic_text"
app_4_end:
    .section .data
    .global app_5_start
    .global app_5_end
    .align 3
app_5_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/filetest_simple"
app_5_end:
    .section .data
    .global app_6_start
    .global app_6_end
    .align 3
app_6_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest"
app_6_end:
    .section .data
    .global app_7_start
    .global app_7_end
    .align 3
app_7_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest2"
app_7_end:
    .section .data
    .global app_8_start
    .global app_8_end
    .align 3
app_8_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest_simple"
app_8_end:
    .section .data
    .global app_9_start
    .global app_9_end
    .align 3
app_9_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/forktree"
app_9_end:
    .section .data
    .global app_10_start
    .global app_10_end
    .align 3
app_10_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/getchar"
app_10_end:
    .section .data
    .global app_11_start
    .global app_11_end
    .align 3
app_11_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/hello_world"
app_11_end:
    .section .data
    .global app_12_start
    .global app_12_end
    .align 3
app_12_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/huge_write"
app_12_end:
    .section .data
    .global app_13_start
    .global app_13_end
    .align 3
app_13_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/infloop"
app_13_end:
    .section .data
    .global app_14_start
    .global app_14_end
    .align 3
app_14_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/initproc"
app_14_end:
    .section .data
    .global app_15_start
    .global app_15_end
    .align 3
app_15_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/matrix"
app_15_end:
    .section .data
    .global app_16_start
    .global app_16_end
    .align 3
app_16_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/pipe_large_test"
app_16_end:
    .section .data
    .global app_17_start
    .global app_17_end
    .align 3
app_17_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/pipetest"
app_17_end:
    .section .data
    .global app_18_start
    .global app_18_end
    .align 3
app_18_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/priv_csr"
app_18_end:
    .section .data
    .global app_19_start
    .global app_19_end
    .align 3
app_19_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/priv_inst"
app_19_end:
    .section .data
    .global app_20_start
    .global app_20_end
    .align 3
app_20_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/run_pipe_test"
app_20_end:
    .section .data
    .global app_21_start
    .global app_21_end
    .align 3
app_21_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/sig_simple"
app_21_end:
    .section .data
    .global app_22_start
    .global app_22_end
    .align 3
app_22_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/sig_simple2"
app_22_end:
    .section .data
    .global app_23_start
    .global app_23_end
    .align 3
app_23_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/sig_tests"
app_23_end:
    .section .data
    .global app_24_start
    .global app_24_end
    .align 3
app_24_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep"
app_24_end:
    .section .data
    .global app_25_start
    .global app_25_end
    .align 3
app_25_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep_simple"
app_25_end:
    .section .data
    .global app_26_start
    .global app_26_end
    .align 3
app_26_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/stack_overflow"
app_26_end:
    .section .data
    .global app_27_start
    .global app_27_end
    .align 3
app_27_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/store_fault"
app_27_end:
    .section .data
    .global app_28_start
    .global app_28_end
    .align 3
app_28_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/until_timeout"
app_28_end:
    .section .data
    .global app_29_start
    .global app_29_end
    .align 3
app_29_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/user_shell"
app_29_end:
    .section .data
    .global app_30_start
    .global app_30_end
    .align 3
app_30_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests"
app_30_end:
    .section .data
    .global app_31_start
    .global app_31_end
    .align 3
app_31_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests_simple"
app_31_end:
    .section .data
    .global app_32_start
    .global app_32_end
    .align 3
app_32_start:
    .incbin "../user/target/riscv64gc-unknown-none-elf/release/yield"
app_32_end:
|
OshinoShinobu-Chan/my_rCore_kernel | 2,109 | src/trap/trap.S | .altmacro
.macro SAVE_GP n # Macro used to save the nth general purpose register to stack
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n # Macro used to load the nth general purpose register to stack
ld x\n, \n*8(sp)
.endm
# Trampoline page, mapped at the same virtual address in every address
# space so the satp switch below does not move the PC out from under us.
# TrapContext layout (8-byte slots): x0..x31 at 0..31, sstatus at 32,
# sepc at 33, kernel_satp at 34, kernel_sp at 35, trap_handler at 36.
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .align 2
__alltraps: # function used for all trap, this address will be put in stvec
    csrrw sp, sscratch, sp # sp -> kernel stack, sscratch -> user stack
    # save gernal-perpose registers, no need to save x0 cause it's always zero
    sd x1, 1*8(sp)
    sd x3, 3*8(sp) # skip sp(x2), we will save it later
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n + 1
    .endr
    csrr t0, sstatus # we can use t0/t1/t2 freely, because they were saved on kernel stack
    csrr t1, sepc
    sd t0, 32*8(sp) # save sstatus and sepc to TrapContext
    sd t1, 33*8(sp)
    csrr t2, sscratch # read user stack from sscratch and save it on the kernel stack
    sd t2, 2*8(sp)
    ld t0, 34*8(sp) # load kernel satp into t0
    ld t1, 36*8(sp) # load trap_handler into t1
    ld sp, 35*8(sp) # move to kernel sp
    csrw satp, t0 # switch to kernel address space
    sfence.vma
    jr t1 # jump to trap_handler
# Return to user mode: undoes __alltraps.  Reached by a jump from the
# kernel with the arguments described below.
__restore:
    # a0: *TrapContext in user address space(const), a1: user address space token
    csrw satp, a1 # switch to user address space
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # restore sstatus/sepc
    ld t0, 32*8(sp) # read sstatus
    ld t1, 33*8(sp) # read sepc
    csrw sstatus, t0 # write sstatus
    csrw sepc, t1 # write sepc
    # restore general purpose registers except sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n + 1
    .endr
    ld sp, 2*8(sp) # back to user stack
    sret # return
|
oswald2/my_os | 1,081 | src/boot.s | .section .text._start
.global _start
.type _start, @function
_start:
# Initialize the stack
adr x7, {} // Move address of STACK into x7
mov x8, {} // Move size of STACK into x8
add x7, x7, x8 // Adjust stack pointer to end of memory region
mov sp, x7 // Now set the stack pointer
# Enable FP in register
mrs x7, cpacr_el1
orr x7, x7, #(3 << 20)
msr cpacr_el1, x7
adr x0, _start
adr x1, _rela_start
adr x2, _rela_end
bl _relocate_binary
# Call into main
bl main
.equ R_AARCH64_RELATIVE, 1027
# Walk the Elf64_Rela entries in [x1, x2) (r_offset, r_info, r_addend; 24
# bytes each) and patch every R_AARCH64_RELATIVE slot with base + addend,
# where base (x0) is the runtime address of _start.
# NOTE(review): a non-RELATIVE entry aborts the whole walk (ret at 1f)
# instead of being skipped, and the end-of-table check runs only after an
# entry has been processed, so an empty rela region would still consume one
# bogus entry — confirm the linker emits a non-empty, RELATIVE-only table.
_relocate_binary:
    ldp x7, x8, [x1], 16       // Load pair into x7 and x8 from _rela_start and increment by 16
    ldr x9, [x1], 8            // Load the next 8 bytes
    cmp x8, R_AARCH64_RELATIVE //Compare with the value
    bne 1f                     // If not, we return
    add x10, x0, x7            // Add offset to base (_start)
    add x11, x0, x9            // Add addend + sym to base (see readelf output)
    str x11, [x10]             // Store the relocated address back
    cmp x1, x2
    bne _relocate_binary
1:
    ret
OwenWangbattle/Rcore-Project | 2,218 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
# Trampoline page mapped at the same virtual address in every address
# space, so the satp switch stays transparent to the PC.
# TrapContext layout (8-byte slots): x0..x31 at 0..31, sstatus 32,
# sepc 33, kernel_satp 34, kernel_sp 35, trap_handler 36.
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .globl __alltraps_k
    .globl __restore_k
    .align 2
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    jr t1
# Return to user mode: mirror image of __alltraps.
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret
    .align 2
# Trap entry for traps taken while already in S-mode: the context (x1, x3,
# x5..x31, sstatus, sepc) is pushed onto the current kernel stack, and
# sscratch holds the address of the kernel trap handler, which is called
# with a0 = frame pointer.
__alltraps_k:
    addi sp, sp, -34*8
    sd x1, 1*8(sp)
    sd x3, 3*8(sp)
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    mv a0, sp
    csrr t2, sscratch
    jalr t2
# Pop the kernel-trap frame pushed by __alltraps_k and resume.
__restore_k:
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    addi sp, sp, 34*8
    sret
|
ozhang228/books_courses_exercises | 1,210 | ostep/homework/threads-locks/peterson.s | # array of 2 integers (each size 4 bytes)
# load address of flag into fx register
# access flag[] with 0(%fx,%index,4)
# where %index is a register holding 0 or 1
# index reg contains 0 -> flag[0], if 1->flag[1]
# Peterson's two-thread mutual-exclusion algorithm for the OSTEP x86.py
# simulator.  bx = this thread's id (0/1), cx = the other thread's id;
# the critical section increments the shared `count`.
.var flag 2
# global turn variable
.var turn
# global count
.var count
.main
# put address of flag into fx
lea flag, %fx
# assume thread ID is in bx (0 or 1, scale by 4 to get proper flag address)
mov %bx, %cx     # bx: self, now copies to cx
neg %cx          # cx: - self
add $1, %cx      # cx: 1 - self
.acquire
mov $1, 0(%fx,%bx,4)   # flag[self] = 1      (announce interest)
mov %cx, turn          # turn = 1 - self     (yield priority to the other)
.spin1
mov 0(%fx,%cx,4), %ax  # flag[1-self]
test $1, %ax
jne .fini              # if flag[1-self] != 1, skip past loop to .fini
.spin2                 # just labeled for fun, not needed
mov turn, %ax
test %cx, %ax          # compare 'turn' and '1 - self'
je .spin1              # if turn==1-self, go back and start spin again
# fall out of spin
.fini
# do critical section now
mov count, %ax
add $1, %ax
mov %ax, count
.release
mov $0, 0(%fx,%bx,4)   # flag[self] = 0      (leave critical section)
# end case: make sure it's other's turn
mov %cx, turn          # turn = 1 - self
halt
|
P1erreCashon/rustcomp | 3,481 | arch/src/x86_64/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
# 32-bit Multiboot entry (Intel syntax): the loader enters _start with
# eax = 0x2BADB002 and ebx = physical address of the multiboot info
# struct; both are forwarded as SysV args for the eventual Rust entry.
.section .text.entry
.code32
.global _start
_start:
    mov     edi, eax        # arg1: magic: 0x2BADB002
    mov     esi, ebx        # arg2: multiboot info
    jmp     bsp_entry32

# Multiboot header: must sit in the first 8 KiB of the image, 4-byte
# aligned; magic + flags + checksum must sum to zero (mod 2^32).
# {mb_hdr_magic}/{mb_hdr_flags}/{offset} are filled by Rust global_asm!.
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
    .int    {mb_hdr_magic}                      # magic: 0x1BADB002
    .int    {mb_hdr_flags}                      # flags
    .int    -({mb_hdr_magic} + {mb_hdr_flags})  # checksum
    .int    multiboot_header - {offset}         # header_addr
    .int    _skernel - {offset}                 # load_addr
    .int    _load_end - {offset}                # load_end
    .int    end - {offset}                      # bss_end_addr
    .int    _start - {offset}                   # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
# Symbols are linked at high (virtual) addresses; `- {offset}` converts
# them to the physical addresses we are actually executing at.
.code32
bsp_entry32:
    lgdt    [.Ltmp_gdt_desc - {offset}]         # load the temporary GDT
    # set data segment selectors
    mov     ax, 0x18
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
    # set PAE, PGE bit in CR4
    mov     eax, {cr4}
    mov     cr4, eax
    # load the temporary page table
    lea     eax, [_kernel_page_table - {offset}]
    mov     cr3, eax
    # set LME, NXE bit in IA32_EFER
    mov     ecx, {efer_msr}
    mov     edx, 0
    mov     eax, {efer}
    wrmsr
    # set protected mode, write protect, paging bit in CR0
    # (paging + LME => the CPU enters IA-32e compatibility mode here)
    mov     eax, {cr0}
    mov     cr0, eax
    ljmp    0x10, offset bsp_entry64 - {offset}  # 0x10 is code64 segment
.code64
bsp_entry64:
    # clear segment selectors
    xor     ax, ax
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
    # set RSP to boot stack
    movabs  rsp, offset {boot_stack}
    add     rsp, {boot_stack_size}
    # call rust_entry(magic, mbi)
    movabs  rax, offset {entry}
    call    rax
    jmp     .Lhlt
# Final parking loop: halt forever if rust_entry returns.
.Lhlt:
    hlt
    jmp     .Lhlt
.section .rodata
.balign 8
# GDTR operand for lgdt: 16-bit limit + 32-bit physical base.
.Ltmp_gdt_desc:
    .short  .Ltmp_gdt_end - .Ltmp_gdt - 1   # limit
    .long   .Ltmp_gdt - {offset}            # base

.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:

# Boot PML4: maps the low 4 GiB both identity (entry 0) and at the
# kernel's high alias (entry 511), through the shared PDPT below.
.balign 4096
.global _kernel_page_table
_kernel_page_table:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad _kernel_mapping_pdpt - {offset} + 0x3     # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 510
    # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
    .quad _kernel_mapping_pdpt - {offset} + 0x3     # PRESENT | WRITABLE | paddr(tmp_pdpt)

.global _kernel_mapping_pdpt
# PDPT mapping 4 GiB with four 1 GiB huge pages.
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
_kernel_mapping_pdpt:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
|
P1erreCashon/rustcomp | 3,475 | arch/src/aarch64/trap.S | .macro INVALID_EXCP, kind, source
.p2align 7
msr daifset, #2
sub sp, sp, 35 * 8
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
mrs x9, sp_el0
mrs x10, elr_el1
mrs x11, spsr_el1
mrs x12, tpidr_el0
stp x30, x9, [sp, 30 * 8]
stp x10, x11, [sp, 32 * 8]
str x12, [sp, 34 * 8]
mov x0, sp
mov x1, \kind
mov x2, \source
bl handle_exception
b .Lexception_return
.endm
// Vector stub for traps from EL0 (aarch64): saves the user's GPRs into a
// TrapFrame whose pointer is read from [sp, 16*8], then continues in
// .Luser_trap_external with \kind in x0.
// NOTE(review): sp here appears to point at a per-CPU area where slots
// 0..15 hold the kernel's callee-saved registers, slot 16 the TrapFrame
// pointer, and slot 17 scratch for the user's x1 (which is clobbered as
// the frame pointer and patched back later) — confirm against the Rust
// code that enters user mode.
.macro USER_TRAP, kind
.p2align 7
    msr     daifset, #2             // mask IRQs while saving state
    str     x1, [sp, 17 * 8]        // stash user x1; x1 becomes the frame ptr
    ldr     x1, [sp, 16 * 8]        // x1 = TrapFrame pointer
    stp     x0, x1, [x1]            // tf->x1 is bogus here; fixed up later
    stp     x2, x3, [x1, 2 * 8]
    stp     x4, x5, [x1, 4 * 8]
    stp     x6, x7, [x1, 6 * 8]
    stp     x8, x9, [x1, 8 * 8]
    stp     x10, x11, [x1, 10 * 8]
    stp     x12, x13, [x1, 12 * 8]
    stp     x14, x15, [x1, 14 * 8]
    stp     x16, x17, [x1, 16 * 8]
    stp     x18, x19, [x1, 18 * 8]
    stp     x20, x21, [x1, 20 * 8]
    stp     x22, x23, [x1, 22 * 8]
    stp     x24, x25, [x1, 24 * 8]
    stp     x26, x27, [x1, 26 * 8]
    stp     x28, x29, [x1, 28 * 8]
    mov     x0, \kind
    b       .Luser_trap_external
.endm
.section .text
// Exception vector table (16 slots x 128 bytes); base goes into VBAR_EL1.
// .p2align 12 over-aligns to 4 KiB, which also satisfies the 2 KiB
// architectural requirement.
.p2align 12
.global exception_vector_base
exception_vector_base:
    // current EL, with SP_EL0
    INVALID_EXCP 0 0
    INVALID_EXCP 1 0
    INVALID_EXCP 2 0
    INVALID_EXCP 3 0
    // current EL, with SP_ELx
    INVALID_EXCP 0 1
    INVALID_EXCP 1 1
    INVALID_EXCP 2 1
    INVALID_EXCP 3 1
    // lower EL, aarch64
    USER_TRAP 0
    USER_TRAP 1
    USER_TRAP 2
    USER_TRAP 3
    // lower EL, aarch32
    INVALID_EXCP 0 3
    INVALID_EXCP 1 3
    INVALID_EXCP 2 3
    INVALID_EXCP 3 3
// Tail of USER_TRAP: finishes the TrapFrame (x30, sp_el0, elr, spsr,
// tpidr_el0, and the real user x1 saved at [sp, 17*8]), then restores the
// kernel's callee-saved registers from slots 0..15 of the per-CPU stack
// area and `ret`s back into the kernel code that entered user mode, with
// x0 = trap kind.
.Luser_trap_external:
    mrs     x9, sp_el0
    mrs     x10, elr_el1
    mrs     x11, spsr_el1
    mrs     x12, tpidr_el0
    stp     x30, x9, [x1, 30 * 8]
    stp     x10, x11, [x1, 32 * 8]
    str     x12, [x1, 34 * 8]
    ldr     x3, [sp, 17 * 8]        // patch the real user x1 into the frame
    str     x3, [x1, 1 * 8]
    ldp     x8, x16, [sp]
    ldp     x17, x18, [sp, 2 * 8]
    ldp     x19, x20, [sp, 4 * 8]
    ldp     x21, x22, [sp, 6 * 8]
    ldp     x23, x24, [sp, 8 * 8]
    ldp     x25, x26, [sp, 10 * 8]
    ldp     x27, x28, [sp, 12 * 8]
    ldp     x29, x30, [sp, 14 * 8]
    add     sp, sp, 18 * 8
    ret
// Exit path for INVALID_EXCP: unwind the 35-slot frame and eret.
.Lexception_return:
    ldr     x12, [sp, 34 * 8]
    ldp     x10, x11, [sp, 32 * 8]
    ldp     x30, x9, [sp, 30 * 8]
    msr     sp_el0, x9
    msr     elr_el1, x10
    msr     spsr_el1, x11
    msr     tpidr_el0, x12
    ldp     x28, x29, [sp, 28 * 8]
    ldp     x26, x27, [sp, 26 * 8]
    ldp     x24, x25, [sp, 24 * 8]
    ldp     x22, x23, [sp, 22 * 8]
    ldp     x20, x21, [sp, 20 * 8]
    ldp     x18, x19, [sp, 18 * 8]
    ldp     x16, x17, [sp, 16 * 8]
    ldp     x14, x15, [sp, 14 * 8]
    ldp     x12, x13, [sp, 12 * 8]
    ldp     x10, x11, [sp, 10 * 8]
    ldp     x8, x9, [sp, 8 * 8]
    ldp     x6, x7, [sp, 6 * 8]
    ldp     x4, x5, [sp, 4 * 8]
    ldp     x2, x3, [sp, 2 * 8]
    ldp     x0, x1, [sp]
    add     sp, sp, 35 * 8
    eret
|
parkwoodam/lumina_ai | 15,150 | .cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zstd-sys-2.0.15+zstd.1.5.7/zstd/lib/decompress/huf_decompress_amd64.S | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"

/* ELF hardening notes: non-executable stack, and (on aarch64) BTI/PAC
 * feature markers so the empty object does not poison link-time merging. */
#if defined(__ELF__) && defined(__GNUC__)
/* Stack marking
 * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
 */
.section .note.GNU-stack,"",%progbits

#if defined(__aarch64__)
/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64.
 * See: https://github.com/facebook/zstd/issues/3841
 * See: https://gcc.godbolt.org/z/sqr5T4ffK
 * See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/
 * See: https://reviews.llvm.org/D62609
 */
.pushsection .note.gnu.property, "a"
.p2align 3
.long 4                 /* size of the name - "GNU\0" */
.long 0x10              /* size of descriptor */
.long 0x5               /* NT_GNU_PROPERTY_TYPE_0 */
.asciz "GNU"
.long 0xc0000000        /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4                 /* pr_datasz - 4 bytes */
.long 3                 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */
.p2align 3              /* pr_padding - bring everything to 8 byte alignment */
.popsection
#endif

#endif

#if ZSTD_ENABLE_ASM_X86_64_BMI2

/* Calling convention:
 *
 * %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*.
 * %rbp isn't maintained (no frame pointer).
 * %rsp contains the stack pointer that grows down.
 * No red-zone is assumed, only addresses >= %rsp are used.
 * All register contents are preserved.
 */

ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text

/* Sets up register mappings for clarity.
 * op[], bits[], dtable & ip[0] each get their own register.
 * ip[1,2,3] & olimit alias var[].
 * %rax is a scratch register.
 */

#define op0    rsi
#define op1    rbx
#define op2    rcx
#define op3    rdi

#define ip0    r8
#define ip1    r9
#define ip2    r10
#define ip3    r11

#define bits0  rbp
#define bits1  rdx
#define bits2  r12
#define bits3  r13
#define dtable r14
#define olimit r15

/* var[] aliases ip[1,2,3] & olimit
 * ip[1,2,3] are saved every iteration.
 * olimit is only used in compute_olimit.
 */
#define var0   r15
#define var1   r9
#define var2   r10
#define var3   r11

/* 32-bit var registers */
#define vard0  r15d
#define vard1  r9d
#define vard2  r10d
#define vard3  r11d

/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
    X(0); \
    X(1); \
    X(2); \
    X(3)

/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
    X(0, idx); \
    X(1, idx); \
    X(2, idx); \
    X(3, idx)
/* Hot loop for single-symbol (X1) Huffman decoding of 4 interleaved
 * streams.  Reads/writes a HUF_DecompressAsmArgs struct: ip[4] at
 * offsets 0..24, op[4] at 32..56, bits[4] at 64..88, dtable at 96,
 * ilowest at 104, oend at 112.  Decodes 5 symbols per stream per
 * iteration (20 total); requires BMI2 (shrxq/shlx).
 */
/* Define both _HUF_* & HUF_* symbols because MacOS
 * C symbols are prefixed with '_' & Linux symbols aren't.
 */
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
    ZSTD_CET_ENDBRANCH
    /* Save all registers - even if they are callee saved for simplicity. */
    push %rax
    push %rbx
    push %rcx
    push %rdx
    push %rbp
    push %rsi
    push %rdi
    push %r8
    push %r9
    push %r10
    push %r11
    push %r12
    push %r13
    push %r14
    push %r15

    /* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
    movq %rcx, %rax
#else
    movq %rdi, %rax
#endif
    movq 0(%rax), %ip0
    movq 8(%rax), %ip1
    movq 16(%rax), %ip2
    movq 24(%rax), %ip3
    movq 32(%rax), %op0
    movq 40(%rax), %op1
    movq 48(%rax), %op2
    movq 56(%rax), %op3
    movq 64(%rax), %bits0
    movq 72(%rax), %bits1
    movq 80(%rax), %bits2
    movq 88(%rax), %bits3
    movq 96(%rax), %dtable
    push %rax      /* argument */
    push 104(%rax) /* ilowest */
    push 112(%rax) /* oend */
    push %olimit   /* olimit space */

    subq $24, %rsp

.L_4X1_compute_olimit:
    /* Computes how many iterations we can do safely
     * %r15, %rax may be clobbered
     * rbx, rdx must be saved
     * op3 & ip0 mustn't be clobbered
     */
    movq %rbx, 0(%rsp)
    movq %rdx, 8(%rsp)

    movq 32(%rsp), %rax /* rax = oend */
    subq %op3, %rax     /* rax = oend - op3 */

    /* r15 = (oend - op3) / 5 */
    /* (division by reciprocal multiplication; 5 output bytes/iteration) */
    movabsq $-3689348814741910323, %rdx
    mulq %rdx
    movq %rdx, %r15
    shrq $2, %r15

    movq %ip0, %rax      /* rax = ip0 */
    movq 40(%rsp), %rdx  /* rdx = ilowest */
    subq %rdx, %rax      /* rax = ip0 - ilowest */
    movq %rax, %rbx      /* rbx = ip0 - ilowest */

    /* rdx = (ip0 - ilowest) / 7 */
    /* (up to 7 input bytes consumed per stream per iteration) */
    movabsq $2635249153387078803, %rdx
    mulq %rdx
    subq %rdx, %rbx
    shrq %rbx
    addq %rbx, %rdx
    shrq $2, %rdx

    /* r15 = min(%rdx, %r15) */
    cmpq %rdx, %r15
    cmova %rdx, %r15

    /* r15 = r15 * 5 */
    leaq (%r15, %r15, 4), %r15

    /* olimit = op3 + r15 */
    addq %op3, %olimit

    movq 8(%rsp), %rdx
    movq 0(%rsp), %rbx

    /* If (op3 + 20 > olimit) */
    movq %op3, %rax    /* rax = op3 */
    cmpq %rax, %olimit /* op3 == olimit */
    je .L_4X1_exit

    /* If (ip1 < ip0) go to exit */
    cmpq %ip0, %ip1
    jb .L_4X1_exit

    /* If (ip2 < ip1) go to exit */
    cmpq %ip1, %ip2
    jb .L_4X1_exit

    /* If (ip3 < ip2) go to exit */
    cmpq %ip2, %ip3
    jb .L_4X1_exit

/* Reads top 11 bits from bits[n]
 * Loads dt[bits[n]] into var[n]
 */
#define GET_NEXT_DELT(n) \
    movq $53, %var##n; \
    shrxq %var##n, %bits##n, %var##n; \
    movzwl (%dtable,%var##n,2),%vard##n

/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
 * Moves var[n] to %rax
 * bits[n] <<= var[n] & 63
 * op[n][idx] = %rax >> 8
 * %ah is a way to access bits [8, 16) of %rax
 */
#define DECODE_FROM_DELT(n, idx) \
    movq %var##n, %rax; \
    shlxq %var##n, %bits##n, %bits##n; \
    movb %ah, idx(%op##n)

/* Assumes GET_NEXT_DELT has been called.
 * Calls DECODE_FROM_DELT then GET_NEXT_DELT
 */
#define DECODE_AND_GET_NEXT(n, idx) \
    DECODE_FROM_DELT(n, idx); \
    GET_NEXT_DELT(n) \

/* // ctz & nbBytes is stored in bits[n]
 * // nbBits is stored in %rax
 * ctz = CTZ[bits[n]]
 * nbBits = ctz & 7
 * nbBytes = ctz >> 3
 * op[n] += 5
 * ip[n] -= nbBytes
 * // Note: x86-64 is little-endian ==> no bswap
 * bits[n] = MEM_readST(ip[n]) | 1
 * bits[n] <<= nbBits
 */
#define RELOAD_BITS(n) \
    bsfq %bits##n, %bits##n; \
    movq %bits##n, %rax; \
    andq $7, %rax; \
    shrq $3, %bits##n; \
    leaq 5(%op##n), %op##n; \
    subq %bits##n, %ip##n; \
    movq (%ip##n), %bits##n; \
    orq $1, %bits##n; \
    shlx %rax, %bits##n, %bits##n

    /* Store clobbered variables on the stack */
    movq %olimit, 24(%rsp)
    movq %ip1, 0(%rsp)
    movq %ip2, 8(%rsp)
    movq %ip3, 16(%rsp)

    /* Call GET_NEXT_DELT for each stream */
    FOR_EACH_STREAM(GET_NEXT_DELT)

    .p2align 6

.L_4X1_loop_body:
    /* Decode 5 symbols in each of the 4 streams (20 total)
     * Must have called GET_NEXT_DELT for each stream
     */
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
    FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)

    /* Load ip[1,2,3] from stack (var[] aliases them)
     * ip[] is needed for RELOAD_BITS
     * Each will be stored back to the stack after RELOAD
     */
    movq 0(%rsp), %ip1
    movq 8(%rsp), %ip2
    movq 16(%rsp), %ip3

    /* Reload each stream & fetch the next table entry
     * to prepare for the next iteration
     */
    RELOAD_BITS(0)
    GET_NEXT_DELT(0)

    RELOAD_BITS(1)
    movq %ip1, 0(%rsp)
    GET_NEXT_DELT(1)

    RELOAD_BITS(2)
    movq %ip2, 8(%rsp)
    GET_NEXT_DELT(2)

    RELOAD_BITS(3)
    movq %ip3, 16(%rsp)
    GET_NEXT_DELT(3)

    /* If op3 < olimit: continue the loop */
    cmp %op3, 24(%rsp)
    ja .L_4X1_loop_body

    /* Reload ip[1,2,3] from stack */
    movq 0(%rsp), %ip1
    movq 8(%rsp), %ip2
    movq 16(%rsp), %ip3

    /* Re-compute olimit */
    jmp .L_4X1_compute_olimit

#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE
#undef RELOAD_BITS
.L_4X1_exit:
    addq $24, %rsp

    /* Restore stack (oend & olimit) */
    pop %rax /* olimit */
    pop %rax /* oend */
    pop %rax /* ilowest */
    pop %rax /* arg */

    /* Save ip / op / bits */
    movq %ip0,  0(%rax)
    movq %ip1,  8(%rax)
    movq %ip2, 16(%rax)
    movq %ip3, 24(%rax)
    movq %op0, 32(%rax)
    movq %op1, 40(%rax)
    movq %op2, 48(%rax)
    movq %op3, 56(%rax)
    movq %bits0, 64(%rax)
    movq %bits1, 72(%rax)
    movq %bits2, 80(%rax)
    movq %bits3, 88(%rax)

    /* Restore registers */
    pop %r15
    pop %r14
    pop %r13
    pop %r12
    pop %r11
    pop %r10
    pop %r9
    pop %r8
    pop %rdi
    pop %rsi
    pop %rbp
    pop %rdx
    pop %rcx
    pop %rbx
    pop %rax
    ret
/* Hot loop for double-symbol (X2) Huffman decoding of 4 interleaved
 * streams.  Same HUF_DecompressAsmArgs layout as the 4X1 loop; each
 * DTable entry is 4 bytes (2-byte symbol pair, nbBits, output length)
 * and up to 10 output bytes are written per stream per iteration.
 */
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
    ZSTD_CET_ENDBRANCH
    /* Save all registers - even if they are callee saved for simplicity. */
    push %rax
    push %rbx
    push %rcx
    push %rdx
    push %rbp
    push %rsi
    push %rdi
    push %r8
    push %r9
    push %r10
    push %r11
    push %r12
    push %r13
    push %r14
    push %r15

    /* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
    movq %rcx, %rax
#else
    movq %rdi, %rax
#endif
    movq 0(%rax), %ip0
    movq 8(%rax), %ip1
    movq 16(%rax), %ip2
    movq 24(%rax), %ip3
    movq 32(%rax), %op0
    movq 40(%rax), %op1
    movq 48(%rax), %op2
    movq 56(%rax), %op3
    movq 64(%rax), %bits0
    movq 72(%rax), %bits1
    movq 80(%rax), %bits2
    movq 88(%rax), %bits3
    movq 96(%rax), %dtable
    push %rax      /* argument */
    push %rax      /* olimit */
    push 104(%rax) /* ilowest */

    movq 112(%rax), %rax
    push %rax /* oend3 */

    /* Each stream n may write up to the start of stream n+1's output,
     * so op[n+1] serves as oend[n]. */
    movq %op3, %rax
    push %rax /* oend2 */

    movq %op2, %rax
    push %rax /* oend1 */

    movq %op1, %rax
    push %rax /* oend0 */

    /* Scratch space */
    subq $8, %rsp

.L_4X2_compute_olimit:
    /* Computes how many iterations we can do safely
     * %r15, %rax may be clobbered
     * rdx must be saved
     * op[1,2,3,4] & ip0 mustn't be clobbered
     */
    movq %rdx, 0(%rsp)

    /* We can consume up to 7 input bytes each iteration. */
    movq %ip0, %rax     /* rax = ip0 */
    movq 40(%rsp), %rdx /* rdx = ilowest */
    subq %rdx, %rax     /* rax = ip0 - ilowest */
    movq %rax, %r15     /* r15 = ip0 - ilowest */

    /* rdx = rax / 7 */
    movabsq $2635249153387078803, %rdx
    mulq %rdx
    subq %rdx, %r15
    shrq %r15
    addq %r15, %rdx
    shrq $2, %rdx

    /* r15 = (ip0 - ilowest) / 7 */
    movq %rdx, %r15

    /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
    movq 8(%rsp), %rax /* rax = oend0 */
    subq %op0, %rax    /* rax = oend0 - op0 */
    movq 16(%rsp), %rdx /* rdx = oend1 */
    subq %op1, %rdx     /* rdx = oend1 - op1 */

    cmpq %rax, %rdx
    cmova %rax, %rdx /* rdx = min(%rdx, %rax) */

    movq 24(%rsp), %rax /* rax = oend2 */
    subq %op2, %rax     /* rax = oend2 - op2 */

    cmpq %rax, %rdx
    cmova %rax, %rdx /* rdx = min(%rdx, %rax) */

    movq 32(%rsp), %rax /* rax = oend3 */
    subq %op3, %rax     /* rax = oend3 - op3 */

    cmpq %rax, %rdx
    cmova %rax, %rdx /* rdx = min(%rdx, %rax) */

    movabsq $-3689348814741910323, %rax
    mulq %rdx
    shrq $3, %rdx /* rdx = rdx / 10 */

    /* r15 = min(%rdx, %r15) */
    cmpq %rdx, %r15
    cmova %rdx, %r15

    /* olimit = op3 + 5 * r15 */
    movq %r15, %rax
    leaq (%op3, %rax, 4), %olimit
    addq %rax, %olimit

    movq 0(%rsp), %rdx

    /* If (op3 + 10 > olimit) */
    movq %op3, %rax    /* rax = op3 */
    cmpq %rax, %olimit /* op3 == olimit */
    je .L_4X2_exit

    /* If (ip1 < ip0) go to exit */
    cmpq %ip0, %ip1
    jb .L_4X2_exit

    /* If (ip2 < ip1) go to exit */
    cmpq %ip1, %ip2
    jb .L_4X2_exit

    /* If (ip3 < ip2) go to exit */
    cmpq %ip2, %ip3
    jb .L_4X2_exit

/* Decode one X2 entry for stream n: top 11 bits of bits[n] index the
 * dtable; write the 2-byte symbol pair, consume nbBits, advance op[n]
 * by the entry's output length (1 or 2). */
#define DECODE(n, idx) \
    movq %bits##n, %rax; \
    shrq $53, %rax; \
    movzwl 0(%dtable,%rax,4),%r8d; \
    movzbl 2(%dtable,%rax,4),%r15d; \
    movzbl 3(%dtable,%rax,4),%eax; \
    movw %r8w, (%op##n); \
    shlxq %r15, %bits##n, %bits##n; \
    addq %rax, %op##n

/* Refill bits[n] from ip[n]: the sentinel 1-bit's position (bsf) encodes
 * both the bit offset (low 3 bits) and bytes consumed (>>3). */
#define RELOAD_BITS(n) \
    bsfq %bits##n, %bits##n; \
    movq %bits##n, %rax; \
    shrq $3, %bits##n; \
    andq $7, %rax; \
    subq %bits##n, %ip##n; \
    movq (%ip##n), %bits##n; \
    orq $1, %bits##n; \
    shlxq %rax, %bits##n, %bits##n

    movq %olimit, 48(%rsp)

    .p2align 6

.L_4X2_loop_body:
    /* We clobber r8, so store it on the stack */
    movq %r8, 0(%rsp)

    /* Decode 5 symbols from each of the 4 streams (20 symbols total). */
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
    FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)

    /* Reload r8 */
    movq 0(%rsp), %r8

    FOR_EACH_STREAM(RELOAD_BITS)

    cmp %op3, 48(%rsp)
    ja .L_4X2_loop_body
    jmp .L_4X2_compute_olimit

#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
    addq $8, %rsp
    /* Restore stack (oend & olimit) */
    pop %rax /* oend0 */
    pop %rax /* oend1 */
    pop %rax /* oend2 */
    pop %rax /* oend3 */
    pop %rax /* ilowest */
    pop %rax /* olimit */
    pop %rax /* arg */

    /* Save ip / op / bits */
    movq %ip0,  0(%rax)
    movq %ip1,  8(%rax)
    movq %ip2, 16(%rax)
    movq %ip3, 24(%rax)
    movq %op0, 32(%rax)
    movq %op1, 40(%rax)
    movq %op2, 48(%rax)
    movq %op3, 56(%rax)
    movq %bits0, 64(%rax)
    movq %bits1, 72(%rax)
    movq %bits2, 80(%rax)
    movq %bits3, 88(%rax)

    /* Restore registers */
    pop %r15
    pop %r14
    pop %r13
    pop %r12
    pop %r11
    pop %r10
    pop %r9
    pop %r8
    pop %rdi
    pop %rsi
    pop %rbp
    pop %rdx
    pop %rcx
    pop %rbx
    pop %rax
    ret

#endif
|
Parsifal1986/XV6-RISCV-in-RUST | 1,512 | kernel/src/kernelvec.S | #
# interrupts and exceptions while in supervisor
# mode come here.
#
# the current stack is a kernel stack.
# push registers, call kerneltrap().
# when kerneltrap() returns, restore registers, return.
#
.globl kerneltrap
.globl kernelvec
.align 4
# kernelvec: entry point for traps taken while already in supervisor mode
# (stvec points here while executing in the kernel). Runs on the current
# kernel stack: spill the caller-saved registers, call kerneltrap(), restore
# them and resume the interrupted kernel code with sret.
kernelvec:
# make room to save registers.
addi sp, sp, -256
# save caller-saved registers.
# Only the caller-saved set is spilled: kerneltrap() follows the C ABI,
# which preserves s0-s11 for us. The unused slot offsets (56-208) appear
# to follow the original xv6 256-byte frame layout where the s-registers
# lived -- TODO confirm against the Rust kerneltrap implementation.
sd ra, 0(sp)
# sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
# call the C trap handler in trap.c
call kerneltrap
# restore registers.
ld ra, 0(sp)
# ld sp, 8(sp)
ld gp, 16(sp)
# not tp (contains hartid), in case we moved CPUs
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
# return to whatever we were doing in the kernel.
sret
Parsifal1986/XV6-RISCV-in-RUST | 3,745 | kernel/src/trampoline.S | #
# low-level code to handle traps from user space into
# the kernel, and returns from kernel to user.
#
# the kernel maps the page holding this code
# at the same virtual address (TRAMPOLINE)
# in user and kernel space so that it continues
# to work when it switches page tables.
# kernel.ld causes this code to start at
# a page boundary.
#
.extern TRAPFRAME
.section trampsec
.globl trampoline
.globl usertrap
trampoline:
.align 4
.globl uservec
uservec:
#
# trap.c sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# save user a0 in sscratch so
# a0 can be used to get at TRAPFRAME.
csrw sscratch, a0
# each process has a separate p->trapframe memory area,
# but it's mapped to the same virtual address
# (TRAPFRAME) in every process's user page table.
li a0, TRAPFRAME
# save the user registers in TRAPFRAME
# (user a0 itself is parked in sscratch and stored at offset 112 below;
# offsets 0/8/16/32 hold kernel_satp/kernel_sp/kernel_trap/kernel_hartid,
# as the loads at the end of this routine show)
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
# save the user a0 in p->trapframe->a0
csrr t0, sscratch
sd t0, 112(a0)
# initialize kernel stack pointer, from p->trapframe->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->trapframe->kernel_hartid
ld tp, 32(a0)
# load the address of usertrap(), from p->trapframe->kernel_trap
ld t0, 16(a0)
# fetch the kernel page table address, from p->trapframe->kernel_satp.
ld t1, 0(a0)
# wait for any previous memory operations to complete, so that
# they use the user page table.
sfence.vma zero, zero
# install the kernel page table.
csrw satp, t1
# flush now-stale user entries from the TLB.
sfence.vma zero, zero
# call usertrap()
# (this works even though the page table just changed, because this
# trampoline page is mapped at the same virtual address in both the
# user and kernel page tables)
jalr t0
.globl userret
userret:
# usertrap() returns here, with user satp in a0.
# return from kernel to user.
# switch to the user page table.
# (sfence.vma before the write orders prior memory ops against the
# switch; the one after flushes now-stale kernel TLB entries)
sfence.vma zero, zero
csrw satp, a0
sfence.vma zero, zero
# a0 is free again: point it at the shared trapframe mapping.
li a0, TRAPFRAME
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0 (must be last -- a0 is the trapframe pointer until here)
ld a0, 112(a0)
# return to user mode and user pc.
# usertrapret() set up sstatus and sepc.
sret
pascaldisse/gaia-script | 2,961 | comp/src/asm_runtime_x86_64.s | ; GaiaScript X86-64 Runtime Support
; This file contains the runtime functions needed by GaiaScript assembly
section .data
; Error messages
err_memory_alloc_failed db "Failed to allocate memory", 0
; Constants
float_one dq 1.0
section .text
global gaia_input_image
global gaia_input_text
global gaia_input_sequence
global gaia_input_latent
global gaia_conv_relu
global gaia_conv_sigmoid
global gaia_conv_tanh
global gaia_conv_softmax
global gaia_conv_none
global gaia_dense_relu
global gaia_dense_sigmoid
global gaia_dense_tanh
global gaia_dense_softmax
global gaia_dense_none
global gaia_pooling_none
global gaia_flatten_none
global gaia_loss_MSE
global gaia_loss_BCE
; Input layer implementations
gaia_input_image:
;-----------------------------------------------------------------------
; void *gaia_input_image(size_t width, size_t height, size_t channels)
; ABI:  System V AMD64
; In:   rdi = width, rsi = height, rdx = channels
; Out:  rax = pointer to a zero-initialized float buffer of
;       width * height * channels * 4 bytes, or NULL on failure
; Clobbers: caller-saved registers (rcx, rdx, rsi, rdi, r8-r11, xmm)
;
; Fixes over the previous version:
; - the original addressed its arguments as "r0"/"r1"/"r2", which are
;   not x86-64 registers and do not assemble; arguments are taken in
;   the SysV argument registers instead
; - the byte count was kept in a caller-saved register across the
;   malloc call; it now lives in callee-saved rbx
; - the error-message address is loaded RIP-relative (PIE-safe)
;-----------------------------------------------------------------------
    push rbp
    mov rbp, rsp
    push rbx                ; callee-saved: byte count survives libc calls
    sub rsp, 8              ; keep rsp 16-byte aligned at call sites
    ; bytes = width * height * channels * sizeof(float)
    mov rax, rdi
    imul rax, rsi
    imul rax, rdx
    shl rax, 2              ; 4 bytes per float
    mov rbx, rax
    ; Allocate memory
    mov rdi, rax
    call malloc
    ; Check if allocation succeeded
    test rax, rax
    jz .alloc_failed
    ; Zero-initialize. memset returns its first argument, so rax still
    ; holds the buffer pointer when we fall through to the return.
    mov rdi, rax            ; Destination
    xor esi, esi            ; Value (0)
    mov rdx, rbx            ; Size in bytes
    call memset
    add rsp, 8
    pop rbx
    pop rbp
    ret
.alloc_failed:
    ; Print error message and return NULL
    lea rdi, [rel err_memory_alloc_failed]
    call puts
    xor eax, eax
    add rsp, 8
    pop rbx
    pop rbp
    ret
gaia_input_text:
; Similar to image input but for text
; TODO: unimplemented stub -- returns immediately, rax unspecified
ret
gaia_input_sequence:
; For sequence data
; TODO: unimplemented stub -- returns immediately, rax unspecified
ret
gaia_input_latent:
; For latent/random inputs
; TODO: unimplemented stub -- returns immediately, rax unspecified
ret
; Layer implementations
; NOTE(review): every routine below is an unimplemented stub -- each
; returns immediately and leaves rax unspecified. Callers must not rely
; on any output until these are implemented.
gaia_conv_relu:
; Convolutional layer with ReLU activation
; Parameters:
; rdi = Input tensor
; rsi = Filters
; rdx = Other parameters
ret
gaia_conv_sigmoid:
; Convolutional layer with sigmoid activation
ret
gaia_conv_tanh:
; Convolutional layer with tanh activation
ret
gaia_conv_softmax:
; Convolutional layer with softmax activation
ret
gaia_conv_none:
; Convolutional layer without activation
ret
gaia_dense_relu:
; Dense layer with ReLU activation
ret
gaia_dense_sigmoid:
; Dense layer with sigmoid activation
ret
gaia_dense_tanh:
; Dense layer with tanh activation
ret
gaia_dense_softmax:
; Dense layer with softmax activation
ret
gaia_dense_none:
; Dense layer without activation
ret
gaia_pooling_none:
; Pooling layer
ret
gaia_flatten_none:
; Flatten layer
ret
; Loss functions
gaia_loss_MSE:
; Mean Squared Error loss function
ret
gaia_loss_BCE:
; Binary Cross Entropy loss function
ret
; External functions that would be linked in
extern malloc
extern free
extern memset
extern puts
patmessina/chip8 | 3,359 | assembly/test.s | // print out all opcodes -- not intended to be run on chip 8
// vf -- is used as a flag instruction -- particularly for overflow
org 0x2
opcodes:
// add i vx - 0xfx1e - expected 0xF21E
// i = i + vx -- does not affect the vf on overflow
add i v2
// add vx vy - 0x8xy4 - expected 0x8234 -- vy is not affected
// vf is set to 1 if there is a carry (larger than 0xFF or 255)
// vf is set to 0 if there is no carry
add v2 v3
// add vx nn - 0x7xnn -- expected 0x7201
// vx = vx + nn -- does not affect the vf on overflow
add v2 0x01
// and vx vy - 0x8xy2 - expected 0x8232 -- vx = vx & vy -- vy is not affected
and v2 v3
// call addr - 0x2nnn - expected 0x2A3E -- call a subroutine at address
// address needs to be even
call 0xA3E
// cls - 0x00E0 -- clear the screen
cls
// drw vx vy n - 0xDxyn - expected 0xd234
// draw sprite n pixels tall from memory location in register i
// at position (vx, vy)
drw v2 v3 0x4
// jmp addr - 0x1nnn - expected 0x1A3E -- jump to address
// address needs to be even
jmp 0xA3E
// jmp v0 nnn - 0xBnnn - expected 0xB33E -- jump to address + v0
// address needs to be even
jmp v0 0x33E
// ld vx nn - 0x6xnn - expected 0x6223
ld v2 0x23
// ld vx vy - 0x8xy0 - expected 0x8320 -- load vy into vx
ld v3 v2
// ld i nnn - 0xAnnn - expected 0xA2AB -- load nnn into i
ld i 0x2AB
// ld vx dt - 0xFx07 - expected 0xF307 -- load delay timer into vx
ld v3 dt
// ld dt vx - 0xFx15 - expected 0xF315 -- load vx into delay timer
ld dt v3
// ld st v2 - 0xFx18 - expected 0xF218 -- load vx into sound timer
ld st v2
// ld f vx - 0xFx29 - expected 0xF329 -- load sprite location for digit vx into i
ld f v3
// ld b vx - 0xFx33 - expected 0xF233 -- store bcd representation of vx in memory locations i, i+1, i+2
ld b v2
// ld i vx - 0xFx55 - expected 0xF255 -- store registers v0 through vx in memory starting at i
ld i v2
// ld vx i - 0xFx65 - expected 0xF265 -- load registers v0 through vx from memory starting at i
ld v2 i
// or vx vy - 0x8xy1 - expected 0x8231 -- vx = vx | vy -- vy is not affected
or v2 v3
// ret - 0x00EE -- return from a subroutine
ret
// rnd vx nn - 0xCxnn - expected 0xC423 -- vx = random number & nn
rnd v4 0x23
// se vx vy - 0x5xy0 - expected 0x5230
// skip next instruction if vx == vy
se v2 v3
// se vx nn - 0x3xnn - expected 0x3231
// skip next instruction if vx == nn
se v2 0x31
// shl vx - 0x8xyE - expected 0x830E -- vx = vx << 1
shl v3
// shr vx - 0x8xy6 - expected 0x8306 -- vx = vx >> 1
shr v3
// sknp vx - 0xExA1 - expected 0xE2A1
// skip next instruction if key in vx is not pressed
sknp v2
// skp vx - 0xEx9E - expected 0xE29E
// skip next instruction if key in vx is pressed
skp v2
// sne vx vy - 0x9xy0 - expected 0x9230
// skip next instruction if vx != vy
sne v2 v3
// sne vx nn - 0x4xnn - expected 0x4231
sne v2 0x31
// sub vx, vy - 0x8xy5 - expected 0x8235 -- vx = vx - vy
// if vx >= vy, vf = 1 else vf = 0
sub v2 v3
// subn vx, vy - 0x8xy7 - expected 0x8237 -- vx = vy - vx
// if vy >= vx, vf = 1 else vf = 0
subn v2 v3
// wkp vx - 0xFx0A - expected 0xF30A -- wait for key press and store in vx
wkp v3
// xor vx vy - 0x8xy3 - expected 0x8233 -- vx = vx ^ vy -- vy is not affected
xor v2 v3
|
pcg108/crosvm | 1,406 | kernel_loader/src/test_elf.S | # Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build instructions:
# x86_64-linux-gnu-as test_elf.S -o test_elf.o
# x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld
.intel_syntax noprefix
.section .rodata
hello_world:
.string "Hello world!\n"
.set hello_size, .-hello_world
.text
.globl _start
# _start: prints hello_world to the COM1 serial port (0x3F8) by polling
# the UART line-status register, then executes int3 to make the vcpu exit.
_start:
lea rsi, [rip + hello_world] # rsi -> message string
mov rcx, hello_size # rcx = length of message
mov dx, 0x3F8 # dx = COM1 port
.print_loop:
# Wait for the transmit buffer to be empty by polling the line status.
add dx, 5 # dx = line status register (COM1 + 5)
.wait_empty:
in al, dx # read line status
test al, 0x20 # check buffer empty flag (THRE, bit 5)
jz .wait_empty # keep waiting if flag is not set
.wait_done: # (unreferenced label, kept for readability)
sub dx, 5 # dx = data register
# Load a byte of the message and send it to the serial port.
lodsb # load message byte from RSI to AL, advance RSI
out dx, al # send byte to serial port
dec rcx # rcx--
jnz .print_loop # repeat if rcx != 0
.done:
int3 # cause vcpu to exit
|
Peggyliao/Final-Project | 2,655 | firmware/extraops.S | // This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
#define regnum_q0 0
#define regnum_q1 1
#define regnum_q2 2
#define regnum_q3 3
#define regnum_x0 0
#define regnum_x1 1
#define regnum_x2 2
#define regnum_x3 3
#define regnum_x4 4
#define regnum_x5 5
#define regnum_x6 6
#define regnum_x7 7
#define regnum_x8 8
#define regnum_x9 9
#define regnum_x10 10
#define regnum_x11 11
#define regnum_x12 12
#define regnum_x13 13
#define regnum_x14 14
#define regnum_x15 15
#define regnum_x16 16
#define regnum_x17 17
#define regnum_x18 18
#define regnum_x19 19
#define regnum_x20 20
#define regnum_x21 21
#define regnum_x22 22
#define regnum_x23 23
#define regnum_x24 24
#define regnum_x25 25
#define regnum_x26 26
#define regnum_x27 27
#define regnum_x28 28
#define regnum_x29 29
#define regnum_x30 30
#define regnum_x31 31
#define regnum_zero 0
#define regnum_ra 1
#define regnum_sp 2
#define regnum_gp 3
#define regnum_tp 4
#define regnum_t0 5
#define regnum_t1 6
#define regnum_t2 7
#define regnum_s0 8
#define regnum_s1 9
#define regnum_a0 10
#define regnum_a1 11
#define regnum_a2 12
#define regnum_a3 13
#define regnum_a4 14
#define regnum_a5 15
#define regnum_a6 16
#define regnum_a7 17
#define regnum_s2 18
#define regnum_s3 19
#define regnum_s4 20
#define regnum_s5 21
#define regnum_s6 22
#define regnum_s7 23
#define regnum_s8 24
#define regnum_s9 25
#define regnum_s10 26
#define regnum_s11 27
#define regnum_t3 28
#define regnum_t4 29
#define regnum_t5 30
#define regnum_t6 31
// x8 is s0 and also fp
#define regnum_fp 8
#define r_type_insn(_f7, _rs2, _rs1, _f3, _rd, _opc) \
.word (((_f7) << 25) | ((_rs2) << 20) | ((_rs1) << 15) | ((_f3) << 12) | ((_rd) << 7) | ((_opc) << 0))
#define picorv32_getq_insn(_rd, _qs) \
r_type_insn(0b0000000, 0, regnum_ ## _qs, 0b100, regnum_ ## _rd, 0b0001011)
#define picorv32_setq_insn(_qd, _rs) \
r_type_insn(0b0000001, 0, regnum_ ## _rs, 0b010, regnum_ ## _qd, 0b0001011)
#define picorv32_retirq_insn() \
r_type_insn(0b0000010, 0, 0, 0b000, 0, 0b0001011)
#define picorv32_maskirq_insn(_rd, _rs) \
r_type_insn(0b0000011, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
#define picorv32_waitirq_insn(_rd) \
r_type_insn(0b0000100, 0, 0, 0b100, regnum_ ## _rd, 0b0001011)
#define picorv32_timer_insn(_rd, _rs) \
r_type_insn(0b0000101, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
|
Peggyliao/Final-Project | 6,209 | firmware/start.S | /*
* Copyright 2018, Serge Bazanski <serge@bazanski.pl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted.
*/
#include "../extraops.S"
/*
* Interrupt vector.
*/
.global _start
_start:
.org 0x00000000 # Reset
j _crt0
.org 0x00000010 # IRQ
_irq_vector:
addi sp, sp, -16
sw t0, 4(sp)
sw ra, 8(sp)
/* By convention, q2 holds true IRQ vector, but remains caller-save.
We rely on the assumption that compiler-generated code will never touch
the QREGs. q3 is truly scratch/caller-save. */
picorv32_getq_insn(t0, q2)
sw t0, 12(sp)
jalr t0 // Call the true IRQ vector.
lw t0, 12(sp)
picorv32_setq_insn(q2, t0) // Restore the true IRQ vector.
lw ra, 8(sp)
lw t0, 4(sp)
addi sp, sp, 16
picorv32_retirq_insn() // return from interrupt
/*
* IRQ handler, branched to from the vector.
*/
_irq:
/* save x1/x2 to q2/q3 */
picorv32_setq_insn(q2, x1)
picorv32_setq_insn(q3, x2)
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* use x2 as scratch space for saving registers */
/* q0 (== x1), q2(== x2), q3 */
picorv32_getq_insn(x2, q0)
sw x2, 0*4(x1)
picorv32_getq_insn(x2, q2)
sw x2, 1*4(x1)
picorv32_getq_insn(x2, q3)
sw x2, 2*4(x1)
/* save x3 - x31 */
sw x3, 3*4(x1)
sw x4, 4*4(x1)
sw x5, 5*4(x1)
sw x6, 6*4(x1)
sw x7, 7*4(x1)
sw x8, 8*4(x1)
sw x9, 9*4(x1)
sw x10, 10*4(x1)
sw x11, 11*4(x1)
sw x12, 12*4(x1)
sw x13, 13*4(x1)
sw x14, 14*4(x1)
sw x15, 15*4(x1)
sw x16, 16*4(x1)
sw x17, 17*4(x1)
sw x18, 18*4(x1)
sw x19, 19*4(x1)
sw x20, 20*4(x1)
sw x21, 21*4(x1)
sw x22, 22*4(x1)
sw x23, 23*4(x1)
sw x24, 24*4(x1)
sw x25, 25*4(x1)
sw x26, 26*4(x1)
sw x27, 27*4(x1)
sw x28, 28*4(x1)
sw x29, 29*4(x1)
sw x30, 30*4(x1)
sw x31, 31*4(x1)
/* update _irq_pending to the currently pending interrupts */
picorv32_getq_insn(t0, q1)
la t1, (_irq_pending)
sw t0, 0(t1)
/* prepare C handler stack */
lui sp, %hi(_irq_stack)
addi sp, sp, %lo(_irq_stack)
/* call C handler */
jal ra, isr
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* restore q0 - q2 */
lw x2, 0*4(x1)
picorv32_setq_insn(q0, x2)
lw x2, 1*4(x1)
picorv32_setq_insn(q1, x2)
lw x2, 2*4(x1)
picorv32_setq_insn(q2, x2)
/* restore x3 - x31 */
lw x3, 3*4(x1)
lw x4, 4*4(x1)
lw x5, 5*4(x1)
lw x6, 6*4(x1)
lw x7, 7*4(x1)
lw x8, 8*4(x1)
lw x9, 9*4(x1)
lw x10, 10*4(x1)
lw x11, 11*4(x1)
lw x12, 12*4(x1)
lw x13, 13*4(x1)
lw x14, 14*4(x1)
lw x15, 15*4(x1)
lw x16, 16*4(x1)
lw x17, 17*4(x1)
lw x18, 18*4(x1)
lw x19, 19*4(x1)
lw x20, 20*4(x1)
lw x21, 21*4(x1)
lw x22, 22*4(x1)
lw x23, 23*4(x1)
lw x24, 24*4(x1)
lw x25, 25*4(x1)
lw x26, 26*4(x1)
lw x27, 27*4(x1)
lw x28, 28*4(x1)
lw x29, 29*4(x1)
lw x30, 30*4(x1)
lw x31, 31*4(x1)
/* restore x1 - x2 from q registers */
picorv32_getq_insn(x1, q1)
picorv32_getq_insn(x2, q2)
ret
/*
* Reset handler, branched to from the vector.
*/
_crt0:
/* zero-initialize all registers */
addi x1, zero, 0
addi x2, zero, 0
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
/* mask all interrupts */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* reflect that in _irq_mask */
la t1, _irq_mask
sw t0, 0(t1)
/* Load DATA */
la t0, _fdata_rom
la t1, _fdata
la t2, _edata
3:
lw t3, 0(t0)
sw t3, 0(t1)
/* _edata is aligned to 16 bytes. Use word-xfers. */
addi t0, t0, 4
addi t1, t1, 4
bltu t1, t2, 3b
/* Clear BSS */
#la t0, _fbss
#la t1, _ebss
2:
#sw zero, 0(t0)
#addi t0, t0, 4
#bltu t0, t1, 2b
/* set main stack */
la sp, _fstack
/* Set up address to IRQ handler since vector is hardcoded.
By convention, q2 keeps the pointer to the true IRQ handler,
to emulate relocatable interrupts. */
la t0, _irq
picorv32_setq_insn(q2, t0)
/* jump to main */
jal ra, main
1:
/* loop forever */
j 1b
/*
* Enable interrupts by copying the software mask to the hardware mask
*/
.global _irq_enable
# _irq_enable(): globally enable interrupts by marking _irq_enabled and
# copying the software mask _irq_mask into the picorv32 hardware mask.
# Clobbers t0, t1.
_irq_enable:
/* Set _irq_enabled to true */
la t0, _irq_enabled
addi t1, zero, 1
sw t1, 0(t0)
/* Set the HW IRQ mask to _irq_mask */
la t0, _irq_mask
lw t0, 0(t0)
picorv32_maskirq_insn(zero, t0)
ret
/*
* Disable interrupts by masking all interrupts (the mask should already be
* up to date)
*/
.global _irq_disable
# _irq_disable(): globally disable interrupts by masking everything in
# hardware and clearing _irq_enabled (the software mask _irq_mask is
# left untouched for a later _irq_enable). Clobbers t0.
_irq_disable:
/* Mask all IRQs */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* Set _irq_enabled to false */
la t0, _irq_enabled
sw zero, (t0)
ret
/*
* Set interrrupt mask.
* This updates the software mask (for readback and interrupt inable/disable)
* and the hardware mask.
* 1 means interrupt is masked (disabled).
*/
.global _irq_setmask
# _irq_setmask(a0 = mask; 1 bit = interrupt masked/disabled):
# always updates the software copy _irq_mask; writes the hardware mask
# only when interrupts are globally enabled, so _irq_enable can apply
# the mask later otherwise. Clobbers t0.
_irq_setmask:
/* Update _irq_mask */
la t0, _irq_mask
sw a0, (t0)
/* Are interrupts enabled? */
la t0, _irq_enabled
lw t0, 0(t0)
beq t0, zero, 1f
/* If so, update the HW IRQ mask */
picorv32_maskirq_insn(zero, a0)
1:
ret
.section .bss
irq_regs:
/* saved interrupt registers, x0 - x31 */
.fill 32,4
/* interrupt stack */
.fill 256,4
_irq_stack:
/*
* Bitfield of pending interrupts, updated on ISR entry.
*/
.global _irq_pending
_irq_pending:
.word 0
/*
* Software copy of enabled interrupts. Do not write directly, use
* _irq_set_mask instead.
*/
.global _irq_mask
_irq_mask:
.word 0
/*
* Software state of global interrupts being enabled or disabled. Do not write
* directly, use _irq_disable / _irq_enable instead.
*/
.global _irq_enabled
_irq_enabled:
.word 0
|
Peggyliao/Final-Project | 1,803 | firmware/crt0_ibex.S | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
#include "simple_system_regs.h"
.section .text
default_exc_handler:
jal x0, simple_exc_handler
timer_handler:
jal x0, simple_timer_handler
reset_handler:
/* set all registers to zero */
mv x1, x0
mv x2, x1
mv x3, x1
mv x4, x1
mv x5, x1
mv x6, x1
mv x7, x1
mv x8, x1
mv x9, x1
mv x10, x1
mv x11, x1
mv x12, x1
mv x13, x1
mv x14, x1
mv x15, x1
mv x16, x1
mv x17, x1
mv x18, x1
mv x19, x1
mv x20, x1
mv x21, x1
mv x22, x1
mv x23, x1
mv x24, x1
mv x25, x1
mv x26, x1
mv x27, x1
mv x28, x1
mv x29, x1
mv x30, x1
mv x31, x1
/* stack initilization */
# la x2, _stack_start
la x2, 0x01000800
_start:
.global _start
/* clear BSS */
la x26, _bss_start
la x27, _bss_end
bge x26, x27, zero_loop_end
zero_loop:
sw x0, 0(x26)
addi x26, x26, 4
ble x26, x27, zero_loop
zero_loop_end:
main_entry:
/* jump to main program entry point (argc = argv = 0) */
addi x10, x0, 0
addi x11, x0, 0
jal x1, main
/* Halt simulation */
#li x5, SIM_CTRL_BASE + SIM_CTRL_CTRL
#li x6, 1
#sw x6, 0(x5)
/* If execution ends up here just put the core to sleep */
sleep_loop:
wfi
j sleep_loop
/* =================================================== [ exceptions ] === */
/* This section has to be down here, since we have to disable rvc for it */
.section .vectors, "ax"
.option norvc;
// All unimplemented interrupts/exceptions go to the default_exc_handler.
.org 0x00
.rept 7
jal x0, default_exc_handler
.endr
jal x0, timer_handler
.rept 23
jal x0, default_exc_handler
.endr
// reset vector
.org 0x80
jal x0, reset_handler
|
Peggyliao/Final-Project | 3,215 | firmware/start_caravel_vexriscv.s | # SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
.section .text
start:
# zero-initialize register file
addi x1, zero, 0
# x2 (sp) is initialized by reset
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
# zero initialize scratchpad memory
# setmemloop:
# sw zero, 0(x1)
# addi x1, x1, 4
# blt x1, sp, setmemloop
# copy data section
la a0, _sidata
la a1, _sdata
la a2, _edata
bge a1, a2, end_init_data
loop_init_data:
lw a3, 0(a0)
sw a3, 0(a1)
addi a0, a0, 4
addi a1, a1, 4
blt a1, a2, loop_init_data
end_init_data:
# zero-init bss section
la a0, _sbss
la a1, _ebss
bge a0, a1, end_init_bss
loop_init_bss:
sw zero, 0(a0)
addi a0, a0, 4
blt a0, a1, loop_init_bss
end_init_bss:
la sp, _fstack
# call main
call main
loop:
j loop
.global flashio_worker_begin
.global flashio_worker_end
.balign 4
flashio_worker_begin:
# a0 ... data pointer
# a1 ... data length
# a2 ... optional WREN cmd (0 = disable)
# address of SPI ctrl reg
li t0, 0x28000000
# Set CS high, IO0 is output
li t1, 0x120
sh t1, 0(t0)
# Enable Manual SPI Ctrl
sb zero, 3(t0)
# Send optional WREN cmd
beqz a2, flashio_worker_L1
li t5, 8
andi t2, a2, 0xff
flashio_worker_L4:
srli t4, t2, 7
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
slli t2, t2, 1
andi t2, t2, 0xff
addi t5, t5, -1
bnez t5, flashio_worker_L4
sb t1, 0(t0)
# SPI transfer
flashio_worker_L1:
# If byte count is zero, we're done
beqz a1, flashio_worker_L3
# Set t5 to count down 32 bits
li t5, 32
# Load t2 from address a0 (4 bytes)
lw t2, 0(a0)
flashio_worker_LY:
# Set t6 to count down 8 bits
li t6, 8
flashio_worker_L2:
# Clock out the bit (msb first) on IO0 and read bit in from IO1
srli t4, t2, 31
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
lbu t4, 0(t0)
andi t4, t4, 2
srli t4, t4, 1
slli t2, t2, 1
or t2, t2, t4
# Decrement 32 bit count
addi t5, t5, -1
bnez t5, flashio_worker_LX
sw t2, 0(a0)
addi a0, a0, 4
lw t2, 0(a0)
flashio_worker_LX:
addi t6, t6, -1
bnez t6, flashio_worker_L2
addi a1, a1, -1
bnez a1, flashio_worker_LY
beqz t5, flashio_worker_L3
sw t2, 0(a0)
flashio_worker_L3:
# Back to MEMIO mode
li t1, 0x80
sb t1, 3(t0)
ret
.balign 4
flashio_worker_end:
|
Peggyliao/Final-Project | 1,662 | firmware/crt0_vex.S | .global main
.global isr
.global _start
_start:
j crt_init
nop
nop
nop
nop
nop
nop
nop
.global trap_entry
trap_entry:
sw x1, - 1*4(sp)
sw x5, - 2*4(sp)
sw x6, - 3*4(sp)
sw x7, - 4*4(sp)
sw x10, - 5*4(sp)
sw x11, - 6*4(sp)
sw x12, - 7*4(sp)
sw x13, - 8*4(sp)
sw x14, - 9*4(sp)
sw x15, -10*4(sp)
sw x16, -11*4(sp)
sw x17, -12*4(sp)
sw x28, -13*4(sp)
sw x29, -14*4(sp)
sw x30, -15*4(sp)
sw x31, -16*4(sp)
addi sp,sp,-16*4
call isr
lw x1 , 15*4(sp)
lw x5, 14*4(sp)
lw x6, 13*4(sp)
lw x7, 12*4(sp)
lw x10, 11*4(sp)
lw x11, 10*4(sp)
lw x12, 9*4(sp)
lw x13, 8*4(sp)
lw x14, 7*4(sp)
lw x15, 6*4(sp)
lw x16, 5*4(sp)
lw x17, 4*4(sp)
lw x28, 3*4(sp)
lw x29, 2*4(sp)
lw x30, 1*4(sp)
lw x31, 0*4(sp)
addi sp,sp,16*4
mret
.text
crt_init:
la sp, _fstack
la a0, trap_entry
csrw mtvec, a0
sram_init:
la a0, _fsram
la a1, _esram
la a2, _esram_rom
sram_loop:
beq a0,a1,sram_done
lw a3,0(a2)
sw a3,0(a0)
add a0,a0,4
add a2,a2,4
j sram_loop
sram_done:
data_init:
la a0, _fdata
la a1, _edata
la a2, _fdata_rom
data_loop:
beq a0,a1,data_done
lw a3,0(a2)
sw a3,0(a0)
add a0,a0,4
add a2,a2,4
j data_loop
data_done:
bss_init:
la a0, _fbss
la a1, _ebss
bss_loop:
beq a0,a1,bss_done
sw zero,0(a0)
add a0,a0,4
#ifndef SIM
j bss_loop
#endif
bss_done:
li a0, 0x880 //880 enable timer + external interrupt sources (until mstatus.MIE is set, they will never trigger an interrupt)
csrw mie,a0
#ifdef USER_PROJ_IRQ0_EN
csrrs a0, mstatus, 0x8 //0x8 set mstatus.MIE
#endif
call main
infinit_loop:
j infinit_loop
|
Peggyliao/Final-Project | 3,199 | firmware/start_caravel_ibex.s | # SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
.section .text
start:
# zero-initialize register file
addi x1, zero, 0
# x2 (sp) is initialized by reset
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
# zero initialize scratchpad memory
# setmemloop:
# sw zero, 0(x1)
# addi x1, x1, 4
# blt x1, sp, setmemloop
# copy data section
la a0, _sidata
la a1, _sdata
la a2, _edata
bge a1, a2, end_init_data
loop_init_data:
lw a3, 0(a0)
sw a3, 0(a1)
addi a0, a0, 4
addi a1, a1, 4
blt a1, a2, loop_init_data
end_init_data:
# zero-init bss section
la a0, _sbss
la a1, _ebss
bge a0, a1, end_init_bss
loop_init_bss:
sw zero, 0(a0)
addi a0, a0, 4
blt a0, a1, loop_init_bss
end_init_bss:
# call main
call main
loop:
j loop
.global flashio_worker_begin
.global flashio_worker_end
.balign 4
flashio_worker_begin:
# a0 ... data pointer
# a1 ... data length
# a2 ... optional WREN cmd (0 = disable)
# address of SPI ctrl reg
li t0, 0x28000000
# Set CS high, IO0 is output
li t1, 0x120
sh t1, 0(t0)
# Enable Manual SPI Ctrl
sb zero, 3(t0)
# Send optional WREN cmd
beqz a2, flashio_worker_L1
li t5, 8
andi t2, a2, 0xff
flashio_worker_L4:
srli t4, t2, 7
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
slli t2, t2, 1
andi t2, t2, 0xff
addi t5, t5, -1
bnez t5, flashio_worker_L4
sb t1, 0(t0)
# SPI transfer
flashio_worker_L1:
# If byte count is zero, we're done
beqz a1, flashio_worker_L3
# Set t5 to count down 32 bits
li t5, 32
# Load t2 from address a0 (4 bytes)
lw t2, 0(a0)
flashio_worker_LY:
# Set t6 to count down 8 bits
li t6, 8
flashio_worker_L2:
# Clock out the bit (msb first) on IO0 and read bit in from IO1
srli t4, t2, 31
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
lbu t4, 0(t0)
andi t4, t4, 2
srli t4, t4, 1
slli t2, t2, 1
or t2, t2, t4
# Decrement 32 bit count
addi t5, t5, -1
bnez t5, flashio_worker_LX
sw t2, 0(a0)
addi a0, a0, 4
lw t2, 0(a0)
flashio_worker_LX:
addi t6, t6, -1
bnez t6, flashio_worker_L2
addi a1, a1, -1
bnez a1, flashio_worker_LY
beqz t5, flashio_worker_L3
sw t2, 0(a0)
flashio_worker_L3:
# Back to MEMIO mode
li t1, 0x80
sb t1, 3(t0)
ret
.balign 4
flashio_worker_end:
|
Peggyliao/Final-Project | 6,209 | firmware/start_pico.S | /*
* Copyright 2018, Serge Bazanski <serge@bazanski.pl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted.
*/
#include "../extraops.S"
/*
* Interrupt vector.
*/
.global _start
_start:
# Hardware vectors: PicoRV32 resets at 0x0 and takes IRQs at 0x10.
.org 0x00000000 # Reset
j _crt0
.org 0x00000010 # IRQ
_irq_vector:
# Thin trampoline: dispatch through the handler pointer kept in q2,
# preserving t0/ra (and the q2 value itself) across the indirect call.
addi sp, sp, -16
sw t0, 4(sp)
sw ra, 8(sp)
/* By convention, q2 holds true IRQ vector, but remains caller-save.
We rely on the assumption that compiler-generated code will never touch
the QREGs. q3 is truly scratch/caller-save. */
picorv32_getq_insn(t0, q2)
sw t0, 12(sp)
jalr t0 // Call the true IRQ vector.
lw t0, 12(sp)
picorv32_setq_insn(q2, t0) // Restore the true IRQ vector.
lw ra, 8(sp)
lw t0, 4(sp)
addi sp, sp, 16
picorv32_retirq_insn() // return from interrupt
/*
* IRQ handler, branched to from the vector.
*/
_irq:
# Full IRQ handler: saves the entire register file into irq_regs, records
# the pending-IRQ bitmask (delivered by hardware in q1), runs the C isr()
# on a dedicated stack, then restores everything and returns to the
# _irq_vector trampoline via ra.
/* save x1/x2 to q2/q3 (q1 holds the hardware's pending-IRQ bitmask) */
picorv32_setq_insn(q2, x1)
picorv32_setq_insn(q3, x2)
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* use x2 as scratch space for saving registers */
/* q0 (== x1), q2(== x2), q3 */
# Slot layout: [0]=q0 (return pc saved by hw), [1]=q2 (caller's x1),
# [2]=q3 (caller's x2); restored below into q0/q1/q2 respectively.
picorv32_getq_insn(x2, q0)
sw x2, 0*4(x1)
picorv32_getq_insn(x2, q2)
sw x2, 1*4(x1)
picorv32_getq_insn(x2, q3)
sw x2, 2*4(x1)
/* save x3 - x31 */
sw x3, 3*4(x1)
sw x4, 4*4(x1)
sw x5, 5*4(x1)
sw x6, 6*4(x1)
sw x7, 7*4(x1)
sw x8, 8*4(x1)
sw x9, 9*4(x1)
sw x10, 10*4(x1)
sw x11, 11*4(x1)
sw x12, 12*4(x1)
sw x13, 13*4(x1)
sw x14, 14*4(x1)
sw x15, 15*4(x1)
sw x16, 16*4(x1)
sw x17, 17*4(x1)
sw x18, 18*4(x1)
sw x19, 19*4(x1)
sw x20, 20*4(x1)
sw x21, 21*4(x1)
sw x22, 22*4(x1)
sw x23, 23*4(x1)
sw x24, 24*4(x1)
sw x25, 25*4(x1)
sw x26, 26*4(x1)
sw x27, 27*4(x1)
sw x28, 28*4(x1)
sw x29, 29*4(x1)
sw x30, 30*4(x1)
sw x31, 31*4(x1)
/* update _irq_pending to the currently pending interrupts */
picorv32_getq_insn(t0, q1)
la t1, (_irq_pending)
sw t0, 0(t1)
/* prepare C handler stack */
lui sp, %hi(_irq_stack)
addi sp, sp, %lo(_irq_stack)
/* call C handler */
jal ra, isr
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* restore q0 - q2 (note the slot shift: saved q2 -> q1, saved q3 -> q2,
   so the final getq below lands the caller's x1/x2 correctly) */
lw x2, 0*4(x1)
picorv32_setq_insn(q0, x2)
lw x2, 1*4(x1)
picorv32_setq_insn(q1, x2)
lw x2, 2*4(x1)
picorv32_setq_insn(q2, x2)
/* restore x3 - x31 */
lw x3, 3*4(x1)
lw x4, 4*4(x1)
lw x5, 5*4(x1)
lw x6, 6*4(x1)
lw x7, 7*4(x1)
lw x8, 8*4(x1)
lw x9, 9*4(x1)
lw x10, 10*4(x1)
lw x11, 11*4(x1)
lw x12, 12*4(x1)
lw x13, 13*4(x1)
lw x14, 14*4(x1)
lw x15, 15*4(x1)
lw x16, 16*4(x1)
lw x17, 17*4(x1)
lw x18, 18*4(x1)
lw x19, 19*4(x1)
lw x20, 20*4(x1)
lw x21, 21*4(x1)
lw x22, 22*4(x1)
lw x23, 23*4(x1)
lw x24, 24*4(x1)
lw x25, 25*4(x1)
lw x26, 26*4(x1)
lw x27, 27*4(x1)
lw x28, 28*4(x1)
lw x29, 29*4(x1)
lw x30, 30*4(x1)
lw x31, 31*4(x1)
/* restore x1 - x2 from q registers */
picorv32_getq_insn(x1, q1)
picorv32_getq_insn(x2, q2)
ret
/*
* Reset handler, branched to from the vector.
*/
_crt0:
# Reset handler: clears the register file, masks IRQs, copies .data from
# ROM, sets up the stack and the soft IRQ vector (q2), then calls main().
/* zero-initialize all registers */
addi x1, zero, 0
addi x2, zero, 0
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
/* mask all interrupts */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* reflect that in _irq_mask */
la t1, _irq_mask
sw t0, 0(t1)
/* Load DATA */
la t0, _fdata_rom
la t1, _fdata
la t2, _edata
3:
lw t3, 0(t0)
sw t3, 0(t1)
/* _edata is aligned to 16 bytes. Use word-xfers. */
/* NOTE(review): loop body runs once even if _fdata == _edata (check is
   at the bottom) — presumably .data is never empty here; confirm. */
addi t0, t0, 4
addi t1, t1, 4
bltu t1, t2, 3b
/* Clear BSS */
/* NOTE(review): BSS clearing is commented out — BSS is presumably
   zeroed by some other mechanism (e.g. zero-initialized RAM image). */
#la t0, _fbss
#la t1, _ebss
2:
#sw zero, 0(t0)
#addi t0, t0, 4
#bltu t0, t1, 2b
/* set main stack */
la sp, _fstack
/* Set up address to IRQ handler since vector is hardcoded.
By convention, q2 keeps the pointer to the true IRQ handler,
to emulate relocatable interrupts. */
la t0, _irq
picorv32_setq_insn(q2, t0)
/* jump to main */
jal ra, main
1:
/* loop forever */
j 1b
/*
* Enable interrupts by copying the software mask to the hardware mask
*/
.global _irq_enable
# void _irq_enable(void) — mark interrupts enabled and load the saved
# software mask (_irq_mask) into the hardware mask register.
_irq_enable:
/* Set _irq_enabled to true */
la t0, _irq_enabled
addi t1, zero, 1
sw t1, 0(t0)
/* Set the HW IRQ mask to _irq_mask */
la t0, _irq_mask
lw t0, 0(t0)
picorv32_maskirq_insn(zero, t0)
ret
/*
* Disable interrupts by masking all interrupts (the mask should already be
* up to date)
*/
.global _irq_disable
# void _irq_disable(void) — mask every IRQ in hardware and record the
# disabled state in _irq_enabled (the software mask is left untouched).
_irq_disable:
/* Mask all IRQs */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* Set _irq_enabled to false */
la t0, _irq_enabled
sw zero, (t0)
ret
/*
 * Set interrupt mask.
 * This updates the software mask (for readback and interrupt enable/disable)
 * and the hardware mask.
 * 1 means interrupt is masked (disabled).
 */
.global _irq_setmask
# void _irq_setmask(uint32_t mask) — a0 = new mask (1 bit = masked/disabled).
# Always updates the software copy; only writes the hardware mask when
# interrupts are globally enabled, so _irq_enable can apply it later.
_irq_setmask:
/* Update _irq_mask */
la t0, _irq_mask
sw a0, (t0)
/* Are interrupts enabled? */
la t0, _irq_enabled
lw t0, 0(t0)
beq t0, zero, 1f
/* If so, update the HW IRQ mask */
picorv32_maskirq_insn(zero, a0)
1:
ret
.section .bss
# Zero-initialized state shared between the asm IRQ path and the C code.
irq_regs:
/* saved interrupt registers, x0 - x31 */
.fill 32,4
/* interrupt stack (256 words); _irq_stack labels its top, since the
   stack grows downward */
.fill 256,4
_irq_stack:
/*
 * Bitfield of pending interrupts, updated on ISR entry.
 */
.global _irq_pending
_irq_pending:
.word 0
/*
 * Software copy of enabled interrupts. Do not write directly, use
 * _irq_setmask instead.
 */
.global _irq_mask
_irq_mask:
.word 0
/*
 * Software state of global interrupts being enabled or disabled. Do not write
 * directly, use _irq_disable / _irq_enable instead.
 */
.global _irq_enabled
_irq_enabled:
.word 0
|
PeterDing372/rCore-notes | 2,218 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
# User-mode trap entry (mapped in the shared trampoline page).
# TrapContext layout (8-byte slots): [0..31]=x0..x31, [32]=sstatus,
# [33]=sepc, [34]=kernel_satp, [35]=kernel_sp, [36]=trap_handler.
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
# (jr, not call: the handler never returns here; __restore re-enters user)
jr t1
# Return to user mode: switch the address space to the user token in a1,
# reload the full register file from the TrapContext at a0, then sret.
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
.align 2
# Kernel-mode trap entry: saves a 34-slot frame (x*, sstatus, sepc) on the
# current kernel stack, then calls the handler whose address was stashed
# in sscratch (a0 = frame pointer). No address-space switch is needed.
__alltraps_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
# Return from a kernel-mode trap: restore sstatus/sepc and the saved
# registers from the 34-slot frame at sp, pop it, and sret.
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret
|
PhamNamSon/ECE_421 | 1,735 | Lab_5/question_4.s | example::question_2:
push rax
test rsi, rsi
je .LBB0_11
xor edx, edx
jmp .LBB0_2
.LBB0_10:
cmp rdx, rsi
je .LBB0_11
.LBB0_2:
mov r8, rdx
inc rdx
mov r9, rdx
mov rcx, r8
cmp rdx, rsi
jb .LBB0_3
jmp .LBB0_10
.LBB0_6:
inc r9
mov rcx, rax
cmp r9, rsi
jae .LBB0_7
.LBB0_3:
cmp rcx, rsi
jae .LBB0_12
mov r10, qword ptr [rdi + 8*r9]
mov rax, r9
cmp r10, qword ptr [rdi + 8*rcx]
jl .LBB0_6
mov rax, rcx
jmp .LBB0_6
.LBB0_7:
cmp rax, r8
je .LBB0_10
cmp rax, rsi
jae .LBB0_13
mov rcx, qword ptr [rdi + 8*r8]
mov r9, qword ptr [rdi + 8*rax]
mov qword ptr [rdi + 8*r8], r9
mov qword ptr [rdi + 8*rax], rcx
jmp .LBB0_10
.LBB0_11:
mov rax, rdi
mov rdx, rsi
pop rcx
ret
.LBB0_12:
lea rdx, [rip + .L__unnamed_1]
mov rdi, rcx
call qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
.LBB0_13:
lea rdx, [rip + .L__unnamed_2]
mov rdi, rax
call qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
.L__unnamed_3:
.ascii "/app/example.rs"
.L__unnamed_2:
.quad .L__unnamed_3
.asciz "\017\000\000\000\000\000\000\000\013\000\000\000\022\000\000"
.L__unnamed_1:
.quad .L__unnamed_3
.asciz "\017\000\000\000\000\000\000\000\006\000\000\000\032\000\000" |
PhamNamSon/ECE_421 | 7,996 | Lab_5/question_3.s | <usize as core::iter::range::Step>::forward_unchecked:
mov rax, rdi
add rax, rsi
ret
# Iterator::next for Range<usize>: tail-delegates to spec_next via the GOT
# (push/pop rax only keeps the stack 16-byte aligned across the call).
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next:
push rax
mov rax, qword ptr [rip + <core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next@GOTPCREL]
call rax
pop rcx
ret
# <[u64]>::swap(ptr=rdi, len=rsi, a=rdx, b=rcx, caller-location=r8):
# unoptimized (-O0 style) build — both indices are bounds-checked against
# len, then the two 8-byte elements are exchanged through the stack.
core::slice::<impl [T]>::swap:
sub rsp, 56
mov qword ptr [rsp + 8], rdi
mov qword ptr [rsp + 16], rsi
mov qword ptr [rsp + 24], rdx
mov qword ptr [rsp + 32], rcx
mov qword ptr [rsp + 40], r8
cmp rdx, rsi
setb al
test al, 1
jne .LBB2_1                       # a < len: first index in bounds
jmp .LBB2_2
.LBB2_1:
mov rax, qword ptr [rsp + 32]
mov rcx, qword ptr [rsp + 16]
mov rdx, qword ptr [rsp + 8]
mov rsi, qword ptr [rsp + 24]
shl rsi, 3
add rdx, rsi                      # rdx = &slice[a]
mov qword ptr [rsp], rdx
cmp rax, rcx
setb al
test al, 1
jne .LBB2_3                       # b < len: second index in bounds
jmp .LBB2_4
.LBB2_2:
# index a out of bounds -> panic_bounds_check(a, len, location)
mov rdx, qword ptr [rsp + 40]
mov rsi, qword ptr [rsp + 16]
mov rdi, qword ptr [rsp + 24]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
.LBB2_3:
# exchange slice[a] and slice[b] via a stack temporary
mov rax, qword ptr [rsp + 8]
mov rcx, qword ptr [rsp + 32]
mov rdx, qword ptr [rsp]
mov rdi, rcx
shl rdi, 3
mov rsi, rax
add rsi, rdi                      # rsi = &slice[b]
mov rdi, qword ptr [rdx]
mov qword ptr [rsp + 48], rdi
mov rsi, qword ptr [rsi]
mov qword ptr [rdx], rsi
mov rdx, qword ptr [rsp + 48]
mov qword ptr [rax + 8*rcx], rdx
add rsp, 56
ret
.LBB2_4:
# index b out of bounds -> panic_bounds_check(b, len, location)
mov rdx, qword ptr [rsp + 40]
mov rsi, qword ptr [rsp + 16]
mov rdi, qword ptr [rsp + 32]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
# IntoIterator for Range<usize>: identity — returns (start, end) unchanged.
<I as core::iter::traits::collect::IntoIterator>::into_iter:
mov rdx, rsi
mov rax, rdi
ret
# Range<usize>::spec_next(&mut self in rdi): returns Option<usize> as
# (rax = discriminant 0/1, rdx = value). If start < end, yields start and
# advances self.start via forward_unchecked; otherwise returns None.
<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next:
sub rsp, 40
mov qword ptr [rsp + 16], rdi
mov rax, qword ptr [rdi]
cmp rax, qword ptr [rdi + 8]      # start vs end (unsigned)
jb .LBB4_2
mov qword ptr [rsp + 24], 0       # exhausted: None
jmp .LBB4_3
.LBB4_2:
mov rax, qword ptr [rsp + 16]
mov rdi, qword ptr [rax]
mov qword ptr [rsp + 8], rdi      # remember the value to yield
mov esi, 1
call <usize as core::iter::range::Step>::forward_unchecked
mov rcx, qword ptr [rsp + 16]
mov rdx, rax
mov rax, qword ptr [rsp + 8]
mov qword ptr [rcx], rdx          # self.start += 1
mov qword ptr [rsp + 32], rax
mov qword ptr [rsp + 24], 1       # Some(value)
.LBB4_3:
mov rax, qword ptr [rsp + 24]
mov rdx, qword ptr [rsp + 32]
add rsp, 40
ret
# Unoptimized Rust build of the same selection sort (rdi = ptr, rsi = len):
# outer loop iterates i over 0..len via a Range iterator at [rsp+80];
# inner loop scans j over i+1..len via a second Range at [rsp+136],
# tracking the minimum index at [rsp+112] and swapping at scan end.
example::question_2:
sub rsp, 168
mov qword ptr [rsp + 48], rdi
mov qword ptr [rsp + 56], rsi
mov qword ptr [rsp + 64], 0
mov qword ptr [rsp + 72], rsi
mov rdi, qword ptr [rsp + 64]
mov rsi, qword ptr [rsp + 72]
call qword ptr [rip + <I as core::iter::traits::collect::IntoIterator>::into_iter@GOTPCREL]
mov qword ptr [rsp + 80], rax
mov qword ptr [rsp + 88], rdx
.LBB5_1:
# outer: i = (0..len).next()
mov rax, qword ptr [rip + core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next@GOTPCREL]
lea rdi, [rsp + 80]
call rax
mov qword ptr [rsp + 104], rdx
mov qword ptr [rsp + 96], rax
cmp qword ptr [rsp + 96], 0
jne .LBB5_3
# outer range exhausted: return the slice (ptr, len)
mov rdx, qword ptr [rsp + 56]
mov rax, qword ptr [rsp + 48]
add rsp, 168
ret
.LBB5_3:
# min_idx = i; build the inner range i+1..len (checked add, may panic)
mov rax, qword ptr [rsp + 104]
mov qword ptr [rsp + 32], rax
mov qword ptr [rsp + 112], rax
add rax, 1
mov qword ptr [rsp + 40], rax
setb al
test al, 1
jne .LBB5_5
mov rax, qword ptr [rsp + 56]
mov rcx, qword ptr [rsp + 40]
mov qword ptr [rsp + 120], rcx
mov qword ptr [rsp + 128], rax
mov rdi, qword ptr [rsp + 120]
mov rsi, qword ptr [rsp + 128]
call qword ptr [rip + <I as core::iter::traits::collect::IntoIterator>::into_iter@GOTPCREL]
mov qword ptr [rsp + 136], rax
mov qword ptr [rsp + 144], rdx
jmp .LBB5_6
.LBB5_5:
# i + 1 overflowed: panic("attempt to add with overflow")
lea rdi, [rip + str.0]
lea rdx, [rip + .L__unnamed_1]
mov rax, qword ptr [rip + core::panicking::panic@GOTPCREL]
mov esi, 28
call rax
.LBB5_6:
# inner: j = (i+1..len).next()
mov rax, qword ptr [rip + core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next@GOTPCREL]
lea rdi, [rsp + 136]
call rax
mov qword ptr [rsp + 160], rdx
mov qword ptr [rsp + 152], rax
cmp qword ptr [rsp + 152], 0
jne .LBB5_8
# inner range exhausted: swap a[i] <-> a[min_idx] if they differ
mov rax, qword ptr [rsp + 32]
cmp qword ptr [rsp + 112], rax
jne .LBB5_9
jmp .LBB5_1
.LBB5_8:
mov rcx, qword ptr [rsp + 56]
mov rax, qword ptr [rsp + 160]
mov qword ptr [rsp + 24], rax
cmp rax, rcx
setb al
test al, 1
jne .LBB5_10
jmp .LBB5_11
.LBB5_9:
mov rdx, qword ptr [rsp + 32]
mov rsi, qword ptr [rsp + 56]
mov rdi, qword ptr [rsp + 48]
mov rcx, qword ptr [rsp + 112]
lea r8, [rip + .L__unnamed_2]
call qword ptr [rip + core::slice::<impl [T]>::swap@GOTPCREL]
jmp .LBB5_1
.LBB5_10:
# bounds-checked loads of a[j] and a[min_idx], signed compare
mov rcx, qword ptr [rsp + 56]
mov rax, qword ptr [rsp + 48]
mov rdx, qword ptr [rsp + 24]
mov rax, qword ptr [rax + 8*rdx]
mov qword ptr [rsp + 8], rax
mov rax, qword ptr [rsp + 112]
mov qword ptr [rsp + 16], rax
cmp rax, rcx
setb al
test al, 1
jne .LBB5_12
jmp .LBB5_13
.LBB5_11:
mov rsi, qword ptr [rsp + 56]
mov rdi, qword ptr [rsp + 24]
lea rdx, [rip + .L__unnamed_3]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
.LBB5_12:
mov rax, qword ptr [rsp + 8]
mov rcx, qword ptr [rsp + 48]
mov rdx, qword ptr [rsp + 16]
cmp rax, qword ptr [rcx + 8*rdx]
jl .LBB5_14                       # a[j] < a[min_idx]: update min_idx
jmp .LBB5_6
.LBB5_13:
mov rsi, qword ptr [rsp + 56]
mov rdi, qword ptr [rsp + 16]
lea rdx, [rip + .L__unnamed_4]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
.LBB5_14:
mov rax, qword ptr [rsp + 24]
mov qword ptr [rsp + 112], rax    # min_idx = j
jmp .LBB5_6
# Panic metadata: source path plus packed line/column Location records
# referenced by the panic call sites above (byte contents must not change).
.L__unnamed_5:
.ascii "/app/example.rs"
.L__unnamed_1:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\005\000\000\000\022\000\000"
str.0:
.ascii "attempt to add with overflow"
.L__unnamed_2:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\013\000\000\000\022\000\000"
.L__unnamed_3:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\006\000\000\000\020\000\000"
.L__unnamed_4:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\006\000\000\000\032\000\000"
philippeZim/OSTEP_Solutions | 1,210 | chapter28/threads-locks/peterson.s | # array of 2 integers (each size 4 bytes)
# load address of flag into fx register
# access flag[] with 0(%fx,%index,4)
# where %index is a register holding 0 or 1
# index reg contains 0 -> flag[0], if 1->flag[1]
# Peterson's mutual-exclusion algorithm for two threads, written for the
# OSTEP x86.py simulator (note: in this simulator `test a,b` behaves as a
# compare, not a bitwise AND as in real x86).
.var flag 2
# global turn variable
.var turn
# global count
.var count
.main
# put address of flag into fx
lea flag, %fx
# assume thread ID is in bx (0 or 1, scale by 4 to get proper flag address)
mov %bx, %cx # bx: self, now copies to cx
neg %cx # cx: - self
add $1, %cx # cx: 1 - self
.acquire
mov $1, 0(%fx,%bx,4) # flag[self] = 1   (announce intent)
mov %cx, turn # turn = 1 - self         (yield priority to the other thread)
.spin1
mov 0(%fx,%cx,4), %ax # flag[1-self]
test $1, %ax
jne .fini # if flag[1-self] != 1, skip past loop to .fini
.spin2 # just labeled for fun, not needed
mov turn, %ax
test %cx, %ax # compare 'turn' and '1 - self'
je .spin1 # if turn==1-self, go back and start spin again
# fall out of spin
.fini
# do critical section now (count++ done non-atomically, protected by the lock)
mov count, %ax
add $1, %ax
mov %ax, count
.release
mov $0, 0(%fx,%bx,4) # flag[self] = 0
# end case: make sure it's other's turn
mov %cx, turn # turn = 1 - self
halt
philippeZim/OSTEP_Solutions | 2,026 | chapter27/threads-api/file.s | .file "main-signal.c"
.text
.globl done
.bss
.align 4
.type done, @object
.size done, 4
done:
.zero 4
.section .rodata
.LC0:
.string "this should print first"
.text
.globl worker
.type worker, @function
# void *worker(void *arg): prints .LC0 via puts, sets done = 1, returns NULL.
# -O0 code: arg is spilled to the frame and otherwise unused.
worker:
.LFB6:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
subq $16, %rsp
movq %rdi, -8(%rbp)
leaq .LC0(%rip), %rax
movq %rax, %rdi
call puts@PLT
movl $1, done(%rip)               # signal the main thread's spin loop
movl $0, %eax                     # return NULL
leave
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE6:
.size worker, .-worker
# String constants: source filename, assertion text, and final message
# (runtime string bytes must not be altered).
.section .rodata
.LC1:
.string "main-signal.c"
.align 8
.LC2:
.string "pthread_create(&p, ((void *)0), worker, ((void *)0)) == 0"
.LC3:
.string "this should print last"
.text
.globl main
.type main, @function
# int main(int argc, char **argv): spawns worker() via pthread_create
# (asserting success), spins on `done` until the worker sets it, prints
# .LC3, and returns 0. Stack-protector canary guards the pthread_t local.
# NOTE(review): at -O0 the spin loop reloads done(%rip) each iteration;
# with optimization this pattern would need volatile/atomics.
main:
.LFB7:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
subq $32, %rsp
movl %edi, -20(%rbp)
movq %rsi, -32(%rbp)
movq %fs:40, %rax                 # install stack canary
movq %rax, -8(%rbp)
xorl %eax, %eax
leaq -16(%rbp), %rax              # &p (pthread_t)
movl $0, %ecx
leaq worker(%rip), %rdx
movl $0, %esi
movq %rax, %rdi
call pthread_create@PLT
testl %eax, %eax
je .L8
# pthread_create failed: __assert_fail(expr, file, line, func)
leaq __PRETTY_FUNCTION__.0(%rip), %rax
movq %rax, %rcx
movl $15, %edx
leaq .LC1(%rip), %rax
movq %rax, %rsi
leaq .LC2(%rip), %rax
movq %rax, %rdi
call __assert_fail@PLT
.L8:
nop
.L5:
movl done(%rip), %eax             # busy-wait until worker sets done
testl %eax, %eax
je .L5
leaq .LC3(%rip), %rax
movq %rax, %rdi
call puts@PLT
movl $0, %eax
movq -8(%rbp), %rdx               # verify stack canary
subq %fs:40, %rdx
je .L7
call __stack_chk_fail@PLT
.L7:
leave
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE7:
.size main, .-main
# Trailing metadata: __func__ string for the assert, compiler ident,
# non-executable-stack marker, and the GNU property note (CET/IBT+SHSTK).
.section .rodata
.type __PRETTY_FUNCTION__.0, @object
.size __PRETTY_FUNCTION__.0, 5
__PRETTY_FUNCTION__.0:
.string "main"
.ident "GCC: (Ubuntu 13.2.0-23ubuntu4) 13.2.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
PhamNamSon/ECE_421 | 1,735 | Lab_5/question_4.s | example::question_2:
push rax
test rsi, rsi
je .LBB0_11
xor edx, edx
jmp .LBB0_2
.LBB0_10:
cmp rdx, rsi
je .LBB0_11
.LBB0_2:
mov r8, rdx
inc rdx
mov r9, rdx
mov rcx, r8
cmp rdx, rsi
jb .LBB0_3
jmp .LBB0_10
.LBB0_6:
inc r9
mov rcx, rax
cmp r9, rsi
jae .LBB0_7
.LBB0_3:
cmp rcx, rsi
jae .LBB0_12
mov r10, qword ptr [rdi + 8*r9]
mov rax, r9
cmp r10, qword ptr [rdi + 8*rcx]
jl .LBB0_6
mov rax, rcx
jmp .LBB0_6
.LBB0_7:
cmp rax, r8
je .LBB0_10
cmp rax, rsi
jae .LBB0_13
mov rcx, qword ptr [rdi + 8*r8]
mov r9, qword ptr [rdi + 8*rax]
mov qword ptr [rdi + 8*r8], r9
mov qword ptr [rdi + 8*rax], rcx
jmp .LBB0_10
.LBB0_11:
mov rax, rdi
mov rdx, rsi
pop rcx
ret
.LBB0_12:
lea rdx, [rip + .L__unnamed_1]
mov rdi, rcx
call qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
.LBB0_13:
lea rdx, [rip + .L__unnamed_2]
mov rdi, rax
call qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
.L__unnamed_3:
.ascii "/app/example.rs"
.L__unnamed_2:
.quad .L__unnamed_3
.asciz "\017\000\000\000\000\000\000\000\013\000\000\000\022\000\000"
.L__unnamed_1:
.quad .L__unnamed_3
.asciz "\017\000\000\000\000\000\000\000\006\000\000\000\032\000\000" |
PhamNamSon/ECE_421 | 7,996 | Lab_5/question_3.s | <usize as core::iter::range::Step>::forward_unchecked:
mov rax, rdi
add rax, rsi
ret
# Iterator::next for Range<usize>: delegates to spec_next via the GOT
# (push/pop rax only maintains 16-byte stack alignment for the call).
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next:
push rax
mov rax, qword ptr [rip + <core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next@GOTPCREL]
call rax
pop rcx
ret
# <[u64]>::swap(ptr=rdi, len=rsi, a=rdx, b=rcx, caller-location=r8):
# both indices bounds-checked, then elements exchanged via a stack temp.
core::slice::<impl [T]>::swap:
sub rsp, 56
mov qword ptr [rsp + 8], rdi
mov qword ptr [rsp + 16], rsi
mov qword ptr [rsp + 24], rdx
mov qword ptr [rsp + 32], rcx
mov qword ptr [rsp + 40], r8
cmp rdx, rsi
setb al
test al, 1
jne .LBB2_1                       # a < len
jmp .LBB2_2
.LBB2_1:
mov rax, qword ptr [rsp + 32]
mov rcx, qword ptr [rsp + 16]
mov rdx, qword ptr [rsp + 8]
mov rsi, qword ptr [rsp + 24]
shl rsi, 3
add rdx, rsi                      # rdx = &slice[a]
mov qword ptr [rsp], rdx
cmp rax, rcx
setb al
test al, 1
jne .LBB2_3                       # b < len
jmp .LBB2_4
.LBB2_2:
# index a out of bounds -> panic_bounds_check
mov rdx, qword ptr [rsp + 40]
mov rsi, qword ptr [rsp + 16]
mov rdi, qword ptr [rsp + 24]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
.LBB2_3:
mov rax, qword ptr [rsp + 8]
mov rcx, qword ptr [rsp + 32]
mov rdx, qword ptr [rsp]
mov rdi, rcx
shl rdi, 3
mov rsi, rax
add rsi, rdi                      # rsi = &slice[b]
mov rdi, qword ptr [rdx]
mov qword ptr [rsp + 48], rdi
mov rsi, qword ptr [rsi]
mov qword ptr [rdx], rsi
mov rdx, qword ptr [rsp + 48]
mov qword ptr [rax + 8*rcx], rdx
add rsp, 56
ret
.LBB2_4:
# index b out of bounds -> panic_bounds_check
mov rdx, qword ptr [rsp + 40]
mov rsi, qword ptr [rsp + 16]
mov rdi, qword ptr [rsp + 32]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
# IntoIterator for Range<usize>: identity — returns (start, end) unchanged.
<I as core::iter::traits::collect::IntoIterator>::into_iter:
mov rdx, rsi
mov rax, rdi
ret
# Range<usize>::spec_next(&mut self in rdi) -> Option<usize>
# (rax = 0/1 discriminant, rdx = value); advances self.start when yielding.
<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next:
sub rsp, 40
mov qword ptr [rsp + 16], rdi
mov rax, qword ptr [rdi]
cmp rax, qword ptr [rdi + 8]      # start vs end (unsigned)
jb .LBB4_2
mov qword ptr [rsp + 24], 0       # None
jmp .LBB4_3
.LBB4_2:
mov rax, qword ptr [rsp + 16]
mov rdi, qword ptr [rax]
mov qword ptr [rsp + 8], rdi
mov esi, 1
call <usize as core::iter::range::Step>::forward_unchecked
mov rcx, qword ptr [rsp + 16]
mov rdx, rax
mov rax, qword ptr [rsp + 8]
mov qword ptr [rcx], rdx          # self.start += 1
mov qword ptr [rsp + 32], rax
mov qword ptr [rsp + 24], 1       # Some(value)
.LBB4_3:
mov rax, qword ptr [rsp + 24]
mov rdx, qword ptr [rsp + 32]
add rsp, 40
ret
# Unoptimized selection sort (duplicate of question_3.s). rdi = ptr,
# rsi = len; outer Range iterator at [rsp+80], inner at [rsp+136],
# min index tracked at [rsp+112].
example::question_2:
sub rsp, 168
mov qword ptr [rsp + 48], rdi
mov qword ptr [rsp + 56], rsi
mov qword ptr [rsp + 64], 0
mov qword ptr [rsp + 72], rsi
mov rdi, qword ptr [rsp + 64]
mov rsi, qword ptr [rsp + 72]
call qword ptr [rip + <I as core::iter::traits::collect::IntoIterator>::into_iter@GOTPCREL]
mov qword ptr [rsp + 80], rax
mov qword ptr [rsp + 88], rdx
.LBB5_1:
# outer: i = (0..len).next()
mov rax, qword ptr [rip + core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next@GOTPCREL]
lea rdi, [rsp + 80]
call rax
mov qword ptr [rsp + 104], rdx
mov qword ptr [rsp + 96], rax
cmp qword ptr [rsp + 96], 0
jne .LBB5_3
# done: return the slice (ptr, len)
mov rdx, qword ptr [rsp + 56]
mov rax, qword ptr [rsp + 48]
add rsp, 168
ret
.LBB5_3:
# min_idx = i; build inner range i+1..len (checked add)
mov rax, qword ptr [rsp + 104]
mov qword ptr [rsp + 32], rax
mov qword ptr [rsp + 112], rax
add rax, 1
mov qword ptr [rsp + 40], rax
setb al
test al, 1
jne .LBB5_5
mov rax, qword ptr [rsp + 56]
mov rcx, qword ptr [rsp + 40]
mov qword ptr [rsp + 120], rcx
mov qword ptr [rsp + 128], rax
mov rdi, qword ptr [rsp + 120]
mov rsi, qword ptr [rsp + 128]
call qword ptr [rip + <I as core::iter::traits::collect::IntoIterator>::into_iter@GOTPCREL]
mov qword ptr [rsp + 136], rax
mov qword ptr [rsp + 144], rdx
jmp .LBB5_6
.LBB5_5:
# i + 1 overflowed: panic
lea rdi, [rip + str.0]
lea rdx, [rip + .L__unnamed_1]
mov rax, qword ptr [rip + core::panicking::panic@GOTPCREL]
mov esi, 28
call rax
.LBB5_6:
# inner: j = (i+1..len).next()
mov rax, qword ptr [rip + core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next@GOTPCREL]
lea rdi, [rsp + 136]
call rax
mov qword ptr [rsp + 160], rdx
mov qword ptr [rsp + 152], rax
cmp qword ptr [rsp + 152], 0
jne .LBB5_8
# inner exhausted: swap a[i] <-> a[min_idx] if needed
mov rax, qword ptr [rsp + 32]
cmp qword ptr [rsp + 112], rax
jne .LBB5_9
jmp .LBB5_1
.LBB5_8:
mov rcx, qword ptr [rsp + 56]
mov rax, qword ptr [rsp + 160]
mov qword ptr [rsp + 24], rax
cmp rax, rcx
setb al
test al, 1
jne .LBB5_10
jmp .LBB5_11
.LBB5_9:
mov rdx, qword ptr [rsp + 32]
mov rsi, qword ptr [rsp + 56]
mov rdi, qword ptr [rsp + 48]
mov rcx, qword ptr [rsp + 112]
lea r8, [rip + .L__unnamed_2]
call qword ptr [rip + core::slice::<impl [T]>::swap@GOTPCREL]
jmp .LBB5_1
.LBB5_10:
# bounds-checked loads of a[j] and a[min_idx]
mov rcx, qword ptr [rsp + 56]
mov rax, qword ptr [rsp + 48]
mov rdx, qword ptr [rsp + 24]
mov rax, qword ptr [rax + 8*rdx]
mov qword ptr [rsp + 8], rax
mov rax, qword ptr [rsp + 112]
mov qword ptr [rsp + 16], rax
cmp rax, rcx
setb al
test al, 1
jne .LBB5_12
jmp .LBB5_13
.LBB5_11:
mov rsi, qword ptr [rsp + 56]
mov rdi, qword ptr [rsp + 24]
lea rdx, [rip + .L__unnamed_3]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
.LBB5_12:
mov rax, qword ptr [rsp + 8]
mov rcx, qword ptr [rsp + 48]
mov rdx, qword ptr [rsp + 16]
cmp rax, qword ptr [rcx + 8*rdx]
jl .LBB5_14                       # a[j] < a[min_idx]: new minimum
jmp .LBB5_6
.LBB5_13:
mov rsi, qword ptr [rsp + 56]
mov rdi, qword ptr [rsp + 16]
lea rdx, [rip + .L__unnamed_4]
mov rax, qword ptr [rip + core::panicking::panic_bounds_check@GOTPCREL]
call rax
.LBB5_14:
mov rax, qword ptr [rsp + 24]
mov qword ptr [rsp + 112], rax    # min_idx = j
jmp .LBB5_6
# Panic metadata (path + packed line/column Locations); bytes must not change.
.L__unnamed_5:
.ascii "/app/example.rs"
.L__unnamed_1:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\005\000\000\000\022\000\000"
str.0:
.ascii "attempt to add with overflow"
.L__unnamed_2:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\013\000\000\000\022\000\000"
.L__unnamed_3:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\006\000\000\000\020\000\000"
.L__unnamed_4:
.quad .L__unnamed_5
.asciz "\017\000\000\000\000\000\000\000\006\000\000\000\032\000\000"
Pico-KID/picod | 5,802 | crypto/muhash/src/keccakf1600_x86-64-osx.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.p2align 5
# Keccak-f[1600] core (cryptogams, Mach-O variant). On entry rdi points
# 100 bytes past the 25-lane state (lanes addressed at -100(%rdi)..92(%rdi)),
# rsi points into a 200-byte scratch area, r15 points at the iotas round
# constants. One round per iteration; source and destination ping-pong via
# xchgq. The caller keeps certain lanes bit-complemented so chi needs fewer
# NOTs (see the notq wrapper in _KeccakF1600). The trailing testq exploits
# the 256-byte alignment of the iotas region to detect the 24th round.
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
# Preload the last plane (A[20..24]) into rax..rbp; these registers carry
# the running column parities through each round.
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
# --- theta: accumulate the five column parities into rax..rbp ---
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# D[x] = C[x-1] ^ rol(C[x+1], 1)
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# --- rho+pi+chi+iota, output row 0 (iota adds the round constant) ---
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9               # iota: xor round constant, advance r15
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
# --- output row 1 ---
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
# --- output row 2 ---
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
# --- output row 3 ---
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# --- output row 4 (kept in rax..rbp for the next round's theta) ---
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi               # swap state/scratch for the next round
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# r15 low byte becomes 0 exactly after consuming 24 round constants
# (the iotas region is 256-byte aligned with 64 bytes of padding).
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15          # rewind r15 to the start of iotas
.byte 0xf3,0xc3
.cfi_endproc
.globl _KeccakF1600
.p2align 5
# void KeccakF1600(uint64_t A[25]) — public entry (SysV AMD64, Mach-O).
# Saves all callee-saved GPRs, biases rdi by +100 (so lanes sit at
# -100..92), complements the five lanes the core expects inverted,
# allocates 200 bytes of scratch, runs 24 rounds, then un-complements.
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
# Complement lanes A[1],A[2],A[8],A[12],A[17],A[20] so the core's chi step
# can use fewer NOT instructions (lane-complementing technique).
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi           # scratch half for the round ping-pong
call __KeccakF1600
# Restore the true (un-complemented) lane values.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
# 24 Keccak-f[1600] round constants. The 256-byte alignment plus the 64-byte
# zero pad makes (iotas + 24*8) end on a 256-byte boundary, which is what
# __KeccakF1600's `testq $255,%r15` loop-exit test relies on.
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
# ASCII banner ("Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS ...")
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
Pico-KID/picod | 6,073 | crypto/muhash/src/keccakf1600_x86-64.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.type __KeccakF1600,@function
.align 32
# Keccak-f[1600] core (cryptogams, ELF variant; identical logic to the
# Mach-O copy above). rdi = state biased +100 (lanes at -100..92),
# rsi = scratch, r15 = iotas round-constant pointer. One round per
# iteration, state/scratch swapped via xchgq; certain lanes are kept
# bit-complemented by the caller to reduce NOTs in chi.
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
# Preload the last plane (A[20..24]) into rax..rbp.
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
# --- theta: column parities ---
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# D[x] = C[x-1] ^ rol(C[x+1], 1)
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# --- rho+pi+chi+iota, output row 0 ---
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9               # iota: xor round constant, advance r15
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
# --- output row 1 ---
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
# --- output row 2 ---
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
# --- output row 3 ---
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# --- output row 4 (kept in registers for the next round's theta) ---
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi               # swap state/scratch for the next round
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
# Exit after 24 rounds: r15's low byte hits 0 thanks to the 256-byte
# alignment and 64-byte pad of the iotas region.
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15          # rewind r15 to the start of iotas
.byte 0xf3,0xc3
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
.align 256
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .note.gnu.property,"a",@note
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
|
Pico-KID/picod | 5,802 | crypto/hashes/src/keccakf1600_x86-64-osx.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.p2align 5
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.globl _KeccakF1600
.p2align 5
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
Pico-KID/picod | 6,073 | crypto/hashes/src/keccakf1600_x86-64.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.type __KeccakF1600,@function
.align 32
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
.align 256
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .note.gnu.property,"a",@note
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
|
philippeZim/OSTEP_Solutions | 1,210 | chapter28/threads-locks/peterson.s | # array of 2 integers (each size 4 bytes)
# load address of flag into fx register
# access flag[] with 0(%fx,%index,4)
# where %index is a register holding 0 or 1
# index reg contains 0 -> flag[0], if 1->flag[1]
.var flag 2
# global turn variable
.var turn
# global count
.var count
.main
# put address of flag into fx
lea flag, %fx
# assume thread ID is in bx (0 or 1, scale by 4 to get proper flag address)
mov %bx, %cx # bx: self, now copies to cx
neg %cx # cx: - self
add $1, %cx # cx: 1 - self
.acquire
mov $1, 0(%fx,%bx,4) # flag[self] = 1
mov %cx, turn # turn = 1 - self
.spin1
mov 0(%fx,%cx,4), %ax # flag[1-self]
test $1, %ax
jne .fini # if flag[1-self] != 1, skip past loop to .fini
.spin2 # just labeled for fun, not needed
mov turn, %ax
test %cx, %ax # compare 'turn' and '1 - self'
je .spin1 # if turn==1-self, go back and start spin again
# fall out of spin
.fini
# do critical section now
mov count, %ax
add $1, %ax
mov %ax, count
.release
mov $0, 0(%fx,%bx,4) # flag[self] = 0
# end case: make sure it's other's turn
mov %cx, turn # turn = 1 - self
halt
|
philippeZim/OSTEP_Solutions | 2,026 | chapter27/threads-api/file.s | .file "main-signal.c"
.text
.globl done
.bss
.align 4
.type done, @object
.size done, 4
done:
.zero 4
.section .rodata
.LC0:
.string "this should print first"
.text
.globl worker
.type worker, @function
worker:
.LFB6:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
subq $16, %rsp
movq %rdi, -8(%rbp)
leaq .LC0(%rip), %rax
movq %rax, %rdi
call puts@PLT
movl $1, done(%rip)
movl $0, %eax
leave
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE6:
.size worker, .-worker
.section .rodata
.LC1:
.string "main-signal.c"
.align 8
.LC2:
.string "pthread_create(&p, ((void *)0), worker, ((void *)0)) == 0"
.LC3:
.string "this should print last"
.text
.globl main
.type main, @function
main:
.LFB7:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
subq $32, %rsp
movl %edi, -20(%rbp)
movq %rsi, -32(%rbp)
movq %fs:40, %rax
movq %rax, -8(%rbp)
xorl %eax, %eax
leaq -16(%rbp), %rax
movl $0, %ecx
leaq worker(%rip), %rdx
movl $0, %esi
movq %rax, %rdi
call pthread_create@PLT
testl %eax, %eax
je .L8
leaq __PRETTY_FUNCTION__.0(%rip), %rax
movq %rax, %rcx
movl $15, %edx
leaq .LC1(%rip), %rax
movq %rax, %rsi
leaq .LC2(%rip), %rax
movq %rax, %rdi
call __assert_fail@PLT
.L8:
nop
.L5:
movl done(%rip), %eax
testl %eax, %eax
je .L5
leaq .LC3(%rip), %rax
movq %rax, %rdi
call puts@PLT
movl $0, %eax
movq -8(%rbp), %rdx
subq %fs:40, %rdx
je .L7
call __stack_chk_fail@PLT
.L7:
leave
.cfi_def_cfa 7, 8
ret
.cfi_endproc
.LFE7:
.size main, .-main
.section .rodata
.type __PRETTY_FUNCTION__.0, @object
.size __PRETTY_FUNCTION__.0, 5
__PRETTY_FUNCTION__.0:
.string "main"
.ident "GCC: (Ubuntu 13.2.0-23ubuntu4) 13.2.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
Pico-KID/picod | 5,802 | crypto/muhash/src/keccakf1600_x86-64-osx.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.p2align 5
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.globl _KeccakF1600
.p2align 5
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.p2align 8
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
Pico-KID/picod | 6,073 | crypto/muhash/src/keccakf1600_x86-64.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.type __KeccakF1600,@function
.align 32
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9
leaq 8(%r15),%r15
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
movq %rdx,%rbp
movq %r13,%rdx
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15
.byte 0xf3,0xc3
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
leaq 100(%rdi),%rdi
subq $200,%rsp
.cfi_adjust_cfa_offset 200
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15
leaq 100(%rsp),%rsi
call __KeccakF1600
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
.align 256
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.section .note.gnu.property,"a",@note
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
|
Pico-KID/picod | 5,802 | crypto/hashes/src/keccakf1600_x86-64-osx.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.p2align 5
# Internal Keccak-f[1600] core (one call = all 24 rounds; not exported).
# In:  %rdi = state + 100 (25 lanes addressable as -100(%rdi)..92(%rdi))
#      %rsi = 200-byte scratch + 100 (ping-pong output buffer)
#      %r15 = iotas round-constant table (table ends on a 256-byte boundary)
# The caller keeps six lanes bit-complemented ("lane complementing") so the
# chi step below needs far fewer NOT instructions.  Each loop iteration
# computes one round into the other buffer and swaps %rdi/%rsi, so after an
# even number of rounds (24) the result is back in the original buffer.
# Clobbers: rax,rbx,rcx,rdx,rbp,r8-r14, flags; advances and rewinds %r15.
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
# Pre-load row 4 (lanes A[20..24]) to seed the theta column parities.
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp L$oop
.p2align 5
L$oop:
# r8..r11 (plus r12, copied from rbp below) cache the pi-diagonal lanes
# that become output row 0.
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
# Theta, part 1: fold the remaining rows into the five column parities
# C[0..4], accumulated in rax,rbx,rcx,rdx,rbp.
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# Theta, part 2: D[x] = C[x-1] ^ rol(C[x+1],1); the D values end up in
# rax,rbx,rcx,rdx,rbp (r13 keeps a pre-rotation C for the wrap-around).
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# Rho + Pi + Chi for output row 0 (rotations 0,44,43,21,14); Iota is folded
# into lane (0,0) via the xor with (%r15).  Loads of the next row's
# pi-diagonal lanes are interleaved with this row's chi for scheduling.
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9 # Iota: xor in this round's constant
leaq 8(%r15),%r15 # advance to the next round constant
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
# Rho + Pi + Chi for output row 1 (stored at -60..-28(%rsi)).
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
# Rho + Pi + Chi for output row 2 (stored at -20..12(%rsi)).
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
# Rho + Pi + Chi for output row 3 (stored at 20..52(%rsi)).
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# Rho + Pi + Chi for output row 4, computed directly in rax..rbp.
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi # ping-pong: output buffer becomes next round's input
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
# Re-seat the fresh row-4 lanes in rax..rbp, matching the pre-loop order,
# to seed the next round's theta.
movq %rdx,%rbp
movq %r13,%rdx
# iotas ends exactly on a 256-byte boundary, so the low byte of %r15
# becomes zero after the 24th constant has been consumed.
testq $255,%r15
jnz L$oop
leaq -192(%r15),%r15 # rewind to iotas[0] for the next call (24*8 bytes)
.byte 0xf3,0xc3 # rep ret
.cfi_endproc
.globl _KeccakF1600
.p2align 5
# void KeccakF1600(uint64_t A[25]) -- public entry point (Mach-O leading
# underscore).  SysV AMD64: %rdi = state.  Saves all callee-saved GPRs,
# complements the six lanes expected by the core, runs the 24 rounds via
# __KeccakF1600, then un-complements the same lanes.
_KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
# Save all SysV callee-saved GPRs (rbx, rbp, r12-r15).
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
# Bias the state pointer so every lane fits in a disp8 address (-100..92).
leaq 100(%rdi),%rdi
subq $200,%rsp # 200-byte scratch state: the core's ping-pong buffer
.cfi_adjust_cfa_offset 200
# Lane complementing: store lanes A[1],A[2],A[8],A[12],A[17],A[20] inverted
# so chi inside the hot loop needs fewer NOT instructions; the inverse
# transform is applied again after the call.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15 # round-constant table for the core
leaq 100(%rsp),%rsi # scratch buffer, biased like %rdi
call __KeccakF1600 # 24 rounds; result ends back in the caller's buffer
# Undo the lane complementing.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi # restore the original state pointer
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3 # rep ret
.cfi_endproc
.p2align 8
# 64 bytes of zero padding: together with the 24*8-byte table below, iotas
# ends exactly on the next 256-byte boundary, which __KeccakF1600 uses as
# its loop terminator (testq $255,%r15).
.quad 0,0,0,0,0,0,0,0
iotas: # Keccak-f[1600] round constants RC[0..23] (FIPS 202)
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
# NUL-terminated id string: "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
Pico-KID/picod | 6,073 | crypto/hashes/src/keccakf1600_x86-64.s | # Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl
.text
.type __KeccakF1600,@function
.align 32
# Internal Keccak-f[1600] core (one call = all 24 rounds; not exported).
# In:  %rdi = state + 100 (25 lanes addressable as -100(%rdi)..92(%rdi))
#      %rsi = 200-byte scratch + 100 (ping-pong output buffer)
#      %r15 = iotas round-constant table (table ends on a 256-byte boundary)
# The caller keeps six lanes bit-complemented ("lane complementing") so the
# chi step below needs far fewer NOT instructions.  Each loop iteration
# computes one round into the other buffer and swaps %rdi/%rsi, so after an
# even number of rounds (24) the result is back in the original buffer.
# Clobbers: rax,rbx,rcx,rdx,rbp,r8-r14, flags; advances and rewinds %r15.
__KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
# Pre-load row 4 (lanes A[20..24]) to seed the theta column parities.
movq 60(%rdi),%rax
movq 68(%rdi),%rbx
movq 76(%rdi),%rcx
movq 84(%rdi),%rdx
movq 92(%rdi),%rbp
jmp .Loop
.align 32
.Loop:
# r8..r11 (plus r12, copied from rbp below) cache the pi-diagonal lanes
# that become output row 0.
movq -100(%rdi),%r8
movq -52(%rdi),%r9
movq -4(%rdi),%r10
movq 44(%rdi),%r11
# Theta, part 1: fold the remaining rows into the five column parities
# C[0..4], accumulated in rax,rbx,rcx,rdx,rbp.
xorq -84(%rdi),%rcx
xorq -76(%rdi),%rdx
xorq %r8,%rax
xorq -92(%rdi),%rbx
xorq -44(%rdi),%rcx
xorq -60(%rdi),%rax
movq %rbp,%r12
xorq -68(%rdi),%rbp
xorq %r10,%rcx
xorq -20(%rdi),%rax
xorq -36(%rdi),%rdx
xorq %r9,%rbx
xorq -28(%rdi),%rbp
xorq 36(%rdi),%rcx
xorq 20(%rdi),%rax
xorq 4(%rdi),%rdx
xorq -12(%rdi),%rbx
xorq 12(%rdi),%rbp
# Theta, part 2: D[x] = C[x-1] ^ rol(C[x+1],1); the D values end up in
# rax,rbx,rcx,rdx,rbp (r13 keeps a pre-rotation C for the wrap-around).
movq %rcx,%r13
rolq $1,%rcx
xorq %rax,%rcx
xorq %r11,%rdx
rolq $1,%rax
xorq %rdx,%rax
xorq 28(%rdi),%rbx
rolq $1,%rdx
xorq %rbx,%rdx
xorq 52(%rdi),%rbp
rolq $1,%rbx
xorq %rbp,%rbx
rolq $1,%rbp
xorq %r13,%rbp
# Rho + Pi + Chi for output row 0 (rotations 0,44,43,21,14); Iota is folded
# into lane (0,0) via the xor with (%r15).  Loads of the next row's
# pi-diagonal lanes are interleaved with this row's chi for scheduling.
xorq %rcx,%r9
xorq %rdx,%r10
rolq $44,%r9
xorq %rbp,%r11
xorq %rax,%r12
rolq $43,%r10
xorq %rbx,%r8
movq %r9,%r13
rolq $21,%r11
orq %r10,%r9
xorq %r8,%r9
rolq $14,%r12
xorq (%r15),%r9 # Iota: xor in this round's constant
leaq 8(%r15),%r15 # advance to the next round constant
movq %r12,%r14
andq %r11,%r12
movq %r9,-100(%rsi)
xorq %r10,%r12
notq %r10
movq %r12,-84(%rsi)
orq %r11,%r10
movq 76(%rdi),%r12
xorq %r13,%r10
movq %r10,-92(%rsi)
andq %r8,%r13
movq -28(%rdi),%r9
xorq %r14,%r13
movq -20(%rdi),%r10
movq %r13,-68(%rsi)
orq %r8,%r14
movq -76(%rdi),%r8
xorq %r11,%r14
movq 28(%rdi),%r11
movq %r14,-76(%rsi)
# Rho + Pi + Chi for output row 1 (stored at -60..-28(%rsi)).
xorq %rbp,%r8
xorq %rdx,%r12
rolq $28,%r8
xorq %rcx,%r11
xorq %rax,%r9
rolq $61,%r12
rolq $45,%r11
xorq %rbx,%r10
rolq $20,%r9
movq %r8,%r13
orq %r12,%r8
rolq $3,%r10
xorq %r11,%r8
movq %r8,-36(%rsi)
movq %r9,%r14
andq %r13,%r9
movq -92(%rdi),%r8
xorq %r12,%r9
notq %r12
movq %r9,-28(%rsi)
orq %r11,%r12
movq -44(%rdi),%r9
xorq %r10,%r12
movq %r12,-44(%rsi)
andq %r10,%r11
movq 60(%rdi),%r12
xorq %r14,%r11
movq %r11,-52(%rsi)
orq %r10,%r14
movq 4(%rdi),%r10
xorq %r13,%r14
movq 52(%rdi),%r11
movq %r14,-60(%rsi)
# Rho + Pi + Chi for output row 2 (stored at -20..12(%rsi)).
xorq %rbp,%r10
xorq %rax,%r11
rolq $25,%r10
xorq %rdx,%r9
rolq $8,%r11
xorq %rbx,%r12
rolq $6,%r9
xorq %rcx,%r8
rolq $18,%r12
movq %r10,%r13
andq %r11,%r10
rolq $1,%r8
notq %r11
xorq %r9,%r10
movq %r10,-12(%rsi)
movq %r12,%r14
andq %r11,%r12
movq -12(%rdi),%r10
xorq %r13,%r12
movq %r12,-4(%rsi)
orq %r9,%r13
movq 84(%rdi),%r12
xorq %r8,%r13
movq %r13,-20(%rsi)
andq %r8,%r9
xorq %r14,%r9
movq %r9,12(%rsi)
orq %r8,%r14
movq -60(%rdi),%r9
xorq %r11,%r14
movq 36(%rdi),%r11
movq %r14,4(%rsi)
# Rho + Pi + Chi for output row 3 (stored at 20..52(%rsi)).
movq -68(%rdi),%r8
xorq %rcx,%r10
xorq %rdx,%r11
rolq $10,%r10
xorq %rbx,%r9
rolq $15,%r11
xorq %rbp,%r12
rolq $36,%r9
xorq %rax,%r8
rolq $56,%r12
movq %r10,%r13
orq %r11,%r10
rolq $27,%r8
notq %r11
xorq %r9,%r10
movq %r10,28(%rsi)
movq %r12,%r14
orq %r11,%r12
xorq %r13,%r12
movq %r12,36(%rsi)
andq %r9,%r13
xorq %r8,%r13
movq %r13,20(%rsi)
orq %r8,%r9
xorq %r14,%r9
movq %r9,52(%rsi)
andq %r14,%r8
xorq %r11,%r8
movq %r8,44(%rsi)
# Rho + Pi + Chi for output row 4, computed directly in rax..rbp.
xorq -84(%rdi),%rdx
xorq -36(%rdi),%rbp
rolq $62,%rdx
xorq 68(%rdi),%rcx
rolq $55,%rbp
xorq 12(%rdi),%rax
rolq $2,%rcx
xorq 20(%rdi),%rbx
xchgq %rsi,%rdi # ping-pong: output buffer becomes next round's input
rolq $39,%rax
rolq $41,%rbx
movq %rdx,%r13
andq %rbp,%rdx
notq %rbp
xorq %rcx,%rdx
movq %rdx,92(%rdi)
movq %rax,%r14
andq %rbp,%rax
xorq %r13,%rax
movq %rax,60(%rdi)
orq %rcx,%r13
xorq %rbx,%r13
movq %r13,84(%rdi)
andq %rbx,%rcx
xorq %r14,%rcx
movq %rcx,76(%rdi)
orq %r14,%rbx
xorq %rbp,%rbx
movq %rbx,68(%rdi)
# Re-seat the fresh row-4 lanes in rax..rbp, matching the pre-loop order,
# to seed the next round's theta.
movq %rdx,%rbp
movq %r13,%rdx
# iotas ends exactly on a 256-byte boundary, so the low byte of %r15
# becomes zero after the 24th constant has been consumed.
testq $255,%r15
jnz .Loop
leaq -192(%r15),%r15 # rewind to iotas[0] for the next call (24*8 bytes)
.byte 0xf3,0xc3 # rep ret
.cfi_endproc
.size __KeccakF1600,.-__KeccakF1600
.globl KeccakF1600
.type KeccakF1600,@function
.align 32
# void KeccakF1600(uint64_t A[25]) -- public entry point (ELF).
# SysV AMD64: %rdi = state.  Saves all callee-saved GPRs, complements the
# six lanes expected by the core, runs the 24 rounds via __KeccakF1600,
# then un-complements the same lanes.
KeccakF1600:
.cfi_startproc
.byte 0xf3,0x0f,0x1e,0xfa # endbr64
# Save all SysV callee-saved GPRs (rbx, rbp, r12-r15).
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-16
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
# Bias the state pointer so every lane fits in a disp8 address (-100..92).
leaq 100(%rdi),%rdi
subq $200,%rsp # 200-byte scratch state: the core's ping-pong buffer
.cfi_adjust_cfa_offset 200
# Lane complementing: store lanes A[1],A[2],A[8],A[12],A[17],A[20] inverted
# so chi inside the hot loop needs fewer NOT instructions; the inverse
# transform is applied again after the call.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq iotas(%rip),%r15 # round-constant table for the core
leaq 100(%rsp),%rsi # scratch buffer, biased like %rdi
call __KeccakF1600 # 24 rounds; result ends back in the caller's buffer
# Undo the lane complementing.
notq -92(%rdi)
notq -84(%rdi)
notq -36(%rdi)
notq -4(%rdi)
notq 36(%rdi)
notq 60(%rdi)
leaq -100(%rdi),%rdi # restore the original state pointer
addq $200,%rsp
.cfi_adjust_cfa_offset -200
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore %r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore %r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore %r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore %r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore %rbp
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore %rbx
.byte 0xf3,0xc3 # rep ret
.cfi_endproc
.size KeccakF1600,.-KeccakF1600
.align 256
# 64 bytes of zero padding: together with the 24*8-byte table below, iotas
# ends exactly on the next 256-byte boundary, which __KeccakF1600 uses as
# its loop terminator (testq $255,%r15).
.quad 0,0,0,0,0,0,0,0
.type iotas,@object
iotas: # Keccak-f[1600] round constants RC[0..23] (FIPS 202)
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
.size iotas,.-iotas
# NUL-terminated id string: "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
# GNU property note advertising x86 ISA compatibility to linker/loader:
# type 0xc0000002 = GNU_PROPERTY_X86_FEATURE_1_AND, value 3 = IBT | SHSTK,
# matching the endbr64 landing pads emitted at each function entry.
.section .note.gnu.property,"a",@note
.long 4,2f-1f,5
.byte 0x47,0x4E,0x55,0
1: .long 0xc0000002,4,3
.align 8
2:
|
pipijing13/FT2-LLM-inference-protection | 27,813 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-aarch64-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# Args passed via 8 registers (64 bytes)
# x0: mr
# x1: nr
# x2: k
# x3: a
# x4: a_stride
# x5: w
# x6: c
# x7: c_stride
#
# Args passed via stack.
# TOS
# |-----------|
# |out ch indx| 0
# |params | 8
# |-----------|
# void pytorch_q8gemm_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t*restrict a,
# size_t a_stride,
# const void*restrict w,
# uint8_t*restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_ukernel_8x8__aarch64_neon
# https://developer.arm.com/docs/ihi0055/d/procedure-call-standard-for-the-arm-64-bit-architecture
# Callee need to save 8-15 vector registers and only the lower 64 bits of each.
# Register map for the body below:
#   x3, x9-x15 : row pointers a0..a7 into A (clamped so rows >= mr alias
#                the last valid row; those rows are computed but not stored)
#   v8-v23     : 8x8 int32 accumulator tile, two 4-lane vectors per row
#   v24 / v25  : a_zero_point (broadcast) / per-channel b_zero_points
#   v26 / v27  : per-channel requantization scales (channels 0-3 / 4-7)
# Load params
LDP x16, x8, [sp]
# NOTE(review): d8-d15 are saved below sp without decrementing sp; AAPCS64
# defines no red zone, so this presumably relies on nothing (signal
# handlers, profilers) touching memory below sp before the matching LDPs
# in the epilogue -- confirm against the target platform's expectations.
STP d15, d14, [sp, -16]
# Load pointer to per channel zero points array
# And go to the a_zero_point with post-index
LDR x17, [x8], 8
STP d13, d12, [sp, -32]
STP d11, d10, [sp, -48]
STP d9, d8, [sp, -64]
# Load bias0123, bias4567
LD1 {v8.4s, v9.4s}, [x5], 32
# Add offset to the base pointer
ADD x17, x17, x16
# Load b_zero_point
LD1 {v25.8b}, [x17]
# Load a_zero_point
LD1R {v24.8b}, [x8]
# Load pointer to per channel requant scale
LDR x17, [x8, 8]
ADD x8, x8, 16
# Initialize all 8 accumulator rows from the bias (row 0 stays in v8/v9).
# v10 := vacc1x0123
MOV v10.16b, v8.16b
# v11 := vacc1x4567
MOV v11.16b, v9.16b
# v12 := vacc2x0123
MOV v12.16b, v8.16b
# v13 := vacc2x4567
MOV v13.16b, v9.16b
# v14 := vacc3x0123
MOV v14.16b, v8.16b
# v15 := vacc3x4567
MOV v15.16b, v9.16b
# v16 := vacc4x0123
MOV v16.16b, v8.16b
# v17 := vacc4x4567
MOV v17.16b, v9.16b
# v18 := vacc5x0123
MOV v18.16b, v8.16b
# v19 := vacc5x4567
MOV v19.16b, v9.16b
# v20 := vacc6x0123
MOV v20.16b, v8.16b
# v21 := vacc6x4567
MOV v21.16b, v9.16b
# v22 := vacc7x0123
MOV v22.16b, v8.16b
# v23 := vacc7x4567
MOV v23.16b, v9.16b
# Fold mul by 4 to get byte offset for requant scale.
# Add offset to the base pointer
ADD x17, x17, x16, lsl#2
// Load requantization_scale
// - v26 = requantization_scale channels 0-3
// - v27 = requantization_scale channels 4-7
LD1 {v26.4s}, [x17], 16
# Compute a1..a7: each row pointer is a_stride past the previous one, but
# clamped (CSEL) to the previous row when mr is smaller than the row index.
# a1
CMP x0, 2
ADD x9, x3, x4
CSEL x9, x3, x9, LO
# a2
ADD x10, x9, x4
CSEL x10, x9, x10, LS
# a3
CMP x0, 4
ADD x11, x10, x4
CSEL x11, x10, x11, LO
# a4
ADD x12, x11, x4
CSEL x12, x11, x12, LS
# a5
CMP x0, 6
ADD x13, x12, x4
CSEL x13, x12, x13, LO
# a6
ADD x14, x13, x4
CSEL x14, x13, x14, LS
# a7
CMP x0, 8
ADD x15, x14, x4
CSEL x15, x14, x15, NE
SUBS x2, x2, 8
B.LO 1f
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 5
#endif
# Main loop: each iteration consumes 8 elements of k (8 packed B rows of
# 8 channels each); B loads are software-pipelined one channel ahead of
# the MACs that use them.
0:
// b0-7 (channel 0)
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
# va0 - va7 := va - va_zero_point
LD1 {v0.8b}, [x3], 8
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// b0-7 (channel 1)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
// b0-7 (channel 2)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
// b0-7 (channel 3)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
// b0-7 (channel 4)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
// b0-7 (channel 5)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
// b0-7 (channel 6)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
// b0-7 (channel 7)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
SUBS x2, x2, 8
SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]
B.HS 0b
# Remainder path: x2 now holds k_remainder - 8, in [-8, -1].
1:
CMP x2, -8
B.EQ 2f
// Adjust a0-a7
# Rewind each row pointer by the overshoot so an aligned 8-byte load ends
# exactly at the row's last valid byte; the valid bytes then occupy the
# high lanes and are shifted down by USHL with the negative count
# 8*(k_rem - 8) held in d29 (v24 is shifted identically so the padding
# lanes subtract to zero).
ADD x3, x3, x2
ADD x9, x9, x2
ADD x10, x10, x2
ADD x11, x11, x2
ADD x12, x12, x2
ADD x13, x13, x2
ADD x14, x14, x2
ADD x15, x15, x2
// a_shift = 8 * k - 64
LSL x2, x2, 3
FMOV d29, x2
USHL d24, d24, d29
// Load x0-a7
LD1 {v0.8b}, [x3], 8
USHL d0, d0, d29
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
USHL d1, d1, d29
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
USHL d2, d2, d29
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
USHL d3, d3, d29
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
USHL d4, d4, d29
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
USHL d5, d5, d29
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
USHL d6, d6, d29
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
USHL d7, d7, d29
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
# Process the 1-7 leftover k values one channel at a time; the CMP/B.LO
# and B.LS thresholds on x2 (= 8*(k_rem-8)) gate how many remain.
// Channel 0
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
CMP x2, -48
B.LO 2f
// Channel 1
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
B.LS 2f
// Channel 2
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
CMP x2, -32
B.LO 2f
// Channel 3
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
B.LS 2f
// Channel 4
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
CMP x2, -16
B.LO 2f
// Channel 5
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
B.LS 2f
// Channel 6
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
# Requantization: int32 accumulators -> fp32, scale per output channel,
# round to nearest with ties-to-even (FCVTNS) back to int32, narrow with
# saturation to int16, add the output zero point, saturate-narrow to
# uint8, then clamp to [vmin, vmax].
2:
# Load requant scale for channels 4-7
LD1 {v27.4s}, [x17]
// Load zero_point:
// - v29 = vzero_point
LD1R {v29.8h}, [x8], 2
// Load max:
// - v30 = vmax
LD1R {v30.16b}, [x8], 1
// Load min:
// - v31 = vmin
LD1R {v31.16b}, [x8]
SCVTF v8.4s, v8.4s
SCVTF v9.4s, v9.4s
SCVTF v10.4s, v10.4s
SCVTF v11.4s, v11.4s
SCVTF v12.4s, v12.4s
SCVTF v13.4s, v13.4s
SCVTF v14.4s, v14.4s
SCVTF v15.4s, v15.4s
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
FMUL v8.4s, v8.4s, v26.4s
FMUL v9.4s, v9.4s, v27.4s
FMUL v10.4s, v10.4s, v26.4s
FMUL v11.4s, v11.4s, v27.4s
FMUL v12.4s, v12.4s, v26.4s
FMUL v13.4s, v13.4s, v27.4s
FMUL v14.4s, v14.4s, v26.4s
FMUL v15.4s, v15.4s, v27.4s
FMUL v16.4s, v16.4s, v26.4s
FMUL v17.4s, v17.4s, v27.4s
FMUL v18.4s, v18.4s, v26.4s
FMUL v19.4s, v19.4s, v27.4s
FMUL v20.4s, v20.4s, v26.4s
FMUL v21.4s, v21.4s, v27.4s
FMUL v22.4s, v22.4s, v26.4s
FMUL v23.4s, v23.4s, v27.4s
FCVTNS v8.4s, v8.4s
FCVTNS v9.4s, v9.4s
FCVTNS v10.4s, v10.4s
FCVTNS v11.4s, v11.4s
FCVTNS v12.4s, v12.4s
FCVTNS v13.4s, v13.4s
FCVTNS v14.4s, v14.4s
FCVTNS v15.4s, v15.4s
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
SQXTN v8.4h, v8.4s
SQXTN v10.4h, v10.4s
SQXTN v12.4h, v12.4s
SQXTN v14.4h, v14.4s
SQXTN v16.4h, v16.4s
SQXTN v18.4h, v18.4s
SQXTN v20.4h, v20.4s
SQXTN v22.4h, v22.4s
SQXTN2 v8.8h, v9.4s
SQXTN2 v10.8h, v11.4s
SQXTN2 v12.8h, v13.4s
SQXTN2 v14.8h, v15.4s
SQXTN2 v16.8h, v17.4s
SQXTN2 v18.8h, v19.4s
SQXTN2 v20.8h, v21.4s
SQXTN2 v22.8h, v23.4s
SQADD v8.8h, v8.8h, v29.8h
SQADD v10.8h, v10.8h, v29.8h
SQADD v12.8h, v12.8h, v29.8h
SQADD v14.8h, v14.8h, v29.8h
SQADD v16.8h, v16.8h, v29.8h
SQADD v18.8h, v18.8h, v29.8h
SQADD v20.8h, v20.8h, v29.8h
SQADD v22.8h, v22.8h, v29.8h
SQXTUN v8.8b, v8.8h
SQXTUN v12.8b, v12.8h
SQXTUN v16.8b, v16.8h
SQXTUN v20.8b, v20.8h
SQXTUN2 v8.16b, v10.8h
SQXTUN2 v12.16b, v14.8h
SQXTUN2 v16.16b, v18.8h
SQXTUN2 v20.16b, v22.8h
UMIN v8.16b, v8.16b, v30.16b
UMIN v12.16b, v12.16b, v30.16b
UMIN v16.16b, v16.16b, v30.16b
UMIN v20.16b, v20.16b, v30.16b
UMAX v8.16b, v8.16b, v31.16b
UMAX v12.16b, v12.16b, v31.16b
UMAX v16.16b, v16.16b, v31.16b
UMAX v20.16b, v20.16b, v31.16b
// Compute c0-c7
# Output row pointers, clamped the same way as the A row pointers so rows
# beyond mr alias the last valid row and overwrite it harmlessly.
ADD x9, x6, x7
CMP x0, 2
CSEL x9, x6, x9, LO
ADD x10, x9, x7
CSEL x10, x9, x10, LS
ADD x11, x10, x7
CMP x0, 4
CSEL x11, x10, x11, LO
ADD x12, x11, x7
CSEL x12, x11, x12, LS
ADD x13, x12, x7
CMP x0, 6
CSEL x13, x12, x13, LO
ADD x14, x13, x7
CSEL x14, x13, x14, LS
ADD x15, x14, x7
CMP x0, 8
CSEL x15, x14, x15, NE
CMP x1, 8
B.NE 4f
# Fast path: full 8-column tile; each 128-bit result vector holds two rows.
// Store results
ST1 {v8.d}[0], [x6]
ST1 {v8.d}[1], [x9]
ST1 {v12.d}[0], [x10]
ST1 {v12.d}[1], [x11]
ST1 {v16.d}[0], [x12]
ST1 {v16.d}[1], [x13]
ST1 {v20.d}[0], [x14]
ST1 {v20.d}[1], [x15]
# Restore callee-saved d8-d15 from below sp (see NOTE at the prologue).
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 3
#endif
# nr < 8 tail: store the remaining columns in chunks of 4/2/1, rotating
# each result vector (EXT) after every partial store so the next chunk
# sits in lane 0.
4:
CMP x1, 4
B.LO 5f
ST1 {v8.s}[0], [x6], 4
ST1 {v8.s}[2], [x9], 4
ST1 {v12.s}[0], [x10], 4
ST1 {v12.s}[2], [x11], 4
ST1 {v16.s}[0], [x12], 4
ST1 {v16.s}[2], [x13], 4
ST1 {v20.s}[0], [x14], 4
ST1 {v20.s}[2], [x15], 4
SUB x1, x1, 4
EXT v8.16b, v8.16b, v8.16b, 4
EXT v12.16b, v12.16b, v12.16b, 4
EXT v16.16b, v16.16b, v16.16b, 4
EXT v20.16b, v20.16b, v20.16b, 4
5:
CMP x1, 2
B.LO 6f
ST1 {v8.h}[0], [x6], 2
ST1 {v8.h}[4], [x9], 2
ST1 {v12.h}[0], [x10], 2
ST1 {v12.h}[4], [x11], 2
ST1 {v16.h}[0], [x12], 2
ST1 {v16.h}[4], [x13], 2
ST1 {v20.h}[0], [x14], 2
ST1 {v20.h}[4], [x15], 2
SUB x1, x1, 2
EXT v8.16b, v8.16b, v8.16b, 2
EXT v12.16b, v12.16b, v12.16b, 2
EXT v16.16b, v16.16b, v16.16b, 2
EXT v20.16b, v20.16b, v20.16b, 2
6:
CMP x1, 1
B.LO 7f
ST1 {v8.b}[0], [x6]
ST1 {v8.b}[8], [x9]
ST1 {v12.b}[0], [x10]
ST1 {v12.b}[8], [x11]
ST1 {v16.b}[0], [x12]
ST1 {v16.b}[8], [x13]
ST1 {v20.b}[0], [x14]
ST1 {v20.b}[8], [x15]
7:
# Restore callee-saved d8-d15 from below sp (see NOTE at the prologue).
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
END_FUNCTION pytorch_q8gemm_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 18,740 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# Args passed via 4 registers (16 bytes)
# r0: mr
# r1: nr
# r2: k
# r3: a
#
# Args passed via stack.
# TOS
# |-----------|
# |a_stride | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r9 | 64
# |a_stride | 88
# |w | 92
# |c | 96
# |c_stride | 100
# |out ch indx| 104
# |params | 108
# |-----------|
#
#
# New Struct for pytorch_qnnp_conv_quantization_params
# kernel zp : 0 offset
# input zp : 2
# requantization_scale : 4
# output zp : 8
# output max : 10
# output min : 11
# vfmin : 12
# vfmax : 16
# vfmagic : 20
# vimagic : 24
#
# void pytorch_q8gemm_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t*restrict a,
# size_t a_stride,
# const void*restrict w,
# uint8_t*restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
PUSH {r4, r5, r6, r7, r8, r9}
# Load quantization params
# - r7 = quantization_params
LDR r7, [sp, 44]
VPUSH {d8-d15}
# Load bias0123, bias4567
VLDM ip!, {d16-d19}
# Load output channel index
LDR r5, [sp, 104]
# Load pointer to per channel zero points array
# Post-index: After load increment r7 by 4
LDR r4, [r7], #4
# Load a_zero_point:
# - d14 = a_zero_point
VLD1.8 {d14[]}, [r7]
# Load a_stride
# - r9 = a_stride
LDR r9, [sp, 88]
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
# Load pointer to per channel requant scale
# Register offset, load r7+4
LDR r8, [r7, 4]
# Add output_channel_index to the b_zero_point pointer
ADD r4, r4, r5
# Load b_zero_point:
# - d15 = b_zero_point
VLD1.8 {d15}, [r4]
# add 12 bytes to get to vfmax
ADD r7, r7, 12
CMP r0, 2
# r4 := a1 = a0 + a_stride
ADD r4, r3, r9
# Store in r8 pointer from where to load requant scale.
ADD r8, r8, r6
# mr < 2: a1 := a0
MOVLO r4, r3
# r5 := a2 = a1 + a_stride
ADD r5, r4, r9
# q10 := vacc1x0123
VMOV.I32 q10, q8
# mr <= 2: a2 := a1
MOVLS r5, r4
# q11 := vacc1x4567
VMOV.I32 q11, q9
# r6 := a3 = a2 + a_stride
ADD r6, r5, r9
# q12 := vacc2x0123
VMOV.I32 q12, q8
CMP r0, 4
# q13 := vacc2x4567
VMOV.I32 q13, q9
# mr != 4: a3 := a2
MOVNE r6, r5
# q14 := vacc3x0123
VMOV.I32 q14, q8
# k -= 8
SUBS r2, r2, 8
# q15 := vacc3x4567
VMOV.I32 q15, q9
# k < 8: skip the unrolled main loop
BLO 1f
.p2align 5
0:
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]!
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]!
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 7 ###
SUBS r2, r2, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 0b
1:
# r2 = remaining k - 8, in [-8, -1]; -8 means k was a multiple of 8
CMP r2, -8
BEQ 2f
# Adjust a0, a1, a2, a3
ADD r3, r2
ADD r4, r2
ADD r5, r2
ADD r6, r2
# a_shift = 8 * k - 64
LSL r2, r2, 3
VDUP.32 d13, r2
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]
# q0 = va0 = a0
# (negative shift count: VSHL shifts right, moving the valid high bytes down)
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
CMP r2, -48
BLO 2f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 2f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r2, -32
BLO 2f
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 2f
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r2, -16
BLO 2f
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 2f
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
.p2align 4
2:
# Load requantization_scale:
# - q6 = d12:d13 = requantization_scale (channels 0-3)
VLD1.32 {d12, d13}, [r8]!
# Load vfmax:
VLD1.32 {d10[], d11[]}, [r7]!
# - q2 = d4:d5 = requantization_scale (channels 4-7)
VLD1.32 {d4, d5}, [r8]
# Load vfmin:
VLD1.32 {d8[], d9[]}, [r7]!
# Load vfmagic:
VLD1.32 {d0[], d1[]}, [r7]!
# Load vimagic:
VLD1.32 {d2[], d3[]}, [r7]!
# Moved here to hide load latency on d14
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q2
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q2
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q2
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q2
VMIN.F32 q8, q8, q5
VMIN.F32 q9, q9, q5
VMIN.F32 q10, q10, q5
VMIN.F32 q11, q11, q5
VMIN.F32 q12, q12, q5
VMIN.F32 q13, q13, q5
VMIN.F32 q14, q14, q5
VMIN.F32 q15, q15, q5
VMAX.F32 q8, q8, q4
VMAX.F32 q9, q9, q4
VMAX.F32 q10, q10, q4
VMAX.F32 q11, q11, q4
VMAX.F32 q12, q12, q4
VMAX.F32 q13, q13, q4
VMAX.F32 q14, q14, q4
VMAX.F32 q15, q15, q4
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q0
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q0
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q0
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 96]
VSUB.S32 q8, q8, q1
VSUB.S32 q9, q9, q1
VSUB.S32 q10, q10, q1
VSUB.S32 q11, q11, q1
VSUB.S32 q12, q12, q1
VSUB.S32 q13, q13, q1
VSUB.S32 q14, q14, q1
VSUB.S32 q15, q15, q1
# r4 := c1 = c0 + c_stride
ADD r4, r2, r3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
CMP r0, 2
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
# mr < 2: c1 := c0
MOVLO r4, r2
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
# r5 := c2 = c1 + c_stride
ADD r5, r4, r3
VQMOVUN.S16 d16, q8
# mr <= 2: c2 := c1
MOVLS r5, r4
VQMOVUN.S16 d17, q9
VQMOVUN.S16 d18, q10
CMP r0, 4
# r3 := c3 = c2 + c_stride
ADD r3, r5, r3
# mr != 4: c3 := c2
MOVNE r3, r5
CMP r1, 8
VQMOVUN.S16 d19, q11
BNE 4f
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9}
BX lr
.p2align 3
4:
CMP r1, 4
BLO 5f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
5:
CMP r1, 2
BLO 6f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
6:
TEQ r1, 0
BEQ 7f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
7:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9}
BX lr
END_FUNCTION pytorch_q8gemm_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 11,694 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8c2-xzp-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
.syntax unified
# void pytorch_q8gemm_xzp_ukernel_4x8c2__neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t* restrict a,
# size_t a_stride,
# const int32_t* restrict a_sum,
# const void* restrict w,
# uint8_t* restrict c,
# size_t c_stride,
# const union pytorch_qnnp_q31_requantization_params requantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 8]
# Load bias0123(q8), bias4567(q9)
# q8 := vacc0x0123
# q9 := vacc0x4567
VLD1.8 {d16-d19}, [ip]!
# q10 := vacc1x0123
VMOV.I32 q10, q8
# q11 := vacc1x4567
VMOV.I32 q11, q9
# q12 := vacc2x0123
VMOV.I32 q12, q8
# q13 := vacc2x4567
VMOV.I32 q13, q9
# q14 := vacc3x0123
VMOV.I32 q14, q8
# q15 := vacc3x4567
VMOV.I32 q15, q9
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
VPUSH {d8-d15}
# r3 := a0
# r4 := a1
# r5 := a2
# r6 := a3
# r7 := a_sum0
# r8 := a_sum1
# r9 := a_sum2
# r10 := a_sum3
# a_sum0 := a_sum
LDR r7, [sp, 100]
# Load a_stride
# - r10 = a_stride
LDR r10, [sp, 96]
# compare mr to 2
CMP r0, 2
# a1 := a0 + a_stride
ADD r4, r3, r10
# mr < 2, a1 := a0
MOVLO r4, r3
# r8 := a_sum1
ADD r8, r7, 4
# mr < 2, a_sum1 := a_sum0
MOVLO r8, r7
# r5 := a2
ADD r5, r4, r10
# mr <= 2, a2 := a1
MOVLS r5, r4
# r9 := a_sum2
ADD r9, r8, 4
# mr <= 2, a_sum2 := a_sum1
MOVLS r9, r8
# compare mr to 4
CMP r0, 4
# r6 := a3
ADD r6, r5, r10
# mr != 4, a3 := a2
MOVNE r6, r5
# a_sum3 := a_sum2 + 1 int32 element (4 bytes)
# r10 := a_sum3
ADD r10, r9, 4
# mr != 4, a_sum3 := a_sum2
MOVNE r10, r9
# load a_sum
# q0: va_sum0
VLD1.32 {d0[], d1[]}, [r7]
# q1: va_sum1
VLD1.32 {d2[], d3[]}, [r8]
# q2: va_sum2
VLD1.32 {d4[], d5[]}, [r9]
# q3: va_sum3
VLD1.32 {d6[], d7[]}, [r10]
# accumulate a_sum into vacc
# vacc0x0123 = vaddq_s32(vacc0x0123, va_sum0)
VADD.I32 q8, q8, q0
# vacc0x4567 = vaddq_s32(vacc0x4567, va_sum0)
VADD.I32 q9, q9, q0
# vacc1x0123 = vaddq_s32(vacc1x0123, va_sum1)
VADD.I32 q10, q10, q1
# vacc1x4567 = vaddq_s32(vacc1x4567, va_sum1)
VADD.I32 q11, q11, q1
# vacc2x0123 = vaddq_s32(vacc2x0123, va_sum2)
VADD.I32 q12, q12, q2
# vacc2x4567 = vaddq_s32(vacc2x4567, va_sum2)
VADD.I32 q13, q13, q2
# vacc3x0123 = vaddq_s32(vacc3x0123, va_sum3)
VADD.I32 q14, q14, q3
# vacc3x4567 = vaddq_s32(vacc3x4567, va_sum3)
VADD.I32 q15, q15, q3
# k -= 8
SUBS r2, r2, 8
BLO 1f
.p2align 5
0:
# load a
# d0 := va0x01234567
VLD1.8 {d0}, [r3]!
# d1 := va1x01234567
VLD1.8 {d1}, [r4]!
# d2 := va2x01234567
VLD1.8 {d2}, [r5]!
# d3 := va3x01234567
VLD1.8 {d3}, [r6]!
##### k = 0, 1 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
##### k = 2, 3 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
# rotate a
VEXT.8 d0, d0, d0, 2
VEXT.8 d1, d1, d1, 2
VEXT.8 d2, d2, d2, 2
VEXT.8 d3, d3, d3, 2
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
##### k = 4, 5 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
# rotate a
VEXT.8 d0, d0, d0, 2
VEXT.8 d1, d1, d1, 2
VEXT.8 d2, d2, d2, 2
VEXT.8 d3, d3, d3, 2
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
##### k = 6, 7 #####
# load b
# q2 := vb01234567x01
VLD1.8 {d4, d5}, [ip]!
# rotate a
VEXT.8 d0, d0, d0, 2
VEXT.8 d1, d1, d1, 2
VEXT.8 d2, d2, d2, 2
VEXT.8 d3, d3, d3, 2
VMULL.U8 q4, d0, d4
VPADAL.U16 q8, q4
VMULL.U8 q5, d0, d5
VPADAL.U16 q9, q5
VMULL.U8 q6, d1, d4
VPADAL.U16 q10, q6
VMULL.U8 q7, d1, d5
VPADAL.U16 q11, q7
VMULL.U8 q4, d2, d4
VPADAL.U16 q12, q4
VMULL.U8 q5, d2, d5
VPADAL.U16 q13, q5
VMULL.U8 q6, d3, d4
VPADAL.U16 q14, q6
VMULL.U8 q7, d3, d5
VPADAL.U16 q15, q7
# k -= 8
SUBS r2, r2, 8
# k >= 0, loop
BHS 0b
1:
# restore k (r2 += 8); remainder is in [0, 8)
ADDS r2, 8
CMP r2, 4
# branch to 2f when k < 4
BLO 2f
SUB r2, r2, 4
##### k = 0, 1 #####
# d0 := va0x01010101
VLD1.16 {d0[]}, [r3]!
# d1 := va1x01010101
VLD1.16 {d1[]}, [r4]!
# d2 := va2x01010101
VLD1.16 {d2[]}, [r5]!
# d3 := va3x01010101
VLD1.16 {d3[]}, [r6]!
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]!
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
##### k = 2, 3 #####
# d0 := va0x01010101
VLD1.16 {d0[]}, [r3]!
# d1 := va1x01010101
VLD1.16 {d1[]}, [r4]!
# d2 := va2x01010101
VLD1.16 {d2[]}, [r5]!
# d3 := va3x01010101
VLD1.16 {d3[]}, [r6]!
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]!
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
2:
# k >= 2
CMP r2, 2
BLO 3f
SUB r2, r2, 2
##### k = 0, 1 #####
# d0 := va0x01010101
VLD1.16 {d0[]}, [r3]!
# d1 := va1x01010101
VLD1.16 {d1[]}, [r4]!
# d2 := va2x01010101
VLD1.16 {d2[]}, [r5]!
# d3 := va3x01010101
VLD1.16 {d3[]}, [r6]!
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]!
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
3:
# k == 1
CMP r2, 1
BLO 4f
# d0 := va0[0] replicated to all 8 byte lanes
VLD1.8 {d0[]}, [r3]
# d1 := va1[0] replicated to all 8 byte lanes
VLD1.8 {d1[]}, [r4]
# d2 := va2[0] replicated to all 8 byte lanes
VLD1.8 {d2[]}, [r5]
# d3 := va3[0] replicated to all 8 byte lanes
VLD1.8 {d3[]}, [r6]
# q7 := vb01234567x01
VLD1.8 {d14, d15}, [ip]
# row 0
VMULL.U8 q2, d0, d14
VPADAL.U16 q8, q2
VMULL.U8 q3, d0, d15
VPADAL.U16 q9, q3
# row 1
VMULL.U8 q4, d1, d14
VPADAL.U16 q10, q4
VMULL.U8 q5, d1, d15
VPADAL.U16 q11, q5
# row 2
VMULL.U8 q2, d2, d14
VPADAL.U16 q12, q2
VMULL.U8 q3, d2, d15
VPADAL.U16 q13, q3
# row 3
VMULL.U8 q4, d3, d14
VPADAL.U16 q14, q4
VMULL.U8 q5, d3, d15
VPADAL.U16 q15, q5
.p2align 4
4:
# Load params:
# - ip = params
LDR ip, [sp, 116]
# Load multiplier:
# - d12 = vmultiplier
VLD1.32 {d12[]}, [ip]!
# Load right_shift
# - q4 = d8:d9 = vright_shift
VLD1.32 {d8[], d9[]}, [ip]!
VQRDMULH.S32 q8, q8, d12[0]
VQRDMULH.S32 q9, q9, d12[0]
VQRDMULH.S32 q10, q10, d12[0]
VQRDMULH.S32 q11, q11, d12[0]
# Compute vzero_shift_mask
# - q5 = vzero_shift_mask
VCEQ.S32 q5, q4, 0
VQRDMULH.S32 q12, q12, d12[0]
VQRDMULH.S32 q13, q13, d12[0]
VQRDMULH.S32 q14, q14, d12[0]
VQRDMULH.S32 q15, q15, d12[0]
VBIC q0, q8, q5
VBIC q1, q9, q5
VBIC q2, q10, q5
VBIC q3, q11, q5
VSRA.S32 q8, q0, 31
VSRA.S32 q9, q1, 31
VSRA.S32 q10, q2, 31
VSRA.S32 q11, q3, 31
# Load zero_point
# - q7 = d14:d15 = vzero_point
VLD1.16 {d14[], d15[]}, [ip]!
VBIC q0, q12, q5
VBIC q1, q13, q5
VBIC q2, q14, q5
VBIC q3, q15, q5
VSRA.S32 q12, q0, 31
VSRA.S32 q13, q1, 31
VSRA.S32 q14, q2, 31
VSRA.S32 q15, q3, 31
# Load max:
# - q5 = d10:d11 = vmax
VLD1.8 {d10[], d11[]}, [ip]!
VRSHL.S32 q8, q8, q4
VRSHL.S32 q9, q9, q4
VRSHL.S32 q10, q10, q4
VRSHL.S32 q11, q11, q4
VRSHL.S32 q12, q12, q4
VRSHL.S32 q13, q13, q4
VRSHL.S32 q14, q14, q4
VRSHL.S32 q15, q15, q4
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 108]
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
# Load min:
# - q4 = d8:d9 = vmin
VLD1.8 {d8[], d9[]}, [ip]!
# r4 := c1 = c0 + c_stride
ADD r4, r2, r3
VQADD.S16 q8, q8, q7
VQADD.S16 q9, q9, q7
CMP r0, 2
VQADD.S16 q10, q10, q7
VQADD.S16 q11, q11, q7
# mr < 2: c1 := c0
MOVLO r4, r2
VQMOVUN.S16 d16, q8
VQMOVUN.S16 d17, q9
# r5 := c2 = c1 + c_stride
ADD r5, r4, r3
VQMOVUN.S16 d18, q10
VQMOVUN.S16 d19, q11
# mr <= 2: c2 := c1
MOVLS r5, r4
VMIN.U8 q8, q8, q5
CMP r0, 4
VMIN.U8 q9, q9, q5
# r3 := c3 = c2 + c_stride
ADD r3, r5, r3
VMAX.U8 q8, q8, q4
# mr != 4: c3 := c2
MOVNE r3, r5
CMP r1, 8
VMAX.U8 q9, q9, q4
BNE 5f
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
5:
CMP r1, 4
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
6:
CMP r1, 2
BLO 7f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
7:
TEQ r1, 0
BEQ 8f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
8:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8gemm_xzp_ukernel_4x8c2__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
pipijing13/FT2-LLM-inference-protection | 17,365 | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/4x8-dq-aarch32-neon.S | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# r0 mr
# r1 nr
# r2 k
# r3 a
# r6 a_stride
# d14 a_zero_point
# d15 b_zero_point
## Stack
# 4 quantization_params
# 4 c_stride
# 4 c
# 4 b
# 4 w
# 4 a_stride
# --
# 16 r4-r7
# 64 d8-d18
.syntax unified
# Args passed via stack.
# TOS
# |-----------|
# |a_stride | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r8 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r7 | 64
# |a_stride | 80
# |w | 84
# |b | 88
# |c | 92
# |c_stride | 96
# |out ch indx| 100
# |params | 104
# |-----------|
#
# void pytorch_q8gemm_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t k,
# const uint8_t* restrict a,
# size_t a_stride,
# const void* restrict w,
# const float* restrict b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
# Entry/setup: load quantization params, zero the 4x8 int32 accumulators
# (q8-q15), and derive the four input-row pointers r3..r6 clamped for mr < 4.
BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
# Skip the 32-byte packed-bias header of the weight blob; ip now points at
# the packed uint8 weights.
ADD ip, ip, 32
PUSH {r4, r5, r6, r7}
VPUSH {d8-d15}
# Load output channel index
LDR r5, [sp, 100]
# Load quantization params
# - r7 = quantization_params
LDR r7, [sp, 104]
# Load input_zero_point (broadcast one byte into all lanes of d14)
VLD1.8 {d14[]}, [r7]
ADD r7, r7, 4
# Load pointer to per channel zero points array
# Post-index: After load increment r7 by 4
LDR r4, [r7], #4
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
# Zero accumulators vacc0x0123 (q8) and vacc0x4567 (q9).
VEOR q8, q8, q8
VEOR q9, q9, q9
# Load pointer to per channel requant scale
LDR r7, [r7]
# Add output_channel_index to the b_zero_point pointer
ADD r4, r4, r5
# Now r7 has the base_addr + offset for multipliers
ADD r7, r7, r6
# Load a_stride
# - r6 = a_stride
LDR r6, [sp, 80]
VEOR q10, q10, q10
VEOR q11, q11, q11
# d15 = per-channel b zero points for the 8 output channels.
VLD1.8 {d15}, [r4]
# Set up row pointers: r3=a0, r4=a1, r5=a2, r6=a3.
# For mr < 4 the unused rows alias the previous row (results discarded).
CMP r0, 2
ADD r4, r3, r6
MOVLO r4, r3
ADD r5, r4, r6
MOVLS r5, r4
CMP r0, 4
ADD r6, r5, r6
MOVNE r6, r5
VEOR q12, q12, q12
VEOR q13, q13, q13
VEOR q14, q14, q14
VEOR q15, q15, q15
# k -= 8; if fewer than 8 elements remain, skip to the remainder path.
SUBS r2, r2, 8
BLO 1f
# Main loop: processes 8 elements of k per iteration. For each of the 8
# channels (k positions), multiply-accumulate one column of b against one
# lane of each of the four a rows. b columns alternate between q4 (d8/d9)
# and q5 (d10/d11) so the next load can overlap the current MLAs.
.p2align 5
0:
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]!
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]!
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 7 ###
# k -= 8; the flags from this SUBS feed the BHS at the loop bottom.
SUBS r2, r2, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 0b
# Remainder path: handles the final k_rem = k mod 8 elements (1..7).
# Here r2 = k_rem - 8, a negative value in [-7, -1].
1:
CMP r2, -8
BEQ 2f
# Adjust a0, a1, a2, a3: move each row pointer BACK by (8 - k_rem) bytes
# (r2 is negative), so the 8-byte loads below end exactly at the row end
# and never read past it.
ADD r3, r2
ADD r4, r2
ADD r5, r2
ADD r6, r2
# a_shift = 8 * k - 64 (negative). Used as a VSHL count below: a negative
# count in the shift register performs a logical RIGHT shift, moving the
# k_rem valid high bytes down to the low lanes and discarding the rest.
LSL r2, r2, 3
VDUP.32 d13, r2
# Load a0
# - d1 = a0
VLD1.8 {d1}, [r3]
# Load a1
# - d3 = a1
VLD1.8 {d3}, [r4]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r5]
# q0 = va0 = a0
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r6]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
# r2 = 8*(k_rem - 8) in [-56, -8]; the CMP thresholds -48/-32/-16 (with the
# BLO/BLS pairs) select how many of the remaining channels to process.
CMP r2, -48
BLO 2f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 2f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r2, -32
BLO 2f
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 2f
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r2, -16
BLO 2f
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - b_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 2f
# Load b0-b7 (channel 6) — last possible channel; no writeback needed.
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]
# q4 = b0:7 - b_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
# Output stage: convert int32 accumulators to float, scale by the
# per-channel requantization multipliers, add the float bias, and store
# float32 results to c with tail handling for nr < 8.
.p2align 4
2:
# r6 = b (float bias pointer, from the stack)
LDR r6, [sp, 88]
# Load q6: vmultiplier_c0123
VLD1.32 {d12, d13}, [r7]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
# Load q7: vmultiplier_c4567
VLD1.32 {d14, d15}, [r7]
# q0/q1 = bias for the 8 output channels
VLD1.32 {q0, q1}, [r6]
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q7
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q7
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q7
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q7
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q1
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q1
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q1
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q1
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 92]
# c_stride is in elements; convert to bytes (sizeof(float) == 4).
LSL r3, r3, 2
# Row pointers r2/r4/r5/r3, clamped for mr < 4 as at function entry.
ADD r4, r2, r3
CMP r0, 2
MOVLO r4, r2
ADD r5, r4, r3
MOVLS r5, r4
CMP r0, 4
ADD r3, r5, r3
MOVNE r3, r5
# Fast path: full 8-column store when nr == 8.
CMP r1, 8
BNE 4f
VST1.32 {q8}, [r2]!
VST1.32 {q10}, [r4]!
VST1.32 {q12}, [r5]!
VST1.32 {q14}, [r3]!
VST1.32 {q9}, [r2]
VST1.32 {q11}, [r4]
VST1.32 {q13}, [r5]
VST1.32 {q15}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7}
BX lr
# Tail: store 4, then 2, then 1 columns, shifting the remaining results
# down into the low lanes after each partial store.
.p2align 3
4:
CMP r1, 4
BLO 5f
VST1.32 {q8}, [r2]!
VST1.32 {q10}, [r4]!
VST1.32 {q12}, [r5]!
VST1.32 {q14}, [r3]!
SUB r1, 4
VMOV.32 q8, q9
VMOV.32 q10, q11
VMOV.32 q12, q13
VMOV.32 q14, q15
5:
CMP r1, 2
BLO 6f
VST1.32 {d16}, [r2]!
VST1.32 {d20}, [r4]!
VST1.32 {d24}, [r5]!
VST1.32 {d28}, [r3]!
SUB r1, 2
VEXT.32 q8, q8, 2
VEXT.32 q10, q10, 2
VEXT.32 q12, q12, 2
VEXT.32 q14, q14, 2
6:
TEQ r1, 0
BEQ 7f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d20[0]}, [r4]!
VST1.32 {d24[0]}, [r5]!
VST1.32 {d28[0]}, [r3]!
7:
VPOP {d8-d15}
POP {r4, r5, r6, r7}
BX lr
END_FUNCTION pytorch_q8gemm_dq_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.