| repo_id (string, 5-115 chars) | size (int64, 590-5.01M) | file_path (string, 4-212 chars) | content (string, 590-5.01M chars) |
|---|---|---|---|
| 9front/9front | 1,216 | sys/src/libc/68020/memmove.s |
TEXT memmove(SB), $0
move:
MOVL n+8(FP),R0
BEQ return
BGT ok
MOVL 0, R0
ok:
MOVL s1+0(FP),A2
MOVL s2+4(FP),A1
CMPL A2,A1
BHI back
/*
* speed depends on source alignment
* destination alignment is secondary
* byte-at-a-time forward copy to
* get source (A1) aligned.
*/
f1:
MOVL A1, R1
ANDL $3, R1
BEQ f2
SUBL $1, R0
BLT return
MOVB (A1)+, (A2)+
BRA f1
/*
* quad-long-at-a-time forward copy
*/
f2:
SUBL $16, R0
BLT f3
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
BRA f2
/*
* cleanup byte-at-a-time
*/
f3:
ADDL $15, R0
BLT return
f4:
MOVB (A1)+, (A2)+
SUBL $1, R0
BGE f4
BRA return
return:
MOVL s1+0(FP),R0
RTS
/*
* everything the same, but
* copy backwards
*/
back:
ADDL R0, A1
ADDL R0, A2
/*
* byte-at-a-time backward copy to
* get source (A1) aligned.
*/
b1:
MOVL A1, R1
ANDL $3, R1
BEQ b2
SUBL $1, R0
BLT return
MOVB -(A1), -(A2)
BRA b1
/*
* quad-long-at-a-time backward copy
*/
b2:
SUBL $16, R0
BLT b3
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
BRA b2
/*
* cleanup byte-at-a-time backward
*/
b3:
ADDL $15, R0
BLT return
b4:
MOVB -(A1), -(A2)
SUBL $1, R0
BGE b4
BRA return
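
The comments above describe the standard overlap-aware copy: pick the direction from the pointer order, byte-copy until the source is word aligned, then move four longwords per pass, then mop up by bytes. Below is a minimal C sketch of that strategy for reference; the name `copy_overlap` is mine, and unlike the 68020 code it only enters the word loop when both pointers are aligned, since C does not guarantee unaligned word stores.

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch of the copy strategy used above: direction chosen from pointer
 * order, byte copies until aligned, then four 32-bit words per pass. */
void *
copy_overlap(void *s1, const void *s2, size_t n)
{
	unsigned char *d = s1;
	const unsigned char *s = s2;

	if((uintptr_t)d <= (uintptr_t)s){		/* forward copy is safe */
		while(n && (((uintptr_t)d | (uintptr_t)s) & 3)){
			*d++ = *s++;
			n--;
		}
		for(; n >= 16; n -= 16){		/* quad-long-at-a-time */
			uint32_t *dw = (uint32_t*)d;
			const uint32_t *sw = (const uint32_t*)s;
			dw[0] = sw[0]; dw[1] = sw[1]; dw[2] = sw[2]; dw[3] = sw[3];
			d += 16; s += 16;
		}
		while(n--)
			*d++ = *s++;
	}else{					/* overlapping tail: copy backwards */
		d += n; s += n;
		while(n && (((uintptr_t)d | (uintptr_t)s) & 3)){
			*--d = *--s;
			n--;
		}
		for(; n >= 16; n -= 16){
			d -= 16; s -= 16;
			uint32_t *dw = (uint32_t*)d;
			const uint32_t *sw = (const uint32_t*)s;
			dw[3] = sw[3]; dw[2] = sw[2]; dw[1] = sw[1]; dw[0] = sw[0];
		}
		while(n--)
			*--d = *--s;
	}
	return s1;
}
```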
| 9front/9front | 1,209 | sys/src/libc/68020/memcpy.s |
TEXT memcpy(SB), $0
MOVL n+8(FP),R0
BEQ return
BGT ok
MOVL 0, R0
ok:
MOVL s1+0(FP),A2
MOVL s2+4(FP),A1
CMPL A2,A1
BHI back
/*
* speed depends on source alignment
* destination alignment is secondary
* byte-at-a-time forward copy to
* get source (A1) aligned.
*/
f1:
MOVL A1, R1
ANDL $3, R1
BEQ f2
SUBL $1, R0
BLT return
MOVB (A1)+, (A2)+
BRA f1
/*
* quad-long-at-a-time forward copy
*/
f2:
SUBL $16, R0
BLT f3
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
BRA f2
/*
* cleanup byte-at-a-time
*/
f3:
ADDL $15, R0
BLT return
f4:
MOVB (A1)+, (A2)+
SUBL $1, R0
BGE f4
BRA return
return:
MOVL s1+0(FP),R0
RTS
/*
* everything the same, but
* copy backwards
*/
back:
ADDL R0, A1
ADDL R0, A2
/*
* byte-at-a-time backward copy to
* get source (A1) aligned.
*/
b1:
MOVL A1, R1
ANDL $3, R1
BEQ b2
SUBL $1, R0
BLT return
MOVB -(A1), -(A2)
BRA b1
/*
* quad-long-at-a-time backward copy
*/
b2:
SUBL $16, R0
BLT b3
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
BRA b2
/*
* cleanup byte-at-a-time backward
*/
b3:
ADDL $15, R0
BLT return
b4:
MOVB -(A1), -(A2)
SUBL $1, R0
BGE b4
BRA return
| 9front/9front | 1,476 | sys/src/libc/68000/memmove.s |
TEXT memmove(SB), $0
MOVL n+8(FP), R0 /* count */
BEQ return
BGT ok
MOVL 0, R0
ok:
MOVL s1+0(FP), A2 /* dest pointer */
MOVL s2+4(FP), A1 /* source pointer */
CMPL A2,A1
BHI back
/*
* byte-at-a-time forward copy to
* get source (A1) aligned.
*/
f1:
MOVL A1, R1
ANDL $3, R1
BEQ f2
SUBL $1, R0
BLT return
MOVB (A1)+, (A2)+
BRA f1
/*
* check that dest is aligned
* if not, just go byte-at-a-time
*/
f2:
MOVL A2, R1
ANDL $3, R1
BEQ f3
SUBL $1, R0
BLT return
BRA f5
/*
* quad-long-at-a-time forward copy
*/
f3:
SUBL $16, R0
BLT f4
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
BRA f3
/*
* cleanup byte-at-a-time
*/
f4:
ADDL $15, R0
BLT return
f5:
MOVB (A1)+, (A2)+
SUBL $1, R0
BGE f5
BRA return
return:
MOVL s1+0(FP),R0
RTS
/*
* everything the same, but
* copy backwards
*/
back:
ADDL R0, A1
ADDL R0, A2
/*
* byte-at-a-time backward copy to
* get source (A1) aligned.
*/
b1:
MOVL A1, R1
ANDL $3, R1
BEQ b2
SUBL $1, R0
BLT return
MOVB -(A1), -(A2)
BRA b1
/*
* check that dest is aligned
* if not, just go byte-at-a-time
*/
b2:
MOVL A2, R1
ANDL $3, R1
BEQ b3
SUBL $1, R0
BLT return
BRA b5
/*
* quad-long-at-a-time backward copy
*/
b3:
SUBL $16, R0
BLT b4
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
BRA b3
/*
* cleanup byte-at-a-time backward
*/
b4:
ADDL $15, R0
BLT return
b5:
MOVB -(A1), -(A2)
SUBL $1, R0
BGE b5
BRA return
| 9front/9front | 2,033 | sys/src/libc/68000/muldivrt.s |
/*
* calls _divul with
* absolute value arguments
*/
TEXT _divsl(SB), $0
MOVL R0, TOS
MOVL b+4(FP), R0
BPL y1
NEGL R0
MOVL R0, TOS
MOVL a+0(FP), R0
BPL y3
NEGL R0
MOVL R0, TOS
/* neg/neg */
JSR _divul(SB)
MOVL TOS, R0
MOVL R0, a+0(FP)
MOVL TOS, R0
NEGL R0
MOVL R0, b+4(FP)
MOVL TOS, R0
RTS
y1: MOVL R0, TOS
MOVL a+0(FP), R0
BPL y2
NEGL R0
MOVL R0, TOS
/* neg/pos */
JSR _divul(SB)
MOVL TOS, R0
NEGL R0
MOVL R0, a+0(FP)
MOVL TOS, R0
NEGL R0
MOVL R0, b+4(FP)
MOVL TOS, R0
RTS
y2: MOVL R0, TOS
/* pos/pos */
JSR _divul(SB)
MOVL TOS, R0
MOVL R0, a+0(FP)
MOVL TOS, R0
MOVL R0, b+4(FP)
MOVL TOS, R0
RTS
y3: MOVL R0, TOS
/* pos/neg */
JSR _divul(SB)
MOVL TOS, R0
NEGL R0
MOVL R0, a+0(FP)
MOVL TOS, R0
MOVL R0, b+4(FP)
MOVL TOS, R0
RTS
/*
* for(i=1;; i++) {
* if(den & (1<<31))
* break;
* den <<= 1;
* }
*
* for(; i; i--) {
* quo <<= 1;
* if(num >= den) {
* num -= den;
* quo |= 1;
* }
* den >>= 1;
* }
*/
TEXT _divul(SB), $0
MOVL R0, TOS /* i */
MOVL R1, TOS /* num */
MOVL R2, TOS /* den */
MOVL R3, TOS /* quo */
MOVL $0, R0
MOVL $0, R3
MOVL a+0(FP), R1
MOVL b+4(FP), R2
BEQ xout
BMI x1
ADDL $1, R0
LSLL $1, R2
BPL -2(PC)
x1: LSLL $1, R3
CMPL R1, R2
BCS 3(PC)
SUBL R2, R1
ORL $1, R3
LSRL $1, R2
DBMI R0, x1
MOVL R3, a+0(FP)
MOVL R1, b+4(FP)
xout:
MOVL TOS, R3
MOVL TOS, R2
MOVL TOS, R1
MOVL TOS, R0
RTS
/*
* x = 0;
* for(i=0; i<32; i++) {
* if(a & 1)
* x += b;
* a >>= 1;
* b <<= 1;
* }
* a = x;
*/
TEXT _mull(SB), $0
MOVL R0, TOS /* i */
MOVL R1, TOS /* a */
MOVL R2, TOS /* b */
MOVL R3, TOS /* x */
MOVL a+0(FP), R1
MOVL b+4(FP), R2
MOVL $32, R0
CLRL R3
z1: ROTRL $1, R1
BCC 2(PC)
ADDL R2, R3
LSLL $1, R2
DBEQ R0, z1
MOVL R3, b+4(FP)
MOVL TOS, R3
MOVL TOS, R2
MOVL TOS, R1
MOVL TOS, R0
RTS
TEXT _ccr(SB), $0
PEA (A0)
SUBL A0, A0
BCC 2(PC)
LEA 1(A0), A0
BVC 2(PC)
LEA 2(A0), A0
BNE 2(PC)
LEA 4(A0), A0
BPL 2(PC)
LEA 8(A0), A0
MOVW A0, a+0(FP)
MOVL TOS, A0
RTS
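
The pseudocode comments above _divul and _mull are transcribed below as plain C so the register choreography is easier to follow. This is an illustrative re-expression only; the helper names and the by-pointer result passing are mine, whereas the assembly returns quotient and remainder through its argument slots.

```c
#include <stdint.h>

/* shift-and-subtract unsigned divide, following the comment above _divul */
static void
divul(uint32_t num, uint32_t den, uint32_t *quo, uint32_t *rem)
{
	uint32_t q = 0;
	int i;

	if(den != 0){
		for(i = 1; (den & 0x80000000u) == 0; i++)	/* left-justify the divisor */
			den <<= 1;
		for(; i > 0; i--){			/* one quotient bit per position */
			q <<= 1;
			if(num >= den){
				num -= den;
				q |= 1;
			}
			den >>= 1;
		}
	}
	*quo = q;
	*rem = num;
}

/* shift-and-add multiply, following the comment above _mull */
static uint32_t
mull(uint32_t a, uint32_t b)
{
	uint32_t x = 0;
	int i;

	for(i = 0; i < 32; i++){
		if(a & 1)
			x += b;
		a >>= 1;
		b <<= 1;
	}
	return x;
}
```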
| 9front/9front | 1,475 | sys/src/libc/68000/memcpy.s |
TEXT memcpy(SB), $0
MOVL n+8(FP), R0 /* count */
BEQ return
BGT ok
MOVL 0, R0
ok:
MOVL s1+0(FP), A2 /* dest pointer */
MOVL s2+4(FP), A1 /* source pointer */
CMPL A2,A1
BHI back
/*
* byte-at-a-time forward copy to
* get source (A1) aligned.
*/
f1:
MOVL A1, R1
ANDL $3, R1
BEQ f2
SUBL $1, R0
BLT return
MOVB (A1)+, (A2)+
BRA f1
/*
* check that dest is aligned
* if not, just go byte-at-a-time
*/
f2:
MOVL A2, R1
ANDL $3, R1
BEQ f3
SUBL $1, R0
BLT return
BRA f5
/*
* quad-long-at-a-time forward copy
*/
f3:
SUBL $16, R0
BLT f4
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
MOVL (A1)+, (A2)+
BRA f3
/*
* cleanup byte-at-a-time
*/
f4:
ADDL $15, R0
BLT return
f5:
MOVB (A1)+, (A2)+
SUBL $1, R0
BGE f5
BRA return
return:
MOVL s1+0(FP),R0
RTS
/*
* everything the same, but
* copy backwards
*/
back:
ADDL R0, A1
ADDL R0, A2
/*
* byte-at-a-time backward copy to
* get source (A1) aligned.
*/
b1:
MOVL A1, R1
ANDL $3, R1
BEQ b2
SUBL $1, R0
BLT return
MOVB -(A1), -(A2)
BRA b1
/*
* check that dest is alligned
* if not, just go byte-at-a-time
*/
b2:
MOVL A2, R1
ANDL $3, R1
BEQ b3
SUBL $1, R0
BLT return
BRA b5
/*
* quad-long-at-a-time backward copy
*/
b3:
SUBL $16, R0
BLT b4
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
MOVL -(A1), -(A2)
BRA b3
/*
* cleanup byte-at-a-time backward
*/
b4:
ADDL $15, R0
BLT return
b5:
MOVB -(A1), -(A2)
SUBL $1, R0
BGE b5
BRA return
| 9front/9front | 4,194 | sys/src/libc/arm/memmove.s |
TS = 0
TE = 1
FROM = 2
N = 3
TMP = 3 /* N and TMP don't overlap */
TMP1 = 4
TEXT memcpy(SB), $0
B _memmove
TEXT memmove(SB), $0
_memmove:
MOVW R(TS), to+0(FP) /* need to save for return value */
MOVW from+4(FP), R(FROM)
MOVW n+8(FP), R(N)
ADD R(N), R(TS), R(TE) /* to end pointer */
CMP R(FROM), R(TS)
BLS _forward
_back:
ADD R(N), R(FROM) /* from end pointer */
CMP $4, R(N) /* need at least 4 bytes to copy */
BLT _b1tail
_b4align: /* align destination on 4 */
AND.S $3, R(TE), R(TMP)
BEQ _b4aligned
MOVBU.W -1(R(FROM)), R(TMP) /* pre-indexed */
MOVBU.W R(TMP), -1(R(TE)) /* pre-indexed */
B _b4align
_b4aligned: /* is source now aligned? */
AND.S $3, R(FROM), R(TMP)
BNE _bunaligned
ADD $31, R(TS), R(TMP) /* do 32-byte chunks if possible */
_b32loop:
CMP R(TMP), R(TE)
BLS _b4tail
MOVM.DB.W (R(FROM)), [R4-R7]
MOVM.DB.W [R4-R7], (R(TE))
MOVM.DB.W (R(FROM)), [R4-R7]
MOVM.DB.W [R4-R7], (R(TE))
B _b32loop
_b4tail: /* do remaining words if possible */
ADD $3, R(TS), R(TMP)
_b4loop:
CMP R(TMP), R(TE)
BLS _b1tail
MOVW.W -4(R(FROM)), R(TMP1) /* pre-indexed */
MOVW.W R(TMP1), -4(R(TE)) /* pre-indexed */
B _b4loop
_b1tail: /* remaining bytes */
CMP R(TE), R(TS)
BEQ _return
MOVBU.W -1(R(FROM)), R(TMP) /* pre-indexed */
MOVBU.W R(TMP), -1(R(TE)) /* pre-indexed */
B _b1tail
_forward:
CMP $4, R(N) /* need at least 4 bytes to copy */
BLT _f1tail
_f4align: /* align destination on 4 */
AND.S $3, R(TS), R(TMP)
BEQ _f4aligned
MOVBU.P 1(R(FROM)), R(TMP) /* implicit write back */
MOVBU.P R(TMP), 1(R(TS)) /* implicit write back */
B _f4align
_f4aligned: /* is source now aligned? */
AND.S $3, R(FROM), R(TMP)
BNE _funaligned
SUB $31, R(TE), R(TMP) /* do 32-byte chunks if possible */
_f32loop:
CMP R(TMP), R(TS)
BHS _f4tail
MOVM.IA.W (R(FROM)), [R4-R7]
MOVM.IA.W [R4-R7], (R(TS))
MOVM.IA.W (R(FROM)), [R4-R7]
MOVM.IA.W [R4-R7], (R(TS))
B _f32loop
_f4tail:
SUB $3, R(TE), R(TMP) /* do remaining words if possible */
_f4loop:
CMP R(TMP), R(TS)
BHS _f1tail
MOVW.P 4(R(FROM)), R(TMP1) /* implicit write back */
MOVW.P R4, 4(R(TS)) /* implicit write back */
B _f4loop
_f1tail:
CMP R(TS), R(TE)
BEQ _return
MOVBU.P 1(R(FROM)), R(TMP) /* implicit write back */
MOVBU.P R(TMP), 1(R(TS)) /* implicit write back */
B _f1tail
_return:
MOVW to+0(FP), R0
RET
RSHIFT = 4
LSHIFT = 5
OFFSET = 11
BR0 = 6
BW0 = 7
BR1 = 7
BW1 = 8
_bunaligned:
CMP $2, R(TMP) /* is R(TMP) < 2 ? */
MOVW.LT $8, R(RSHIFT) /* (R(n)<<24)|(R(n-1)>>8) */
MOVW.LT $24, R(LSHIFT)
MOVW.LT $1, R(OFFSET)
MOVW.EQ $16, R(RSHIFT) /* (R(n)<<16)|(R(n-1)>>16) */
MOVW.EQ $16, R(LSHIFT)
MOVW.EQ $2, R(OFFSET)
MOVW.GT $24, R(RSHIFT) /* (R(n)<<8)|(R(n-1)>>24) */
MOVW.GT $8, R(LSHIFT)
MOVW.GT $3, R(OFFSET)
ADD $8, R(TS), R(TMP) /* do 8-byte chunks if possible */
CMP R(TMP), R(TE)
BLS _b1tail
BIC $3, R(FROM) /* align source */
MOVW (R(FROM)), R(BR0) /* prime first block register */
_bu8loop:
CMP R(TMP), R(TE)
BLS _bu1tail
MOVW R(BR0)<<R(LSHIFT), R(BW1)
MOVM.DB.W (R(FROM)), [R(BR0)-R(BR1)]
ORR R(BR1)>>R(RSHIFT), R(BW1)
MOVW R(BR1)<<R(LSHIFT), R(BW0)
ORR R(BR0)>>R(RSHIFT), R(BW0)
MOVM.DB.W [R(BW0)-R(BW1)], (R(TE))
B _bu8loop
_bu1tail:
ADD R(OFFSET), R(FROM)
B _b1tail
RSHIFT = 4
LSHIFT = 5
OFFSET = 11
FW0 = 6
FR0 = 7
FW1 = 7
FR1 = 8
_funaligned:
CMP $2, R(TMP)
MOVW.LT $8, R(RSHIFT) /* (R(n+1)<<24)|(R(n)>>8) */
MOVW.LT $24, R(LSHIFT)
MOVW.LT $3, R(OFFSET)
MOVW.EQ $16, R(RSHIFT) /* (R(n+1)<<16)|(R(n)>>16) */
MOVW.EQ $16, R(LSHIFT)
MOVW.EQ $2, R(OFFSET)
MOVW.GT $24, R(RSHIFT) /* (R(n+1)<<8)|(R(n)>>24) */
MOVW.GT $8, R(LSHIFT)
MOVW.GT $1, R(OFFSET)
SUB $8, R(TE), R(TMP) /* do 8-byte chunks if possible */
CMP R(TMP), R(TS)
BHS _f1tail
BIC $3, R(FROM) /* align source */
MOVW.P 4(R(FROM)), R(FR1) /* prime last block register, implicit write back */
_fu8loop:
CMP R(TMP), R(TS)
BHS _fu1tail
MOVW R(FR1)>>R(RSHIFT), R(FW0)
MOVM.IA.W (R(FROM)), [R(FR0)-R(FR1)]
ORR R(FR0)<<R(LSHIFT), R(FW0)
MOVW R(FR0)>>R(RSHIFT), R(FW1)
ORR R(FR1)<<R(LSHIFT), R(FW1)
MOVM.IA.W [R(FW0)-R(FW1)], (R(TS))
B _fu8loop
_fu1tail:
SUB R(OFFSET), R(FROM)
B _f1tail
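
In the _bunaligned/_funaligned paths above, the source is first rounded down to a word boundary and each output word is spliced from two adjacent aligned words with a shift/OR pair, exactly as the (R(n)<<24)|(R(n-1)>>8)-style comments indicate. A hedged C sketch of that splicing for the forward direction follows; the helper name is mine, it assumes little-endian byte order and a source offset of 1-3, and like the assembly it may read up to three bytes past the source within the final aligned word.

```c
#include <stddef.h>
#include <stdint.h>

/* Splice unaligned source words: load aligned words around the source
 * and combine neighbouring pairs with shifts before storing. */
static void
copy_unaligned_words(uint32_t *dst, const uint8_t *src, size_t nwords)
{
	unsigned off = (uintptr_t)src & 3;			/* assumed 1, 2 or 3 */
	const uint32_t *ws = (const uint32_t*)(src - off);	/* aligned base */
	unsigned rshift = 8*off;
	unsigned lshift = 32 - rshift;
	uint32_t prev, next;
	size_t i;

	prev = *ws++;				/* prime first block register */
	for(i = 0; i < nwords; i++){
		next = *ws++;
		dst[i] = (prev >> rshift) | (next << lshift);
		prev = next;
	}
}
```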
| 9front/9front | 1,639 | sys/src/libc/arm/div.s |
Q = 0
N = 1
D = 2
CC = 3
TMP = 11
TEXT save<>(SB), 1, $0
MOVW R(Q), 0(FP)
MOVW R(N), 4(FP)
MOVW R(D), 8(FP)
MOVW R(CC), 12(FP)
MOVW R(TMP), R(Q) /* numerator */
MOVW 20(FP), R(D) /* denominator */
CMP $0, R(D)
BNE s1
MOVW -1(R(D)), R(TMP) /* divide by zero fault */
s1: RET
TEXT rest<>(SB), 1, $0
MOVW 0(FP), R(Q)
MOVW 4(FP), R(N)
MOVW 8(FP), R(D)
MOVW 12(FP), R(CC)
/*
* return to caller
* of rest<>
*/
MOVW 0(R13), R14
ADD $20, R13
B (R14)
TEXT div<>(SB), 1, $0
MOVW $32, R(CC)
/*
* skip zeros 8-at-a-time
*/
e1:
AND.S $(0xff<<24),R(Q), R(N)
BNE e2
SLL $8, R(Q)
SUB.S $8, R(CC)
BNE e1
RET
e2:
MOVW $0, R(N)
loop:
/*
* shift R(N||Q) left one
*/
SLL $1, R(N)
CMP $0, R(Q)
ORR.LT $1, R(N)
SLL $1, R(Q)
/*
* compare numerator to denominator
* if less, subtract and set quotient bit
*/
CMP R(D), R(N)
ORR.HS $1, R(Q)
SUB.HS R(D), R(N)
SUB.S $1, R(CC)
BNE loop
RET
TEXT _div(SB), 1, $16
BL save<>(SB)
CMP $0, R(Q)
BGE d1
RSB $0, R(Q), R(Q)
CMP $0, R(D)
BGE d2
RSB $0, R(D), R(D)
d0:
BL div<>(SB) /* none/both neg */
MOVW R(Q), R(TMP)
B out
d1:
CMP $0, R(D)
BGE d0
RSB $0, R(D), R(D)
d2:
BL div<>(SB) /* one neg */
RSB $0, R(Q), R(TMP)
B out
TEXT _mod(SB), 1, $16
BL save<>(SB)
CMP $0, R(D)
RSB.LT $0, R(D), R(D)
CMP $0, R(Q)
BGE m1
RSB $0, R(Q), R(Q)
BL div<>(SB) /* neg numerator */
RSB $0, R(N), R(TMP)
B out
m1:
BL div<>(SB) /* pos numerator */
MOVW R(N), R(TMP)
B out
TEXT _divu(SB), 1, $16
BL save<>(SB)
BL div<>(SB)
MOVW R(Q), R(TMP)
B out
TEXT _modu(SB), 1, $16
BL save<>(SB)
BL div<>(SB)
MOVW R(N), R(TMP)
B out
out:
BL rest<>(SB)
B out
| a2fpga/a2fpga_core | 2,549 | boards/a2n20v2-Enhanced/src/cardrom/cardrom.s |
; ******************************************************************************
; A2FPGA - STARTROM.S
;
; /INH ROM for A2FPGA startup
;
; This ROM code is used to initialize the A2FPGA board. It is mapped into
; the 6502 address space at $F800-$FFFF. The code is executed by the 6502
; at startup time by the A2FPGA board asserting the /INH signal prior to
; releasing the 6502 from reset. The primary purpose of this code is to
; poll the Apple II keyboard and wait for the FPGA to signal that it is ready
; for the 6502 to resume the normal Apple II boot process.
; ******************************************************************************
;
KBD = $C000 ; APPLE KEYBOARD DATA
KBDSTRB = $C010 ; KEYBOARD DATA CLEAR
FPGADONE = $F7FF ; TBD - SOME MEMORY LOCATION
RESETVEC = $FFFC ; JUMP TARGET
SPKR = $C030 ; SPEAKER
;
; ************************** INITIALIZE ***************************************
;
ORG $F800 ; PROGRAM START ADDRESS
RESET CLD
;JSR BELL ; RING BELL
;JSR BELL ; RING BELL
;JSR BELL ; RING BELL
KBDLOOP LDA KBD ; TEST KEYBOARD
BPL CHKDONE
BIT KBDSTRB ; CLEAR KEYBOARD DATA
CHKDONE LDA FPGADONE ; FETCH FPGADONE
BEQ KBDLOOP ; CONTINUE TO LOOP IF FPGADONE IS 0
BIT KBDSTRB ; CLEAR KEYBOARD DATA
JMP (RESETVEC) ; JUMP TO RESET VECTOR
IRQ PHA
TXA
PHA
; TBD - Interrupt code goes here
PLA
TAX
PLA
RTI
BELL LDA #$40
JSR WAIT
LDY #$C0
BELL2 LDA #$0C
JSR WAIT
LDA SPKR
DEY
BNE BELL2
RTS2B RTS
WAIT SEC
WAIT2 PHA
WAIT3 SBC #$01
BNE WAIT3
PLA
SBC #$01
BNE WAIT2
RTS
; Dynamically pad from current address up to $FFFA
ORG * ; Ensure we are at the current location
PAD_SIZE = $FFFA - * ; Calculate the number of bytes needed to reach $FFFA
DS PAD_SIZE ; Reserve the required number of padding bytes
ORG $FFFA ; Set up interrupt vectors at the exact memory location
VECTORS DW IRQ ; Set NMI vector
DW RESET ; Set RESET vector
DW IRQ ; Set IRQ vector
;
; <<EoF>>
;
| 9front/9front | 1,147 | sys/src/libc/arm64/atomic.s |
#define ISH (2<<2|3)
/* get variants */
TEXT agetl+0(SB),1,$0
LDARW (R0), R0
DMB $ISH
RETURN
TEXT agetp+0(SB),1,$0
LDAR (R0), R0
DMB $ISH
RETURN
/* set variants */
TEXT aswapl+0(SB),1,$0
MOV 0x08(FP), R1
MOV R0, R2
_setl:
LDXRW (R2), R0
STXRW R1, (R2), R3
CBNZW R3, _setl
DMB $ISH
RETURN
TEXT aswapp+0(SB),1,$0
MOV 0x08(FP), R1
MOV R0, R2
_setp:
LDXR (R2), R0
STXR R1, (R2), R3
CBNZW R3, _setp
DMB $ISH
RETURN
/* inc variants */
TEXT aincl+0(SB),1,$0
MOV 0x08(FP), R1
MOV R0, R2
_incl:
LDXRW (R2), R0
ADDW R1, R0, R3
STXRW R3, (R2), R4
CBNZW R4, _incl
DMB $ISH
MOVW R3, R0
RETURN
/* cas variants */
TEXT acasl+0(SB),1,$0
MOV 0x08(FP), R1
MOV 0x10(FP), R2
DMB $ISH
_casl:
LDXRW (R0), R3
CMPW R1, R3
BNE _caslf
STXRW R2, (R0), R4
CBNZ R4, _casl
MOV $1, R0
DMB $ISH
RETURN
_caslf:
CLREX
MOV $0, R0
DMB $ISH
RETURN
TEXT acasp+0(SB),1,$0
MOV 0x08(FP), R1
MOV 0x10(FP), R2
DMB $ISH
_casp:
LDXR (R0), R3
CMP R1, R3
BNE _caspf
STXR R2, (R0), R4
CBNZW R4, _casp
MOV $1, R0
DMB $ISH
RETURN
_caspf:
CLREX
MOV $0, R0
DMB $ISH
RETURN
/* barriers */
TEXT coherence+0(SB),1,$0
DMB $ISH
RETURN
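
The load-exclusive/store-exclusive loops above implement the usual atomic get/set/inc/cas contracts, bracketed by DMB barriers. As a behavioural reference only (not how this libc is built), the same contracts can be written with compiler builtins; the _sketch names are mine and the memory-order choice is an approximation of the barriers above.

```c
#include <stdint.h>

typedef uint32_t u32int;

/* acasl(): install nv in *p only if it still holds ov; 1 on success, 0 on failure */
int
acasl_sketch(u32int *p, u32int ov, u32int nv)
{
	return __atomic_compare_exchange_n(p, &ov, nv, 0,
		__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

/* aincl(): add delta to *p atomically and return the new value */
u32int
aincl_sketch(u32int *p, u32int delta)
{
	return __atomic_add_fetch(p, delta, __ATOMIC_SEQ_CST);
}
```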
| a2fpga/a2fpga_core | 5,373 | boards/a2n20v2-Enhanced/src/firmware/firmware.S |
.section .init
.global main
#define STACKADDR 0x04800000 // Program stack at end of 8MB SDRAM
#define IRQSTACK 14336 // IRQ stack at end of 14KB SRAM
/* begin: custom PicoRV32 opcodes */
#define regnum_q0 0
#define regnum_q1 1
#define regnum_q2 2
#define regnum_q3 3
#define regnum_x0 0
#define regnum_x1 1
#define regnum_x2 2
#define regnum_x3 3
#define regnum_x4 4
#define regnum_x5 5
#define regnum_x6 6
#define regnum_x7 7
#define regnum_x8 8
#define regnum_x9 9
#define regnum_x10 10
#define regnum_x11 11
#define regnum_x12 12
#define regnum_x13 13
#define regnum_x14 14
#define regnum_x15 15
#define regnum_x16 16
#define regnum_x17 17
#define regnum_x18 18
#define regnum_x19 19
#define regnum_x20 20
#define regnum_x21 21
#define regnum_x22 22
#define regnum_x23 23
#define regnum_x24 24
#define regnum_x25 25
#define regnum_x26 26
#define regnum_x27 27
#define regnum_x28 28
#define regnum_x29 29
#define regnum_x30 30
#define regnum_x31 31
#define regnum_zero 0
#define regnum_ra 1
#define regnum_sp 2
#define regnum_gp 3
#define regnum_tp 4
#define regnum_t0 5
#define regnum_t1 6
#define regnum_t2 7
#define regnum_s0 8
#define regnum_s1 9
#define regnum_a0 10
#define regnum_a1 11
#define regnum_a2 12
#define regnum_a3 13
#define regnum_a4 14
#define regnum_a5 15
#define regnum_a6 16
#define regnum_a7 17
#define regnum_s2 18
#define regnum_s3 19
#define regnum_s4 20
#define regnum_s5 21
#define regnum_s6 22
#define regnum_s7 23
#define regnum_s8 24
#define regnum_s9 25
#define regnum_s10 26
#define regnum_s11 27
#define regnum_t3 28
#define regnum_t4 29
#define regnum_t5 30
#define regnum_t6 31
// x8 is s0 and also fp
#define regnum_fp 8
#define r_type_insn(_f7, _rs2, _rs1, _f3, _rd, _opc) \
.word (((_f7) << 25) | ((_rs2) << 20) | ((_rs1) << 15) | ((_f3) << 12) | ((_rd) << 7) | ((_opc) << 0))
#define picorv32_getq_insn(_rd, _qs) \
r_type_insn(0b0000000, 0, regnum_ ## _qs, 0b100, regnum_ ## _rd, 0b0001011)
#define picorv32_setq_insn(_qd, _rs) \
r_type_insn(0b0000001, 0, regnum_ ## _rs, 0b010, regnum_ ## _qd, 0b0001011)
#define picorv32_retirq_insn() \
r_type_insn(0b0000010, 0, 0, 0b000, 0, 0b0001011)
#define picorv32_maskirq_insn(_rd, _rs) \
r_type_insn(0b0000011, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
#define picorv32_waitirq_insn(_rd) \
r_type_insn(0b0000100, 0, 0, 0b100, regnum_ ## _rd, 0b0001011)
#define picorv32_timer_insn(_rd, _rs) \
r_type_insn(0b0000101, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
/* end: custom PicoRV32 opcodes */
reset_vec:
// no more than 8 bytes here !
j start
.balign 8
irq_handler_addr:
.dword 0x0000000000000000
.balign 16
irq_vec:
/* save registers */
picorv32_setq_insn(q2, x1)
picorv32_setq_insn(q3, x2)
// x2 is the stack pointer
lui x2, %hi(IRQSTACK-4*32)
addi x2, x2, %lo(IRQSTACK-4*32)
picorv32_getq_insn(x1, q0)
sw x1, 0*4(x2)
picorv32_getq_insn(x1, q2)
sw x1, 1*4(x2)
picorv32_getq_insn(x1, q3)
sw x1, 2*4(x2)
sw x3, 3*4(x2)
sw x4, 4*4(x2)
sw x5, 5*4(x2)
sw x6, 6*4(x2)
sw x7, 7*4(x2)
sw x8, 8*4(x2)
sw x9, 9*4(x2)
sw x10, 10*4(x2)
sw x11, 11*4(x2)
sw x12, 12*4(x2)
sw x13, 13*4(x2)
sw x14, 14*4(x2)
sw x15, 15*4(x2)
sw x16, 16*4(x2)
sw x17, 17*4(x2)
sw x18, 18*4(x2)
sw x19, 19*4(x2)
sw x20, 20*4(x2)
sw x21, 21*4(x2)
sw x22, 22*4(x2)
sw x23, 23*4(x2)
sw x24, 24*4(x2)
sw x25, 25*4(x2)
sw x26, 26*4(x2)
sw x27, 27*4(x2)
sw x28, 28*4(x2)
sw x29, 29*4(x2)
sw x30, 30*4(x2)
sw x31, 31*4(x2)
/* call interrupt handler C function */
// arg0 = interrupt type bitmask
picorv32_getq_insn(x10, q1)
// arg1 = pointer to stored registers
mv x11, x2
// load irq handler address to x1 (ra)
lw x1, 8(x0)
// call to C function
beq x0, x1, 1f
jalr x1, x1, 0
1:
/* restore registers */
lw x1, 0*4(x2)
picorv32_setq_insn(q0, x1)
lw x1, 1*4(x2)
picorv32_setq_insn(q1, x1)
lw x1, 2*4(x2)
picorv32_setq_insn(q2, x1)
lw x3, 3*4(x2)
lw x4, 4*4(x2)
lw x5, 5*4(x2)
lw x6, 6*4(x2)
lw x7, 7*4(x2)
lw x8, 8*4(x2)
lw x9, 9*4(x2)
lw x10, 10*4(x2)
lw x11, 11*4(x2)
lw x12, 12*4(x2)
lw x13, 13*4(x2)
lw x14, 14*4(x2)
lw x15, 15*4(x2)
lw x16, 16*4(x2)
lw x17, 17*4(x2)
lw x18, 18*4(x2)
lw x19, 19*4(x2)
lw x20, 20*4(x2)
lw x21, 21*4(x2)
lw x22, 22*4(x2)
lw x23, 23*4(x2)
lw x24, 24*4(x2)
lw x25, 25*4(x2)
lw x26, 26*4(x2)
lw x27, 27*4(x2)
lw x28, 28*4(x2)
lw x29, 29*4(x2)
lw x30, 30*4(x2)
lw x31, 31*4(x2)
picorv32_getq_insn(x1, q1)
picorv32_getq_insn(x2, q2)
picorv32_retirq_insn()
start:
/* zero-initialize all registers */
addi x1, zero, 0
addi x2, zero, 0
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
/* set stack pointer */
lui sp, %hi(STACKADDR)
addi sp, sp, %lo(STACKADDR)
/* call main */
call main
/* halt */
loop:
j loop
.balign 4
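
The r_type_insn macro above packs a 32-bit RISC-V R-type word from funct7, rs2, rs1, funct3, rd and opcode fields; the PicoRV32 custom instructions are such words on the reserved opcode 0b0001011. A small C encoder that mirrors the macro, useful for checking the emitted words (the printed examples and function name are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* pack a RISC-V R-type word: funct7 | rs2 | rs1 | funct3 | rd | opcode */
static uint32_t
r_type_insn(unsigned f7, unsigned rs2, unsigned rs1, unsigned f3, unsigned rd, unsigned opc)
{
	return (f7 << 25) | (rs2 << 20) | (rs1 << 15) | (f3 << 12) | (rd << 7) | opc;
}

int
main(void)
{
	/* picorv32_setq_insn(q2, x1): f7=1, rs1=x1, f3=2, rd=q2, custom-0 opcode */
	printf("setq q2, x1: 0x%08x\n", r_type_insn(1, 0, 1, 2, 2, 0x0b));
	/* picorv32_retirq_insn(): f7=2, everything else zero */
	printf("retirq:      0x%08x\n", r_type_insn(2, 0, 0, 0, 0, 0x0b));
	return 0;
}
```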
| 9front/9front | 1,214 | sys/src/libc/amd64/atom.s |
TEXT ainc(SB), 1, $0 /* int ainc(int *); */
ainclp:
MOVL (RARG), AX /* exp */
MOVL AX, BX
INCL BX /* new */
LOCK; CMPXCHGL BX, (RARG)
JNZ ainclp
MOVL BX, AX
RET
TEXT adec(SB), 1, $0 /* int adec(int*); */
adeclp:
MOVL (RARG), AX
MOVL AX, BX
DECL BX
LOCK; CMPXCHGL BX, (RARG)
JNZ adeclp
MOVL BX, AX
RET
/*
* int cas32(u32int *p, u32int ov, u32int nv);
* int cas(uint *p, int ov, int nv);
* int casl(ulong *p, ulong ov, ulong nv);
*/
TEXT cas32(SB), 1, $0
TEXT cas(SB), 1, $0
TEXT casul(SB), 1, $0
TEXT casl(SB), 1, $0 /* back compat */
MOVL exp+8(FP), AX
MOVL new+16(FP), BX
LOCK; CMPXCHGL BX, (RARG)
MOVL $1, AX /* use CMOVLEQ etc. here? */
JNZ _cas32r0
_cas32r1:
RET
_cas32r0:
DECL AX
RET
/*
* int cas64(u64int *p, u64int ov, u64int nv);
* int casp(void **p, void *ov, void *nv);
*/
TEXT cas64(SB), 1, $0
TEXT casp(SB), 1, $0
MOVQ exp+8(FP), AX
MOVQ new+16(FP), BX
LOCK; CMPXCHGQ BX, (RARG)
MOVL $1, AX /* use CMOVLEQ etc. here? */
JNZ _cas64r0
_cas64r1:
RET
_cas64r0:
DECL AX
RET
TEXT fas64(SB), 1, $-4
TEXT fasp(SB), 1, $-4
MOVQ p+8(FP), AX
LOCK; XCHGQ AX, (RARG) /* */
RET
TEXT fas32(SB), 1, $-4
MOVL p+8(FP), AX
LOCK; XCHGL AX, (RARG) /* */
RET
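
ainc and adec above are retry loops around LOCK CMPXCHG: load the current value, compute the new one, and try to install it, repeating if another processor raced in between. The same pattern expressed with compiler builtins as a behavioural sketch (names are mine, not libc's):

```c
/* ainc-style increment built as a compare-and-swap retry loop;
 * returns the incremented value, as the assembly does via BX. */
int
ainc_sketch(int *p)
{
	int old, new;

	do{
		old = __atomic_load_n(p, __ATOMIC_RELAXED);	/* exp */
		new = old + 1;					/* new */
	}while(!__atomic_compare_exchange_n(p, &old, new, 0,
		__ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return new;
}
```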
| 9front/9front | 1,089 | sys/src/libc/amd64/memmove.s |
TEXT memmove(SB), $0
MOVQ RARG, DI
MOVQ DI, AX /* return value */
MOVQ p2+8(FP), SI
MOVQ n+16(FP), BX
CMPQ BX, $0
JGT _ok
JEQ _return /* nothing to do if n == 0 */
MOVL $0, SI /* fault if n < 0 */
/*
* check and set for backwards:
* (p2 < p1) && ((p2+n) > p1)
*/
_ok:
CMPQ SI, DI
JGT _forward
JEQ _return /* nothing to do if p2 == p1 */
MOVQ SI, DX
ADDQ BX, DX
CMPQ DX, DI
JGT _back
/*
* copy whole longs if aligned
*/
_forward:
CLD
MOVQ SI, DX
ORQ DI, DX
ANDL $3, DX
JNE c3f
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX
REP; MOVSL
/*
* copy the rest, by bytes
*/
JEQ _return /* flags set by above ANDL */
c3f:
MOVQ BX, CX
REP; MOVSB
RET
/*
* whole thing backwards has
* adjusted addresses
*/
_back:
ADDQ BX, DI
ADDQ BX, SI
STD
SUBQ $4, DI
SUBQ $4, SI
/*
* copy whole longs, if aligned
*/
MOVQ DI, DX
ORQ SI, DX
ANDL $3, DX
JNE c3b
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX
REP; MOVSL
/*
* copy the rest, by bytes
*/
JEQ _return /* flags set by above ANDL */
c3b:
ADDQ $3, DI
ADDQ $3, SI
MOVQ BX, CX
REP; MOVSB
_return:
RET
| 9front/9front | 1,088 | sys/src/libc/amd64/memcpy.s |
TEXT memcpy(SB), $0
MOVQ RARG, DI
MOVQ DI, AX /* return value */
MOVQ p2+8(FP), SI
MOVQ n+16(FP), BX
CMPQ BX, $0
JGT _ok
JEQ _return /* nothing to do if n == 0 */
MOVL $0, SI /* fault if n < 0 */
/*
* check and set for backwards:
* (p2 < p1) && ((p2+n) > p1)
*/
_ok:
CMPQ SI, DI
JGT _forward
JEQ _return /* nothing to do if p2 == p1 */
MOVQ SI, DX
ADDQ BX, DX
CMPQ DX, DI
JGT _back
/*
* copy whole longs if aligned
*/
_forward:
CLD
MOVQ SI, DX
ORQ DI, DX
ANDL $3, DX
JNE c3f
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX
REP; MOVSL
/*
* copy the rest, by bytes
*/
JEQ _return /* flags set by above ANDL */
c3f:
MOVQ BX, CX
REP; MOVSB
RET
/*
* whole thing backwards has
* adjusted addresses
*/
_back:
ADDQ BX, DI
ADDQ BX, SI
STD
SUBQ $4, DI
SUBQ $4, SI
/*
* copy whole longs, if aligned
*/
MOVQ DI, DX
ORQ SI, DX
ANDL $3, DX
JNE c3b
MOVQ BX, CX
SHRQ $2, CX
ANDL $3, BX
REP; MOVSL
/*
* copy the rest, by bytes
*/
JEQ _return /* flags set by above ANDL */
c3b:
ADDQ $3, DI
ADDQ $3, SI
MOVQ BX, CX
REP; MOVSB
_return:
RET
| 9front/9front | 1,734 | sys/src/libc/spim/memcmp.s |
TEXT memcmp(SB), $0
MOVW R1, 0(FP)
/*
* performance:
* aligned about 1.0us/call and 17.4mb/sec
* unaligned is about 3.1mb/sec
*/
MOVW n+8(FP), R3 /* R3 is count */
MOVW s1+0(FP), R4 /* R4 is pointer1 */
MOVW s2+4(FP), R5 /* R5 is pointer2 */
ADDU R3,R4, R6 /* R6 is end pointer1 */
/* TODO(mischief): fix multibyte copy */
JMP out
/*
* if not at least 4 chars,
* don't even mess around.
* 3 chars to guarantee any
* rounding up to a word
* boundary and 4 characters
* to get at least maybe one
* full word cmp.
*/
SGT $4,R3, R1
BNE R1, out
/*
* test if both pointers
* are similarly word aligned
*/
XOR R4,R5, R1
AND $3, R1
BNE R1, out
/*
* byte at a time to word align
*/
l1:
AND $3,R4, R1
BEQ R1, l2
MOVBU 0(R4), R8
MOVBU 0(R5), R9
ADDU $1, R4
BNE R8,R9, ne
ADDU $1, R5
JMP l1
/*
* turn R3 into end pointer1-15
* cmp 16 at a time while there's room
*/
l2:
ADDU $-15,R6, R3
l3:
SGTU R3,R4, R1
BEQ R1, l4
MOVW 0(R4), R8
MOVW 0(R5), R9
MOVW 4(R4), R10
BNE R8,R9, ne
MOVW 4(R5), R11
MOVW 8(R4), R8
BNE R10,R11, ne1
MOVW 8(R5), R9
MOVW 12(R4), R10
BNE R8,R9, ne
MOVW 12(R5), R11
ADDU $16, R4
BNE R10,R11, ne1
BNE R8,R9, ne
ADDU $16, R5
JMP l3
/*
* turn R3 into end pointer1-3
* cmp 4 at a time while there's room
*/
l4:
ADDU $-3,R6, R3
l5:
SGTU R3,R4, R1
BEQ R1, out
MOVW 0(R4), R8
MOVW 0(R5), R9
ADDU $4, R4
BNE R8,R9, ne /* only works because big endian */
ADDU $4, R5
JMP l5
/*
* last loop, cmp byte at a time
*/
out:
SGTU R6,R4, R1
BEQ R1, ret
MOVBU 0(R4), R8
MOVBU 0(R5), R9
ADDU $1, R4
BNE R8,R9, ne
ADDU $1, R5
JMP out
ne1:
SGTU R10,R11, R1
BNE R1, ret
MOVW $-1,R1
RET
ne:
SGTU R8,R9, R1
BNE R1, ret
MOVW $-1,R1
ret:
RET
END
| 9front/9front | 1,159 | sys/src/libc/power64/atomic.s |
/* get variants */
TEXT agetl+0(SB),1,$0
SYNC
LWAR (RARG), RARG
CMPW RARG, RARG
BNE -1(PC)
ISYNC
RETURN
TEXT agetp+0(SB),1,$0
SYNC
LDAR (RARG), RARG
CMP RARG, RARG
BNE -1(PC)
ISYNC
RETURN
/* set variants */
TEXT aswapl+0(SB),1,$0
MOVD RARG, R4
MOVW val+8(FP), R5
SYNC
_aswapl:
LWAR (R4), RARG
STWCCC R5, (R4)
BNE _aswapl
RETURN
TEXT aswapp+0(SB),1,$0
MOVD RARG, R4
MOVD val+8(FP), R5
SYNC
_aswapp:
LDAR (R4), RARG
STDCCC R5, (R4)
BNE _aswapp
RETURN
/* inc variants */
TEXT aincl+0(SB),1,$0
MOVD RARG, R4
MOVW delta+8(FP), R5
LWSYNC
_aincl:
LWAR (R4), RARG
ADD R5, RARG
STWCCC RARG, (R4)
BNE _aincl
RETURN
/* cas variants */
TEXT acasl+0(SB),1,$0
MOVWZ old+8(FP), R4
MOVWZ new+16(FP), R5
LWSYNC
_casl:
LWAR (RARG), R6
CMPW R6, R4
BNE _caslf
STWCCC R5, (RARG)
BNE _casl
MOVD $1, RARG
LWSYNC
RETURN
_caslf:
LWSYNC
AND R0, RARG
RETURN
TEXT acasp+0(SB),1,$0
MOVD old+8(FP), R4
MOVD new+16(FP), R5
LWSYNC
_casp:
LDAR (RARG), R6
CMP R6, R4
BNE _caspf
STDCCC R5, (RARG)
BNE _casp
MOVD $1, RARG
LWSYNC
RETURN
_caspf:
LWSYNC
AND R0, RARG
RETURN
/* barriers */
TEXT coherence+0(SB),1,$0
SYNC
RETURN
| 9front/9front | 1,270 | sys/src/libc/mips/memset.s |
TEXT memset(SB),$12
MOVW R1, 0(FP)
/*
* performance:
* about 1us/call and 28mb/sec
*/
MOVW n+8(FP), R3 /* R3 is count */
MOVW p+0(FP), R4 /* R4 is pointer */
MOVW c+4(FP), R5 /* R5 is char */
ADDU R3,R4, R6 /* R6 is end pointer */
/*
* if not at least 4 chars,
* don't even mess around.
* 3 chars to guarantee any
* rounding up to a word
* boundary and 4 characters
* to get at least maybe one
* full word store.
*/
SGT $4,R3, R1
BNE R1, out
/*
* turn R5 into a word of characters
*/
AND $0xff, R5
SLL $8,R5, R1
OR R1, R5
SLL $16,R5, R1
OR R1, R5
/*
* store one byte at a time until pointer
* is aligned on a word boundary
*/
l1:
AND $3,R4, R1
BEQ R1, l2
MOVB R5, 0(R4)
ADDU $1, R4
JMP l1
/*
* turn R3 into end pointer-15
* store 16 at a time while there's room
*/
l2:
ADDU $-15,R6, R3
l3:
SGTU R3,R4, R1
BEQ R1, l4
MOVW R5, 0(R4)
MOVW R5, 4(R4)
ADDU $16, R4
MOVW R5, -8(R4)
MOVW R5, -4(R4)
JMP l3
/*
* turn R3 into end pointer-3
* store 4 at a time while there's room
*/
l4:
ADDU $-3,R6, R3
l5:
SGTU R3,R4, R1
BEQ R1, out
MOVW R5, 0(R4)
ADDU $4, R4
JMP l5
/*
* last loop, store byte at a time
*/
out:
SGTU R6,R4 ,R1
BEQ R1, ret
MOVB R5, 0(R4)
ADDU $1, R4
JMP out
ret:
MOVW s1+0(FP), R1
RET
END
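
Before its word loops, the memset above replicates the fill character across a 32-bit word with two shift/OR steps so that every MOVW stores four copies at once. The same replication as a tiny C sketch (helper name is mine):

```c
#include <stdint.h>

/* replicate the low byte of c into all four bytes of a word,
 * mirroring the SLL/OR sequence above: 0xAB -> 0xABABABAB */
static uint32_t
spreadbyte(unsigned c)
{
	uint32_t w = c & 0xff;

	w |= w << 8;		/* two copies */
	w |= w << 16;		/* four copies */
	return w;
}
```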
| 9front/9front | 3,481 | sys/src/libc/mips/memmove.s |
TEXT memmove(SB), $0
JMP move
TEXT memcpy(SB), $0
move:
MOVW R1, s1+0(FP)
MOVW n+8(FP), R3 /* R3 is count */
MOVW R1, R4 /* R4 is to-pointer */
SGT R0, R3, R5
BEQ R5, ok
MOVW (R0), R0 /* abort if negative count */
ok:
MOVW s2+4(FP), R5 /* R5 is from-pointer */
ADDU R3,R5, R7 /* R7 is end from-pointer */
ADDU R3,R4, R6 /* R6 is end to-pointer */
/*
* easiest test is copy backwards if
* destination string has higher mem address
*/
SGT $4,R3, R2
SGTU R4,R5, R1
BNE R1, back
/*
* if not at least 4 chars,
* don't even mess around.
* 3 chars to guarantee any
* rounding up to a word
* boundary and 4 characters
* to get at least maybe one
* full word store.
*/
BNE R2, fout
/*
* byte at a time to word align destination
*/
f1:
AND $3,R4, R1
BEQ R1, f2
MOVB 0(R5), R8
ADDU $1, R5
MOVB R8, 0(R4)
ADDU $1, R4
JMP f1
/*
* test if source is now word aligned
*/
f2:
AND $3, R5, R1
BNE R1, fun2
/*
* turn R3 into to-end pointer-15
* copy 16 at a time while there's room.
* R6 is smaller than R7 --
* there are problems if R7 is 0.
*/
ADDU $-15,R6, R3
f3:
SGTU R3,R4, R1
BEQ R1, f4
MOVW 0(R5), R8
MOVW 4(R5), R9
MOVW R8, 0(R4)
MOVW 8(R5), R8
MOVW R9, 4(R4)
MOVW 12(R5), R9
ADDU $16, R5
MOVW R8, 8(R4)
MOVW R9, 12(R4)
ADDU $16, R4
JMP f3
/*
* turn R3 into to-end pointer-3
* copy 4 at a time while there's room
*/
f4:
ADDU $-3,R6, R3
f5:
SGTU R3,R4, R1
BEQ R1, fout
MOVW 0(R5), R8
ADDU $4, R5
MOVW R8, 0(R4)
ADDU $4, R4
JMP f5
/*
* forward copy, unaligned
* turn R3 into to-end pointer-15
* copy 16 at a time while there's room.
* R6 is smaller than R7 --
* there are problems if R7 is 0.
*/
fun2:
ADDU $-15,R6, R3
fun3:
SGTU R3,R4, R1
BEQ R1, fun4
MOVWL 0(R5), R8
MOVWR 3(R5), R8
MOVWL 4(R5), R9
MOVWR 7(R5), R9
MOVW R8, 0(R4)
MOVWL 8(R5), R8
MOVWR 11(R5), R8
MOVW R9, 4(R4)
MOVWL 12(R5), R9
MOVWR 15(R5), R9
ADDU $16, R5
MOVW R8, 8(R4)
MOVW R9, 12(R4)
ADDU $16, R4
JMP fun3
/*
* turn R3 into to-end pointer-3
* copy 4 at a time while there's room
*/
fun4:
ADDU $-3,R6, R3
fun5:
SGTU R3,R4, R1
BEQ R1, fout
MOVWL 0(R5), R8
MOVWR 3(R5), R8
ADDU $4, R5
MOVW R8, 0(R4)
ADDU $4, R4
JMP fun5
/*
* last loop, copy byte at a time
*/
fout:
BEQ R7,R5, ret
MOVB 0(R5), R8
ADDU $1, R5
MOVB R8, 0(R4)
ADDU $1, R4
JMP fout
/*
* whole thing repeated for backwards
*/
back:
BNE R2, bout
b1:
AND $3,R6, R1
BEQ R1, b2
MOVB -1(R7), R8
ADDU $-1, R7
MOVB R8, -1(R6)
ADDU $-1, R6
JMP b1
b2:
AND $3, R7, R1
BNE R1, bun2
ADDU $15,R5, R3
b3:
SGTU R7,R3, R1
BEQ R1, b4
MOVW -4(R7), R8
MOVW -8(R7), R9
MOVW R8, -4(R6)
MOVW -12(R7), R8
MOVW R9, -8(R6)
MOVW -16(R7), R9
ADDU $-16, R7
MOVW R8, -12(R6)
MOVW R9, -16(R6)
ADDU $-16, R6
JMP b3
b4:
ADDU $3,R5, R3
b5:
SGTU R7,R3, R1
BEQ R1, bout
MOVW -4(R7), R8
ADDU $-4, R7
MOVW R8, -4(R6)
ADDU $-4, R6
JMP b5
bun2:
ADDU $15,R5, R3
bun3:
SGTU R7,R3, R1
BEQ R1, bun4
MOVWL -4(R7), R8
MOVWR -1(R7), R8
MOVWL -8(R7), R9
MOVWR -5(R7), R9
MOVW R8, -4(R6)
MOVWL -12(R7), R8
MOVWR -9(R7), R8
MOVW R9, -8(R6)
MOVWL -16(R7), R9
MOVWR -13(R7), R9
ADDU $-16, R7
MOVW R8, -12(R6)
MOVW R9, -16(R6)
ADDU $-16, R6
JMP bun3
bun4:
ADDU $3,R5, R3
bun5:
SGTU R7,R3, R1
BEQ R1, bout
MOVWL -4(R7), R8
MOVWR -1(R7), R8
ADDU $-4, R7
MOVW R8, -4(R6)
ADDU $-4, R6
JMP bun5
bout:
BEQ R7,R5, ret
MOVB -1(R7), R8
ADDU $-1, R7
MOVB R8, -1(R6)
ADDU $-1, R6
JMP bout
ret:
MOVW s1+0(FP), R1
RET
END
| 9front/9front | 1,682 | sys/src/libc/mips/memcmp.s |
TEXT memcmp(SB), $0
MOVW R1, 0(FP)
/*
* performance:
* aligned about 1.0us/call and 17.4mb/sec
* unaligned is about 3.1mb/sec
*/
MOVW n+8(FP), R3 /* R3 is count */
MOVW s1+0(FP), R4 /* R4 is pointer1 */
MOVW s2+4(FP), R5 /* R5 is pointer2 */
ADDU R3,R4, R6 /* R6 is end pointer1 */
/*
* if not at least 4 chars,
* don't even mess around.
* 3 chars to guarantee any
* rounding up to a word
* boundary and 4 characters
* to get at least maybe one
* full word cmp.
*/
SGT $4,R3, R1
BNE R1, out
/*
* test if both pointers
* are similarly word aligned
*/
XOR R4,R5, R1
AND $3, R1
BNE R1, out
/*
* byte at a time to word align
*/
l1:
AND $3,R4, R1
BEQ R1, l2
MOVBU 0(R4), R8
MOVBU 0(R5), R9
ADDU $1, R4
BNE R8,R9, ne
ADDU $1, R5
JMP l1
/*
* turn R3 into end pointer1-15
* cmp 16 at a time while there's room
*/
l2:
ADDU $-15,R6, R3
l3:
SGTU R3,R4, R1
BEQ R1, l4
MOVW 0(R4), R8
MOVW 0(R5), R9
MOVW 4(R4), R10
BNE R8,R9, ne
MOVW 4(R5), R11
MOVW 8(R4), R8
BNE R10,R11, ne1
MOVW 8(R5), R9
MOVW 12(R4), R10
BNE R8,R9, ne
MOVW 12(R5), R11
ADDU $16, R4
BNE R10,R11, ne1
BNE R8,R9, ne
ADDU $16, R5
JMP l3
/*
* turn R3 into end pointer1-3
* cmp 4 at a time while there's room
*/
l4:
ADDU $-3,R6, R3
l5:
SGTU R3,R4, R1
BEQ R1, out
MOVW 0(R4), R8
MOVW 0(R5), R9
ADDU $4, R4
BNE R8,R9, ne /* only works because big endian */
ADDU $4, R5
JMP l5
/*
* last loop, cmp byte at a time
*/
out:
SGTU R6,R4, R1
BEQ R1, ret
MOVBU 0(R4), R8
MOVBU 0(R5), R9
ADDU $1, R4
BNE R8,R9, ne
ADDU $1, R5
JMP out
ne1:
SGTU R10,R11, R1
BNE R1, ret
MOVW $-1,R1
RET
ne:
SGTU R8,R9, R1
BNE R1, ret
MOVW $-1,R1
ret:
RET
END
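
The word-at-a-time compare above can derive the return sign directly from an integer comparison only because this port is big-endian (the "only works because big endian" comment): the most significant byte of a word is the byte at the lowest address, so unsigned word order equals byte-lexicographic order. A small C illustration of that equivalence; the helper names are mine and the big-endian layout is the assumption being demonstrated.

```c
#include <stdint.h>

/* assemble a word the way a big-endian load would: p[0] is most significant */
static uint32_t
beword(const unsigned char *p)
{
	return (uint32_t)p[0]<<24 | (uint32_t)p[1]<<16 | (uint32_t)p[2]<<8 | p[3];
}

/* compare 4 bytes at a time; the sign matches a byte-wise memcmp */
static int
cmpword(const unsigned char *a, const unsigned char *b)
{
	uint32_t wa = beword(a), wb = beword(b);

	if(wa == wb)
		return 0;
	return wa > wb ? 1 : -1;
}
```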
| 9front/9front | 1,202 | sys/src/libc/mips/strcpy.s |
TEXT strcpy(SB), $0
MOVW s2+4(FP),R2 /* R2 is from pointer */
MOVW R1, R3 /* R3 is to pointer */
/*
* align 'from' pointer
*/
l1:
AND $3, R2, R5
ADDU $1, R2
BEQ R5, l2
MOVB -1(R2), R5
ADDU $1, R3
MOVB R5, -1(R3)
BNE R5, l1
RET
/*
* test if 'to' is also aligned
*/
l2:
AND $3,R3, R5
BEQ R5, l4
/*
* copy 4 at a time, 'to' not aligned
*/
l3:
MOVW -1(R2), R4
ADD $4, R2
ADD $4, R3
SRL $24,R4, R5
MOVB R5, -4(R3)
BEQ R5, out
SRL $16,R4, R5
AND $0xff, R5
MOVB R5, -3(R3)
BEQ R5, out
SRL $8,R4, R5
AND $0xff, R5
MOVB R5, -2(R3)
BEQ R5, out
AND $0xff,R4, R5
MOVB R5, -1(R3)
BNE R5, l3
out:
RET
/*
* word at a time both aligned
*/
l4:
MOVW $0xff000000, R7
MOVW $0x00ff0000, R8
l5:
ADDU $4, R3
MOVW -1(R2), R4 /* fetch */
ADDU $4, R2
AND R7,R4, R5 /* is it byte 0 */
AND R8,R4, R6 /* is it byte 1 */
BEQ R5, b0
AND $0xff00,R4, R5 /* is it byte 2 */
BEQ R6, b1
AND $0xff,R4, R6 /* is it byte 3 */
BEQ R5, b2
MOVW R4, -4(R3) /* store */
BNE R6, l5
JMP out
b0:
MOVB $0, -4(R3)
JMP out
b1:
SRL $24, R4
MOVB R4, -4(R3)
MOVB $0, -3(R3)
JMP out
b2:
SRL $24,R4, R5
MOVB R5, -4(R3)
SRL $16, R4
MOVB R4, -3(R3)
MOVB $0, -2(R3)
JMP out
| 9front/9front | 4,949 | sys/src/boot/zynq/ddr.s |
#define OUTPUT_EN (3<<9)
#define DCI_EN (7<<4)
#define INP_VREF (1<<1)
#define INP_DIFF (2<<1)
TEXT ddriob(SB), $-4
WORD $(OUTPUT_EN) // DDRIOB_ADDR0
WORD $(OUTPUT_EN) // DDRIOB_ADDR1
WORD $(OUTPUT_EN | DCI_EN | INP_VREF) // DDRIOB_DATA0
WORD $(OUTPUT_EN | DCI_EN | INP_VREF) // DDRIOB_DATA1
WORD $(OUTPUT_EN | DCI_EN | INP_DIFF) // DDRIOB_DIFF0
WORD $(OUTPUT_EN | DCI_EN | INP_DIFF) // DDRIOB_DIFF1
WORD $(OUTPUT_EN) // DDRIOB_CLOCK
WORD $0x0018C61C // DDRIOB_DRIVE_SLEW_ADDR
WORD $0x00F9861C // DDRIOB_DRIVE_SLEW_DATA
WORD $0x00F9861C // DDRIOB_DRIVE_SLEW_DIFF
WORD $0x00F9861C // DDRIOB_DRIVE_SLEW_CLOCK
WORD $0xE60 // DDRIOB_DDR_CTRL
TEXT ddrdata(SB), $-4
WORD $0XF8006000
WORD $0x0001FFFF
WORD $0x00000080
WORD $0XF8006004
WORD $0x1FFFFFFF
WORD $0x00081081
WORD $0XF8006008
WORD $0x03FFFFFF
WORD $0x03C0780F
WORD $0XF800600C
WORD $0x03FFFFFF
WORD $0x02001001
WORD $0XF8006010
WORD $0x03FFFFFF
WORD $0x00014001
WORD $0XF8006014
WORD $0x001FFFFF
WORD $0x0004281A
WORD $0XF8006018
WORD $0xF7FFFFFF
WORD $0x44E458D2
WORD $0XF800601C
WORD $0xFFFFFFFF
WORD $0x82023965
WORD $0XF8006020
WORD $0xFFFFFFFC
WORD $0x2B288290
WORD $0XF8006024
WORD $0x0FFFFFFF
WORD $0x0000003C
WORD $0XF8006028
WORD $0x00003FFF
WORD $0x00002007
WORD $0XF800602C
WORD $0xFFFFFFFF
WORD $0x00000008
WORD $0XF8006030
WORD $0xFFFFFFFF
WORD $0x00040970
WORD $0XF8006034
WORD $0x13FF3FFF
WORD $0x00011054
WORD $0XF8006038
WORD $0x00001FC3
WORD $0x00000000
WORD $0XF800603C
WORD $0x000FFFFF
WORD $0x00000777
WORD $0XF8006040
WORD $0xFFFFFFFF
WORD $0xFFF00000
WORD $0XF8006044
WORD $0x0FFFFFFF
WORD $0x0F666666
WORD $0XF8006048
WORD $0x3FFFFFFF
WORD $0x0003C248
WORD $0XF8006050
WORD $0xFF0F8FFF
WORD $0x77010800
WORD $0XF8006058
WORD $0x0001FFFF
WORD $0x00000101
WORD $0XF800605C
WORD $0x0000FFFF
WORD $0x00005003
WORD $0XF8006060
WORD $0x000017FF
WORD $0x0000003E
WORD $0XF8006064
WORD $0x00021FE0
WORD $0x00020000
WORD $0XF8006068
WORD $0x03FFFFFF
WORD $0x00284545
WORD $0XF800606C
WORD $0x0000FFFF
WORD $0x00001610
WORD $0XF80060A0
WORD $0x00FFFFFF
WORD $0x00008000
WORD $0XF80060A4
WORD $0xFFFFFFFF
WORD $0x10200802
WORD $0XF80060A8
WORD $0x0FFFFFFF
WORD $0x0690CB73
WORD $0XF80060AC
WORD $0x000001FF
WORD $0x000001FE
WORD $0XF80060B0
WORD $0x1FFFFFFF
WORD $0x04FFFFFF
WORD $0XF80060B4
WORD $0x000007FF
WORD $0x00000200
WORD $0XF80060B8
WORD $0x01FFFFFF
WORD $0x0020006A
WORD $0XF80060C4
WORD $0x00000003
WORD $0x00000003
WORD $0XF80060C4
WORD $0x00000003
WORD $0x00000000
WORD $0XF80060C8
WORD $0x000000FF
WORD $0x00000000
WORD $0XF80060DC
WORD $0x00000001
WORD $0x00000000
WORD $0XF80060F0
WORD $0x0000FFFF
WORD $0x00000000
WORD $0XF80060F4
WORD $0x0000000F
WORD $0x00000008
WORD $0XF8006114
WORD $0x000000FF
WORD $0x00000000
WORD $0XF8006118
WORD $0x7FFFFFFF
WORD $0x40000001
WORD $0XF800611C
WORD $0x7FFFFFFF
WORD $0x40000001
WORD $0XF8006120
WORD $0x7FFFFFFF
WORD $0x40000001
WORD $0XF8006124
WORD $0x7FFFFFFF
WORD $0x40000001
WORD $0XF800612C
WORD $0x000FFFFF
WORD $0x00000000
WORD $0XF8006130
WORD $0x000FFFFF
WORD $0x00000000
WORD $0XF8006134
WORD $0x000FFFFF
WORD $0x00000000
WORD $0XF8006138
WORD $0x000FFFFF
WORD $0x00000000
WORD $0XF8006140
WORD $0x000FFFFF
WORD $0x00000035
WORD $0XF8006144
WORD $0x000FFFFF
WORD $0x00000035
WORD $0XF8006148
WORD $0x000FFFFF
WORD $0x00000035
WORD $0XF800614C
WORD $0x000FFFFF
WORD $0x00000035
WORD $0XF8006154
WORD $0x000FFFFF
WORD $0x00000080
WORD $0XF8006158
WORD $0x000FFFFF
WORD $0x00000080
WORD $0XF800615C
WORD $0x000FFFFF
WORD $0x00000080
WORD $0XF8006160
WORD $0x000FFFFF
WORD $0x00000075
WORD $0XF8006168
WORD $0x001FFFFF
WORD $0x000000EE
WORD $0XF800616C
WORD $0x001FFFFF
WORD $0x000000E4
WORD $0XF8006170
WORD $0x001FFFFF
WORD $0x000000FC
WORD $0XF8006174
WORD $0x001FFFFF
WORD $0x000000F4
WORD $0XF800617C
WORD $0x000FFFFF
WORD $0x000000C0
WORD $0XF8006180
WORD $0x000FFFFF
WORD $0x000000C0
WORD $0XF8006184
WORD $0x000FFFFF
WORD $0x000000C0
WORD $0XF8006188
WORD $0x000FFFFF
WORD $0x000000B5
WORD $0XF8006190
WORD $0xFFFFFFFF
WORD $0x10040080
WORD $0XF8006194
WORD $0x000FFFFF
WORD $0x00007D02
WORD $0XF8006204
WORD $0xFFFFFFFF
WORD $0x00000000
WORD $0XF8006208
WORD $0x000F03FF
WORD $0x000803FF
WORD $0XF800620C
WORD $0x000F03FF
WORD $0x000803FF
WORD $0XF8006210
WORD $0x000F03FF
WORD $0x000803FF
WORD $0XF8006214
WORD $0x000F03FF
WORD $0x000803FF
WORD $0XF8006218
WORD $0x000F03FF
WORD $0x000003FF
WORD $0XF800621C
WORD $0x000F03FF
WORD $0x000003FF
WORD $0XF8006220
WORD $0x000F03FF
WORD $0x000003FF
WORD $0XF8006224
WORD $0x000F03FF
WORD $0x000003FF
WORD $0XF80062A8
WORD $0x00000FF7
WORD $0x00000000
WORD $0XF80062AC
WORD $0xFFFFFFFF
WORD $0x00000000
WORD $0XF80062B0
WORD $0x003FFFFF
WORD $0x00005125
WORD $0xF80062B4
WORD $0x003FFFFF
WORD $0x000012A8
WORD $0
| 9front/9front | 6,004 | sys/src/boot/zynq/fsbl.s |
#include "mem.h"
#define Rb R10
#define SET(R, V) MOVW $(V), R0 ; MOVW R0, (R)(Rb)
#define RMW(r, m, v) MOVW (r)(Rb), R0; BIC $(m), R0; ORR $(v), R0; MOVW R0, (r)(Rb)
TEXT _start(SB), $-4
WORD $0xea000006
MOVW $abort(SB), R15
MOVW $abort(SB), R15
MOVW $abort(SB), R15
MOVW $abort(SB), R15
MOVW $abort(SB), R15
MOVW $abort(SB), R15
MOVW $abort(SB), R15
TEXT reloc(SB), $-4
MOVW $(1<<7|1<<6|0x13), R0
MOVW R0, CPSR
MOVW $STACKTOP, R13
MOVW $_start(SB), R0
MCR CpMMU, 0, R0, C(12), C(0)
MOVW $SLCR_BASE, Rb
SET(SLCR_UNLOCK, UNLOCK_KEY)
MOVW $0, R0
MCR 15, 0, R0, C(8), C(7), 0
MCR 15, 0, R0, C(7), C(5), 0
MCR 15, 0, R0, C(7), C(5), 6
MOVW $0xc5047a, R1
MCR 15, 0, R1, C(1), C(0), 0
DSB
ISB
CMP.S $0, R15
BL.LT reset(SB)
MOVW $0xf, R1
MOVW $0xffff0000, R3
MOVW $0xe58a1910, R0
MOVW R0, (R3)
MOVW $0xf57ff04f, R0
MOVW R0, 4(R3)
MOVW $0xf57ff06f, R0
MOVW R0, 8(R3)
MOVW $0xe28ef000, R0
MOVW R0, 12(R3)
MOVW $reset(SB), R14
DSB
ISB
MOVW R3, R15
TEXT reset(SB), $-4
BL pllsetup(SB)
BL miosetup(SB)
BL ddrsetup(SB)
BL uartsetup(SB)
MOVW $SLCR_BASE, Rb
SET(SLCR_LOCK, LOCK_KEY)
// BL memtest(SB)
MOVW $setR12(SB), R12
BL main(SB)
B abort(SB)
TEXT pllsetup(SB), $0
MOVW $SLCR_BASE, Rb
SET(ARM_PLL_CFG, ARM_PLL_CFG_VAL)
SET(DDR_PLL_CFG, DDR_PLL_CFG_VAL)
SET(IO_PLL_CFG, IO_PLL_CFG_VAL)
MOVW $(ARM_FDIV | PLL_BYPASS_FORCE), R0
MOVW R0, ARM_PLL_CTRL(Rb)
ORR $(PLL_RESET), R4
MOVW R4, ARM_PLL_CTRL(Rb)
MOVW R0, ARM_PLL_CTRL(Rb)
MOVW $(DDR_FDIV | PLL_BYPASS_FORCE), R0
MOVW R0, DDR_PLL_CTRL(Rb)
ORR $(PLL_RESET), R4
MOVW R4, DDR_PLL_CTRL(Rb)
MOVW R0, DDR_PLL_CTRL(Rb)
MOVW $(IO_FDIV | PLL_BYPASS_FORCE), R0
MOVW R0, IO_PLL_CTRL(Rb)
ORR $(PLL_RESET), R4
MOVW R4, IO_PLL_CTRL(Rb)
MOVW R0, IO_PLL_CTRL(Rb)
_pllsetupl:
MOVW PLL_STATUS(Rb), R0
AND $7, R0
CMP.S $7, R0
BNE _pllsetupl
SET(ARM_PLL_CTRL, ARM_FDIV)
SET(DDR_PLL_CTRL, DDR_FDIV)
SET(IO_PLL_CTRL, IO_FDIV)
SET(ARM_CLK_CTRL, 0x1f << 24 | CPU_DIV << 8)
SET(UART_CLK_CTRL, UART_DIV << 8 | 3)
SET(DDR_CLK_CTRL, DDR_DIV3 << 20 | DDR_DIV2 << 26 | 3)
SET(DCI_CLK_CTRL, DCI_DIV0 << 8 | DCI_DIV1 << 20 | 1)
SET(GEM0_RCLK_CTRL, 1)
SET(GEM1_RCLK_CTRL, 0)
SET(GEM0_CLK_CTRL, ETH_DIV0 << 8 | ETH_DIV1 << 20 | 1)
SET(GEM1_CLK_CTRL, 0)
SET(GPIOB_CTRL, VREF_SW_EN)
SET(APER_CLK_CTRL, LQSPI_CLK_EN | GPIO_CLK_EN | UART0_CLK_EN | UART1_CLK_EN | I2C0_CLK_EN | SDIO1_CLK_EN | GEM0_CLK_EN | USB0_CLK_EN | USB1_CLK_EN | DMA_CLK_EN)
SET(SMC_CLK_CTRL, 0x3C20)
SET(LQSPI_CLK_CTRL, QSPI_DIV << 8 | 1)
SET(SDIO_CLK_CTRL, SDIO_DIV << 8 | 2)
SET(SPI_CLK_CTRL, 0x3F00)
SET(CAN_CLK_CTRL, 0x501900)
SET(PCAP_CLK_CTRL, PCAP_DIV << 8 | 1)
RET
TEXT miosetup(SB), $0
MOVW $SLCR_BASE, Rb
SET(UART_RST_CTRL, 0xf)
SET(UART_RST_CTRL, 0)
MOVW $miodata(SB), R1
ADD $MIO_PIN_0, Rb, R2
MOVW $54, R3
BL copy(SB)
MOVW $0, R0
MOVW R0, MIO_MST_TRI0(Rb)
MOVW R0, MIO_MST_TRI1(Rb)
RET
TEXT copy(SB), $0
_copyl:
MOVW.P 4(R1), R0
MOVW.P R0, 4(R2)
SUB.S $1, R3
BNE _copyl
RET
TEXT ddrsetup(SB), $0
MOVW $SLCR_BASE, Rb
RMW(DDRIOB_DCI_CTRL, DCI_RESET, DCI_RESET)
RMW(DDRIOB_DCI_CTRL, DCI_RESET, 0)
RMW(DDRIOB_DCI_CTRL, DDRIOB_DCI_CTRL_MASK, DCI_NREF | DCI_ENABLE | DCI_RESET)
MOVW $ddriob(SB), R1
ADD $DDRIOB_ADDR0, Rb, R2
MOVW $12, R3
BL copy(SB)
MOVW $ddrdata(SB), R1
_ddrl1:
MOVW.P 4(R1), R2
ORR.S $0, R2
BEQ _ddrl2
MOVW.P 4(R1), R3
MOVW.P 4(R1), R4
AND R3, R4
MOVW (R2), R0
BIC R3, R0
ORR R4, R0
MOVW R0, (R2)
B _ddrl1
_ddrl2:
MOVW DDRIOB_DCI_STATUS(Rb), R0
AND.S $(1<<13), R0
BEQ _ddrl2
MOVW $DDR_BASE, Rb
RMW(DDRC_CTRL, 0x1ffff, 0x81)
_ddrl4:
MOVW DDR_MODE_STS(Rb), R0
AND.S $7, R0
BEQ _ddrl4
MOVW $MP_BASE, Rb
SET(FILTER_START, 0)
RET
TEXT memtest(SB), $0
MOVW $0, R0
ADD $(1024 * 1024 * 10), R0, R1
_testl:
MOVW R0, (R0)
ADD $4, R0
CMP.S R0, R1
BNE _testl
MOVW $0, R0
_testl2:
MOVW (R0), R2
CMP.S R0, R2
BNE _no
ADD $4, R0
CMP.S R0, R1
BNE _testl2
MOVW $'.', R0
BL putc(SB)
RET
_no:
MOVW $'!', R0
BL putc(SB)
RET
TEXT uartsetup(SB), $0
MOVW $UART1_BASE, Rb
SET(UART_CTRL, 0x17)
SET(UART_MODE, 0x20)
SET(UART_SAMP, 15)
SET(UART_BAUD, 14)
RET
TEXT putc(SB), $0
MOVW $UART1_BASE, Rb
CMP.S $10, R0
BNE _putcl
MOVW R0, R2
MOVW $13, R0
BL putc(SB)
MOVW R2, R0
_putcl:
MOVW UART_STAT(Rb), R1
AND.S $0x10, R1
BNE _putcl
AND $0xFF, R0
MOVW R0, UART_DATA(Rb)
RET
TEXT jump(SB), $-4
MOVW R0, R15
TEXT abort(SB), $0
MOVW $'?', R0
BL putc(SB)
_loop:
WFE
B _loop
#define TRI 1
#define LVCMOS18 (1<<9)
#define LVCMOS25 (2<<9)
#define LVCMOS33 (3<<9)
#define HSTL (4<<9)
#define PULLUP (1<<12)
#define NORECV (1<<13)
#define FAST (1<<8)
#define MUX(a, b, c, d) ((a)<<1 | (b)<<2 | (c)<<3 | (d)<<5)
#define NO (TRI | LVCMOS33)
#define SPI (MUX(1, 0, 0, 0) | LVCMOS33)
#define UART (MUX(0, 0, 0, 7) | LVCMOS33)
#define SD (MUX(0, 0, 0, 4) | LVCMOS33)
#define ETX (MUX(1, 0, 0, 0) | HSTL | NORECV | PULLUP)
#define ERX (MUX(1, 0, 0, 0) | HSTL | TRI | PULLUP)
#define USB (MUX(0, 1, 0, 0) | LVCMOS18)
#define MDCLK (MUX(0, 0, 0, 4) | HSTL)
#define MDDATA (MUX(0, 0, 0, 4) | HSTL)
TEXT miodata(SB), $-4
WORD $NO // 0
WORD $SPI // 1
WORD $SPI // 2
WORD $SPI // 3
WORD $SPI // 4
WORD $SPI // 5
WORD $SPI // 6
WORD $NO // 7
WORD $UART // 8
WORD $(UART|TRI) // 9
WORD $SD // 10
WORD $SD // 11
WORD $SD // 12
WORD $SD // 13
WORD $SD // 14
WORD $SD // 15
WORD $ETX // 16
WORD $ETX // 17
WORD $ETX // 18
WORD $ETX // 19
WORD $ETX // 20
WORD $ETX // 21
WORD $ERX // 22
WORD $ERX // 23
WORD $ERX // 24
WORD $ERX // 25
WORD $ERX // 26
WORD $ERX // 27
WORD $USB // 28
WORD $USB // 29
WORD $USB // 30
WORD $USB // 31
WORD $USB // 32
WORD $USB // 33
WORD $USB // 34
WORD $USB // 35
WORD $USB // 36
WORD $USB // 37
WORD $USB // 38
WORD $USB // 39
WORD $USB // 40
WORD $USB // 41
WORD $USB // 42
WORD $USB // 43
WORD $USB // 44
WORD $USB // 45
WORD $USB // 46
WORD $USB // 47
WORD $USB // 48
WORD $USB // 49
WORD $USB // 50
WORD $USB // 51
WORD $MDCLK // 52
WORD $MDDATA // 53
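
The ddrdata table earlier in this dump is a flat sequence of {register address, mask, value} triples terminated by a zero address, and the _ddrl1 loop above walks it performing a masked read-modify-write on each register. The equivalent logic in C, as an illustrative sketch (the function name is mine; a volatile pointer stands in for the MMIO access):

```c
#include <stdint.h>

/* apply a zero-terminated table of {addr, mask, value} triples,
 * mirroring _ddrl1: *addr = (*addr & ~mask) | (value & mask) */
static void
applyregs(const uint32_t *tab)
{
	volatile uint32_t *reg;
	uint32_t mask, val;

	while(tab[0] != 0){
		reg = (volatile uint32_t*)(uintptr_t)tab[0];
		mask = tab[1];
		val = tab[2] & mask;
		*reg = (*reg & ~mask) | val;
		tab += 3;
	}
}
```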
| 9front/9front | 2,032 | sys/src/boot/efi/x64.s |
MODE $64
TEXT start(SB), 1, $-4
/* spill arguments */
MOVQ CX, 8(SP)
MOVQ DX, 16(SP)
CALL reloc(SP)
TEXT reloc(SB), 1, $-4
MOVQ 0(SP), SI
SUBQ $reloc-IMAGEBASE(SB), SI
MOVQ $IMAGEBASE, DI
MOVQ $edata-IMAGEBASE(SB), CX
CLD
REP; MOVSB
MOVQ 16(SP), BP
MOVQ $efimain(SB), DI
MOVQ DI, (SP)
RET
TEXT eficall(SB), 1, $-4
MOVQ SP, SI
MOVQ SP, DI
MOVL $(8*16), CX
SUBQ CX, DI
ANDQ $~15ULL, DI
LEAQ 16(DI), SP
CLD
REP; MOVSB
SUBQ $(8*16), SI
MOVQ 0(SP), CX
MOVQ 8(SP), DX
MOVQ 16(SP), R8
MOVQ 24(SP), R9
CALL BP
MOVQ SI, SP
RET
TEXT rebase(SB), 1, $-4
MOVQ BP, AX
RET
#include "mem.h"
TEXT jump(SB), 1, $-4
CLI
/* load zero length idt */
MOVL $_idtptr64p<>(SB), AX
MOVL (AX), IDTR
/* load temporary gdt */
MOVL $_gdtptr64p<>(SB), AX
MOVL (AX), GDTR
/* load CS with 32bit code segment */
PUSHQ $SELECTOR(3, SELGDT, 0)
PUSHQ $_warp32<>(SB)
RETFQ
MODE $32
TEXT _warp32<>(SB), 1, $-4
/* load 32bit data segments */
MOVL $SELECTOR(2, SELGDT, 0), AX
MOVW AX, DS
MOVW AX, ES
MOVW AX, FS
MOVW AX, GS
MOVW AX, SS
/* turn off paging */
MOVL CR0, AX
ANDL $0x7fffffff, AX /* ~(PG) */
MOVL AX, CR0
MOVL $0, AX
MOVL AX, CR3
/* disable long mode */
MOVL $0xc0000080, CX /* Extended Feature Enable */
RDMSR
ANDL $0xfffffeff, AX /* Long Mode Disable */
WRMSR
/* disable pae */
MOVL CR4, AX
ANDL $0xffffff5f, AX /* ~(PAE|PGE) */
MOVL AX, CR4
JMP *BP
TEXT _gdt<>(SB), 1, $-4
/* null descriptor */
LONG $0
LONG $0
/* (KESEG) 64 bit long mode exec segment */
LONG $(0xFFFF)
LONG $(SEGL|SEGG|SEGP|(0xF<<16)|SEGPL(0)|SEGEXEC|SEGR)
/* 32 bit data segment descriptor for 4 gigabytes (PL 0) */
LONG $(0xFFFF)
LONG $(SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(0)|SEGDATA|SEGW)
/* 32 bit exec segment descriptor for 4 gigabytes (PL 0) */
LONG $(0xFFFF)
LONG $(SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(0)|SEGEXEC|SEGR)
TEXT _gdtptr64p<>(SB), 1, $-4
WORD $(4*8-1)
QUAD $_gdt<>(SB)
TEXT _idtptr64p<>(SB), 1, $-4
WORD $0
QUAD $0
GLOBL confaddr(SB), $8
DATA confaddr(SB)/8, $CONFADDR
| 9front/9front | 1,647 | sys/src/boot/efi/aa64.s |
#define SYSREG(op0,op1,Cn,Cm,op2) SPR(((op0)<<19|(op1)<<16|(Cn)<<12|(Cm)<<8|(op2)<<5))
#define SCTLR_EL1 SYSREG(3,0,1,0,0)
#define NSH (1<<2 | 3)
#define NSHST (1<<2 | 2)
#define SY (3<<2 | 3)
TEXT start(SB), 1, $-4
_base:
MOV R0, R3
MOV R1, R4
MOV $setSB(SB), R0
BL rebase(SB)
MOV R0, R28
MOV $argsbuf<>(SB), R0
MOV R0, confaddr(SB)
MOV R3, R0
MOV R4, 0x08(FP)
B efimain(SB)
TEXT rebase(SB), 1, $-4
ADR _base, R1
SUB $0x8200, R0
ADD R1, R0
RETURN
TEXT eficall(SB), 1, $-4
MOV R0, R8
MOV 0x08(FP), R0
MOV 0x10(FP), R1
MOV 0x18(FP), R2
MOV 0x20(FP), R3
MOV 0x28(FP), R4
MOV 0x30(FP), R5
MOV 0x38(FP), R6
MOV 0x40(FP), R7
B (R8)
TEXT mmudisable<>(SB), 1, $-4
#define SCTLRCLR \
/* RES0 */ ( 3<<30 \
/* RES0 */ | 1<<27 \
/* UCI */ | 1<<26 \
/* EE */ | 1<<25 \
/* RES0 */ | 1<<21 \
/* E0E */ | 1<<24 \
/* WXN */ | 1<<19 \
/* nTWE */ | 1<<18 \
/* RES0 */ | 1<<17 \
/* nTWI */ | 1<<16 \
/* UCT */ | 1<<15 \
/* DZE */ | 1<<14 \
/* RES0 */ | 1<<13 \
/* RES0 */ | 1<<10 \
/* UMA */ | 1<<9 \
/* SA0 */ | 1<<4 \
/* SA */ | 1<<3 \
/* A */ | 1<<1 )
#define SCTLRSET \
/* RES1 */ ( 3<<28 \
/* RES1 */ | 3<<22 \
/* RES1 */ | 1<<20 \
/* RES1 */ | 1<<11 )
#define SCTLRMMU \
/* I */ ( 1<<12 \
/* C */ | 1<<2 \
/* M */ | 1<<0 )
/* initialise SCTLR, MMU and caches off */
ISB $SY
MRS SCTLR_EL1, R0
BIC $(SCTLRCLR | SCTLRMMU), R0
ORR $SCTLRSET, R0
ISB $SY
MSR R0, SCTLR_EL1
ISB $SY
DSB $NSHST
TLBI R0, 0,8,7,0 /* VMALLE1 */
DSB $NSH
ISB $SY
RETURN
TEXT jump(SB), 1, $-4
MOV R0, R3
MOV R1, R4
BL mmudisable<>(SB)
MOV R4, R0
B (R3)
GLOBL confaddr(SB), $8
GLOBL argsbuf<>(SB), $0x1000
| 9front/9front | 10,805 | sys/src/boot/bitsy/l.s |
#include "mem.h"
/*
* Entered here from Compaq's bootldr with MMU disabled.
*/
TEXT _start(SB), $-4
MOVW $setR12(SB), R12 /* load the SB */
_main:
/* SVC mode, interrupts disabled */
MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
MOVW R1, CPSR
/* disable the MMU */
MOVW $0x130, R1
MCR CpMMU, 0, R1, C(CpControl), C(0x0)
/* flush caches */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* drain write buffer */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
MOVW $(MACHADDR+BY2PG), R13 /* stack */
SUB $4, R13 /* link */
BL main(SB)
BL exit(SB)
/* we shouldn't get here */
_mainloop:
B _mainloop
BL _div(SB) /* hack to get _div etc loaded */
/* flush tlb's */
TEXT mmuinvalidate(SB), $-4
MCR CpMMU, 0, R0, C(CpTLBFlush), C(0x7)
RET
/* flush tlb's */
TEXT mmuinvalidateaddr(SB), $-4
MCR CpMMU, 0, R0, C(CpTLBFlush), C(0x6), 1
RET
/* write back and invalidate i and d caches */
TEXT cacheflush(SB), $-4
/* write back any dirty data */
MOVW $0xe0000000,R0
ADD $(8*1024),R0,R1
_cfloop:
MOVW.P 32(R0),R2
CMP.S R0,R1
BNE _cfloop
/* drain write buffer and invalidate i&d cache contents */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
RET
/* write back d cache */
TEXT cachewb(SB), $-4
/* write back any dirty data */
_cachewb:
MOVW $0xe0000000,R0
ADD $(8*1024),R0,R1
_cwbloop:
MOVW.P 32(R0),R2
CMP.S R0,R1
BNE _cwbloop
/* drain write buffer */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
RET
/* write back a single cache line */
TEXT cachewbaddr(SB), $-4
BIC $31,R0
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 1
B _wbflush
/* write back a region of cache lines */
TEXT cachewbregion(SB), $-4
MOVW 4(FP),R1
CMP.S $(4*1024),R1
BGT _cachewb
ADD R0,R1
BIC $31,R0
_cwbrloop:
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 1
ADD $32,R0
CMP.S R0,R1
BGT _cwbrloop
B _wbflush
/* invalidate the dcache */
TEXT dcacheinvalidate(SB), $-4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x6)
RET
/* invalidate the icache */
TEXT icacheinvalidate(SB), $-4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x9)
RET
/* drain write buffer */
TEXT wbflush(SB), $-4
_wbflush:
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
RET
/* return cpu id */
TEXT getcpuid(SB), $-4
MRC CpMMU, 0, R0, C(CpCPUID), C(0x0)
RET
/* return fault status */
TEXT getfsr(SB), $-4
MRC CpMMU, 0, R0, C(CpFSR), C(0x0)
RET
/* return fault address */
TEXT getfar(SB), $-4
MRC CpMMU, 0, R0, C(CpFAR), C(0x0)
RET
/* return fault address */
TEXT putfar(SB), $-4
MRC CpMMU, 0, R0, C(CpFAR), C(0x0)
RET
/* set the translation table base */
TEXT putttb(SB), $-4
MCR CpMMU, 0, R0, C(CpTTB), C(0x0)
RET
/*
* enable mmu, i and d caches
*/
TEXT mmuenable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
ORR $(CpCmmuena|CpCdcache|CpCicache|CpCwb), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
TEXT mmudisable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
BIC $(CpCmmuena|CpCdcache|CpCicache|CpCwb|CpCvivec), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
/*
* use exception vectors at 0xffff0000
*/
TEXT mappedIvecEnable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
ORR $(CpCvivec), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
TEXT mappedIvecDisable(SB), $-4
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
BIC $(CpCvivec), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
/* set the translation table base */
TEXT putdac(SB), $-4
MCR CpMMU, 0, R0, C(CpDAC), C(0x0)
RET
/* set address translation pid */
TEXT putpid(SB), $-4
MCR CpMMU, 0, R0, C(CpPID), C(0x0)
RET
/*
* set the stack value for the mode passed in R0
*/
TEXT setr13(SB), $-4
MOVW 4(FP), R1
MOVW CPSR, R2
BIC $PsrMask, R2, R3
ORR R0, R3
MOVW R3, CPSR
MOVW R13, R0
MOVW R1, R13
MOVW R2, CPSR
RET
/*
* exception vectors, copied by trapinit() to somewhere useful
*/
TEXT vectors(SB), $-4
MOVW 0x18(R15), R15 /* reset */
MOVW 0x18(R15), R15 /* undefined */
MOVW 0x18(R15), R15 /* SWI */
MOVW 0x18(R15), R15 /* prefetch abort */
MOVW 0x18(R15), R15 /* data abort */
MOVW 0x18(R15), R15 /* reserved */
MOVW 0x18(R15), R15 /* IRQ */
MOVW 0x18(R15), R15 /* FIQ */
TEXT vtable(SB), $-4
WORD $_vsvc(SB) /* reset, in svc mode already */
WORD $_vund(SB) /* undefined, switch to svc mode */
WORD $_vsvc(SB) /* swi, in svc mode already */
WORD $_vpabt(SB) /* prefetch abort, switch to svc mode */
WORD $_vdabt(SB) /* data abort, switch to svc mode */
WORD $_vsvc(SB) /* reserved */
WORD $_virq(SB) /* IRQ, switch to svc mode */
WORD $_vfiq(SB) /* FIQ, switch to svc mode */
TEXT _vrst(SB), $-4
BL resettrap(SB)
TEXT _vsvc(SB), $-4 /* SWI */
MOVW.W R14, -4(R13) /* ureg->pc = interrupted PC */
MOVW SPSR, R14 /* ureg->psr = SPSR */
MOVW.W R14, -4(R13) /* ... */
MOVW $PsrMsvc, R14 /* ureg->type = PsrMsvc */
MOVW.W R14, -4(R13) /* ... */
MOVM.DB.W.S [R0-R14], (R13) /* save user level registers, at end r13 points to ureg */
MOVW $setR12(SB), R12 /* Make sure we've got the kernel's SB loaded */
MOVW R13, R0 /* first arg is pointer to ureg */
SUB $8, R13 /* space for argument+link */
BL syscall(SB)
ADD $(8+4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB.S (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
TEXT _vund(SB), $-4 /* undefined */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $PsrMund, R0
B _vswitch
TEXT _vpabt(SB), $-4 /* prefetch abort */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $PsrMabt, R0 /* r0 = type */
B _vswitch
TEXT _vdabt(SB), $-4 /* data abort */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $(PsrMabt+1), R0 /* r0 = type */
B _vswitch
TEXT _virq(SB), $-4 /* IRQ */
MOVM.IA [R0-R4], (R13) /* free some working space */
MOVW $PsrMirq, R0 /* r0 = type */
B _vswitch
/*
* come here with type in R0 and R13 pointing above saved [r0-r4]
* and type in r0. we'll switch to SVC mode and then call trap.
*/
_vswitch:
MOVW SPSR, R1 /* save SPSR for ureg */
MOVW R14, R2 /* save interrupted pc for ureg */
MOVW R13, R3 /* save pointer to where the original [R0-R3] are */
/* switch to svc mode */
MOVW CPSR, R14
BIC $PsrMask, R14
ORR $(PsrDirq|PsrDfiq|PsrMsvc), R14
MOVW R14, CPSR
/* interrupted code kernel or user? */
AND.S $0xf, R1, R4
BEQ _userexcep
/* here for trap from SVC mode */
MOVM.DB.W [R0-R2], (R13) /* set ureg->{type, psr, pc}; r13 points to ureg->type */
MOVM.IA (R3), [R0-R4] /* restore [R0-R4] from previous mode's stack */
MOVM.DB.W [R0-R14], (R13) /* save kernel level registers, at end r13 points to ureg */
MOVW $setR12(SB), R12 /* Make sure we've got the kernel's SB loaded */
MOVW R13, R0 /* first arg is pointer to ureg */
SUB $8, R13 /* space for argument+link (for debugger) */
MOVW $0xdeaddead,R11 /* marker */
BL trap(SB)
ADD $(8+4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
/* here for trap from USER mode */
_userexcep:
MOVM.DB.W [R0-R2], (R13) /* set ureg->{type, psr, pc}; r13 points to ureg->type */
MOVM.IA (R3), [R0-R4] /* restore [R0-R4] from previous mode's stack */
MOVM.DB.W.S [R0-R14], (R13) /* save kernel level registers, at end r13 points to ureg */
MOVW $setR12(SB), R12 /* Make sure we've got the kernel's SB loaded */
MOVW R13, R0 /* first arg is pointer to ureg */
SUB $8, R13 /* space for argument+link (for debugger) */
BL trap(SB)
ADD $(8+4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB.S (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
TEXT _vfiq(SB), $-4 /* FIQ */
RFE /* FIQ is special, ignore it for now */
/*
* This is the first jump from kernel to user mode.
* Fake a return from interrupt.
*
* Enter with R0 containing the user stack pointer.
* UTZERO + 0x20 is always the entry point.
*
*/
TEXT touser(SB),$-4
/* store the user stack pointer into the USR_r13 */
MOVM.DB.W [R0], (R13)
MOVM.S.IA.W (R13),[R13]
/* set up a PSR for user level */
MOVW $(PsrMusr), R0
MOVW R0,SPSR
/* save the PC on the stack */
MOVW $(UTZERO+0x20), R0
MOVM.DB.W [R0],(R13)
/* return from interrupt */
RFE /* MOVM.IA.S.W (R13), [R15] */
/*
* here to jump to a newly forked process
*/
TEXT forkret(SB),$-4
ADD $(4*15), R13 /* make r13 point to ureg->type */
MOVW 8(R13), R14 /* restore link */
MOVW 4(R13), R0 /* restore SPSR */
MOVW R0, SPSR /* ... */
MOVM.DB.S (R13), [R0-R14] /* restore registers */
ADD $8, R13 /* pop past ureg->{type+psr} */
RFE /* MOVM.IA.S.W (R13), [R15] */
TEXT splhi(SB), $-4
/* save caller pc in Mach */
MOVW $(MACHADDR+0x04),R2
MOVW R14,0(R2)
/* turn off interrupts */
MOVW CPSR, R0
ORR $(PsrDfiq|PsrDirq), R0, R1
MOVW R1, CPSR
RET
TEXT spllo(SB), $-4
MOVW CPSR, R0
BIC $(PsrDfiq|PsrDirq), R0, R1
MOVW R1, CPSR
RET
TEXT splx(SB), $-4
/* save caller pc in Mach */
MOVW $(MACHADDR+0x04),R2
MOVW R14,0(R2)
/* reset interrupt level */
MOVW R0, R1
MOVW CPSR, R0
MOVW R1, CPSR
RET
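/*
 * typical use from C, as a hedged sketch of the usual pairing (the
 * caller and the shared data are illustrative only):
 *
 *	int s;
 *
 *	s = splhi();	// interrupts off, previous PSR returned
 *	// ... touch state shared with interrupt handlers ...
 *	splx(s);	// restore the previous interrupt level
 */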
TEXT splxpc(SB), $-4 /* for iunlock */
MOVW R0, R1
MOVW CPSR, R0
MOVW R1, CPSR
RET
TEXT spldone(SB), $0
RET
TEXT islo(SB), $-4
MOVW CPSR, R0
AND $(PsrDfiq|PsrDirq), R0
EOR $(PsrDfiq|PsrDirq), R0
RET
TEXT cpsrr(SB), $-4
MOVW CPSR, R0
RET
TEXT spsrr(SB), $-4
MOVW SPSR, R0
RET
TEXT getcallerpc(SB), $-4
MOVW 0(R13), R0
RET
TEXT tas(SB), $-4
MOVW R0, R1
MOVW $0xDEADDEAD, R2
SWPW R2, (R1), R0
RET
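/*
 * tas swaps 0xDEADDEAD into the word at addr and returns the previous
 * value, so the caller acquired the word exactly when it gets back 0.
 * A hedged C sketch of the conventional spin-lock use (the Lock layout
 * is an assumption, not taken from the kernel headers):
 *
 *	typedef struct { unsigned long key; } Lock;
 *
 *	void
 *	lock(Lock *l)
 *	{
 *		while(tas(&l->key) != 0)
 *			;	// spin until the previous value was 0
 *	}
 */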
TEXT setlabel(SB), $-4
MOVW R13, 0(R0) /* sp */
MOVW R14, 4(R0) /* pc */
MOVW $0, R0
RET
TEXT gotolabel(SB), $-4
MOVW 0(R0), R13 /* sp */
MOVW 4(R0), R14 /* pc */
MOVW $1, R0
RET
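/*
 * setlabel/gotolabel act as a minimal setjmp/longjmp: setlabel returns
 * 0 when called directly, and the call appears to return 1 when the
 * saved context is resumed by gotolabel. A hedged C sketch (Label is
 * assumed to be just the two words saved above, sp then pc):
 *
 *	typedef struct { unsigned long sp, pc; } Label;
 *	Label l;
 *
 *	if(setlabel(&l) == 0){
 *		// first pass: context saved, continue normally
 *	}else{
 *		// execution resumed here by gotolabel(&l)
 *	}
 */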
/* The first MCR instruction of this function needs to be on a cache-line
* boundary; to make this happen, it will be copied (in trap.c).
*
* Doze puts the machine into idle mode. Any interrupt will get it out
* at the next instruction (the RET, to be precise).
*/
TEXT _doze(SB), $-4
MOVW $UCDRAMZERO, R1
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MCR CpPWR, 0, R0, C(CpTest), C(0x2), 2
MOVW (R1), R0
MCR CpPWR, 0, R0, C(CpTest), C(0x8), 2
RET
|
9front/9front
| 2,185
|
sys/src/boot/bitsy/il.s
|
#include "mem.h"
/*
* Entered here from Compaq's bootldr. First relocate to
* the location we're linked for and then copy back the
* decompressed kernel.
*
* All
*/
TEXT _start(SB), $-4
MOVW $setR12(SB), R12 /* load the SB */
MOVW $1, R0 /* dance to make 5l think that the magic */
MOVW $1, R1 /* numbers in WORDs below are being used */
CMP.S R0, R1 /* and to align them to where bootldr wants */
BEQ _start2
WORD $0x016f2818 /* magic number to say we are a kernel */
WORD $0xc0008000 /* entry point address */
WORD $0 /* size?, or end of data? */
_start2:
/* SVC mode, interrupts disabled */
MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
MOVW R1, CPSR
/* disable the MMU */
MOVW $0x130, R1
MCR CpMMU, 0, R1, C(CpControl), C(0x0)
/* enable caches */
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
ORR $(CpCdcache|CpCicache|CpCwb), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
/* flush caches */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* drain write buffer */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
/* relocate to where we expect to be */
MOVW $(512*1024),R3
MOVW $0xC0008000,R1
MOVW $0xC0200000,R2
ADD R1,R3
_relloop:
MOVW (R1),R0
MOVW R0,(R2)
ADD $4,R1
ADD $4,R2
CMP.S R1,R3
BNE _relloop
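/* the loop above is a plain word-wise copy; in C it amounts to
 * (the 512kB size is read off the $(512*1024) literal above):
 *
 *	memmove((void*)0xC0200000, (void*)0xC0008000, 512*1024);
 */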
MOVW $(MACHADDR+BY2PG), R13 /* stack */
SUB $4, R13 /* link */
/* jump to where we've been relocated */
MOVW $_relocated(SB),R15
TEXT _relocated(SB),$-4
BL main(SB)
BL exit(SB)
/* we shouldn't get here */
_mainloop:
B _mainloop
BL _div(SB) /* hack to get _div etc loaded */
TEXT mypc(SB),$-4
MOVW R14,R0
RET
TEXT draincache(SB),$-4
/* write back any dirty data */
MOVW $0xe0000000,R0
ADD $(8*1024),R0,R1
_cfloop:
MOVW.P 32(R0),R2
CMP.S R0,R1
BNE _cfloop
/* drain write buffer and invalidate i&d cache contents */
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0xa), 4
MCR CpMMU, 0, R0, C(CpCacheFlush), C(0x7), 0
/* drain prefetch */
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
MOVW R0,R0
/* disable caches */
MRC CpMMU, 0, R0, C(CpControl), C(0x0)
BIC $(CpCdcache|CpCicache|CpCwb), R0
MCR CpMMU, 0, R0, C(CpControl), C(0x0)
RET
|
9front/9front
| 4,671
|
sys/src/boot/pc/l.s
|
#include "x16.h"
#include "mem.h"
#undef ORB
#define DATA32SEL SELECTOR(1, SELGDT, 0)
#define EXEC32SEL SELECTOR(2, SELGDT, 0)
#define DATA16SEL SELECTOR(3, SELGDT, 0)
#define EXEC16SEL SELECTOR(4, SELGDT, 0)
#define SEGSS BYTE $0x36
#define SEGES BYTE $0x26
#define FARRET BYTE $0xCB
TEXT origin(SB), $0
CLI
CLR(rCX)
MTSR(rCX, rSS)
OPSIZE; MOVL $origin(SB), SP
PUSHA
OPSIZE; ADSIZE; PUSHL SP
OPSIZE; ADSIZE; PUSHL CX
PUSHI(start(SB))
TEXT pmode32(SB), $0
CLI
/* get return pc */
POPR(rDI)
/* make sure stack is at 0000: */
CLR(rCX)
MTSR(rCX, rSS)
OPSIZE; ANDL $0xFFFF, SP
/* convert 16-bit return pc to far pointer */
PUSHI(EXEC32SEL)
PUSHR(rDI)
/* load gdt */
SEGSS; LGDT(tgdtptr(SB))
/* enable protected mode */
MFCR(rCR0, rCX)
ORB $1, CL
MTCR(rCX, rCR0)
/* flush */
FARJUMP16(EXEC16SEL, pmode32flush(SB));
TEXT pmode32flush(SB), $0
/* load 32-bit protected mode data selector */
LWI(DATA32SEL, rCX)
_segret:
/* load all data segments */
MTSR(rCX, rDS)
MTSR(rCX, rES)
MTSR(rCX, rFS)
MTSR(rCX, rGS)
MTSR(rCX, rSS)
FARRET
TEXT rmode16(SB), $0
/* setup farret to rmode16x */
PUSHL $EXEC16SEL
PUSHL $rmode16x(SB)
/* load 16-bit protected mode data selector */
MOVL $DATA16SEL, CX
JMP _segret
TEXT rmode16x(SB), $0
/* disable protected mode */
MFCR(rCR0, rCX)
ANDB $0xfe, CL
MTCR(rCX, rCR0)
/* flush */
FARJUMP16(0, rmode16flush(SB));
TEXT rmode16flush(SB), $0
/*
* load 16-bit realmode data segment 0000: and
* return to 32 bit return pc interpreted
* as 16 bit far pointer.
*/
CLR(rCX)
JMP _segret
TEXT tgdt(SB), $0
/* null descriptor */
LONG $0
LONG $0
/* data segment descriptor for 4 gigabytes (PL 0) */
LONG $(0xFFFF)
LONG $(SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(0)|SEGDATA|SEGW)
/* exec segment descriptor for 4 gigabytes (PL 0) */
LONG $(0xFFFF)
LONG $(SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(0)|SEGEXEC|SEGR)
/* data segment descriptor for (PL 0) 16-bit */
LONG $(0xFFFF)
LONG $((0xF<<16)|SEGP|SEGPL(0)|SEGDATA|SEGW)
/* exec segment descriptor for (PL 0) 16-bit */
LONG $(0xFFFF)
LONG $((0xF<<16)|SEGP|SEGPL(0)|SEGEXEC|SEGR)
TEXT tgdtptr(SB), $0
WORD $(5*8)
LONG $tgdt(SB)
TEXT jump(SB), $0
MOVL 4(SP), AX
JMP *AX
TEXT halt(SB), $0
_halt:
JMP _halt
TEXT kbdgetc(SB), $0
CALL rmode16(SB)
STI
MOVB $0x01, AH
BIOSCALL(0x16)
JNZ _gotkey
CLR(rAX)
JMP _pret32
_gotkey:
CLR(rAX)
BIOSCALL(0x16)
JMP _pret32
TEXT cgaputc(SB), $0
MOVL 4(SP),AX
CALL rmode16(SB)
STI
MOVB $0x0E, AH
BIOSCALL(0x10)
_pret32:
CALL16(pmode32(SB))
ANDL $0xFFFF, AX
RET
#ifdef PXE
TEXT pxeinit(SB), $0
CALL rmode16(SB)
/* get pxe env structure in ES:BX */
LWI(0x5650, rAX)
BIOSCALL(0x1A)
JC _pret32
/* !PXE or PXEENV+ signature */
SEGES; LXW(0, xBX, rAX)
CMPI((('!'<<0)|('P'<<8)), rAX)
JEQ _getentry
CMPI((('P'<<0)|('X'<<8)), rAX)
JNE _pret32
SEGES; LXW(0x2A, xBX, rAX)
SEGES; LXW(0x28, xBX, rBX)
MTSR(rAX, rES)
_getentry:
SEGES; LXW(0x12, xBX, rAX)
SW(rAX, pxepseg(SB))
SEGES; LXW(0x10, xBX, rAX)
SW(rAX, pxepoff(SB))
CLR(rAX)
JMP _pret32
TEXT pxecallret(SB), $0
ADDI(6, rSP)
JMP _pret32
TEXT pxecall(SB), $0
MOVL op+4(SP),AX
MOVL buf+8(SP),SI
CALL rmode16(SB)
CLR(rCX)
PUSHR(rCX)
PUSHR(rSI)
/* opcode */
PUSHR(rAX)
/* farcall */
PUSHR(rCX)
PUSHI(pxecallret(SB))
LW(pxepseg(SB), rAX)
PUSHR(rAX)
LW(pxepoff(SB), rAX)
PUSHR(rAX)
STI
CLR(rAX)
CLR(rBX)
CLR(rCX)
CLR(rDX)
CLR(rDI)
CLR(rSI)
FARRET
TEXT pxepseg(SB), $0
WORD $0
TEXT pxepoff(SB), $0
WORD $0
#else /* PXE */
/*
* in:
* DL drive
* AX:BX lba32,
* 0000:SI buffer
*/
TEXT readsect16(SB), $0
PUSHA
CLR(rCX)
PUSHR(rCX) /* qword lba */
PUSHR(rCX)
PUSHR(rBX)
PUSHR(rAX)
PUSHR(rCX) /* dword buffer */
PUSHR(rSI)
INC(rCX)
PUSHR(rCX) /* word # of sectors */
PUSHI(0x0010) /* byte reserved, byte packet size */
MW(rSP, rSI)
LWI(0x4200, rAX)
BIOSCALL(0x13)
JCC _readok
ADDI(0x10, rSP)
POPA
CLR(rAX)
DEC(rAX)
RET
_readok:
ADDI(0x10, rSP)
POPA
CLR(rAX)
RET
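/*
 * the words pushed in readsect16 above form a standard INT 13h/AH=42h
 * disk address packet. The same layout as a hedged C sketch (the
 * struct name is illustrative; the BIOS only defines the byte offsets):
 *
 *	typedef struct {
 *		unsigned char		size;	// 0x10, packet size
 *		unsigned char		reserved;
 *		unsigned short		count;	// sectors to transfer (1 here)
 *		unsigned short		bufoff;	// transfer buffer offset
 *		unsigned short		bufseg;	// transfer buffer segment
 *		unsigned long long	lba;	// starting sector
 *	} Dap;
 *
 * the BIOSCALL(0x13) is made with AH=0x42 and rSI pointing at this
 * packet on the stack.
 */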
TEXT readsect(SB), $0
MOVL 4(SP), DX
MOVW 8(SP), AX
MOVW 10(SP), BX
MOVL 12(SP), SI
CALL rmode16(SB)
STI
CALL16(readsect16(SB))
CALL16(pmode32(SB))
ANDL $0xFFFF, AX
RET
#endif
#ifdef ISO
TEXT bootname(SB), $0
BYTE $'3'; BYTE $'8'; BYTE $'6'; BYTE $'/';
BYTE $'9'; BYTE $'b'; BYTE $'o'; BYTE $'o';
BYTE $'t'; BYTE $'i'; BYTE $'s'; BYTE $'o';
BYTE $0
#endif
TEXT uart(SB), $0
BYTE $0xff
TEXT nocga(SB), $0
BYTE $0x00
TEXT nokbd(SB), $0
BYTE $0x00
TEXT hex(SB), $0
BYTE $'0'; BYTE $'1'; BYTE $'2'; BYTE $'3';
BYTE $'4'; BYTE $'5'; BYTE $'6'; BYTE $'7';
BYTE $'8'; BYTE $'9'; BYTE $'a'; BYTE $'b';
BYTE $'c'; BYTE $'d'; BYTE $'e'; BYTE $'f'
|
9front/9front
| 6,234
|
sys/src/boot/pc/mbr.s
|
/*
* Hard disc boot block. Loaded at 0x7C00, relocates to 0x0600:
* 8a mbr.s; 8l -o mbr -l -H3 -T0x0600 mbr.8
*/
#include "x16.h"
#include "mem.h"
/*#define FLOPPY 1 /* test on a floppy */
#define TRACE(C) PUSHA;\
CLR(rBX);\
MOVB $C, AL;\
LBI(0x0E, rAH);\
BIOSCALL(0x10);\
POPA
/*
* We keep data on the stack, indexed by BP.
*/
#define Xdap 0x00 /* disc address packet */
#define Xtable 0x10 /* partition table entry */
#define Xdrive 0x12 /* starting disc */
#define Xtotal 0x14 /* sum of allocated data above */
/*
* Start: loaded at 0000:7C00, relocate to 0000:0600.
* Boot drive is in rDL.
*/
TEXT _start(SB), $0
CLI
CLR(rAX)
MTSR(rAX, rSS) /* 0000 -> rSS */
LWI((0x7C00-Xtotal), rSP) /* 7Bxx -> rSP */
MW(rSP, rBP) /* set the indexed-data pointer */
MTSR(rAX, rDS) /* 0000 -> rDS, source segment */
LWI(0x7C00, rSI) /* 7C00 -> rSI, source offset */
MTSR(rAX, rES) /* 0000 -> rES, destination segment */
LWI(0x600, rDI) /* 0600 -> rDI, destination offset */
LWI(0x100, rCX) /* 0100 -> rCX, loop count (words) */
CLD
REP; MOVSL /* MOV DS:[(E)SI] -> ES:[(E)DI] */
FARJUMP16(0x0000, _start0600(SB))
TEXT _start0600(SB), $0
#ifdef FLOPPY
LBI(0x80, rDL)
#else
CLRB(rAL) /* some systems pass 0 */
CMPBR(rAL, rDL)
JNE _save
LBI(0x80, rDL)
#endif /* FLOPPY */
_save:
SXB(rDL, Xdrive, xBP) /* save disc */
LWI(confidence(SB), rSI) /* for that warm, fuzzy feeling */
CALL16(BIOSputs(SB))
LWI(_start+0x01BE(SB), rSI) /* address of partition table */
LWI(0x04, rCX) /* 4 entries in table */
LBI(0x80, rAH) /* active entry value */
CLRB(rAL) /* inactive entry value */
_activeloop0:
LXB(0x00, xSI, rBL) /* get active entry from table */
CMPBR(rBL, rAH) /* is this an active entry? */
JEQ _active
CMPBR(rBL, rAL) /* if not active it should be 0 */
JNE _invalidMBR
ADDI(0x10, rSI) /* next table entry */
DEC(rCX)
JNE _activeloop0
LWI(noentry(SB), rSI)
CALL16(buggery(SB))
_active:
MW(rSI, rDI) /* save table address */
_activeloop1:
ADDI(0x10, rSI) /* next table entry */
DEC(rCX)
JEQ _readsector
LXB(0x00, xSI, rBL) /* get active entry from table */
CMPBR(rBL, rAH) /* is this an active entry? */
JNE _activeloop1 /* should only be one active */
_invalidMBR:
LWI(invalidMBR(SB), rSI)
CALL16(buggery(SB))
_readsector:
LBI(0x41, rAH) /* check extensions present */
LWI(0x55AA, rBX)
LXB(Xdrive, xBP, rDL) /* drive */
BIOSCALL(0x13) /* CF set on failure */
JCS _readsector2
CMPI(0xAA55, rBX)
JNE _readsector2
ANDI(0x0001, rCX)
JEQ _readsector2
_readsector42:
SBPBI(0x10, Xdap+0) /* packet size */
SBPBI(0x00, Xdap+1) /* reserved */
SBPBI(0x01, Xdap+2) /* number of blocks to transfer */
SBPBI(0x00, Xdap+3) /* reserved */
SBPWI(0x7C00, Xdap+4) /* transfer buffer :offset */
SBPWI(0x0000, Xdap+6) /* transfer buffer seg: */
LXW(0x08, xDI, rAX) /* LBA (64-bits) */
SBPW(rAX, Xdap+8)
LXW(0x0A, xDI, rAX)
SBPW(rAX, Xdap+10)
SBPWI(0x0000, Xdap+12)
SBPWI(0x0000, Xdap+14)
MW(rBP, rSI) /* disk address packet */
LBI(0x42, rAH) /* extended read */
BIOSCALL(0x13) /* CF set on failure */
JCC _readsectorok
LWI(ioerror(SB), rSI)
CALL16(buggery(SB))
/*
* Read a sector from a disc using the traditional BIOS call.
* For BIOSCALL(0x13/AH=0x02):
* rAH 0x02
* rAL number of sectors to read (1)
* rCH low 8 bits of cylinder
* rCL high 2 bits of cylinder (7-6), sector (5-0)
* rDH head
* rDL drive
* rES:rBX buffer address
*/
_readsector2:
LXB(0x01, xDI, rDH) /* head */
LXW(0x02, xDI, rCX) /* save active cylinder/sector */
LWI(0x0201, rAX) /* read one sector */
LXB(Xdrive, xBP, rDL) /* drive */
LWI(0x7C00, rBX) /* buffer address (rES already OK) */
BIOSCALL(0x13) /* CF set on failure */
JCC _readsectorok
LWI(ioerror(SB), rSI)
CALL16(buggery(SB))
_readsectorok:
LWI(0x7C00, rBX) /* buffer address (rES already OK) */
LXW(0x1FE, xBX, rAX)
CMPI(0xAA55, rAX)
JNE _bbnotok
/*
* Jump to the loaded PBS.
* rDL and rSI should still contain the drive
* and partition table pointer respectively.
*/
MW(rDI, rSI)
FARJUMP16(0x0000, 0x7C00)
_bbnotok:
LWI(invalidPBS(SB), rSI)
TEXT buggery(SB), $0
CALL16(BIOSputs(SB))
LWI(reboot(SB), rSI)
CALL16(BIOSputs(SB))
_wait:
CLR(rAX) /* wait for any key */
BIOSCALL(0x16)
_reset:
CLR(rBX) /* set ES segment for BIOS area */
MTSR(rBX, rES)
LWI(0x0472, rBX) /* warm-start code address */
LWI(0x1234, rAX) /* warm-start code */
POKEW /* MOVW AX, ES:[BX] */
FARJUMP16(0xFFFF, 0x0000) /* reset */
/*
* Output a string to the display.
* String argument is in rSI.
*/
TEXT BIOSputs(SB), $0
PUSHA
CLR(rBX)
_BIOSputs:
LODSB
ORB(rAL, rAL)
JEQ _BIOSputsret
LBI(0x0E, rAH)
BIOSCALL(0x10)
JMP _BIOSputs
_BIOSputsret:
POPA
RET
/* "No active entry in MBR" */
TEXT noentry(SB), $0
BYTE $'N'; BYTE $'o'; BYTE $' '; BYTE $'a';
BYTE $'c'; BYTE $'t'; BYTE $'i'; BYTE $'v';
BYTE $'e'; BYTE $' '; BYTE $'e'; BYTE $'n';
BYTE $'t'; BYTE $'r'; BYTE $'y'; BYTE $' ';
BYTE $'i'; BYTE $'n'; BYTE $' '; BYTE $'M';
BYTE $'B'; BYTE $'R';
BYTE $'\z';
/* "Invalid MBR" */
TEXT invalidMBR(SB), $0
BYTE $'I'; BYTE $'n'; BYTE $'v'; BYTE $'a';
BYTE $'l'; BYTE $'i'; BYTE $'d'; BYTE $' ';
BYTE $'M'; BYTE $'B'; BYTE $'R';
BYTE $'\z';
/* "I/O error" */
TEXT ioerror(SB), $0
BYTE $'I'; BYTE $'/'; BYTE $'O'; BYTE $' ';
BYTE $'e'; BYTE $'r'; BYTE $'r'; BYTE $'o';
BYTE $'r';
BYTE $'\z';
/* "Invalid PBS" */
TEXT invalidPBS(SB), $0
BYTE $'I'; BYTE $'n'; BYTE $'v'; BYTE $'a';
BYTE $'l'; BYTE $'i'; BYTE $'d'; BYTE $' ';
BYTE $'P'; BYTE $'B'; BYTE $'S';
BYTE $'\z';
/* "\r\nPress almost any key to reboot..." */
TEXT reboot(SB), $0
BYTE $'\r';BYTE $'\n';
BYTE $'P'; BYTE $'r'; BYTE $'e'; BYTE $'s';
BYTE $'s'; BYTE $' '; BYTE $'a'; BYTE $'l';
BYTE $'m'; BYTE $'o'; BYTE $'s'; BYTE $'t';
BYTE $' '; BYTE $'a'; BYTE $'n'; BYTE $'y';
BYTE $' '; BYTE $'k'; BYTE $'e'; BYTE $'y';
BYTE $' '; BYTE $'t'; BYTE $'o'; BYTE $' ';
BYTE $'r'; BYTE $'e'; BYTE $'b'; BYTE $'o';
BYTE $'o'; BYTE $'t'; BYTE $'.'; BYTE $'.';
BYTE $'.';
BYTE $'\z';
/* "MBR..." */
TEXT confidence(SB), $0
BYTE $'M'; BYTE $'B'; BYTE $'R'; BYTE $'.';
BYTE $'.'; BYTE $'.';
BYTE $'\z';
|
9front/9front
| 5,272
|
sys/src/boot/pc/pbs.s
|
#include "x16.h"
#include "mem.h"
#define RELOC 0x7c00
TEXT _magic(SB), $0
BYTE $0xEB; BYTE $0x58; /* jmp .+ 0x58 (_start0x5A) */
BYTE $0x90 /* nop */
TEXT _version(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00
TEXT _sectsize(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _clustsize(SB), $0
BYTE $0x00
TEXT _nresrv(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _nfats(SB), $0
BYTE $0x00
TEXT _rootsize(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _volsize(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _mediadesc(SB), $0
BYTE $0x00
TEXT _fatsize(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _trksize(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _nheads(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _nhiddenlo(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _nhiddenhi(SB), $0
BYTE $0x00; BYTE $0x00;
TEXT _bigvolsize(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
/* FAT32 structure, starting @0x24 */
TEXT _fatsz32lo(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _fatsz32hi(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _extflags(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _fsver(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _rootclust(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00
TEXT _fsinfo(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _bkboot(SB), $0
BYTE $0x00; BYTE $0x00
TEXT _reserved0(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00
TEXT _driveno(SB), $0
BYTE $0x00
TEXT _reserved1(SB), $0
BYTE $0x00
TEXT _bootsig(SB), $0
BYTE $0x00
TEXT _volid(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
TEXT _label(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00
BYTE $0x00; BYTE $0x00; BYTE $0x00
TEXT _type(SB), $0
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00;
BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x00
_start0x5A:
CLI
CLR(rAX)
MTSR(rAX, rSS) /* 0000 -> rSS */
MTSR(rAX, rDS) /* 0000 -> rDS, source segment */
MTSR(rAX, rES)
LWI(0x100, rCX)
LWI(RELOC, rSI)
MW(rSI, rSP)
LWI(_magic(SB), rDI)
CLD
REP; MOVSL /* MOV DS:[(E)SI] -> ES:[(E)DI] */
MW(rSP, rBP)
PUSHR(rCX)
PUSHI(start16(SB))
BYTE $0xCB /* FAR RET */
TEXT start16(SB), $0
STI
LWI(hello(SB), rSI)
CALL16(print16(SB))
STB(rDL, _driveno(SB))
CLR(rDX)
LW(_fatsize(SB), rAX)
CLR(rCX)
LB(_nfats(SB), rCL)
MUL(rCX)
OR(rAX, rAX)
JNE _fatszok /* zero? it's FAT32 */
LW(_fatsz32hi(SB), rBX)
IMUL(rCX, rBX)
LW(_fatsz32lo(SB), rAX)
MUL(rCX)
ADD(rBX, rDX)
_fatszok:
LW(_nhiddenlo(SB), rCX)
ADD(rCX, rAX)
LW(_nhiddenhi(SB), rCX)
ADC(rCX, rDX)
CLR(rBX)
LW(_nresrv(SB), rCX)
ADD(rCX, rAX)
ADC(rDX, rBX)
SW(rAX, _volid(SB)) /* save for later use */
SW(rBX, _volid+2(SB))
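/*
 * a hedged C sketch of the arithmetic above (names illustrative): the
 * value stashed in the _volid slots is the first sector past the
 * reserved area and the FATs, i.e. where the root directory begins:
 *
 *	typedef unsigned long long uvlong;
 *	uvlong fats, rootstart;
 *
 *	fats = (fatsize != 0 ? fatsize : fatsz32) * nfats;	// FAT16 vs FAT32
 *	rootstart = nhidden + nresrv + fats;
 */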
PUSHR(rBP)
LW(_sectsize(SB), rCX)
SUB(rCX, rSP)
MW(rSP, rBP)
MW(rSP, rSI)
_nextsect:
CALL16(readsect16(SB))
LW(_sectsize(SB), rCX)
SHRI(5, rCX)
_nextdir:
PUSHR(rCX)
PUSHR(rSI) /* save for later if it matches */
LWI(bootname(SB), rDI)
LW(bootnamelen(SB), rCX)
CLD
REP
CMPSB
POPR(rSI)
POPR(rCX)
JEQ _found
ADDI(0x20, rSI)
LOOP _nextdir
ADDI(1, rAX)
ADC(rCX, rBX)
JMP _nextsect
_found:
CLR(rBX)
LW(_rootsize(SB), rAX) /* calculate and save Xrootsz */
LWI(0x20, rCX)
MUL(rCX)
LW(_sectsize(SB), rCX)
DEC(rCX)
ADD(rCX, rAX)
ADC(rBX, rDX)
INC(rCX)
DIV(rCX)
PUSHR(rAX) /* Xrootsz */
CLR(rCX)
LXW(0x1a, xSI, rAX) /* start cluster low */
LXW(0x14, xSI, rBX) /* start cluster high */
SUBI(2, rAX) /* cluster -= 2 */
SBB(rCX, rBX)
LB(_clustsize(SB), rCL) /* convert to sectors (AX:DX) */
IMUL(rCX, rBX)
MUL(rCX)
ADD(rBX, rDX)
LW(_volid(SB), rCX) /* Xrootlo */
ADD(rCX, rAX)
LW(_volid+2(SB), rCX) /* Xroothi */
ADC(rCX, rDX)
CLR(rBX)
POPR(rCX) /* Xrootsz */
ADD(rCX, rAX)
ADC(rBX, rDX)
PUSHR(rAX) /* calculate how many sectors to read (CX) */
PUSHR(rDX)
LXW(0x1c, xSI, rAX)
LXW(0x1e, xSI, rDX)
LW(_sectsize(SB), rCX)
DEC(rCX)
ADD(rCX, rAX)
ADC(rBX, rDX)
INC(rCX)
DIV(rCX)
MW(rAX, rCX)
POPR(rBX)
POPR(rAX)
LWI(RELOC, rSI)
PUSHR(rSI) /* entry */
_loadnext:
CALL16(readsect16(SB))
LW(_sectsize(SB), rDX)
ADD(rDX, rSI)
CLR(rDX)
ADDI(1, rAX)
ADC(rDX, rBX)
LOOP _loadnext
LWI(ok(SB), rSI)
CALL16(print16(SB))
LB(_driveno(SB), rDL)
CLI
RET
TEXT print16(SB), $0
PUSHA
CLR(rBX)
_printnext:
LODSB
ORB(rAL, rAL)
JEQ _printret
LBI(0x0E, rAH)
BIOSCALL(0x10)
JMP _printnext
_printret:
POPA
RET
/*
* in:
* AX:BX lba32,
* 0000:SI buffer
*/
TEXT readsect16(SB), $0
_retry:
PUSHA
CLR(rDX)
PUSHR(rDX) /* qword lba */
PUSHR(rDX)
PUSHR(rBX)
PUSHR(rAX)
PUSHR(rDX) /* dword buffer */
PUSHR(rSI)
INC(rDX)
PUSHR(rDX) /* word # of sectors */
PUSHI(0x0010) /* byte reserved, byte packet size */
MW(rSP, rSI)
LB(_driveno(SB), rDL)
LWI(0x4200, rAX)
BIOSCALL(0x13)
JCC _readok
LWI((0x0E00|'!'), rAX)
BIOSCALL(0x10)
ADDI(0x10, rSP)
POPA
JMP _retry
_readok:
LWI((0x0E00|'.'), rAX)
BIOSCALL(0x10)
ADDI(0x10, rSP)
POPA
RET
TEXT bootnamelen(SB), $0
WORD $8
TEXT bootname(SB), $0
BYTE $'9'; BYTE $'B'; BYTE $'O'; BYTE $'O';
BYTE $'T'; BYTE $'F'; BYTE $'A'; BYTE $'T';
BYTE $0
TEXT hello(SB), $0
BYTE $'p'; BYTE $'b'; BYTE $'s'; BYTE $0
TEXT ok(SB), $0
BYTE $'o'; BYTE $'k'; BYTE $'\r'; BYTE $'\n';
BYTE $0
|
9front/9front
| 1,092
|
sys/src/boot/pc/a20.s
|
#include "x16.h"
#undef ORB
TEXT a20test(SB), $0
LONG $1234567
TEXT a20check(SB), $0
MOVL $10000, CX
_loop:
LEAL a20test(SB), AX
MOVL (AX), BX
ADDL $12345, BX
MOVL BX, (AX)
ORL $(1<<20), AX
MOVL (AX), AX
CMPL AX, BX
JNZ _done
LOOP _loop
RET
_done:
/* return directly to caller of a20() */
ADDL $4, SP
XORL AX, AX
RET
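/*
 * what a20check probes, as a hedged C sketch: change a low word and
 * read it back through its 1MB alias; if the alias differs, the A20
 * gate is open (the retry loop and the early return to a20's caller
 * are handled in the assembly above):
 *
 *	extern unsigned long a20test;
 *
 *	int
 *	a20enabled(void)
 *	{
 *		volatile unsigned long *lo = &a20test;
 *		volatile unsigned long *hi = (void*)((unsigned long)lo | 1<<20);
 *		unsigned long v;
 *
 *		v = *lo + 12345;
 *		*lo = v;
 *		return *hi != v;	// differ => no wraparound => A20 enabled
 *	}
 */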
TEXT a20(SB), $0
CALL a20check(SB)
/* try bios */
CALL rmode16(SB)
STI
LWI(0x2401, rAX)
BIOSCALL(0x15)
CALL16(pmode32(SB))
CALL a20check(SB)
/* try fast a20 */
MOVL $0x92, DX
INB
TESTB $2, AL
JNZ _no92
ORB $2, AL
ANDB $0xfe, AL
OUTB
_no92:
CALL a20check(SB)
/* try keyboard */
CALL kbdempty(SB)
MOVL $0x64, DX
MOVB $0xd1, AL /* command write */
OUTB
CALL kbdempty(SB)
MOVL $0x60, DX
MOVB $0xdf, AL /* a20 on */
OUTB
CALL kbdempty(SB)
MOVL $0x64, DX
MOVB $0xff, AL /* magic */
OUTB
CALL kbdempty(SB)
CALL a20check(SB)
/* fail */
XORL AX, AX
DECL AX
RET
TEXT kbdempty(SB), $0
_kbdwait:
MOVL $0x64, DX
INB
TESTB $1, AL
JZ _kbdempty
MOVL $0x60, DX
INB
JMP _kbdwait
_kbdempty:
TESTB $2, AL
JNZ _kbdwait
RET
|
9front/9front
| 5,369
|
sys/src/libsec/386/md5block.s
|
/*
* rfc1321 requires that I include this. The code is new. The constants
* all come from the rfc (hence the copyright). We trade a table for the
* macros in rfc. The total size is a lot less. -- presotto
*
* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
* rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD5 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD5 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*/
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
/*
* BP is the data pointer; each step does:
* a += FN(B,C,D);
* a += x[off] + t[i];
* a = (a << SH) | (a >> (32 - SH));
* a += b;
*/
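/*
 * one step of the transform described above, as a hedged C reference
 * (fn is one of FN1..FN4 defined below, x the message word selected by
 * off, t the rfc1321 constant, s the per-step rotation):
 *
 *	typedef unsigned int u32int;
 *
 *	u32int
 *	md5step(u32int a, u32int b, u32int c, u32int d, u32int x,
 *		u32int t, int s, u32int (*fn)(u32int, u32int, u32int))
 *	{
 *		a += fn(b, c, d) + x + t;
 *		a = (a << s) | (a >> (32 - s));
 *		return a + b;
 *	}
 */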
#define BODY1(off,V,FN,SH,A,B,C,D)\
FN(B,C,D)\
LEAL V(A)(DI*1),A;\
ADDL (off)(BP),A;\
ROLL $SH,A;\
ADDL B,A;\
#define BODY(off,V,FN,SH,A,B,C,D)\
FN(B,C,D)\
LEAL V(A)(DI*1),A;\
ADDL (off)(BP),A;\
ROLL $SH,A;\
ADDL B,A;\
/*
* fn1 = ((c ^ d) & b) ^ d
*/
#define FN1(B,C,D)\
MOVL C,DI;\
XORL D,DI;\
ANDL B,DI;\
XORL D,DI;\
/*
* fn2 = ((b ^ c) & d) ^ c;
*/
#define FN2(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
ANDL D,DI;\
XORL C,DI;\
/*
* fn3 = b ^ c ^ d;
*/
#define FN3(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
XORL D,DI;\
/*
* fn4 = c ^ (b | ~d);
*/
#define FN4(B,C,D)\
MOVL D,DI;\
XORL $-1,DI;\
ORL B,DI;\
XORL C,DI;\
#define DATA 0
#define LEN 4
#define STATE 8
#define EDATA (-4)
TEXT _md5block+0(SB),$4
MOVL data+DATA(FP),AX
ADDL len+LEN(FP),AX
MOVL AX,edata+EDATA(SP)
MOVL data+DATA(FP),BP
mainloop:
MOVL state+STATE(FP),SI
MOVL (SI),AX
MOVL 4(SI),BX
MOVL 8(SI),CX
MOVL 12(SI),DX
BODY1( 0*4,0xd76aa478,FN1,S11,AX,BX,CX,DX)
BODY1( 1*4,0xe8c7b756,FN1,S12,DX,AX,BX,CX)
BODY1( 2*4,0x242070db,FN1,S13,CX,DX,AX,BX)
BODY1( 3*4,0xc1bdceee,FN1,S14,BX,CX,DX,AX)
BODY1( 4*4,0xf57c0faf,FN1,S11,AX,BX,CX,DX)
BODY1( 5*4,0x4787c62a,FN1,S12,DX,AX,BX,CX)
BODY1( 6*4,0xa8304613,FN1,S13,CX,DX,AX,BX)
BODY1( 7*4,0xfd469501,FN1,S14,BX,CX,DX,AX)
BODY1( 8*4,0x698098d8,FN1,S11,AX,BX,CX,DX)
BODY1( 9*4,0x8b44f7af,FN1,S12,DX,AX,BX,CX)
BODY1(10*4,0xffff5bb1,FN1,S13,CX,DX,AX,BX)
BODY1(11*4,0x895cd7be,FN1,S14,BX,CX,DX,AX)
BODY1(12*4,0x6b901122,FN1,S11,AX,BX,CX,DX)
BODY1(13*4,0xfd987193,FN1,S12,DX,AX,BX,CX)
BODY1(14*4,0xa679438e,FN1,S13,CX,DX,AX,BX)
BODY1(15*4,0x49b40821,FN1,S14,BX,CX,DX,AX)
BODY( 1*4,0xf61e2562,FN2,S21,AX,BX,CX,DX)
BODY( 6*4,0xc040b340,FN2,S22,DX,AX,BX,CX)
BODY(11*4,0x265e5a51,FN2,S23,CX,DX,AX,BX)
BODY( 0*4,0xe9b6c7aa,FN2,S24,BX,CX,DX,AX)
BODY( 5*4,0xd62f105d,FN2,S21,AX,BX,CX,DX)
BODY(10*4,0x02441453,FN2,S22,DX,AX,BX,CX)
BODY(15*4,0xd8a1e681,FN2,S23,CX,DX,AX,BX)
BODY( 4*4,0xe7d3fbc8,FN2,S24,BX,CX,DX,AX)
BODY( 9*4,0x21e1cde6,FN2,S21,AX,BX,CX,DX)
BODY(14*4,0xc33707d6,FN2,S22,DX,AX,BX,CX)
BODY( 3*4,0xf4d50d87,FN2,S23,CX,DX,AX,BX)
BODY( 8*4,0x455a14ed,FN2,S24,BX,CX,DX,AX)
BODY(13*4,0xa9e3e905,FN2,S21,AX,BX,CX,DX)
BODY( 2*4,0xfcefa3f8,FN2,S22,DX,AX,BX,CX)
BODY( 7*4,0x676f02d9,FN2,S23,CX,DX,AX,BX)
BODY(12*4,0x8d2a4c8a,FN2,S24,BX,CX,DX,AX)
BODY( 5*4,0xfffa3942,FN3,S31,AX,BX,CX,DX)
BODY( 8*4,0x8771f681,FN3,S32,DX,AX,BX,CX)
BODY(11*4,0x6d9d6122,FN3,S33,CX,DX,AX,BX)
BODY(14*4,0xfde5380c,FN3,S34,BX,CX,DX,AX)
BODY( 1*4,0xa4beea44,FN3,S31,AX,BX,CX,DX)
BODY( 4*4,0x4bdecfa9,FN3,S32,DX,AX,BX,CX)
BODY( 7*4,0xf6bb4b60,FN3,S33,CX,DX,AX,BX)
BODY(10*4,0xbebfbc70,FN3,S34,BX,CX,DX,AX)
BODY(13*4,0x289b7ec6,FN3,S31,AX,BX,CX,DX)
BODY( 0*4,0xeaa127fa,FN3,S32,DX,AX,BX,CX)
BODY( 3*4,0xd4ef3085,FN3,S33,CX,DX,AX,BX)
BODY( 6*4,0x04881d05,FN3,S34,BX,CX,DX,AX)
BODY( 9*4,0xd9d4d039,FN3,S31,AX,BX,CX,DX)
BODY(12*4,0xe6db99e5,FN3,S32,DX,AX,BX,CX)
BODY(15*4,0x1fa27cf8,FN3,S33,CX,DX,AX,BX)
BODY( 2*4,0xc4ac5665,FN3,S34,BX,CX,DX,AX)
BODY( 0*4,0xf4292244,FN4,S41,AX,BX,CX,DX)
BODY( 7*4,0x432aff97,FN4,S42,DX,AX,BX,CX)
BODY(14*4,0xab9423a7,FN4,S43,CX,DX,AX,BX)
BODY( 5*4,0xfc93a039,FN4,S44,BX,CX,DX,AX)
BODY(12*4,0x655b59c3,FN4,S41,AX,BX,CX,DX)
BODY( 3*4,0x8f0ccc92,FN4,S42,DX,AX,BX,CX)
BODY(10*4,0xffeff47d,FN4,S43,CX,DX,AX,BX)
BODY( 1*4,0x85845dd1,FN4,S44,BX,CX,DX,AX)
BODY( 8*4,0x6fa87e4f,FN4,S41,AX,BX,CX,DX)
BODY(15*4,0xfe2ce6e0,FN4,S42,DX,AX,BX,CX)
BODY( 6*4,0xa3014314,FN4,S43,CX,DX,AX,BX)
BODY(13*4,0x4e0811a1,FN4,S44,BX,CX,DX,AX)
BODY( 4*4,0xf7537e82,FN4,S41,AX,BX,CX,DX)
BODY(11*4,0xbd3af235,FN4,S42,DX,AX,BX,CX)
BODY( 2*4,0x2ad7d2bb,FN4,S43,CX,DX,AX,BX)
BODY( 9*4,0xeb86d391,FN4,S44,BX,CX,DX,AX)
ADDL $(16*4),BP
MOVL state+STATE(FP),DI
ADDL AX,0(DI)
ADDL BX,4(DI)
ADDL CX,8(DI)
ADDL DX,12(DI)
MOVL edata+EDATA(SP),DI
CMPL BP,DI
JCS mainloop
RET
END
|
9front/9front
| 3,682
|
sys/src/libsec/386/sha1block.s
|
TEXT _sha1block+0(SB),$352
/* x = (wp[off-3] ^ wp[off-8] ^ wp[off-14] ^ wp[off-16]) <<< 1;
* wp[off] = x;
* x += A <<< 5;
* E += 0xca62c1d6 + x;
* x = FN(B,C,D);
* E += x;
* B >>> 2
*/
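/*
 * the same step as a hedged C reference (v is the per-round constant,
 * fn one of the boolean functions defined below, rotl an assumed
 * 32-bit rotate-left helper):
 *
 *	typedef unsigned int u32int;
 *	#define rotl(x, n)	(((x) << (n)) | ((x) >> (32 - (n))))
 *
 *	u32int
 *	sha1step(u32int *wp, int off, u32int a, u32int *b, u32int c,
 *		u32int d, u32int e, u32int v, u32int (*fn)(u32int, u32int, u32int))
 *	{
 *		u32int x;
 *
 *		x = rotl(wp[off-3] ^ wp[off-8] ^ wp[off-14] ^ wp[off-16], 1);
 *		wp[off] = x;
 *		e += v + x + rotl(a, 5) + fn(*b, c, d);
 *		*b = rotl(*b, 30);	// B >>> 2
 *		return e;
 *	}
 */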
#define BSWAPDI BYTE $0x0f; BYTE $0xcf;
#define BODY(off,FN,V,A,B,C,D,E)\
MOVL (off-64)(BP),DI;\
XORL (off-56)(BP),DI;\
XORL (off-32)(BP),DI;\
XORL (off-12)(BP),DI;\
ROLL $1,DI;\
MOVL DI,off(BP);\
LEAL V(DI)(E*1),E;\
MOVL A,DI;\
ROLL $5,DI;\
ADDL DI,E;\
FN(B,C,D)\
ADDL DI,E;\
RORL $2,B;\
#define BODY0(off,FN,V,A,B,C,D,E)\
MOVL off(BX),DI;\
BSWAPDI;\
MOVL DI,off(BP);\
LEAL V(DI)(E*1),E;\
MOVL A,DI;\
ROLL $5,DI;\
ADDL DI,E;\
FN(B,C,D)\
ADDL DI,E;\
RORL $2,B;\
/*
* fn1 = (((C^D)&B)^D);
*/
#define FN1(B,C,D)\
MOVL C,DI;\
XORL D,DI;\
ANDL B,DI;\
XORL D,DI;\
/*
* fn24 = B ^ C ^ D
*/
#define FN24(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
XORL D,DI;\
/*
* fn3 = ((B ^ C) & (D ^= B)) ^ B
* D ^= B to restore D
*/
#define FN3(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
XORL B,D;\
ANDL D,DI;\
XORL B,DI;\
XORL B,D;\
/*
* stack offsets
* void sha1block(uchar *DATA, int LEN, ulong *STATE)
*/
#define DATA 0
#define LEN 4
#define STATE 8
/*
* stack offsets for locals
* ulong w[80];
* uchar *edata;
* ulong *w15, *w40, *w60, *w80;
* register local
* ulong *wp = BP
* ulong a = eax, b = ebx, c = ecx, d = edx, e = esi
* ulong tmp = edi
*/
#define WARRAY (-4-(80*4))
#define TMP1 (-8-(80*4))
#define TMP2 (-12-(80*4))
#define W15 (-16-(80*4))
#define W40 (-20-(80*4))
#define W60 (-24-(80*4))
#define W80 (-28-(80*4))
#define EDATA (-32-(80*4))
MOVL data+DATA(FP),AX
ADDL len+LEN(FP),AX
MOVL AX,edata+EDATA(SP)
LEAL aw15+(WARRAY+15*4)(SP),DI
MOVL DI,w15+W15(SP)
LEAL aw40+(WARRAY+40*4)(SP),DX
MOVL DX,w40+W40(SP)
LEAL aw60+(WARRAY+60*4)(SP),CX
MOVL CX,w60+W60(SP)
LEAL aw80+(WARRAY+80*4)(SP),DI
MOVL DI,w80+W80(SP)
mainloop:
LEAL warray+WARRAY(SP),BP
MOVL state+STATE(FP),DI
MOVL (DI),AX
MOVL 4(DI),BX
MOVL BX,tmp1+TMP1(SP)
MOVL 8(DI),CX
MOVL 12(DI),DX
MOVL 16(DI),SI
MOVL data+DATA(FP),BX
loop1:
BODY0(0,FN1,0x5a827999,AX,tmp1+TMP1(SP),CX,DX,SI)
MOVL SI,tmp2+TMP2(SP)
BODY0(4,FN1,0x5a827999,SI,AX,tmp1+TMP1(SP),CX,DX)
MOVL tmp1+TMP1(SP),SI
BODY0(8,FN1,0x5a827999,DX,tmp2+TMP2(SP),AX,SI,CX)
BODY0(12,FN1,0x5a827999,CX,DX,tmp2+TMP2(SP),AX,SI)
MOVL SI,tmp1+TMP1(SP)
BODY0(16,FN1,0x5a827999,SI,CX,DX,tmp2+TMP2(SP),AX)
MOVL tmp2+TMP2(SP),SI
ADDL $20,BX
ADDL $20,BP
CMPL BP,w15+W15(SP)
JCS loop1
BODY0(0,FN1,0x5a827999,AX,tmp1+TMP1(SP),CX,DX,SI)
ADDL $4,BX
MOVL BX,data+DATA(FP)
MOVL tmp1+TMP1(SP),BX
BODY(4,FN1,0x5a827999,SI,AX,BX,CX,DX)
BODY(8,FN1,0x5a827999,DX,SI,AX,BX,CX)
BODY(12,FN1,0x5a827999,CX,DX,SI,AX,BX)
BODY(16,FN1,0x5a827999,BX,CX,DX,SI,AX)
ADDL $20,BP
loop2:
BODY(0,FN24,0x6ed9eba1,AX,BX,CX,DX,SI)
BODY(4,FN24,0x6ed9eba1,SI,AX,BX,CX,DX)
BODY(8,FN24,0x6ed9eba1,DX,SI,AX,BX,CX)
BODY(12,FN24,0x6ed9eba1,CX,DX,SI,AX,BX)
BODY(16,FN24,0x6ed9eba1,BX,CX,DX,SI,AX)
ADDL $20,BP
CMPL BP,w40+W40(SP)
JCS loop2
loop3:
BODY(0,FN3,0x8f1bbcdc,AX,BX,CX,DX,SI)
BODY(4,FN3,0x8f1bbcdc,SI,AX,BX,CX,DX)
BODY(8,FN3,0x8f1bbcdc,DX,SI,AX,BX,CX)
BODY(12,FN3,0x8f1bbcdc,CX,DX,SI,AX,BX)
BODY(16,FN3,0x8f1bbcdc,BX,CX,DX,SI,AX)
ADDL $20,BP
CMPL BP,w60+W60(SP)
JCS loop3
loop4:
BODY(0,FN24,0xca62c1d6,AX,BX,CX,DX,SI)
BODY(4,FN24,0xca62c1d6,SI,AX,BX,CX,DX)
BODY(8,FN24,0xca62c1d6,DX,SI,AX,BX,CX)
BODY(12,FN24,0xca62c1d6,CX,DX,SI,AX,BX)
BODY(16,FN24,0xca62c1d6,BX,CX,DX,SI,AX)
ADDL $20,BP
CMPL BP,w80+W80(SP)
JCS loop4
MOVL state+STATE(FP),DI
ADDL AX,0(DI)
ADDL BX,4(DI)
ADDL CX,8(DI)
ADDL DX,12(DI)
ADDL SI,16(DI)
MOVL edata+EDATA(SP),DI
CMPL data+DATA(FP),DI
JCS mainloop
RET
END
|
9front/9front
| 5,261
|
sys/src/libsec/amd64/md5block.s
|
/*
* rfc1321 requires that I include this. The code is new. The constants
* all come from the rfc (hence the copyright). We trade a table for the
* macros in rfc. The total size is a lot less. -- presotto
*
* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
* rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD5 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD5 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*/
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
/*
* BP is the data pointer; each step does:
* a += FN(B,C,D);
* a += x[off] + t[i];
* a = (a << SH) | (a >> (32 - SH));
* a += b;
*/
#define BODY1(off,V,FN,SH,A,B,C,D)\
FN(B,C,D)\
LEAL V(A)(DI*1),A;\
ADDL (off)(BP),A;\
ROLL $SH,A;\
ADDL B,A;\
#define BODY(off,V,FN,SH,A,B,C,D)\
FN(B,C,D)\
LEAL V(A)(DI*1),A;\
ADDL (off)(BP),A;\
ROLL $SH,A;\
ADDL B,A;\
/*
* fn1 = ((c ^ d) & b) ^ d
*/
#define FN1(B,C,D)\
MOVL C,DI;\
XORL D,DI;\
ANDL B,DI;\
XORL D,DI;\
/*
* fn2 = ((b ^ c) & d) ^ c;
*/
#define FN2(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
ANDL D,DI;\
XORL C,DI;\
/*
* fn3 = b ^ c ^ d;
*/
#define FN3(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
XORL D,DI;\
/*
* fn4 = c ^ (b | ~d);
*/
#define FN4(B,C,D)\
MOVL D,DI;\
XORL $-1,DI;\
ORL B,DI;\
XORL C,DI;\
#define LEN 8
#define STATE 16
TEXT _md5block+0(SB), $0
MOVQ RARG,R8
MOVLQZX len+LEN(FP),BX
ADDQ BX,R8
mainloop:
MOVQ state+STATE(FP),SI
MOVL (SI),AX
MOVL 4(SI),BX
MOVL 8(SI),CX
MOVL 12(SI),DX
BODY1( 0*4,0xd76aa478,FN1,S11,AX,BX,CX,DX)
BODY1( 1*4,0xe8c7b756,FN1,S12,DX,AX,BX,CX)
BODY1( 2*4,0x242070db,FN1,S13,CX,DX,AX,BX)
BODY1( 3*4,0xc1bdceee,FN1,S14,BX,CX,DX,AX)
BODY1( 4*4,0xf57c0faf,FN1,S11,AX,BX,CX,DX)
BODY1( 5*4,0x4787c62a,FN1,S12,DX,AX,BX,CX)
BODY1( 6*4,0xa8304613,FN1,S13,CX,DX,AX,BX)
BODY1( 7*4,0xfd469501,FN1,S14,BX,CX,DX,AX)
BODY1( 8*4,0x698098d8,FN1,S11,AX,BX,CX,DX)
BODY1( 9*4,0x8b44f7af,FN1,S12,DX,AX,BX,CX)
BODY1(10*4,0xffff5bb1,FN1,S13,CX,DX,AX,BX)
BODY1(11*4,0x895cd7be,FN1,S14,BX,CX,DX,AX)
BODY1(12*4,0x6b901122,FN1,S11,AX,BX,CX,DX)
BODY1(13*4,0xfd987193,FN1,S12,DX,AX,BX,CX)
BODY1(14*4,0xa679438e,FN1,S13,CX,DX,AX,BX)
BODY1(15*4,0x49b40821,FN1,S14,BX,CX,DX,AX)
BODY( 1*4,0xf61e2562,FN2,S21,AX,BX,CX,DX)
BODY( 6*4,0xc040b340,FN2,S22,DX,AX,BX,CX)
BODY(11*4,0x265e5a51,FN2,S23,CX,DX,AX,BX)
BODY( 0*4,0xe9b6c7aa,FN2,S24,BX,CX,DX,AX)
BODY( 5*4,0xd62f105d,FN2,S21,AX,BX,CX,DX)
BODY(10*4,0x02441453,FN2,S22,DX,AX,BX,CX)
BODY(15*4,0xd8a1e681,FN2,S23,CX,DX,AX,BX)
BODY( 4*4,0xe7d3fbc8,FN2,S24,BX,CX,DX,AX)
BODY( 9*4,0x21e1cde6,FN2,S21,AX,BX,CX,DX)
BODY(14*4,0xc33707d6,FN2,S22,DX,AX,BX,CX)
BODY( 3*4,0xf4d50d87,FN2,S23,CX,DX,AX,BX)
BODY( 8*4,0x455a14ed,FN2,S24,BX,CX,DX,AX)
BODY(13*4,0xa9e3e905,FN2,S21,AX,BX,CX,DX)
BODY( 2*4,0xfcefa3f8,FN2,S22,DX,AX,BX,CX)
BODY( 7*4,0x676f02d9,FN2,S23,CX,DX,AX,BX)
BODY(12*4,0x8d2a4c8a,FN2,S24,BX,CX,DX,AX)
BODY( 5*4,0xfffa3942,FN3,S31,AX,BX,CX,DX)
BODY( 8*4,0x8771f681,FN3,S32,DX,AX,BX,CX)
BODY(11*4,0x6d9d6122,FN3,S33,CX,DX,AX,BX)
BODY(14*4,0xfde5380c,FN3,S34,BX,CX,DX,AX)
BODY( 1*4,0xa4beea44,FN3,S31,AX,BX,CX,DX)
BODY( 4*4,0x4bdecfa9,FN3,S32,DX,AX,BX,CX)
BODY( 7*4,0xf6bb4b60,FN3,S33,CX,DX,AX,BX)
BODY(10*4,0xbebfbc70,FN3,S34,BX,CX,DX,AX)
BODY(13*4,0x289b7ec6,FN3,S31,AX,BX,CX,DX)
BODY( 0*4,0xeaa127fa,FN3,S32,DX,AX,BX,CX)
BODY( 3*4,0xd4ef3085,FN3,S33,CX,DX,AX,BX)
BODY( 6*4,0x04881d05,FN3,S34,BX,CX,DX,AX)
BODY( 9*4,0xd9d4d039,FN3,S31,AX,BX,CX,DX)
BODY(12*4,0xe6db99e5,FN3,S32,DX,AX,BX,CX)
BODY(15*4,0x1fa27cf8,FN3,S33,CX,DX,AX,BX)
BODY( 2*4,0xc4ac5665,FN3,S34,BX,CX,DX,AX)
BODY( 0*4,0xf4292244,FN4,S41,AX,BX,CX,DX)
BODY( 7*4,0x432aff97,FN4,S42,DX,AX,BX,CX)
BODY(14*4,0xab9423a7,FN4,S43,CX,DX,AX,BX)
BODY( 5*4,0xfc93a039,FN4,S44,BX,CX,DX,AX)
BODY(12*4,0x655b59c3,FN4,S41,AX,BX,CX,DX)
BODY( 3*4,0x8f0ccc92,FN4,S42,DX,AX,BX,CX)
BODY(10*4,0xffeff47d,FN4,S43,CX,DX,AX,BX)
BODY( 1*4,0x85845dd1,FN4,S44,BX,CX,DX,AX)
BODY( 8*4,0x6fa87e4f,FN4,S41,AX,BX,CX,DX)
BODY(15*4,0xfe2ce6e0,FN4,S42,DX,AX,BX,CX)
BODY( 6*4,0xa3014314,FN4,S43,CX,DX,AX,BX)
BODY(13*4,0x4e0811a1,FN4,S44,BX,CX,DX,AX)
BODY( 4*4,0xf7537e82,FN4,S41,AX,BX,CX,DX)
BODY(11*4,0xbd3af235,FN4,S42,DX,AX,BX,CX)
BODY( 2*4,0x2ad7d2bb,FN4,S43,CX,DX,AX,BX)
BODY( 9*4,0xeb86d391,FN4,S44,BX,CX,DX,AX)
ADDQ $(16*4),BP
MOVQ state+STATE(FP),DI
ADDL AX,0(DI)
ADDL BX,4(DI)
ADDL CX,8(DI)
ADDL DX,12(DI)
CMPQ BP,R8
JCS mainloop
RET
|
9front/9front
| 3,695
|
sys/src/libsec/amd64/sha1block.s
|
/* x = (wp[off-3] ^ wp[off-8] ^ wp[off-14] ^ wp[off-16]) <<< 1;
* wp[off] = x;
* x += A <<< 5;
* E += 0xca62c1d6 + x;
* x = FN(B,C,D);
* E += x;
* B >>> 2
*/
#define BSWAPDI BYTE $0x0f; BYTE $0xcf;
#define BODY(off,FN,V,A,B,C,D,E)\
MOVL (off-64)(BP),DI;\
XORL (off-56)(BP),DI;\
XORL (off-32)(BP),DI;\
XORL (off-12)(BP),DI;\
ROLL $1,DI;\
MOVL DI,off(BP);\
LEAL V(DI)(E*1),E;\
MOVL A,DI;\
ROLL $5,DI;\
ADDL DI,E;\
FN(B,C,D)\
ADDL DI,E;\
RORL $2,B;\
#define BODY0(off,FN,V,A,B,C,D,E)\
MOVLQZX off(BX),DI;\
BSWAPDI;\
MOVL DI,off(BP);\
LEAL V(DI)(E*1),E;\
MOVL A,DI;\
ROLL $5,DI;\
ADDL DI,E;\
FN(B,C,D)\
ADDL DI,E;\
RORL $2,B;\
/*
* fn1 = (((C^D)&B)^D);
*/
#define FN1(B,C,D)\
MOVL C,DI;\
XORL D,DI;\
ANDL B,DI;\
XORL D,DI;\
/*
* fn24 = B ^ C ^ D
*/
#define FN24(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
XORL D,DI;\
/*
* fn3 = ((B ^ C) & (D ^= B)) ^ B
* D ^= B to restore D
*/
#define FN3(B,C,D)\
MOVL B,DI;\
XORL C,DI;\
XORL B,D;\
ANDL D,DI;\
XORL B,DI;\
XORL B,D;\
/*
* stack offsets
* void sha1block(uchar *DATA, int LEN, ulong *STATE)
*/
#define DATA 0
#define LEN 8
#define STATE 16
/*
* stack offsets for locals
* ulong w[80];
* uchar *edata;
* ulong *w15, *w40, *w60, *w80;
* register local
* ulong *wp = BP
* ulong a = eax, b = ebx, c = ecx, d = edx, e = esi
* ulong tmp = edi
*/
#define Rpdata R8
#define WARRAY (-8-(80*4))
#define TMP1 (-16-(80*4))
#define TMP2 (-24-(80*4))
#define W15 (-32-(80*4))
#define W40 (-40-(80*4))
#define W60 (-48-(80*4))
#define W80 (-56-(80*4))
#define EDATA (-64-(80*4))
TEXT _sha1block+0(SB),$384
MOVQ RARG, Rpdata
MOVLQZX len+LEN(FP),BX
ADDQ BX, RARG
MOVQ RARG,edata+EDATA(SP)
LEAQ aw15+(WARRAY+15*4)(SP),DI
MOVQ DI,w15+W15(SP)
LEAQ aw40+(WARRAY+40*4)(SP),DX
MOVQ DX,w40+W40(SP)
LEAQ aw60+(WARRAY+60*4)(SP),CX
MOVQ CX,w60+W60(SP)
LEAQ aw80+(WARRAY+80*4)(SP),DI
MOVQ DI,w80+W80(SP)
mainloop:
LEAQ warray+WARRAY(SP),BP
MOVQ state+STATE(FP),DI
MOVL (DI),AX
MOVL 4(DI),BX
MOVL BX,tmp1+TMP1(SP)
MOVL 8(DI),CX
MOVL 12(DI),DX
MOVL 16(DI),SI
MOVQ Rpdata,BX
loop1:
BODY0(0,FN1,0x5a827999,AX,tmp1+TMP1(SP),CX,DX,SI)
MOVL SI,tmp2+TMP2(SP)
BODY0(4,FN1,0x5a827999,SI,AX,tmp1+TMP1(SP),CX,DX)
MOVL tmp1+TMP1(SP),SI
BODY0(8,FN1,0x5a827999,DX,tmp2+TMP2(SP),AX,SI,CX)
BODY0(12,FN1,0x5a827999,CX,DX,tmp2+TMP2(SP),AX,SI)
MOVL SI,tmp1+TMP1(SP)
BODY0(16,FN1,0x5a827999,SI,CX,DX,tmp2+TMP2(SP),AX)
MOVL tmp2+TMP2(SP),SI
ADDQ $20,BX
ADDQ $20,BP
CMPQ BP,w15+W15(SP)
JCS loop1
BODY0(0,FN1,0x5a827999,AX,tmp1+TMP1(SP),CX,DX,SI)
ADDQ $4,BX
MOVQ BX,R8
MOVQ tmp1+TMP1(SP),BX
BODY(4,FN1,0x5a827999,SI,AX,BX,CX,DX)
BODY(8,FN1,0x5a827999,DX,SI,AX,BX,CX)
BODY(12,FN1,0x5a827999,CX,DX,SI,AX,BX)
BODY(16,FN1,0x5a827999,BX,CX,DX,SI,AX)
ADDQ $20,BP
loop2:
BODY(0,FN24,0x6ed9eba1,AX,BX,CX,DX,SI)
BODY(4,FN24,0x6ed9eba1,SI,AX,BX,CX,DX)
BODY(8,FN24,0x6ed9eba1,DX,SI,AX,BX,CX)
BODY(12,FN24,0x6ed9eba1,CX,DX,SI,AX,BX)
BODY(16,FN24,0x6ed9eba1,BX,CX,DX,SI,AX)
ADDQ $20,BP
CMPQ BP,w40+W40(SP)
JCS loop2
loop3:
BODY(0,FN3,0x8f1bbcdc,AX,BX,CX,DX,SI)
BODY(4,FN3,0x8f1bbcdc,SI,AX,BX,CX,DX)
BODY(8,FN3,0x8f1bbcdc,DX,SI,AX,BX,CX)
BODY(12,FN3,0x8f1bbcdc,CX,DX,SI,AX,BX)
BODY(16,FN3,0x8f1bbcdc,BX,CX,DX,SI,AX)
ADDQ $20,BP
CMPQ BP,w60+W60(SP)
JCS loop3
loop4:
BODY(0,FN24,0xca62c1d6,AX,BX,CX,DX,SI)
BODY(4,FN24,0xca62c1d6,SI,AX,BX,CX,DX)
BODY(8,FN24,0xca62c1d6,DX,SI,AX,BX,CX)
BODY(12,FN24,0xca62c1d6,CX,DX,SI,AX,BX)
BODY(16,FN24,0xca62c1d6,BX,CX,DX,SI,AX)
ADDQ $20,BP
CMPQ BP,w80+W80(SP)
JCS loop4
MOVQ state+STATE(FP),DI
ADDL AX,0(DI)
ADDL BX,4(DI)
ADDL CX,8(DI)
ADDL DX,12(DI)
ADDL SI,16(DI)
MOVQ edata+EDATA(SP),DI
CMPQ Rpdata,DI
JCS mainloop
RET
END
|
9front/9front
| 1,285
|
sys/src/libsec/amd64/chachablock.s
|
#define ROTATE(n, v1, v2) \
MOVO v1, v2; \
PSLLL $(n), v1; \
PSRLL $(32-n), v2; \
POR v1, v2
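/*
 * the SSE code below runs the ChaCha quarter round on all four columns
 * at once (X0..X3 hold the four rows of the state; the PSHUFLs between
 * the two halves of a double round rotate the rows into and out of the
 * diagonal arrangement). A hedged scalar C reference of one quarter
 * round:
 *
 *	#define ROTL(x, n)	(((x) << (n)) | ((x) >> (32 - (n))))
 *
 *	void
 *	quarterround(unsigned int *a, unsigned int *b, unsigned int *c, unsigned int *d)
 *	{
 *		*a += *b; *d ^= *a; *d = ROTL(*d, 16);
 *		*c += *d; *b ^= *c; *b = ROTL(*b, 12);
 *		*a += *b; *d ^= *a; *d = ROTL(*d, 8);
 *		*c += *d; *b ^= *c; *b = ROTL(*b, 7);
 *	}
 */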
TEXT _chachablock(SB), 0, $0
MOVOU 0(RARG), X0
MOVOU 16(RARG), X1
MOVOU 32(RARG), X2
MOVOU 48(RARG), X3
MOVL rounds+8(FP), CX
SHRL $1, CX
_loop:
PADDL X1, X0
PXOR X0, X3
/* ROTATE(16, X3, X3) */
PSHUFLW $(1<<0 | 0<<2 | 3<<4 | 2<<6), X3, X3
PSHUFHW $(1<<0 | 0<<2 | 3<<4 | 2<<6), X3, X3
PADDL X3, X2
MOVO X1, X4
PXOR X2, X4
ROTATE(12, X4, X1)
PADDL X1, X0
MOVO X0, X4
PXOR X3, X4
ROTATE(8, X4, X3)
PADDL X3, X2
MOVO X1, X4
PXOR X2, X4
ROTATE(7, X4, X1)
PSHUFL $(1<<0 | 2<<2 | 3<<4 | 0<<6), X1, X1
PSHUFL $(2<<0 | 3<<2 | 0<<4 | 1<<6), X2, X2
PSHUFL $(3<<0 | 0<<2 | 1<<4 | 2<<6), X3, X3
PADDL X1, X0
PXOR X0, X3
/* ROTATE(16, X3, X3) */
PSHUFLW $(1<<0 | 0<<2 | 3<<4 | 2<<6), X3, X3
PSHUFHW $(1<<0 | 0<<2 | 3<<4 | 2<<6), X3, X3
PADDL X3, X2
MOVO X1, X4
PXOR X2, X4
ROTATE(12, X4, X1)
PADDL X1, X0
MOVO X0, X4
PXOR X3, X4
ROTATE(8, X4, X3)
PADDL X3, X2
MOVO X1, X4
PXOR X2, X4
ROTATE(7, X4, X1)
PSHUFL $(3<<0 | 0<<2 | 1<<4 | 2<<6), X1, X1
PSHUFL $(2<<0 | 3<<2 | 0<<4 | 1<<6), X2, X2
PSHUFL $(1<<0 | 2<<2 | 3<<4 | 0<<6), X3, X3
DECL CX
JNE _loop
MOVOU X0, 0(RARG)
MOVOU X1, 16(RARG)
MOVOU X2, 32(RARG)
MOVOU X3, 48(RARG)
RET
|
9front/9front
| 7,022
|
sys/src/libsec/amd64/aesni.s
|
#define AESOP(o,r1,r2) \
BYTE $0x66; \
BYTE $0x0F; \
BYTE $0x38; \
BYTE $(o); \
BYTE $(0xC0 | r2<<3 | r1)
#define AESIMC(r1,r2) AESOP(0xDB,r1,r2)
#define AESENC(r1,r2) AESOP(0xDC,r1,r2)
#define AESENCLAST(r1,r2) AESOP(0xDD,r1,r2)
#define AESDEC(r1,r2) AESOP(0xDE,r1,r2)
#define AESDECLAST(r1,r2) AESOP(0xDF,r1,r2)
#define AESKEYGENASSIST(i,r1,r2) \
BYTE $0x66; \
BYTE $0x0F; \
BYTE $0x3A; \
BYTE $0xDF; \
BYTE $(0xC0 | r2<<3 | r1); \
BYTE $(i)
TEXT aesni_init(SB), 0, $0
MOVL $1, AX
CPUID
XORL AX, AX
ANDL $(1<<25), CX
JZ _ret
/* override aes function pointers */
MOVQ $AESencrypt<>(SB), AX
MOVQ AX, aes_encrypt(SB)
MOVQ $AESdecrypt<>(SB), AX
MOVQ AX, aes_decrypt(SB)
/* return setup function pointer */
MOVQ $AESsetup<>(SB), AX
_ret:
RET
TEXT AESencrypt<>(SB), 0, $0
MOVL Nr+8(FP), CX
MOVQ pt+16(FP), SI
MOVQ ct+24(FP), DI
MOVO (RARG), X0
MOVOU (SI), X7
ADDQ $16, RARG
PXOR X7, X0
CMPL CX, $12
JLT erounds10
JEQ erounds12
erounds14:
MOVO 0(RARG), X1
MOVO 16(RARG), X2
ADDQ $32, RARG
AESENC(1, 0)
AESENC(2, 0)
erounds12:
MOVO 0(RARG), X3
MOVO 16(RARG), X4
ADDQ $32, RARG
AESENC(3, 0)
AESENC(4, 0)
erounds10:
MOVO 0(RARG), X1
MOVO 16(RARG), X2
MOVO 32(RARG), X3
MOVO 48(RARG), X4
MOVO 64(RARG), X5
MOVO 80(RARG), X6
MOVO 96(RARG), X7
AESENC(1, 0)
MOVO 112(RARG), X1
AESENC(2, 0)
MOVO 128(RARG), X2
AESENC(3, 0)
MOVO 144(RARG), X3
AESENC(4, 0)
AESENC(5, 0)
AESENC(6, 0)
AESENC(7, 0)
AESENC(1, 0)
AESENC(2, 0)
AESENCLAST(3, 0)
MOVOU X0, (DI)
RET
TEXT AESdecrypt<>(SB), 0, $0
MOVL Nr+8(FP), CX
MOVQ ct+16(FP), SI
MOVQ pt+24(FP), DI
MOVO (RARG), X0
MOVOU (SI), X7
ADDQ $16, RARG
PXOR X7, X0
CMPL CX, $12
JLT drounds10
JEQ drounds12
drounds14:
MOVO 0(RARG), X1
MOVO 16(RARG), X2
ADDQ $32, RARG
AESDEC(1, 0)
AESDEC(2, 0)
drounds12:
MOVO 0(RARG), X3
MOVO 16(RARG), X4
ADDQ $32, RARG
AESDEC(3, 0)
AESDEC(4, 0)
drounds10:
MOVO 0(RARG), X1
MOVO 16(RARG), X2
MOVO 32(RARG), X3
MOVO 48(RARG), X4
MOVO 64(RARG), X5
MOVO 80(RARG), X6
MOVO 96(RARG), X7
AESDEC(1, 0)
MOVO 112(RARG), X1
AESDEC(2, 0)
MOVO 128(RARG), X2
AESDEC(3, 0)
MOVO 144(RARG), X3
AESDEC(4, 0)
AESDEC(5, 0)
AESDEC(6, 0)
AESDEC(7, 0)
AESDEC(1, 0)
AESDEC(2, 0)
AESDECLAST(3, 0)
MOVOU X0, (DI)
RET
TEXT AESsetup<>(SB), 0, $16
MOVQ RARG, erk+0(FP)
MOVQ key+16(FP), DX
MOVL nkey+24(FP), BX
MOVQ DX, 8(SP)
CMPL BX, $32
JEQ esetup256
CMPL BX, $24
JEQ esetup192
CMPL BX, $16
JEQ esetup128
XORL AX, AX
RET
esetup256:
CALL setupEnc256<>(SB)
JMP dsetup
esetup192:
CALL setupEnc192<>(SB)
JMP dsetup
esetup128:
CALL setupEnc128<>(SB)
dsetup:
MOVQ erk+0(FP), SI
MOVQ drk+8(FP), DI
MOVL AX, BX
SHLL $4, BX
ADDQ BX, SI
MOVO (SI), X0
MOVO X0, (DI)
MOVO -16(SI), X1
MOVO -32(SI), X2
MOVO -48(SI), X3
MOVO -64(SI), X4
AESIMC(1, 1)
AESIMC(2, 2)
AESIMC(3, 3)
AESIMC(4, 4)
MOVO X1, 16(DI)
MOVO X2, 32(DI)
MOVO X3, 48(DI)
MOVO X4, 64(DI)
MOVO -80(SI), X1
MOVO -96(SI), X2
MOVO -112(SI), X3
MOVO -128(SI), X4
AESIMC(1, 1)
AESIMC(2, 2)
AESIMC(3, 3)
AESIMC(4, 4)
MOVO X1, 80(DI)
MOVO X2, 96(DI)
MOVO X3, 112(DI)
MOVO X4, 128(DI)
MOVO -144(SI), X1
AESIMC(1, 1)
MOVO X1, 144(DI)
CMPL AX, $10
JEQ dsetupend
MOVO -160(SI), X1
MOVO -176(SI), X2
AESIMC(1, 1)
AESIMC(2, 2)
MOVO X1, 160(DI)
MOVO X2, 176(DI)
CMPL AX, $12
JEQ dsetupend
MOVO -192(SI), X1
MOVO -208(SI), X2
AESIMC(1, 1)
AESIMC(2, 2)
MOVO X1, 192(DI)
MOVO X2, 208(DI)
dsetupend:
SUBQ BX, SI
ADDQ BX, DI
MOVO (SI), X0
MOVO X0, (DI)
RET
TEXT setupEnc128<>(SB), 0, $0
MOVQ key+8(FP), SI
MOVOU (SI), X1
MOVO X1, (RARG)
AESKEYGENASSIST(0x01, 1, 0)
CALL rk128<>(SB)
MOVO X1, 16(RARG)
AESKEYGENASSIST(0x02, 1, 0)
CALL rk128<>(SB)
MOVO X1, 32(RARG)
AESKEYGENASSIST(0x04, 1, 0)
CALL rk128<>(SB)
MOVO X1, 48(RARG)
AESKEYGENASSIST(0x08, 1, 0)
CALL rk128<>(SB)
MOVO X1, 64(RARG)
AESKEYGENASSIST(0x10, 1, 0)
CALL rk128<>(SB)
MOVO X1, 80(RARG)
AESKEYGENASSIST(0x20, 1, 0)
CALL rk128<>(SB)
MOVO X1, 96(RARG)
AESKEYGENASSIST(0x40, 1, 0)
CALL rk128<>(SB)
MOVO X1, 112(RARG)
AESKEYGENASSIST(0x80, 1, 0)
CALL rk128<>(SB)
MOVO X1, 128(RARG)
AESKEYGENASSIST(0x1b, 1, 0)
CALL rk128<>(SB)
MOVO X1, 144(RARG)
AESKEYGENASSIST(0x36, 1, 0)
CALL rk128<>(SB)
MOVO X1, 160(RARG)
MOVL $10, AX
RET
TEXT rk128<>(SB), 0, $0
PSHUFL $0xff, X0, X0
MOVO X1, X2
PSLLO $4, X2
PXOR X2, X1
PSLLO $4, X2
PXOR X2, X1
PSLLO $4, X2
PXOR X2, X1
PXOR X0, X1
RET
TEXT setupEnc192<>(SB), 0, $0
MOVQ key+8(FP), SI
MOVOU (SI), X1
MOVOU 16(SI), X2
MOVO X1, (RARG)
MOVO X2, X5
AESKEYGENASSIST(0x01, 2, 0)
CALL rk192<>(SB)
SHUFPD $0, X1, X5
MOVO X5, 16(RARG)
MOVO X1, X6
SHUFPD $1, X2, X6
MOVO X6, 32(RARG)
AESKEYGENASSIST(0x02, 2, 0)
CALL rk192<>(SB)
MOVO X1, 48(RARG)
MOVO X2, X5
AESKEYGENASSIST(0x04, 2, 0)
CALL rk192<>(SB)
SHUFPD $0, X1, X5
MOVO X5, 64(RARG)
MOVO X1, X6
SHUFPD $1, X2, X6
MOVO X6, 80(RARG)
AESKEYGENASSIST(0x08, 2, 0)
CALL rk192<>(SB)
MOVO X1, 96(RARG)
MOVO X2, X5
AESKEYGENASSIST(0x10, 2, 0)
CALL rk192<>(SB)
SHUFPD $0, X1, X5
MOVO X5, 112(RARG)
MOVO X1, X6
SHUFPD $1, X2, X6
MOVO X6, 128(RARG)
AESKEYGENASSIST(0x20, 2, 0)
CALL rk192<>(SB)
MOVO X1, 144(RARG)
MOVO X2, X5
AESKEYGENASSIST(0x40, 2, 0)
CALL rk192<>(SB)
SHUFPD $0, X1, X5
MOVO X5, 160(RARG)
MOVO X1, X6
SHUFPD $1, X2, X6
MOVO X6, 176(RARG)
AESKEYGENASSIST(0x80, 2, 0)
CALL rk192<>(SB)
MOVO X1, 192(RARG)
MOVL $12, AX
RET
TEXT rk192<>(SB), 0, $0
PSHUFL $0x55, X0, X0
MOVOU X1, X4
PSLLO $4, X4
PXOR X4, X1
PSLLO $4, X4
PXOR X4, X1
PSLLO $4, X4
PXOR X4, X1
PXOR X0, X1
PSHUFL $0xff, X1, X0
MOVOU X2, X4
PSLLO $4, X4
PXOR X4, X2
PXOR X0, X2
RET
TEXT setupEnc256<>(SB), 0, $0
MOVQ key+8(FP), SI
MOVOU (SI), X1
MOVOU 16(SI), X2
MOVO X1, (RARG)
MOVO X2, 16(RARG)
AESKEYGENASSIST(0x01, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 32(RARG)
AESKEYGENASSIST(0x00, 1, 0)
CALL rk256_b<>(SB)
MOVO X2, 48(RARG)
AESKEYGENASSIST(0x02, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 64(RARG)
AESKEYGENASSIST(0x00, 1, 0)
CALL rk256_b<>(SB)
MOVO X2, 80(RARG)
AESKEYGENASSIST(0x04, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 96(RARG)
AESKEYGENASSIST(0x00, 1, 0)
CALL rk256_b<>(SB)
MOVO X2, 112(RARG)
AESKEYGENASSIST(0x08, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 128(RARG)
AESKEYGENASSIST(0x00, 1, 0)
CALL rk256_b<>(SB)
MOVO X2, 144(RARG)
AESKEYGENASSIST(0x10, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 160(RARG)
AESKEYGENASSIST(0x00, 1, 0)
CALL rk256_b<>(SB)
MOVO X2, 176(RARG)
AESKEYGENASSIST(0x20, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 192(RARG)
AESKEYGENASSIST(0x00, 1, 0)
CALL rk256_b<>(SB)
MOVO X2, 208(RARG)
AESKEYGENASSIST(0x40, 2, 0)
CALL rk256_a<>(SB)
MOVO X1, 224(RARG)
MOVL $14, AX
RET
TEXT rk256_a<>(SB), 0, $0
PSHUFL $0xff, X0, X0
MOVO X1, X4
PSLLO $4, X4
PXOR X4, X1
PSLLO $4, X4
PXOR X4, X1
PSLLO $4, X4
PXOR X4, X1
PXOR X0, X1
RET
TEXT rk256_b<>(SB), 0, $0
PSHUFL $0xaa, X0, X0
MOVO X2, X4
PSLLO $4, X4
PXOR X4, X2
PSLLO $4, X4
PXOR X4, X2
PSLLO $4, X4
PXOR X4, X2
PXOR X0, X2
RET
|
9front/9front
| 7,204
|
sys/src/libsec/mips/md5block.s
|
/*
* rfc1321 requires that I include this. The code is new. The constants
* all come from the rfc (hence the copyright). We trade a table for the
* macros in rfc. The total size is a lot less. -- presotto
*
* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
* rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD5 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD5 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*/
/* round 1 */
DATA md5tab<>+( 0*4)(SB)/4,$0xd76aa478
DATA md5tab<>+( 1*4)(SB)/4,$0xe8c7b756
DATA md5tab<>+( 2*4)(SB)/4,$0x242070db
DATA md5tab<>+( 3*4)(SB)/4,$0xc1bdceee
DATA md5tab<>+( 4*4)(SB)/4,$0xf57c0faf
DATA md5tab<>+( 5*4)(SB)/4,$0x4787c62a
DATA md5tab<>+( 6*4)(SB)/4,$0xa8304613
DATA md5tab<>+( 7*4)(SB)/4,$0xfd469501
DATA md5tab<>+( 8*4)(SB)/4,$0x698098d8
DATA md5tab<>+( 9*4)(SB)/4,$0x8b44f7af
DATA md5tab<>+(10*4)(SB)/4,$0xffff5bb1
DATA md5tab<>+(11*4)(SB)/4,$0x895cd7be
DATA md5tab<>+(12*4)(SB)/4,$0x6b901122
DATA md5tab<>+(13*4)(SB)/4,$0xfd987193
DATA md5tab<>+(14*4)(SB)/4,$0xa679438e
DATA md5tab<>+(15*4)(SB)/4,$0x49b40821
/* round 2 */
DATA md5tab<>+(16*4)(SB)/4,$0xf61e2562
DATA md5tab<>+(17*4)(SB)/4,$0xc040b340
DATA md5tab<>+(18*4)(SB)/4,$0x265e5a51
DATA md5tab<>+(19*4)(SB)/4,$0xe9b6c7aa
DATA md5tab<>+(20*4)(SB)/4,$0xd62f105d
DATA md5tab<>+(21*4)(SB)/4,$0x02441453
DATA md5tab<>+(22*4)(SB)/4,$0xd8a1e681
DATA md5tab<>+(23*4)(SB)/4,$0xe7d3fbc8
DATA md5tab<>+(24*4)(SB)/4,$0x21e1cde6
DATA md5tab<>+(25*4)(SB)/4,$0xc33707d6
DATA md5tab<>+(26*4)(SB)/4,$0xf4d50d87
DATA md5tab<>+(27*4)(SB)/4,$0x455a14ed
DATA md5tab<>+(28*4)(SB)/4,$0xa9e3e905
DATA md5tab<>+(29*4)(SB)/4,$0xfcefa3f8
DATA md5tab<>+(30*4)(SB)/4,$0x676f02d9
DATA md5tab<>+(31*4)(SB)/4,$0x8d2a4c8a
/* round 3 */
DATA md5tab<>+(32*4)(SB)/4,$0xfffa3942
DATA md5tab<>+(33*4)(SB)/4,$0x8771f681
DATA md5tab<>+(34*4)(SB)/4,$0x6d9d6122
DATA md5tab<>+(35*4)(SB)/4,$0xfde5380c
DATA md5tab<>+(36*4)(SB)/4,$0xa4beea44
DATA md5tab<>+(37*4)(SB)/4,$0x4bdecfa9
DATA md5tab<>+(38*4)(SB)/4,$0xf6bb4b60
DATA md5tab<>+(39*4)(SB)/4,$0xbebfbc70
DATA md5tab<>+(40*4)(SB)/4,$0x289b7ec6
DATA md5tab<>+(41*4)(SB)/4,$0xeaa127fa
DATA md5tab<>+(42*4)(SB)/4,$0xd4ef3085
DATA md5tab<>+(43*4)(SB)/4,$0x04881d05
DATA md5tab<>+(44*4)(SB)/4,$0xd9d4d039
DATA md5tab<>+(45*4)(SB)/4,$0xe6db99e5
DATA md5tab<>+(46*4)(SB)/4,$0x1fa27cf8
DATA md5tab<>+(47*4)(SB)/4,$0xc4ac5665
/* round 4 */
DATA md5tab<>+(48*4)(SB)/4,$0xf4292244
DATA md5tab<>+(49*4)(SB)/4,$0x432aff97
DATA md5tab<>+(50*4)(SB)/4,$0xab9423a7
DATA md5tab<>+(51*4)(SB)/4,$0xfc93a039
DATA md5tab<>+(52*4)(SB)/4,$0x655b59c3
DATA md5tab<>+(53*4)(SB)/4,$0x8f0ccc92
DATA md5tab<>+(54*4)(SB)/4,$0xffeff47d
DATA md5tab<>+(55*4)(SB)/4,$0x85845dd1
DATA md5tab<>+(56*4)(SB)/4,$0x6fa87e4f
DATA md5tab<>+(57*4)(SB)/4,$0xfe2ce6e0
DATA md5tab<>+(58*4)(SB)/4,$0xa3014314
DATA md5tab<>+(59*4)(SB)/4,$0x4e0811a1
DATA md5tab<>+(60*4)(SB)/4,$0xf7537e82
DATA md5tab<>+(61*4)(SB)/4,$0xbd3af235
DATA md5tab<>+(62*4)(SB)/4,$0x2ad7d2bb
DATA md5tab<>+(63*4)(SB)/4,$0xeb86d391
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
#define AREG R5
#define BREG R6
#define CREG R7
#define DREG R8
#define DATAREG R1
#define TABREG R10
#define STREG R11
#define XREG R12
#define ELOOPREG R13
#define EDREG R14
#define IREG R15
#define TMP1 R9
#define TMP2 R2
#define TMP3 R3
#define TMP4 R4
/*
* decode little endian data into x[off], then the body
* bodies have this form:
* a += FN(B,C,D);
* a += x[off] + t[off];
* a = (a << S11) | (a >> (32 - S11));
* a += b;
*/
#define BODY1(off,FN,SH,A,B,C,D)\
MOVBU off(DATAREG),TMP2;\
MOVBU (off+1)(DATAREG),TMP3;\
MOVBU (off+2)(DATAREG),TMP1;\
MOVBU (off+3)(DATAREG),TMP4;\
SLL $8,TMP3;\
OR TMP3,TMP2;\
SLL $16,TMP1;\
OR TMP1,TMP2;\
SLL $24,TMP4;\
OR TMP4,TMP2;\
MOVW off(TABREG),TMP3;\
FN(B,C,D)\
ADDU TMP1,A;\
MOVW TMP2,off(XREG);\
ADDU TMP2,A;\
ADDU TMP3,A;\
SLL $SH,A,TMP1;\
SRL $(32-SH),A;\
OR TMP1,A;\
ADDU B,A;\
#define BODY(off,inc,FN,SH,A,B,C,D)\
MOVW off(TABREG),TMP3;\
ADDU XREG,IREG,TMP4;\
MOVW (TMP4),TMP2;\
ADDU $(inc*4),IREG;\
AND $63,IREG;\
FN(B,C,D)\
ADDU TMP1,A;\
ADDU TMP2,A;\
ADDU TMP3,A;\
SLL $SH,A,TMP1;\
SRL $(32-SH),A;\
OR TMP1,A;\
ADDU B,A;\
/*
* fn1 = ((c ^ d) & b) ^ d
*/
#define FN1(B,C,D)\
XOR C,D,TMP1;\
AND B,TMP1;\
XOR D,TMP1;\
/*
* fn2 = ((b ^ c) & d) ^ c;
*/
#define FN2(B,C,D)\
XOR B,C,TMP1;\
AND D,TMP1;\
XOR C,TMP1;\
/*
* fn3 = b ^ c ^ d;
*/
#define FN3(B,C,D)\
XOR B,C,TMP1;\
XOR D,TMP1;\
/*
* fn4 = c ^ (b | ~d);
*/
#define FN4(B,C,D)\
XOR $-1,D,TMP1;\
OR B,TMP1;\
XOR C,TMP1;\
#define DATA 0
#define LEN 4
#define STATE 8
#define XOFF (-4-16*4)
TEXT _md5block+0(SB),$68
MOVW len+LEN(FP),TMP1
ADDU DATAREG,TMP1,EDREG
MOVW state+STATE(FP),STREG
MOVW 0(STREG),AREG
MOVW 4(STREG),BREG
MOVW 8(STREG),CREG
MOVW 12(STREG),DREG
mainloop:
MOVW $md5tab<>+0(SB),TABREG
ADDU $(16*4),DATAREG,ELOOPREG
MOVW $x+XOFF(SP),XREG
loop1:
BODY1(0,FN1,S11,AREG,BREG,CREG,DREG)
BODY1(4,FN1,S12,DREG,AREG,BREG,CREG)
BODY1(8,FN1,S13,CREG,DREG,AREG,BREG)
BODY1(12,FN1,S14,BREG,CREG,DREG,AREG)
ADDU $16,DATAREG
ADDU $16,TABREG
ADDU $16,XREG
BNE DATAREG,ELOOPREG,loop1
MOVW $x+XOFF(SP),XREG
MOVW $(1*4),IREG
MOVW $(1*4),ELOOPREG
loop2:
BODY(0,5,FN2,S21,AREG,BREG,CREG,DREG)
BODY(4,5,FN2,S22,DREG,AREG,BREG,CREG)
BODY(8,5,FN2,S23,CREG,DREG,AREG,BREG)
BODY(12,5,FN2,S24,BREG,CREG,DREG,AREG)
ADDU $16,TABREG
BNE IREG,ELOOPREG,loop2
MOVW $(5*4),IREG
MOVW $(5*4),ELOOPREG
loop3:
BODY(0,3,FN3,S31,AREG,BREG,CREG,DREG)
BODY(4,3,FN3,S32,DREG,AREG,BREG,CREG)
BODY(8,3,FN3,S33,CREG,DREG,AREG,BREG)
BODY(12,3,FN3,S34,BREG,CREG,DREG,AREG)
ADDU $16,TABREG
BNE IREG,ELOOPREG,loop3
MOVW $0,IREG
loop4:
BODY(0,7,FN4,S41,AREG,BREG,CREG,DREG)
BODY(4,7,FN4,S42,DREG,AREG,BREG,CREG)
BODY(8,7,FN4,S43,CREG,DREG,AREG,BREG)
BODY(12,7,FN4,S44,BREG,CREG,DREG,AREG)
ADDU $16,TABREG
BNE IREG,R0,loop4
MOVW 0(STREG),TMP1
MOVW 4(STREG),TMP2
MOVW 8(STREG),TMP3
MOVW 12(STREG),TMP4
ADDU TMP1,AREG
ADDU TMP2,BREG
ADDU TMP3,CREG
ADDU TMP4,DREG
MOVW AREG,0(STREG)
MOVW BREG,4(STREG)
MOVW CREG,8(STREG)
MOVW DREG,12(STREG)
BNE DATAREG,EDREG,mainloop
RET
GLOBL md5tab<>+0(SB),$256
END
|
9front/9front
| 4,143
|
sys/src/libsec/mips/sha1block.s
|
TEXT _sha1block+0(SB),$328
/*
* wp[off] = x;
* x += A <<< 5;
* E += 0xca62c1d6 + x;
* x = FN(B,C,D);
* E += x;
* B >>> 2
*/
#define BODYX(off,FN,V,A,B,C,D,E)\
FN(B,C,D)\
ADDU TMP1,E;\
ADDU V,E;\
MOVW TMP2,off(WREG);\
ADDU TMP2,E;\
SLL $5,A,TMP3;\
SRL $27,A,TMP4;\
OR TMP3,TMP4;\
ADDU TMP4,E;\
SLL $30,B,TMP4;\
SRL $2,B;\
OR TMP4,B
/*
* x = data[i]
* BODYX
*/
#define BODY1(off,FN,V,A,B,C,D,E)\
MOVBU off(DATAREG),TMP2;\
MOVBU (off+1)(DATAREG),TMP3;\
MOVBU (off+2)(DATAREG),TMP1;\
MOVBU (off+3)(DATAREG),TMP4;\
SLL $24,TMP2;\
SLL $16,TMP3;\
OR TMP3,TMP2;\
SLL $8,TMP1;\
OR TMP1,TMP2;\
OR TMP4,TMP2;\
BODYX(off,FN,V,A,B,C,D,E)
/*
* x = (wp[off-3] ^ wp[off-8] ^ wp[off-14] ^ wp[off-16]) <<< 1;
* BODYX
*/
#define BODY(off,FN,V,A,B,C,D,E)\
MOVW (off-64)(WREG),TMP1;\
MOVW (off-56)(WREG),TMP2;\
MOVW (off-32)(WREG),TMP3;\
MOVW (off-12)(WREG),TMP4;\
XOR TMP1,TMP2;\
XOR TMP3,TMP2;\
XOR TMP4,TMP2;\
SLL $1,TMP2,TMP1;\
SRL $31,TMP2;\
OR TMP1,TMP2;\
BODYX(off,FN,V,A,B,C,D,E)
/*
* fn1 = (((C^D)&B)^D);
*/
#define FN1(B,C,D)\
XOR C,D,TMP1;\
AND B,TMP1;\
XOR D,TMP1;
/*
* fn24 = B ^ C ^ D
*/
#define FN24(B,C,D)\
XOR B,C,TMP1;\
XOR D,TMP1;
/*
* fn3 = ((B ^ C) & (D ^ B)) ^ B
*/
#define FN3(B,C,D)\
XOR B,C,TMP1;\
XOR B,D,TMP4;\
AND TMP4,TMP1;\
XOR B,TMP1;
/*
* stack offsets
* void _sha1block(uchar *DATA, int LEN, ulong *STATE)
*/
#define DATA 0
#define LEN 4
#define STATE 8
/*
* stack offsets for locals
* ulong w[80];
* uchar *edata;
* ulong *w15, *w40, *w60, *w80;
* register local
* ulong *wp = BP
* ulong a = eax, b = ebx, c = ecx, d = edx, e = esi
* ulong tmp = edi
*/
#define WARRAY (-4-(80*4))
#define AREG R5
#define BREG R6
#define CREG R7
#define DREG R8
#define EREG R9
#define DATAREG R1
#define STREG R11
#define WREG R12
#define W15REG R13
#define W60REG R14
#define W40REG R15
#define W80REG R16
#define EDREG R17
#define VREG R18
#define TMP1 R10
#define TMP2 R2
#define TMP3 R3
#define TMP4 R4
#define TMP5 R19
MOVW len+LEN(FP),TMP1
MOVW state+STATE(FP),STREG
ADDU DATAREG,TMP1,EDREG
MOVW 0(STREG),AREG
MOVW 4(STREG),BREG
MOVW 8(STREG),CREG
MOVW 12(STREG),DREG
MOVW 16(STREG),EREG
MOVW $warray+WARRAY(SP),WREG
ADDU $(15*4),WREG,W15REG
ADDU $(40*4),WREG,W40REG
ADDU $(60*4),WREG,W60REG
ADDU $(80*4),WREG,W80REG
mainloop:
MOVW $warray+WARRAY(SP),WREG
MOVW $0x5a827999,VREG
loop1:
BODY1(0,FN1,VREG,AREG,BREG,CREG,DREG,EREG)
BODY1(4,FN1,VREG,EREG,AREG,BREG,CREG,DREG)
BODY1(8,FN1,VREG,DREG,EREG,AREG,BREG,CREG)
BODY1(12,FN1,VREG,CREG,DREG,EREG,AREG,BREG)
BODY1(16,FN1,VREG,BREG,CREG,DREG,EREG,AREG)
ADDU $20,DATAREG
ADDU $20,WREG
BNE WREG,W15REG,loop1
BODY1(0,FN1,VREG,AREG,BREG,CREG,DREG,EREG)
ADDU $4,DATAREG
BODY(4,FN1,VREG,EREG,AREG,BREG,CREG,DREG)
BODY(8,FN1,VREG,DREG,EREG,AREG,BREG,CREG)
BODY(12,FN1,VREG,CREG,DREG,EREG,AREG,BREG)
BODY(16,FN1,VREG,BREG,CREG,DREG,EREG,AREG)
ADDU $20,WREG
MOVW $0x6ed9eba1,VREG
loop2:
BODY(0,FN24,VREG,AREG,BREG,CREG,DREG,EREG)
BODY(4,FN24,VREG,EREG,AREG,BREG,CREG,DREG)
BODY(8,FN24,VREG,DREG,EREG,AREG,BREG,CREG)
BODY(12,FN24,VREG,CREG,DREG,EREG,AREG,BREG)
BODY(16,FN24,VREG,BREG,CREG,DREG,EREG,AREG)
ADDU $20,WREG
BNE WREG,W40REG,loop2
MOVW $0x8f1bbcdc,VREG
loop3:
BODY(0,FN3,VREG,AREG,BREG,CREG,DREG,EREG)
BODY(4,FN3,VREG,EREG,AREG,BREG,CREG,DREG)
BODY(8,FN3,VREG,DREG,EREG,AREG,BREG,CREG)
BODY(12,FN3,VREG,CREG,DREG,EREG,AREG,BREG)
BODY(16,FN3,VREG,BREG,CREG,DREG,EREG,AREG)
ADDU $20,WREG
BNE WREG,W60REG,loop3
MOVW $0xca62c1d6,VREG
loop4:
BODY(0,FN24,VREG,AREG,BREG,CREG,DREG,EREG)
BODY(4,FN24,VREG,EREG,AREG,BREG,CREG,DREG)
BODY(8,FN24,VREG,DREG,EREG,AREG,BREG,CREG)
BODY(12,FN24,VREG,CREG,DREG,EREG,AREG,BREG)
BODY(16,FN24,VREG,BREG,CREG,DREG,EREG,AREG)
ADDU $20,WREG
BNE WREG,W80REG,loop4
MOVW 0(STREG),TMP1
MOVW 4(STREG),TMP2
MOVW 8(STREG),TMP3
MOVW 12(STREG),TMP4
MOVW 16(STREG),TMP5
ADDU TMP1,AREG
ADDU TMP2,BREG
ADDU TMP3,CREG
ADDU TMP4,DREG
ADDU TMP5,EREG
MOVW AREG,0(STREG)
MOVW BREG,4(STREG)
MOVW CREG,8(STREG)
MOVW DREG,12(STREG)
MOVW EREG,16(STREG)
BNE DATAREG,EDREG,mainloop
RET
END
|
9front/9front
| 1,233
|
sys/src/libmp/power/mpvecadd.s
|
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
/*
* mpvecadd(mpdigit *a, int alen, mpdigit *b, int blen, mpdigit *sum)
*
* sum[0:alen] = a[0:alen-1] + b[0:blen-1]
*
* prereq: alen >= blen, sum has room for alen+1 digits
*
* R3 == a (first arg passed in R3)
* R4 == alen
* R5 == b
* R6 == blen
* R7 == sum
* R8 == temporary
* R9 == temporary
*/
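/*
 * A rough C equivalent of the routine below, for reference only
 * (a sketch under the stated prereqs, not part of this file):
 *
 *	void
 *	mpvecadd(mpdigit *a, int alen, mpdigit *b, int blen, mpdigit *sum)
 *	{
 *		int i;
 *		mpdigit carry = 0, x;
 *
 *		for(i = 0; i < blen; i++){
 *			x = a[i] + carry;
 *			carry = x < carry;		// carry out of a[i]+carry
 *			sum[i] = x + b[i];
 *			carry += sum[i] < x;		// carry out of x+b[i]
 *		}
 *		for(; i < alen; i++){
 *			sum[i] = a[i] + carry;
 *			carry = sum[i] < carry;
 *		}
 *		sum[alen] = carry;
 *	}
 */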
TEXT mpvecadd(SB),$-4
MOVW alen+4(FP), R4
MOVW b+8(FP), R5
MOVW blen+12(FP), R6
MOVW sum+16(FP), R7
SUB R6, R4 /* calculate counter for second loop (alen > blen) */
SUB $4, R3 /* pre decrement for MOVWU's */
SUB $4, R5 /* pre decrement for MOVWU's */
SUB $4, R7 /* pre decrement for MOVWU's */
MOVW R0, XER /* zero carry going in */
/* if blen == 0, don't need to add it in */
CMP R0, R6
BEQ _add1
/* sum[0:blen-1],carry = a[0:blen-1] + b[0:blen-1] */
MOVW R6, CTR
_addloop1:
MOVWU 4(R3), R8
MOVWU 4(R5), R9
ADDE R8, R9
MOVWU R9, 4(R7)
BDNZ _addloop1
_add1:
/* if alen == blen, we're done */
CMP R0, R4
BEQ _addend
/* sum[blen:alen-1],carry = a[blen:alen-1] + 0 + carry */
MOVW R4, CTR
_addloop2:
MOVWU 4(R3), R8
ADDE R0, R8
MOVWU R8, 4(R7)
BDNZ _addloop2
/* sum[alen] = carry */
_addend:
ADDE R0, R0, R8
MOVW R8, 4(R7)
RETURN
|
9front/9front
| 1,118
|
sys/src/libmp/power/mpvecsub.s
|
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
/*
* mpvecsub(mpdigit *a, int alen, mpdigit *b, int blen, mpdigit *diff)
*
* diff[0:alen-1] = a[0:alen-1] - b[0:blen-1]
*
* prereq: alen >= blen, diff has room for alen digits
*
* R3 == a
* R4 == alen
* R5 == b
* R6 == blen
* R7 == diff
* R8 == temporary
* R9 == temporary
*/
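/*
 * A rough C equivalent, for reference only (a sketch, not part of this
 * file).  Here `borrow' is 1 when a borrow is outstanding; the PowerPC
 * code below instead keeps the inverted borrow in the carry bit (CA),
 * which is why it seeds CA with SUBC before the loop:
 *
 *	void
 *	mpvecsub(mpdigit *a, int alen, mpdigit *b, int blen, mpdigit *diff)
 *	{
 *		int i;
 *		mpdigit borrow = 0, x;
 *
 *		for(i = 0; i < blen; i++){
 *			x = a[i] - borrow;
 *			borrow = x > a[i];		// borrow out of a[i]-borrow
 *			diff[i] = x - b[i];
 *			borrow += diff[i] > x;		// borrow out of x-b[i]
 *		}
 *		for(; i < alen; i++){
 *			diff[i] = a[i] - borrow;
 *			borrow = diff[i] > a[i];
 *		}
 *	}
 */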
TEXT mpvecsub(SB),$-4
MOVW alen+4(FP),R4
MOVW b+8(FP),R5
MOVW blen+12(FP),R6
MOVW diff+16(FP),R7
SUB R6, R4 /* calculate counter for second loop (alen > blen) */
SUB $4, R3 /* pre decrement for MOVWU's */
SUB $4, R5 /* pre decrement for MOVWU's */
SUBC $4, R7 /* pre decrement for MOVWU's and set carry */
	/* skip subtraction if b is zero */
CMP R0,R6
BEQ _sub1
/* diff[0:blen-1],borrow = a[0:blen-1] - b[0:blen-1] */
MOVW R6, CTR
_subloop1:
MOVWU 4(R3), R8
MOVWU 4(R5), R9
SUBE R9, R8, R8
MOVWU R8, 4(R7)
BDNZ _subloop1
_sub1:
	/* if alen == blen, we're done */
CMP R0, R4
BEQ _subend
/* diff[blen:alen-1] = a[blen:alen-1] - 0 + carry */
MOVW R4, CTR
_subloop2:
MOVWU 4(R3), R8
SUBE R0, R8
MOVWU R8, 4(R7)
BDNZ _subloop2
_subend:
RETURN
|
9front/9front
| 1,300
|
sys/src/libmp/power/mpvecdigmulsub.s
|
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
#define BLT BC 0xC,0,
/*
* mpvecdigmulsub(mpdigit *b, int n, mpdigit m, mpdigit *p)
*
* p -= b*m
*
* each step looks like:
* hi,lo = m*b[i]
* lo += oldhi + carry
* hi += carry
* p[i] += lo
* oldhi = hi
*
* the registers are:
* b = R3
* n = R4
* m = R5
* p = R6
* i = R7
* hi = R8 - constrained by hardware
* lo = R9 - constrained by hardware
* oldhi = R10
* tmp = R11
* borrow = R12
*
*/
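/*
 * A rough C equivalent of the steps above, for reference only (a
 * sketch; uvlong stands for an assumed unsigned 64-bit type):
 *
 *	int
 *	mpvecdigmulsub(mpdigit *b, int n, mpdigit m, mpdigit *p)
 *	{
 *		int i;
 *		uvlong t;
 *		mpdigit hi, lo, x, oldhi = 0, borrow = 0;
 *
 *		for(i = 0; i < n; i++){
 *			t = (uvlong)b[i]*m + oldhi;	// hi,lo = b[i]*m, lo += oldhi
 *			lo = t;
 *			hi = t >> 32;
 *			x = p[i] - borrow;
 *			borrow = x > p[i];
 *			p[i] = x - lo;			// p[i] -= lo
 *			borrow += p[i] > x;
 *			oldhi = hi;
 *		}
 *		x = p[n] - borrow;
 *		borrow = x > p[n];
 *		p[n] = x - oldhi;			// propagate the last high word
 *		borrow += p[n] > x;
 *		return borrow? -1: +1;			// -1 if the result went negative
 *	}
 */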
TEXT mpvecdigmulsub(SB),$0
MOVW n+4(FP),R10
MOVW R10,CTR
MOVW m+8(FP),R5
MOVW p+12(FP),R6
SUB $4, R3 /* pre decrement for MOVWU's */
SUBC $4, R6 /* pre decrement for MOVWU's and set carry */
MOVW XER,R12
MOVW R0, R10
_mulsubloop:
MOVWU 4(R3),R9 /* lo = b[i] */
MOVW 4(R6),R11 /* tmp = p[i] */
MULHWU R9,R5,R8 /* hi = (b[i] * m)>>32 */
MULLW R9,R5,R9 /* lo = b[i] * m */
ADDC R10,R9 /* lo += oldhi */
ADDE R0,R8 /* hi += carry */
MOVW R12,XER
SUBE R9,R11 /* tmp -= lo */
MOVW XER,R12
MOVWU R11,4(R6) /* p[i] = tmp */
MOVW R8,R10 /* oldhi = hi */
BDNZ _mulsubloop
MOVW 4(R6),R11 /* tmp = p[i] */
MOVW R12,XER
	SUBE R10,R11 /* tmp -= oldhi */
MOVWU R11,4(R6) /* p[i] = tmp */
/* return -1 if the result was negative, +1 otherwise */
SUBECC R0,R0,R3
BLT _mulsub2
MOVW $1,R3
_mulsub2:
RETURN
|
9front/9front
| 1,105
|
sys/src/libmp/power/mpvecdigmuladd.s
|
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
/*
* mpvecdigmuladd(mpdigit *b, int n, mpdigit m, mpdigit *p)
*
* p += b*m
*
* each step looks like:
* hi,lo = m*b[i]
* lo += oldhi + carry
* hi += carry
* p[i] += lo
* oldhi = hi
*
* the registers are:
* b = R3
* n = R4
* m = R5
* p = R6
* i = R7
* hi = R8 - constrained by hardware
* lo = R9 - constrained by hardware
* oldhi = R10
* tmp = R11
*
*/
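/*
 * A rough C equivalent, for reference only (a sketch; uvlong stands
 * for an assumed unsigned 64-bit type).  Folding p[i] into the 64-bit
 * accumulator gives the same result as the two carry steps above:
 *
 *	void
 *	mpvecdigmuladd(mpdigit *b, int n, mpdigit m, mpdigit *p)
 *	{
 *		int i;
 *		uvlong t;
 *		mpdigit oldhi = 0;
 *
 *		for(i = 0; i < n; i++){
 *			t = (uvlong)b[i]*m + oldhi + p[i];	// cannot overflow 64 bits
 *			p[i] = t;				// low word
 *			oldhi = t >> 32;			// high word carried forward
 *		}
 *		p[n] += oldhi;
 *	}
 */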
TEXT mpvecdigmuladd(SB),$0
MOVW n+4(FP),R4
MOVW m+8(FP),R5
MOVW p+12(FP),R6
SUB $4, R3 /* pre decrement for MOVWU's */
SUB $4, R6 /* pre decrement for MOVWU's */
MOVW R0, R10
MOVW R0, XER
MOVW R4, CTR
_muladdloop:
MOVWU 4(R3),R9 /* lo = b[i] */
MOVW 4(R6),R11 /* tmp = p[i] */
MULHWU R9,R5,R8 /* hi = (b[i] * m)>>32 */
MULLW R9,R5,R9 /* lo = b[i] * m */
ADDC R10,R9 /* lo += oldhi */
ADDE R0,R8 /* hi += carry */
ADDC R9,R11 /* tmp += lo */
ADDE R0,R8 /* hi += carry */
MOVWU R11,4(R6) /* p[i] = tmp */
MOVW R8,R10 /* oldhi = hi */
BDNZ _muladdloop
MOVW 4(R6),R11 /* tmp = p[i] */
ADDC R10,R11
MOVWU R11,4(R6) /* p[i] = tmp */
RETURN
|
9front/9front
| 1,211
|
sys/src/libmp/mips/mpvecadd.s
|
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
/*
* mpvecadd(mpdigit *a, int alen, mpdigit *b, int blen, mpdigit *sum)
*
* sum[0:alen] = a[0:alen-1] + b[0:blen-1]
*
* prereq: alen >= blen, sum has room for alen+1 digits
*
* R1 == a (first arg passed in R1)
* R3 == carry
* R4 == alen
* R5 == b
* R6 == blen
* R7 == sum
* R2 == temporary
* R8 == temporary
* R9 == temporary
*/
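/*
 * MIPS has no carry flag, so the loop below recovers each carry with
 * SGTU (set on greater-than, unsigned).  In C terms one iteration is
 * roughly (a sketch, for reference only):
 *
 *	x = a[i] + carry;
 *	carry = carry > x;		// SGTU R3,R8,R3: carry out of a[i]+carry
 *	sum[i] = x + b[i];
 *	carry += x > sum[i];		// SGTU R8,R9,R2; ADDU R2,R3
 */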
TEXT mpvecadd(SB),$-4
MOVW alen+4(FP), R4
MOVW b+8(FP), R5
MOVW blen+12(FP), R6
MOVW sum+16(FP), R7
SUBU R6, R4 /* calculate counter for second loop (alen > blen) */
MOVW R0, R3
/* if blen == 0, don't need to add it in */
BEQ R6,_add1
/* sum[0:blen-1],carry = a[0:blen-1] + b[0:blen-1] */
_addloop1:
MOVW 0(R1), R8
ADDU $4, R1
MOVW 0(R5), R9
ADDU $4, R5
ADDU R3, R8
SGTU R3, R8, R3
ADDU R8, R9
SGTU R8, R9, R2
ADDU R2, R3
MOVW R9, 0(R7)
ADDU $4, R7
SUBU $1, R6
BNE R6, _addloop1
_add1:
/* if alen == blen, we're done */
BEQ R4, _addend
/* sum[blen:alen-1],carry = a[blen:alen-1] + 0 + carry */
_addloop2:
MOVW 0(R1), R8
ADDU $4, R1
ADDU R3, R8
SGTU R3, R8, R3
MOVW R8, 0(R7)
ADDU $4, R7
SUBU $1, R4
BNE R4, _addloop2
/* sum[alen] = carry */
_addend:
MOVW R3, 0(R7)
RET
|
9front/9front
| 1,209
|
sys/src/libmp/mips/mpvecsub.s
|
#define BDNZ BC 16,0,
#define BDNE BC 0,2,
/*
 * mpvecsub(mpdigit *a, int alen, mpdigit *b, int blen, mpdigit *sum)
 *
 * sum[0:alen-1] = a[0:alen-1] - b[0:blen-1]
 *
 * prereq: alen >= blen, sum has room for alen digits
*
* R1 == a (first arg passed in R1)
* R3 == carry
* R4 == alen
* R5 == b
* R6 == blen
* R7 == sum
* R2 == temporary
* R8 == temporary
* R9 == temporary
*/
TEXT mpvecsub(SB),$-4
MOVW alen+4(FP), R4
MOVW b+8(FP), R5
MOVW blen+12(FP), R6
MOVW sum+16(FP), R7
SUBU R6, R4 /* calculate counter for second loop (alen > blen) */
MOVW R0, R3
/* if blen == 0, don't need to subtract it */
BEQ R6,_sub1
/* sum[0:blen-1],carry = a[0:blen-1] - b[0:blen-1] */
_subloop1:
MOVW 0(R1), R8
ADDU $4, R1
MOVW 0(R5), R9
ADDU $4, R5
SUBU R3, R8, R2
SGTU R2, R8, R3
SUBU R9, R2, R8
SGTU R8, R2, R9
ADDU R9, R3
MOVW R8, 0(R7)
ADDU $4, R7
SUBU $1, R6
BNE R6, _subloop1
_sub1:
/* if alen == blen, we're done */
BEQ R4, _subend
	/* sum[blen:alen-1],borrow = a[blen:alen-1] - 0 - borrow */
_subloop2:
MOVW 0(R1), R8
ADDU $4, R1
SUBU R3, R8, R2
SGTU R2, R8, R3
MOVW R2, 0(R7)
ADDU $4, R7
SUBU $1, R4
BNE R4, _subloop2
	/* any final borrow is discarded */
_subend:
RET
|
9front/9front
| 1,226
|
sys/src/libmp/mips/mpvecdigmulsub.s
|
/*
* mpvecdigmulsub(mpdigit *b, int n, mpdigit m, mpdigit *p)
*
* p -= b*m
*
* each step looks like:
* hi,lo = m*b[i]
* lo += oldhi + carry
* hi += carry
* p[i] += lo
* oldhi = hi
*
* the registers are:
* b = R1
* n = R4
* m = R5
* p = R6
* i = R7
* hi = R8 - constrained by hardware
* lo = R9 - constrained by hardware
* oldhi = R10
* tmp = R11
*
*/
TEXT mpvecdigmulsub(SB),$0
MOVW n+4(FP),R4
MOVW m+8(FP),R5
MOVW p+12(FP),R6
MOVW R0, R10 /* oldhi = 0 */
_mulsubloop:
MOVW 0(R1), R9 /* lo = b[i] */
ADDU $4, R1
MOVW 0(R6), R11 /* tmp = p[i] */
MULU R9, R5
MOVW HI, R8 /* hi = (b[i] * m)>>32 */
MOVW LO, R9 /* lo = b[i] * m */
ADDU R10, R9 /* lo += oldhi */
SGTU R10, R9, R2
ADDU R2, R8 /* hi += carry */
SUBU R9, R11, R3 /* tmp -= lo */
SGTU R3, R11, R2
ADDU R2, R8 /* hi += carry */
MOVW R3, 0(R6) /* p[i] = tmp */
ADDU $4, R6
MOVW R8, R10 /* oldhi = hi */
SUBU $1, R4
BNE R4, _mulsubloop
MOVW 0(R6), R11 /* tmp = p[i] */
SUBU R10, R11, R3 /* tmp -= oldhi */
MOVW R3, 0(R6) /* p[i] = tmp */
SGTU R3, R11, R1
BNE R1, _mulsub2
MOVW $1, R1 /* return +1 for positive result */
RET
_mulsub2:
MOVW $-1, R1 /* return -1 for negative result */
RET
|
9front/9front
| 1,098
|
sys/src/libmp/mips/mpvecdigmuladd.s
|
/*
* mpvecdigmuladd(mpdigit *b, int n, mpdigit m, mpdigit *p)
*
* p += b*m
*
* each step looks like:
* hi,lo = m*b[i]
* lo += oldhi + carry
* hi += carry
* p[i] += lo
* oldhi = hi
*
* the registers are:
* b = R1
* n = R4
* m = R5
* p = R6
* i = R7
* hi = R8 - constrained by hardware
* lo = R9 - constrained by hardware
* oldhi = R10
* tmp = R11
*
*/
TEXT mpvecdigmuladd(SB),$0
MOVW n+4(FP),R4
MOVW m+8(FP),R5
MOVW p+12(FP),R6
MOVW R0, R10 /* oldhi = 0 */
BEQ R6, _muladd1
_muladdloop:
MOVW 0(R1), R9 /* lo = b[i] */
ADDU $4, R1
MOVW 0(R6), R11 /* tmp = p[i] */
MULU R9, R5
MOVW HI, R8 /* hi = (b[i] * m)>>32 */
MOVW LO, R9 /* lo = b[i] * m */
ADDU R10, R9 /* lo += oldhi */
SGTU R10, R9, R2
ADDU R2, R8 /* hi += carry */
ADDU R9, R11 /* tmp += lo */
SGTU R9, R11, R2
ADDU R2, R8 /* hi += carry */
MOVW R11, 0(R6) /* p[i] = tmp */
ADDU $4, R6
MOVW R8, R10 /* oldhi = hi */
SUBU $1, R4
BNE R4, _muladdloop
_muladd1:
MOVW 0(R6), R11 /* tmp = p[i] */
ADDU R10, R11 /* tmp += oldhi */
MOVW R11, 0(R6) /* p[i] = tmp */
RET
|
a5632645/keil_cracker_music_stm32f103
| 9,725
|
Core/Startup/startup_stm32f103c8tx.s
|
/**
*************** (C) COPYRIGHT 2017 STMicroelectronics ************************
* @file startup_stm32f103xb.s
* @author MCD Application Team
* @brief STM32F103xB Devices vector table for Atollic toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M3 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017-2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m3
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.equ BootRAM, 0xF108F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
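/*
 * In effect the reset code below performs the following C (a rough
 * sketch for reference only; _sidata, _sdata, _edata, _sbss and _ebss
 * are the linker-script symbols named above, and this sketch is not
 * meant to replace the assembly):
 *
 *   extern unsigned int _sidata, _sdata, _edata, _sbss, _ebss;
 *
 *   void Reset_Handler(void)
 *   {
 *       unsigned int *src = &_sidata, *dst;
 *
 *       SystemInit();                         // clock configuration
 *       for (dst = &_sdata; dst < &_edata; )  // copy .data init values
 *           *dst++ = *src++;                  //   from flash to SRAM
 *       for (dst = &_sbss; dst < &_ebss; )    // zero the .bss section
 *           *dst++ = 0;
 *       __libc_init_array();                  // static constructors
 *       main();
 *   }
 */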
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler
.word PVD_IRQHandler
.word TAMPER_IRQHandler
.word RTC_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word DMA1_Channel7_IRQHandler
.word ADC1_2_IRQHandler
.word USB_HP_CAN1_TX_IRQHandler
.word USB_LP_CAN1_RX0_IRQHandler
.word CAN1_RX1_IRQHandler
.word CAN1_SCE_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_IRQHandler
.word TIM1_UP_IRQHandler
.word TIM1_TRG_COM_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word BootRAM /* @0x108. This is for boot in RAM mode for
STM32F10x Medium Density devices. */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
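/*
 * In practice this means an application can override any handler just
 * by defining a C function with the same name; for example (an
 * illustrative sketch, not part of the original file):
 *
 *   void SysTick_Handler(void)   // replaces the Default_Handler alias
 *   {
 *       // periodic tick handling goes here
 *   }
 */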
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMPER_IRQHandler
.thumb_set TAMPER_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_CAN1_TX_IRQHandler
.thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler
.weak USB_LP_CAN1_RX0_IRQHandler
.thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
|
a1studmuffin/Cataclysm-DDA-Android
| 13,216
|
Android/jni/SDL2_image/external/jpeg-9/jidctfst.S
|
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <machine/cpu-features.h>
.text
.align
.global jpeg_idct_ifast
.func jpeg_idct_ifast
// NOTE: sb=r9, fp=r11 ip=r12, sp=r13, lr=r14, pc=r15
// jpeg_idct_ifast (j_decompress_ptr cinfo,
// jpeg_component_info * compptr,
// short* coef_block,
// unsigned char* output_buf,
// int output_col)
#define local_TMP0123 sp
#define local_TMP0 [sp, #0]
#define local_TMP1 [sp, #4]
#define local_TMP2 [sp, #8]
#define local_TMP3 [sp, #12]
#define local_RANGE_TABLE [sp, #16]
#define local_OUTPUT_COL [sp, #20]
#define local_OUTPUT_BUF [sp, #24]
#define local_UNUSED [sp, #28]
#define off_WORKSPACE 32
#define local_WORKSPACE [sp, #off_WORKSPACE]
#define local_SIZE (off_WORKSPACE + 8*8*4)
#define off_DECOMPRESS_range_limit_base 324
#define off_COMPINFO_quanttable 80
#define DCTSIZE 8
#define VY(x) ((x)*DCTSIZE*2)
#define QY(x) ((x)*DCTSIZE*4)
#define VX(x) ((x)*2)
#define QX(x) ((x)*4)
#define FIX_1_414213562 #362
#define FIX_1_082392200 #277
#define FIX_1_847759065 #473
#define FIX_2_613125930 #669
#define RANGE_MASK 1023
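// The FIX_ constants above are the usual jpeg_idct_ifast multipliers
// scaled by 2^8 (e.g. 1.414213562*256 is about 362).  A MULTIPLY step
// in the C reference code therefore corresponds roughly to the sketch
// below (illustrative only; the assembly defers the >>8 until the
// product is consumed, via asr #8):
//
// #define SCALEBITS 8
// static int multiply(int v, int fix)   // fix = round(constant * 256)
// {
//     return (v * fix) >> SCALEBITS;
// }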
jpeg_idct_ifast:
PLD [r2, #0]
stmdb sp!, {r4,r5, r6,r7, r8,r9, r10,r11, r12,lr}
ldr r4, [sp, #4*10]
sub sp, #local_SIZE
ldr r10,[r1, #off_COMPINFO_quanttable] // r10 = quanttable
str r4, local_OUTPUT_COL
str r3, local_OUTPUT_BUF
ldr r5, [r0, #off_DECOMPRESS_range_limit_base]
add r5, r5, #128
str r5, local_RANGE_TABLE
mov fp, r2 // fp = coef_block
add ip, sp, #off_WORKSPACE
VLoopTail:
ldrsh r0, [fp, #VY(0)]
ldrsh r1, [fp, #VY(1)]
ldrsh r2, [fp, #VY(2)]
ldrsh r3, [fp, #VY(3)]
ldrsh r4, [fp, #VY(4)]
ldrsh r5, [fp, #VY(5)]
ldrsh r6, [fp, #VY(6)]
ldrsh r7, [fp, #VY(7)]
cmp r1, #0
orreqs r8, r2, r3
orreqs r8, r4, r5
orreqs r8, r6, r7
beq VLoopHeadZero
VLoopHead:
// tmp0 = DEQUANTIZE(in[DCTSIZE*0], quant[DCTSIZE*0] (r0)
// tmp2 = DEQUANTIZE(in[DCTSIZE*4], quant[DCTSIZE*4] (r4)
// tmp1 = DEQUANTIZE(in[DCTSIZE*2], quant[DCTSIZE*2] (r2)
// tmp3 = DEQUANTIZE(in[DCTSIZE*6], quant[DCTSIZE*6] (r6)
// tmp10 = tmp0 + tmp2 (r0)
// tmp11 = tmp0 - tmp2 (r4)
ldr r9, [r10, #QY(4)]
ldr r8, [r10, #QY(0)]
#if __ARM_HAVE_HALFWORD_MULTIPLY
smulbb r4, r9, r4
smlabb r0, r8, r0, r4
#else
mul r4, r9, r4
mul r0, r8, r0
add r0, r4
#endif
ldr r9, [r10, #QY(6)]
ldr r8, [r10, #QY(2)]
sub r4, r0, r4, lsl #1
#if __ARM_HAVE_HALFWORD_MULTIPLY
smulbb r6, r9, r6
smlabb r2, r8, r2, r6
#else
mul r6, r9, r6
mul r2, r8, r2
add r2, r6
#endif
// tmp13 = tmp1 + tmp3 (r2)
// tmp12 = MULTIPLY(tmp1 - tmp3, FIX_1_414213562) - tmp13 (r6)
// FIX_1_4142... = 362 = 45*8 + 2
sub r6, r2, r6, lsl #1
mov r8, #360
add r8, r8, #2
mul r9, r6, r8
// tmp0 = tmp10 + tmp13; (r0)
// tmp3 = tmp10 - tmp13; (r8)
// tmp1 = tmp11 + tmp12; (r4)
// tmp2 = tmp11 - tmp12; (r6)
add r0, r0, r2
rsb r6, r2, r9, asr #8
sub r8, r0, r2, lsl #1
add r4, r4, r6
sub r6, r4, r6, lsl #1
stmia local_TMP0123, {r0, r4, r6, r8}
// NOTE: be sure not to use r0,r4,r6,r8 soon after the stm above
// odd part
// tmp4 = DEQUANTIZE( in[DCTSIZE*1], quant[DCTSIZE*1] ) (r1)
// tmp6 = DEQUANTIZE( in[DCTSIZE*5], quant[DCTSIZE*5] ) (r5)
// tmp5 = DEQUANTIZE( in[DCTSIZE*3], quant[DCTSIZE*3] ) (r3)
// tmp7 = DEQUANTIZE( in[DCTSIZE*7], quant[DCTSIZE*7] ) (r7)
// z13 = tmp6 + tmp5; (r0)
// z10 = tmp6 - tmp5; (r2)
// z11 = tmp4 + tmp7; (r4)
// z12 = tmp4 - tmp7; (r6)
ldr r2, [r10, #QY(1)]
ldr r9, [r10, #QY(5)]
#if __ARM_HAVE_HALFWORD_MULTIPLY
smulbb r1, r2, r1
#else
mul r1, r2, r1
#endif
ldr r2, [r10, #QY(3)]
#if __ARM_HAVE_HALFWORD_MULTIPLY
smulbb r5, r9, r5
#else
mul r5, r9, r5
#endif
ldr r9, [r10, #QY(7)]
#if __ARM_HAVE_HALFWORD_MULTIPLY
smlabb r0, r2, r3, r5
smlabb r4, r9, r7, r1
#else
mul r0, r2, r3
add r0, r5
mul r4, r9, r7
add r4, r1
#endif
rsb r2, r0, r5, lsl #1
rsb r6, r4, r1, lsl #1
// tmp7 = z11 + z13; (r7)
// tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); (r1)
// FIX_... = 360 + 2
add r7, r4, r0
sub r1, r4, r0
mov r8, #360
add r8, r8, #2
mul r1, r8, r1
// z5 = MULTIPLY(z10 + z12, FIX_1_847759065); (r8)
// tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; (r0)
// tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; (r2)
// FIX_1_8477... = 473 = 472 + 1
// FIX_1_082... = 277 = 276 + 1
// FIX_2_... = 669 = 668 + 1
add r8, r2, r6
mov r9, #472
mla r8, r9, r8, r8
mov r9, #276
mla r0, r6, r9, r6
mov r9, #668
mla r2, r9, r2, r2
sub r0, r0, r8
rsb r2, r2, r8
// tmp6 = tmp12 - tmp7; (r6)
// tmp5 = tmp11 - tmp6; (r5)
// tmp4 = tmp10 + tmp5; (r4)
rsb r6, r7, r2, asr #8
rsb r5, r6, r1, asr #8
add r4, r5, r0, asr #8
ldmia local_TMP0123, {r0, r1, r2, r3}
// wsptr[DCTSIZE*0] = (int) (tmp0 + tmp7);
// wsptr[DCTSIZE*7] = (int) (tmp0 - tmp7);
// wsptr[DCTSIZE*1] = (int) (tmp1 + tmp6);
// wsptr[DCTSIZE*6] = (int) (tmp1 - tmp6);
// wsptr[DCTSIZE*2] = (int) (tmp2 + tmp5);
// wsptr[DCTSIZE*5] = (int) (tmp2 - tmp5);
// wsptr[DCTSIZE*4] = (int) (tmp3 + tmp4);
// wsptr[DCTSIZE*3] = (int) (tmp3 - tmp4);
add r0, r0, r7
sub r7, r0, r7, lsl #1
add r1, r1, r6
sub r6, r1, r6, lsl #1
add r2, r2, r5
sub r5, r2, r5, lsl #1
sub r3, r3, r4
add r4, r3, r4, lsl #1
str r0, [ip, #QY(0)]
str r1, [ip, #QY(1)]
str r2, [ip, #QY(2)]
str r3, [ip, #QY(3)]
str r4, [ip, #QY(4)]
str r5, [ip, #QY(5)]
str r6, [ip, #QY(6)]
str r7, [ip, #QY(7)]
// inptr++; /* advance pointers to next column */
// quantptr++;
// wsptr++;
add fp, fp, #2
add r10, r10, #4
add ip, ip, #4
add r0, sp, #(off_WORKSPACE + 4*8)
cmp ip, r0
bne VLoopTail
HLoopStart:
// reset pointers
PLD [sp, #off_WORKSPACE]
add ip, sp, #off_WORKSPACE
ldr r10, local_RANGE_TABLE
HLoopTail:
// output = *output_buf++ + output_col
ldr r0, local_OUTPUT_BUF
ldr r1, local_OUTPUT_COL
ldr r2, [r0], #4
str r0, local_OUTPUT_BUF
add fp, r2, r1
PLD [ip, #32]
ldmia ip!, {r0-r7}
cmp r1, #0
orreqs r8, r2, r3
orreqs r8, r4, r5
orreqs r8, r6, r7
beq HLoopTailZero
HLoopHead:
// tmp10 = ((DCTELEM) wsptr[0] + (DCTELEM) wsptr[4]); (r0)
// tmp11 = ((DCTELEM) wsptr[0] - (DCTELEM) wsptr[4]); (r4)
add r0, r0, r4
sub r4, r0, r4, lsl #1
// tmp13 = ((DCTELEM) wsptr[2] + (DCTELEM) wsptr[6]); (r2)
// tmp12 = MULTIPLY((DCTELEM) wsptr[2] - (DCTELEM) wsptr[6], FIX_1_414213562) - tmp13; (r6)
// FIX_... = 360 + 2
add r2, r2, r6
sub r6, r2, r6, lsl #1
mov r8, #360
add r8, r8, #2
mul r6, r8, r6
// tmp0 = tmp10 + tmp13; (r0)
// tmp3 = tmp10 - tmp13; (r8)
// tmp1 = tmp11 + tmp12; (r4)
// tmp2 = tmp11 - tmp12; (r6)
add r0, r0, r2
rsb r6, r2, r6, asr #8
sub r8, r0, r2, lsl #1
add r4, r4, r6
sub r6, r4, r6, lsl #1
stmia local_TMP0123, {r0, r4, r6, r8}
// Odd part
// z13 = (DCTELEM) wsptr[5] + (DCTELEM) wsptr[3]; (r0)
// z10 = (DCTELEM) wsptr[5] - (DCTELEM) wsptr[3]; (r2)
// z11 = (DCTELEM) wsptr[1] + (DCTELEM) wsptr[7]; (r4)
// z12 = (DCTELEM) wsptr[1] - (DCTELEM) wsptr[7]; (r6)
add r0, r5, r3
sub r2, r5, r3
add r4, r1, r7
sub r6, r1, r7
// tmp7 = z11 + z13; (r7)
// tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); (r1)
// FIX_... = 360 + 2
add r7, r4, r0
sub r1, r4, r0
mov r8, #360
add r8, r8, #2
mul r1, r8, r1
// z5 = MULTIPLY(z10 + z12, FIX_1_847759065); (r8)
// tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; (r0)
// tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; (r2)
// FIX_1_8477... = 473 = 472 + 1
// FIX_1_082... = 277 = 276 + 1
// FIX_2_... = 669 = 668 + 1
add r8, r2, r6
mov r9, #472
mla r8, r9, r8, r8
mov r9, #276
mla r0, r6, r9, r6
mov r9, #668
mla r2, r9, r2, r2
sub r0, r0, r8
sub r2, r8, r2
// tmp6 = tmp12 - tmp7; (r6)
// tmp5 = tmp11 - tmp6; (r5)
// tmp4 = tmp10 + tmp5; (r4)
rsb r6, r7, r2, asr #8
rsb r5, r6, r1, asr #8
add r4, r5, r0, asr #8
ldmia local_TMP0123, {r0, r1, r2, r3}
// outptr[0] = range_limit[IDESCALE(tmp0 + tmp7, PASS1_BITS+3) & RANGE_MASK];
// outptr[7] = range_limit[IDESCALE(tmp0 - tmp7, PASS1_BITS+3) & RANGE_MASK];
// outptr[1] = range_limit[IDESCALE(tmp1 + tmp6, PASS1_BITS+3) & RANGE_MASK];
// outptr[6] = range_limit[IDESCALE(tmp1 - tmp6, PASS1_BITS+3) & RANGE_MASK];
// outptr[2] = range_limit[IDESCALE(tmp2 + tmp5, PASS1_BITS+3) & RANGE_MASK];
// outptr[5] = range_limit[IDESCALE(tmp2 - tmp5, PASS1_BITS+3) & RANGE_MASK];
// outptr[4] = range_limit[IDESCALE(tmp3 + tmp4, PASS1_BITS+3) & RANGE_MASK];
// outptr[3] = range_limit[IDESCALE(tmp3 - tmp4, PASS1_BITS+3) & RANGE_MASK];
mov r8, #128
add r0, r0, r7
sub r7, r0, r7, lsl #1
add r0, r8, r0, asr #5
add r7, r8, r7, asr #5
add r1, r1, r6
sub r6, r1, r6, lsl #1
add r1, r8, r1, asr #5
add r6, r8, r6, asr #5
add r2, r2, r5
sub r5, r2, r5, lsl #1
add r2, r8, r2, asr #5
add r5, r8, r5, asr #5
sub r3, r3, r4
add r4, r3, r4, lsl #1
add r3, r8, r3, asr #5
add r4, r8, r4, asr #5
#if __ARM_ARCH__ >= 6
usat r0, #8, r0
usat r1, #8, r1
usat r2, #8, r2
usat r3, #8, r3
usat r4, #8, r4
usat r5, #8, r5
usat r6, #8, r6
usat r7, #8, r7
#else
cmp r0, #255
mvnhi r0, r0, asr #31
andhi r0, #255
cmp r7, #255
mvnhi r7, r7, asr #31
cmp r1, #255
mvnhi r1, r1, asr #31
andhi r1, #255
cmp r6, #255
mvnhi r6, r6, asr #31
andhi r6, #255
cmp r2, #255
mvnhi r2, r2, asr #31
andhi r2, #255
cmp r5, #255
mvnhi r5, r5, asr #31
andhi r5, #255
cmp r3, #255
mvnhi r3, r3, asr #31
cmp r4, #255
mvnhi r4, r4, asr #31
andhi r4, #255
#endif
// r3 r2 r1 r0
orr r0, r0, r1, lsl #8
orr r0, r0, r2, lsl #16
orr r0, r0, r3, lsl #24
// r7 r6 r5 r4
orr r1, r4, r5, lsl #8
orr r1, r1, r6, lsl #16
orr r1, r1, r7, lsl #24
stmia fp, {r0, r1}
add r0, sp, #(off_WORKSPACE + 8*8*4)
cmp ip, r0
bne HLoopTail
Exit:
add sp, sp, #local_SIZE
ldmia sp!, {r4,r5, r6,r7, r8,r9, r10,r11, r12,lr}
bx lr
VLoopHeadZero:
// ok, all AC coefficients are 0
ldr r1, [r10, #QY(0)]
add fp, fp, #2
add r10, r10, #4
mul r0, r1, r0
str r0, [ip, #QY(0)]
str r0, [ip, #QY(1)]
str r0, [ip, #QY(2)]
str r0, [ip, #QY(3)]
str r0, [ip, #QY(4)]
str r0, [ip, #QY(5)]
str r0, [ip, #QY(6)]
str r0, [ip, #QY(7)]
add ip, ip, #4
add r0, sp, #(off_WORKSPACE + 4*8)
cmp ip, r0
beq HLoopStart
b VLoopTail
HLoopTailZero:
mov r0, r0, asr #5
add r0, #128
#if __ARM_ARCH__ >= 6
usat r0, #8, r0
#else
cmp r0, #255
mvnhi r0, r0, asr #31
andhi r0, r0, #255
#endif
orr r0, r0, lsl #8
orr r0, r0, lsl #16
mov r1, r0
stmia fp, {r0, r1}
add r0, sp, #(off_WORKSPACE + 64*4)
cmp ip, r0
beq Exit
b HLoopTail
.endfunc
|
a1studmuffin/Cataclysm-DDA-Android
| 42,842
|
Android/jni/SDL2_image/external/zlib-1.2.8/contrib/inflate86/inffast.S
|
/*
* inffast.S is a hand tuned assembler version of:
*
* inffast.c -- fast decoding
* Copyright (C) 1995-2003 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Copyright (C) 2003 Chris Anderson <christop@charm.net>
* Please use the copyright conditions above.
*
* This version (Jan-23-2003) of inflate_fast was coded and tested under
* GNU/Linux on a pentium 3, using the gcc-3.2 compiler distribution. On that
* machine, I found that gzip style archives decompressed about 20% faster than
* the gcc-3.2 -O3 -fomit-frame-pointer compiled version. Your results will
* depend on how large of a buffer is used for z_stream.next_in & next_out
* (8K-32K worked best for my 256K cpu cache) and how much overhead there is in
 * stream processing I/O and crc32/adler32. In my case, this routine used
* 70% of the cpu time and crc32 used 20%.
*
* I am confident that this version will work in the general case, but I have
* not tested a wide variety of datasets or a wide variety of platforms.
*
* Jan-24-2003 -- Added -DUSE_MMX define for slightly faster inflating.
* It should be a runtime flag instead of compile time flag...
*
* Jan-26-2003 -- Added runtime check for MMX support with cpuid instruction.
* With -DUSE_MMX, only MMX code is compiled. With -DNO_MMX, only non-MMX code
* is compiled. Without either option, runtime detection is enabled. Runtime
 * detection should work on all modern cpus and the recommended algorithm (flip
* ID bit on eflags and then use the cpuid instruction) is used in many
* multimedia applications. Tested under win2k with gcc-2.95 and gas-2.12
* distributed with cygwin3. Compiling with gcc-2.95 -c inffast.S -o
* inffast.obj generates a COFF object which can then be linked with MSVC++
* compiled code. Tested under FreeBSD 4.7 with gcc-2.95.
*
* Jan-28-2003 -- Tested Athlon XP... MMX mode is slower than no MMX (and
* slower than compiler generated code). Adjusted cpuid check to use the MMX
* code only for Pentiums < P4 until I have more data on the P4. Speed
 * improvement is only about 15% on the Athlon when compared with code generated
* with MSVC++. Not sure yet, but I think the P4 will also be slower using the
 * MMX mode because many of its x86 ALU instructions execute in .5 cycles and
* have less latency than MMX ops. Added code to buffer the last 11 bytes of
* the input stream since the MMX code grabs bits in chunks of 32, which
* differs from the inffast.c algorithm. I don't think there would have been
* read overruns where a page boundary was crossed (a segfault), but there
 * could have been overruns when next_in ends on unaligned memory (uninitialized
* memory read).
*
* Mar-13-2003 -- P4 MMX is slightly slower than P4 NO_MMX. I created a C
* version of the non-MMX code so that it doesn't depend on zstrm and zstate
* structure offsets which are hard coded in this file. This was last tested
* with zlib-1.2.0 which is currently in beta testing, newer versions of this
* and inffas86.c can be found at http://www.eetbeetee.com/zlib/ and
* http://www.charm.net/~christop/zlib/
*/
/*
* if you have underscore linking problems (_inflate_fast undefined), try
* using -DGAS_COFF
*/
#if ! defined( GAS_COFF ) && ! defined( GAS_ELF )
#if defined( WIN32 ) || defined( __CYGWIN__ )
#define GAS_COFF /* windows object format */
#else
#define GAS_ELF
#endif
#endif /* ! GAS_COFF && ! GAS_ELF */
#if defined( GAS_COFF )
/* coff externals have underscores */
#define inflate_fast _inflate_fast
#define inflate_fast_use_mmx _inflate_fast_use_mmx
#endif /* GAS_COFF */
.file "inffast.S"
.globl inflate_fast
.text
.align 4,0
.L_invalid_literal_length_code_msg:
.string "invalid literal/length code"
.align 4,0
.L_invalid_distance_code_msg:
.string "invalid distance code"
.align 4,0
.L_invalid_distance_too_far_msg:
.string "invalid distance too far back"
#if ! defined( NO_MMX )
.align 4,0
.L_mask: /* mask[N] = ( 1 << N ) - 1 */
.long 0
.long 1
.long 3
.long 7
.long 15
.long 31
.long 63
.long 127
.long 255
.long 511
.long 1023
.long 2047
.long 4095
.long 8191
.long 16383
.long 32767
.long 65535
.long 131071
.long 262143
.long 524287
.long 1048575
.long 2097151
.long 4194303
.long 8388607
.long 16777215
.long 33554431
.long 67108863
.long 134217727
.long 268435455
.long 536870911
.long 1073741823
.long 2147483647
.long 4294967295
#endif /* NO_MMX */
.text
/*
* struct z_stream offsets, in zlib.h
*/
#define next_in_strm 0 /* strm->next_in */
#define avail_in_strm 4 /* strm->avail_in */
#define next_out_strm 12 /* strm->next_out */
#define avail_out_strm 16 /* strm->avail_out */
#define msg_strm 24 /* strm->msg */
#define state_strm 28 /* strm->state */
/*
* struct inflate_state offsets, in inflate.h
*/
#define mode_state 0 /* state->mode */
#define wsize_state 32 /* state->wsize */
#define write_state 40 /* state->write */
#define window_state 44 /* state->window */
#define hold_state 48 /* state->hold */
#define bits_state 52 /* state->bits */
#define lencode_state 68 /* state->lencode */
#define distcode_state 72 /* state->distcode */
#define lenbits_state 76 /* state->lenbits */
#define distbits_state 80 /* state->distbits */
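/*
 * These values must match the layout of z_stream (zlib.h) and struct
 * inflate_state (inflate.h) in the 32-bit zlib build this file was
 * written against.  A hedged sketch of how the z_stream offsets could
 * be sanity-checked at compile time (illustrative only, not part of
 * the original):
 *
 *   #include <stddef.h>
 *   #include "zlib.h"
 *   typedef char chk_next_in [offsetof(z_stream, next_in)  == 0  ? 1 : -1];
 *   typedef char chk_avail_in[offsetof(z_stream, avail_in) == 4  ? 1 : -1];
 *   typedef char chk_state   [offsetof(z_stream, state)    == 28 ? 1 : -1];
 */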
/*
* inflate_fast's activation record
*/
#define local_var_size 64 /* how much local space for vars */
#define strm_sp 88 /* first arg: z_stream * (local_var_size + 24) */
#define start_sp 92 /* second arg: unsigned int (local_var_size + 28) */
/*
* offsets for local vars on stack
*/
#define out 60 /* unsigned char* */
#define window 56 /* unsigned char* */
#define wsize 52 /* unsigned int */
#define write 48 /* unsigned int */
#define in 44 /* unsigned char* */
#define beg 40 /* unsigned char* */
#define buf 28 /* char[ 12 ] */
#define len 24 /* unsigned int */
#define last 20 /* unsigned char* */
#define end 16 /* unsigned char* */
#define dcode 12 /* code* */
#define lcode 8 /* code* */
#define dmask 4 /* unsigned int */
#define lmask 0 /* unsigned int */
/*
* typedef enum inflate_mode consts, in inflate.h
*/
#define INFLATE_MODE_TYPE 11 /* state->mode flags enum-ed in inflate.h */
#define INFLATE_MODE_BAD 26
#if ! defined( USE_MMX ) && ! defined( NO_MMX )
#define RUN_TIME_MMX
#define CHECK_MMX 1
#define DO_USE_MMX 2
#define DONT_USE_MMX 3
.globl inflate_fast_use_mmx
.data
.align 4,0
inflate_fast_use_mmx: /* integer flag for run time control 1=check,2=mmx,3=no */
.long CHECK_MMX
#if defined( GAS_ELF )
/* elf info */
.type inflate_fast_use_mmx,@object
.size inflate_fast_use_mmx,4
#endif
#endif /* RUN_TIME_MMX */
#if defined( GAS_COFF )
/* coff info: scl 2 = extern, type 32 = function */
.def inflate_fast; .scl 2; .type 32; .endef
#endif
.text
.align 32,0x90
inflate_fast:
pushl %edi
pushl %esi
pushl %ebp
pushl %ebx
pushf /* save eflags (strm_sp, state_sp assumes this is 32 bits) */
subl $local_var_size, %esp
cld
#define strm_r %esi
#define state_r %edi
movl strm_sp(%esp), strm_r
movl state_strm(strm_r), state_r
/* in = strm->next_in;
* out = strm->next_out;
* last = in + strm->avail_in - 11;
* beg = out - (start - strm->avail_out);
* end = out + (strm->avail_out - 257);
*/
movl avail_in_strm(strm_r), %edx
movl next_in_strm(strm_r), %eax
addl %eax, %edx /* avail_in += next_in */
subl $11, %edx /* avail_in -= 11 */
movl %eax, in(%esp)
movl %edx, last(%esp)
movl start_sp(%esp), %ebp
movl avail_out_strm(strm_r), %ecx
movl next_out_strm(strm_r), %ebx
subl %ecx, %ebp /* start -= avail_out */
negl %ebp /* start = -start */
addl %ebx, %ebp /* start += next_out */
subl $257, %ecx /* avail_out -= 257 */
addl %ebx, %ecx /* avail_out += out */
movl %ebx, out(%esp)
movl %ebp, beg(%esp)
movl %ecx, end(%esp)
/* wsize = state->wsize;
* write = state->write;
* window = state->window;
* hold = state->hold;
* bits = state->bits;
* lcode = state->lencode;
* dcode = state->distcode;
* lmask = ( 1 << state->lenbits ) - 1;
* dmask = ( 1 << state->distbits ) - 1;
*/
movl lencode_state(state_r), %eax
movl distcode_state(state_r), %ecx
movl %eax, lcode(%esp)
movl %ecx, dcode(%esp)
movl $1, %eax
movl lenbits_state(state_r), %ecx
shll %cl, %eax
decl %eax
movl %eax, lmask(%esp)
movl $1, %eax
movl distbits_state(state_r), %ecx
shll %cl, %eax
decl %eax
movl %eax, dmask(%esp)
movl wsize_state(state_r), %eax
movl write_state(state_r), %ecx
movl window_state(state_r), %edx
movl %eax, wsize(%esp)
movl %ecx, write(%esp)
movl %edx, window(%esp)
movl hold_state(state_r), %ebp
movl bits_state(state_r), %ebx
#undef strm_r
#undef state_r
#define in_r %esi
#define from_r %esi
#define out_r %edi
movl in(%esp), in_r
movl last(%esp), %ecx
cmpl in_r, %ecx
ja .L_align_long /* if in < last */
addl $11, %ecx /* ecx = &in[ avail_in ] */
subl in_r, %ecx /* ecx = avail_in */
movl $12, %eax
subl %ecx, %eax /* eax = 12 - avail_in */
leal buf(%esp), %edi
rep movsb /* memcpy( buf, in, avail_in ) */
movl %eax, %ecx
xorl %eax, %eax
rep stosb /* memset( &buf[ avail_in ], 0, 12 - avail_in ) */
leal buf(%esp), in_r /* in = buf */
movl in_r, last(%esp) /* last = in, do just one iteration */
jmp .L_is_aligned
/* align in_r on long boundary */
.L_align_long:
testl $3, in_r
jz .L_is_aligned
xorl %eax, %eax
movb (in_r), %al
incl in_r
movl %ebx, %ecx
addl $8, %ebx
shll %cl, %eax
orl %eax, %ebp
jmp .L_align_long
.L_is_aligned:
movl out(%esp), out_r
#if defined( NO_MMX )
jmp .L_do_loop
#endif
#if defined( USE_MMX )
jmp .L_init_mmx
#endif
/*** Runtime MMX check ***/
#if defined( RUN_TIME_MMX )
.L_check_mmx:
cmpl $DO_USE_MMX, inflate_fast_use_mmx
je .L_init_mmx
ja .L_do_loop /* > 2 */
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushf
movl (%esp), %eax /* copy eflags to eax */
xorl $0x200000, (%esp) /* try toggling ID bit of eflags (bit 21)
* to see if cpu supports cpuid...
* ID bit method not supported by NexGen but
* bios may load a cpuid instruction and
* cpuid may be disabled on Cyrix 5-6x86 */
popf
pushf
popl %edx /* copy new eflags to edx */
xorl %eax, %edx /* test if ID bit is flipped */
jz .L_dont_use_mmx /* not flipped if zero */
xorl %eax, %eax
cpuid
cmpl $0x756e6547, %ebx /* check for GenuineIntel in ebx,ecx,edx */
jne .L_dont_use_mmx
cmpl $0x6c65746e, %ecx
jne .L_dont_use_mmx
cmpl $0x49656e69, %edx
jne .L_dont_use_mmx
movl $1, %eax
cpuid /* get cpu features */
shrl $8, %eax
andl $15, %eax
cmpl $6, %eax /* check for Pentium family, is 0xf for P4 */
jne .L_dont_use_mmx
testl $0x800000, %edx /* test if MMX feature is set (bit 23) */
jnz .L_use_mmx
jmp .L_dont_use_mmx
.L_use_mmx:
movl $DO_USE_MMX, inflate_fast_use_mmx
jmp .L_check_mmx_pop
.L_dont_use_mmx:
movl $DONT_USE_MMX, inflate_fast_use_mmx
.L_check_mmx_pop:
popl %edx
popl %ecx
popl %ebx
popl %eax
jmp .L_check_mmx
#endif
/*** Non-MMX code ***/
#if defined ( NO_MMX ) || defined( RUN_TIME_MMX )
#define hold_r %ebp
#define bits_r %bl
#define bitslong_r %ebx
.align 32,0x90
.L_while_test:
/* while (in < last && out < end)
*/
cmpl out_r, end(%esp)
jbe .L_break_loop /* if (out >= end) */
cmpl in_r, last(%esp)
jbe .L_break_loop
.L_do_loop:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out
*
* do {
* if (bits < 15) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* this = lcode[hold & lmask]
*/
cmpb $15, bits_r
ja .L_get_length_code /* if (15 < bits) */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
.L_get_length_code:
movl lmask(%esp), %edx /* edx = lmask */
movl lcode(%esp), %ecx /* ecx = lcode */
andl hold_r, %edx /* edx &= hold */
movl (%ecx,%edx,4), %eax /* eax = lcode[hold & lmask] */
.L_dolen:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out
*
* dolen:
* bits -= this.bits;
* hold >>= this.bits
*/
movb %ah, %cl /* cl = this.bits */
subb %ah, bits_r /* bits -= this.bits */
shrl %cl, hold_r /* hold >>= this.bits */
/* check if op is a literal
* if (op == 0) {
* PUP(out) = this.val;
* }
*/
testb %al, %al
jnz .L_test_for_length_base /* if (op != 0) 45.7% */
shrl $16, %eax /* output this.val char */
stosb
jmp .L_while_test
.L_test_for_length_base:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = len
*
* else if (op & 16) {
* len = this.val
* op &= 15
* if (op) {
* if (op > bits) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* len += hold & mask[op];
* bits -= op;
* hold >>= op;
* }
*/
#define len_r %edx
movl %eax, len_r /* len = this */
shrl $16, len_r /* len = this.val */
movb %al, %cl
testb $16, %al
jz .L_test_for_second_level_length /* if ((op & 16) == 0) 8% */
andb $15, %cl /* op &= 15 */
jz .L_save_len /* if (!op) */
cmpb %cl, bits_r
jae .L_add_bits_to_len /* if (op <= bits) */
movb %cl, %ch /* stash op in ch, freeing cl */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
movb %ch, %cl /* move op back to ecx */
.L_add_bits_to_len:
movl $1, %eax
shll %cl, %eax
decl %eax
subb %cl, bits_r
andl hold_r, %eax /* eax &= hold */
shrl %cl, hold_r
addl %eax, len_r /* len += hold & mask[op] */
.L_save_len:
movl len_r, len(%esp) /* save len */
#undef len_r
.L_decode_distance:
/* regs: %esi = in, %ebp = hold, %bl = bits, %edi = out, %edx = dist
*
* if (bits < 15) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* this = dcode[hold & dmask];
* dodist:
* bits -= this.bits;
* hold >>= this.bits;
* op = this.op;
*/
cmpb $15, bits_r
ja .L_get_distance_code /* if (15 < bits) */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
.L_get_distance_code:
movl dmask(%esp), %edx /* edx = dmask */
movl dcode(%esp), %ecx /* ecx = dcode */
andl hold_r, %edx /* edx &= hold */
movl (%ecx,%edx,4), %eax /* eax = dcode[hold & dmask] */
#define dist_r %edx
.L_dodist:
movl %eax, dist_r /* dist = this */
shrl $16, dist_r /* dist = this.val */
movb %ah, %cl
subb %ah, bits_r /* bits -= this.bits */
shrl %cl, hold_r /* hold >>= this.bits */
/* if (op & 16) {
* dist = this.val
* op &= 15
* if (op > bits) {
* hold |= *((unsigned short *)in)++ << bits;
* bits += 16
* }
* dist += hold & mask[op];
* bits -= op;
* hold >>= op;
*/
movb %al, %cl /* cl = this.op */
testb $16, %al /* if ((op & 16) == 0) */
jz .L_test_for_second_level_dist
andb $15, %cl /* op &= 15 */
jz .L_check_dist_one
cmpb %cl, bits_r
jae .L_add_bits_to_dist /* if (op <= bits) 97.6% */
movb %cl, %ch /* stash op in ch, freeing cl */
xorl %eax, %eax
lodsw /* al = *(ushort *)in++ */
movb bits_r, %cl /* cl = bits, needs it for shifting */
addb $16, bits_r /* bits += 16 */
shll %cl, %eax
orl %eax, hold_r /* hold |= *((ushort *)in)++ << bits */
movb %ch, %cl /* move op back to ecx */
.L_add_bits_to_dist:
movl $1, %eax
shll %cl, %eax
decl %eax /* (1 << op) - 1 */
subb %cl, bits_r
andl hold_r, %eax /* eax &= hold */
shrl %cl, hold_r
addl %eax, dist_r /* dist += hold & ((1 << op) - 1) */
jmp .L_check_window
.L_check_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes
*
* nbytes = out - beg;
* if (dist <= nbytes) {
* from = out - dist;
* do {
* PUP(out) = PUP(from);
 * } while (--len > 0);
* }
*/
movl in_r, in(%esp) /* save in so from can use it's reg */
movl out_r, %eax
subl beg(%esp), %eax /* nbytes = out - beg */
cmpl dist_r, %eax
jb .L_clip_window /* if (dist > nbytes) 4.2% */
movl len(%esp), %ecx
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
subl $3, %ecx
movb (from_r), %al
movb %al, (out_r)
movb 1(from_r), %al
movb 2(from_r), %dl
addl $3, from_r
movb %al, 1(out_r)
movb %dl, 2(out_r)
addl $3, out_r
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
jmp .L_while_test
.align 16,0x90
.L_check_dist_one:
cmpl $1, dist_r
jne .L_check_window
cmpl out_r, beg(%esp)
je .L_check_window
decl out_r
movl len(%esp), %ecx
movb (out_r), %al
subl $3, %ecx
movb %al, 1(out_r)
movb %al, 2(out_r)
movb %al, 3(out_r)
addl $4, out_r
rep stosb
jmp .L_while_test
.align 16,0x90
.L_test_for_second_level_length:
/* else if ((op & 64) == 0) {
* this = lcode[this.val + (hold & mask[op])];
* }
*/
testb $64, %al
jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */
movl $1, %eax
shll %cl, %eax
decl %eax
andl hold_r, %eax /* eax &= hold */
addl %edx, %eax /* eax += this.val */
movl lcode(%esp), %edx /* edx = lcode */
movl (%edx,%eax,4), %eax /* eax = lcode[val + (hold&mask[op])] */
jmp .L_dolen
.align 16,0x90
.L_test_for_second_level_dist:
/* else if ((op & 64) == 0) {
* this = dcode[this.val + (hold & mask[op])];
* }
*/
testb $64, %al
jnz .L_invalid_distance_code /* if ((op & 64) != 0) */
movl $1, %eax
shll %cl, %eax
decl %eax
andl hold_r, %eax /* eax &= hold */
addl %edx, %eax /* eax += this.val */
movl dcode(%esp), %edx /* edx = dcode */
movl (%edx,%eax,4), %eax /* eax = dcode[val + (hold&mask[op])] */
jmp .L_dodist
.align 16,0x90
.L_clip_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes
*
* else {
* if (dist > wsize) {
* invalid distance
* }
* from = window;
* nbytes = dist - nbytes;
* if (write == 0) {
* from += wsize - nbytes;
*/
#define nbytes_r %ecx
movl %eax, nbytes_r
movl wsize(%esp), %eax /* prepare for dist compare */
negl nbytes_r /* nbytes = -nbytes */
movl window(%esp), from_r /* from = window */
cmpl dist_r, %eax
jb .L_invalid_distance_too_far /* if (dist > wsize) */
addl dist_r, nbytes_r /* nbytes = dist - nbytes */
cmpl $0, write(%esp)
jne .L_wrap_around_window /* if (write != 0) */
subl nbytes_r, %eax
addl %eax, from_r /* from += wsize - nbytes */
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = len
*
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = out - dist;
* }
* }
*/
#define len_r %eax
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
.L_wrap_around_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = write, %eax = len
*
* else if (write < nbytes) {
* from += wsize + write - nbytes;
* nbytes -= write;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = window;
* nbytes = write;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while(--nbytes);
* from = out - dist;
* }
* }
* }
*/
#define write_r %eax
movl write(%esp), write_r
cmpl write_r, nbytes_r
jbe .L_contiguous_in_window /* if (write >= nbytes) */
addl wsize(%esp), from_r
addl write_r, from_r
subl nbytes_r, from_r /* from += wsize + write - nbytes */
subl write_r, nbytes_r /* nbytes -= write */
#undef write_r
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl window(%esp), from_r /* from = window */
movl write(%esp), nbytes_r /* nbytes = write */
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1
.L_contiguous_in_window:
/* regs: %esi = from, %ebp = hold, %bl = bits, %edi = out, %edx = dist
* %ecx = nbytes, %eax = write, %eax = len
*
* else {
* from += write - nbytes;
* if (nbytes < len) {
* len -= nbytes;
* do {
* PUP(out) = PUP(from);
* } while (--nbytes);
* from = out - dist;
* }
* }
*/
#define write_r %eax
addl write_r, from_r
subl nbytes_r, from_r /* from += write - nbytes */
#undef write_r
movl len(%esp), len_r
cmpl nbytes_r, len_r
jbe .L_do_copy1 /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
.L_do_copy1:
/* regs: %esi = from, %esi = in, %ebp = hold, %bl = bits, %edi = out
* %eax = len
*
* while (len > 0) {
* PUP(out) = PUP(from);
* len--;
* }
* }
* } while (in < last && out < end);
*/
#undef nbytes_r
#define in_r %esi
movl len_r, %ecx
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
jmp .L_while_test
#undef len_r
#undef dist_r
#endif /* NO_MMX || RUN_TIME_MMX */
/*** MMX code ***/
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
.align 32,0x90
.L_init_mmx:
emms
#undef bits_r
#undef bitslong_r
#define bitslong_r %ebp
#define hold_mm %mm0
movd %ebp, hold_mm
movl %ebx, bitslong_r
#define used_mm %mm1
#define dmask2_mm %mm2
#define lmask2_mm %mm3
#define lmask_mm %mm4
#define dmask_mm %mm5
#define tmp_mm %mm6
movd lmask(%esp), lmask_mm
movq lmask_mm, lmask2_mm
movd dmask(%esp), dmask_mm
movq dmask_mm, dmask2_mm
pxor used_mm, used_mm
movl lcode(%esp), %ebx /* ebx = lcode */
jmp .L_do_loop_mmx
.align 32,0x90
.L_while_test_mmx:
/* while (in < last && out < end)
*/
cmpl out_r, end(%esp)
jbe .L_break_loop /* if (out >= end) */
cmpl in_r, last(%esp)
jbe .L_break_loop
.L_do_loop_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
cmpl $32, bitslong_r
ja .L_get_length_code_mmx /* if (32 < bits) */
movd bitslong_r, tmp_mm
movd (in_r), %mm7
addl $4, in_r
psllq tmp_mm, %mm7
addl $32, bitslong_r
por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */
.L_get_length_code_mmx:
pand hold_mm, lmask_mm
movd lmask_mm, %eax
movq lmask2_mm, lmask_mm
movl (%ebx,%eax,4), %eax /* eax = lcode[hold & lmask] */
.L_dolen_mmx:
movzbl %ah, %ecx /* ecx = this.bits */
movd %ecx, used_mm
subl %ecx, bitslong_r /* bits -= this.bits */
testb %al, %al
jnz .L_test_for_length_base_mmx /* if (op != 0) 45.7% */
shrl $16, %eax /* output this.val char */
stosb
jmp .L_while_test_mmx
.L_test_for_length_base_mmx:
#define len_r %edx
movl %eax, len_r /* len = this */
shrl $16, len_r /* len = this.val */
testb $16, %al
jz .L_test_for_second_level_length_mmx /* if ((op & 16) == 0) 8% */
andl $15, %eax /* op &= 15 */
jz .L_decode_distance_mmx /* if (!op) */
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd %eax, used_mm
movd hold_mm, %ecx
subl %eax, bitslong_r
andl .L_mask(,%eax,4), %ecx
addl %ecx, len_r /* len += hold & mask[op] */
.L_decode_distance_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
cmpl $32, bitslong_r
ja .L_get_dist_code_mmx /* if (32 < bits) */
movd bitslong_r, tmp_mm
movd (in_r), %mm7
addl $4, in_r
psllq tmp_mm, %mm7
addl $32, bitslong_r
por %mm7, hold_mm /* hold_mm |= *((uint *)in)++ << bits */
.L_get_dist_code_mmx:
movl dcode(%esp), %ebx /* ebx = dcode */
pand hold_mm, dmask_mm
movd dmask_mm, %eax
movq dmask2_mm, dmask_mm
	movl (%ebx,%eax,4), %eax /* eax = dcode[hold & dmask] */
.L_dodist_mmx:
#define dist_r %ebx
movzbl %ah, %ecx /* ecx = this.bits */
movl %eax, dist_r
shrl $16, dist_r /* dist = this.val */
subl %ecx, bitslong_r /* bits -= this.bits */
movd %ecx, used_mm
testb $16, %al /* if ((op & 16) == 0) */
jz .L_test_for_second_level_dist_mmx
andl $15, %eax /* op &= 15 */
jz .L_check_dist_one_mmx
.L_add_bits_to_dist_mmx:
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd %eax, used_mm /* save bit length of current op */
movd hold_mm, %ecx /* get the next bits on input stream */
subl %eax, bitslong_r /* bits -= op bits */
andl .L_mask(,%eax,4), %ecx /* ecx = hold & mask[op] */
addl %ecx, dist_r /* dist += hold & mask[op] */
.L_check_window_mmx:
movl in_r, in(%esp) /* save in so from can use it's reg */
movl out_r, %eax
subl beg(%esp), %eax /* nbytes = out - beg */
cmpl dist_r, %eax
jb .L_clip_window_mmx /* if (dist > nbytes) 4.2% */
movl len_r, %ecx
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
subl $3, %ecx
movb (from_r), %al
movb %al, (out_r)
movb 1(from_r), %al
movb 2(from_r), %dl
addl $3, from_r
movb %al, 1(out_r)
movb %dl, 2(out_r)
addl $3, out_r
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
.align 16,0x90
.L_check_dist_one_mmx:
cmpl $1, dist_r
jne .L_check_window_mmx
cmpl out_r, beg(%esp)
je .L_check_window_mmx
decl out_r
movl len_r, %ecx
movb (out_r), %al
subl $3, %ecx
movb %al, 1(out_r)
movb %al, 2(out_r)
movb %al, 3(out_r)
addl $4, out_r
rep stosb
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
.align 16,0x90
.L_test_for_second_level_length_mmx:
testb $64, %al
jnz .L_test_for_end_of_block /* if ((op & 64) != 0) */
andl $15, %eax
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ecx
andl .L_mask(,%eax,4), %ecx
addl len_r, %ecx
	movl (%ebx,%ecx,4), %eax /* eax = lcode[this.val + (hold & mask[op])] */
jmp .L_dolen_mmx
.align 16,0x90
.L_test_for_second_level_dist_mmx:
testb $64, %al
jnz .L_invalid_distance_code /* if ((op & 64) != 0) */
andl $15, %eax
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ecx
andl .L_mask(,%eax,4), %ecx
	movl dcode(%esp), %eax /* eax = dcode */
addl dist_r, %ecx
	movl (%eax,%ecx,4), %eax /* eax = dcode[this.val + (hold & mask[op])] */
jmp .L_dodist_mmx
.align 16,0x90
.L_clip_window_mmx:
#define nbytes_r %ecx
movl %eax, nbytes_r
movl wsize(%esp), %eax /* prepare for dist compare */
negl nbytes_r /* nbytes = -nbytes */
movl window(%esp), from_r /* from = window */
cmpl dist_r, %eax
jb .L_invalid_distance_too_far /* if (dist > wsize) */
addl dist_r, nbytes_r /* nbytes = dist - nbytes */
cmpl $0, write(%esp)
jne .L_wrap_around_window_mmx /* if (write != 0) */
subl nbytes_r, %eax
addl %eax, from_r /* from += wsize - nbytes */
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
.L_wrap_around_window_mmx:
#define write_r %eax
movl write(%esp), write_r
cmpl write_r, nbytes_r
jbe .L_contiguous_in_window_mmx /* if (write >= nbytes) */
addl wsize(%esp), from_r
addl write_r, from_r
subl nbytes_r, from_r /* from += wsize + write - nbytes */
subl write_r, nbytes_r /* nbytes -= write */
#undef write_r
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl window(%esp), from_r /* from = window */
movl write(%esp), nbytes_r /* nbytes = write */
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
jmp .L_do_copy1_mmx
.L_contiguous_in_window_mmx:
#define write_r %eax
addl write_r, from_r
subl nbytes_r, from_r /* from += write - nbytes */
#undef write_r
cmpl nbytes_r, len_r
jbe .L_do_copy1_mmx /* if (nbytes >= len) */
subl nbytes_r, len_r /* len -= nbytes */
rep movsb
movl out_r, from_r
subl dist_r, from_r /* from = out - dist */
.L_do_copy1_mmx:
#undef nbytes_r
#define in_r %esi
movl len_r, %ecx
rep movsb
movl in(%esp), in_r /* move in back to %esi, toss from */
movl lcode(%esp), %ebx /* move lcode back to %ebx, toss dist */
jmp .L_while_test_mmx
#undef hold_r
#undef bitslong_r
#endif /* USE_MMX || RUN_TIME_MMX */
/*** USE_MMX, NO_MMX, and RUNTIME_MMX from here on ***/
.L_invalid_distance_code:
/* else {
* strm->msg = "invalid distance code";
* state->mode = BAD;
* }
*/
movl $.L_invalid_distance_code_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_test_for_end_of_block:
/* else if (op & 32) {
* state->mode = TYPE;
* break;
* }
*/
testb $32, %al
jz .L_invalid_literal_length_code /* if ((op & 32) == 0) */
movl $0, %ecx
movl $INFLATE_MODE_TYPE, %edx
jmp .L_update_stream_state
.L_invalid_literal_length_code:
/* else {
* strm->msg = "invalid literal/length code";
* state->mode = BAD;
* }
*/
movl $.L_invalid_literal_length_code_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_invalid_distance_too_far:
/* strm->msg = "invalid distance too far back";
* state->mode = BAD;
*/
movl in(%esp), in_r /* from_r has in's reg, put in back */
movl $.L_invalid_distance_too_far_msg, %ecx
movl $INFLATE_MODE_BAD, %edx
jmp .L_update_stream_state
.L_update_stream_state:
/* set strm->msg = %ecx, strm->state->mode = %edx */
movl strm_sp(%esp), %eax
testl %ecx, %ecx /* if (msg != NULL) */
jz .L_skip_msg
movl %ecx, msg_strm(%eax) /* strm->msg = msg */
.L_skip_msg:
movl state_strm(%eax), %eax /* state = strm->state */
movl %edx, mode_state(%eax) /* state->mode = edx (BAD | TYPE) */
jmp .L_break_loop
.align 32,0x90
.L_break_loop:
/*
* Regs:
*
* bits = %ebp when mmx, and in %ebx when non-mmx
* hold = %hold_mm when mmx, and in %ebp when non-mmx
* in = %esi
* out = %edi
*/
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
#if defined( RUN_TIME_MMX )
cmpl $DO_USE_MMX, inflate_fast_use_mmx
jne .L_update_next_in
#endif /* RUN_TIME_MMX */
movl %ebp, %ebx
.L_update_next_in:
#endif
#define strm_r %eax
#define state_r %edx
/* len = bits >> 3;
* in -= len;
* bits -= len << 3;
* hold &= (1U << bits) - 1;
* state->hold = hold;
* state->bits = bits;
* strm->next_in = in;
* strm->next_out = out;
*/
movl strm_sp(%esp), strm_r
movl %ebx, %ecx
movl state_strm(strm_r), state_r
shrl $3, %ecx
subl %ecx, in_r
shll $3, %ecx
subl %ecx, %ebx
movl out_r, next_out_strm(strm_r)
movl %ebx, bits_state(state_r)
movl %ebx, %ecx
leal buf(%esp), %ebx
cmpl %ebx, last(%esp)
jne .L_buf_not_used /* if buf != last */
subl %ebx, in_r /* in -= buf */
movl next_in_strm(strm_r), %ebx
movl %ebx, last(%esp) /* last = strm->next_in */
addl %ebx, in_r /* in += strm->next_in */
movl avail_in_strm(strm_r), %ebx
subl $11, %ebx
addl %ebx, last(%esp) /* last = &strm->next_in[ avail_in - 11 ] */
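/* A rough C equivalent of the rebasing above (a sketch only; buf appears to be
 * a small local copy of the input tail set up earlier in the function, which
 * is not shown here):
 *
 *   if (last == buf) {
 *       in   = strm->next_in + (in - buf);
 *       last = strm->next_in + (strm->avail_in - 11);
 *   }
 */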
.L_buf_not_used:
movl in_r, next_in_strm(strm_r)
movl $1, %ebx
shll %cl, %ebx
decl %ebx
#if defined( USE_MMX ) || defined( RUN_TIME_MMX )
#if defined( RUN_TIME_MMX )
cmpl $DO_USE_MMX, inflate_fast_use_mmx
jne .L_update_hold
#endif /* RUN_TIME_MMX */
psrlq used_mm, hold_mm /* hold_mm >>= last bit length */
movd hold_mm, %ebp
emms
.L_update_hold:
#endif /* USE_MMX || RUN_TIME_MMX */
andl %ebx, %ebp
movl %ebp, hold_state(state_r)
#define last_r %ebx
/* strm->avail_in = in < last ? 11 + (last - in) : 11 - (in - last) */
movl last(%esp), last_r
cmpl in_r, last_r
jbe .L_last_is_smaller /* if (in >= last) */
subl in_r, last_r /* last -= in */
addl $11, last_r /* last += 11 */
movl last_r, avail_in_strm(strm_r)
jmp .L_fixup_out
.L_last_is_smaller:
subl last_r, in_r /* in -= last */
negl in_r /* in = -in */
addl $11, in_r /* in += 11 */
movl in_r, avail_in_strm(strm_r)
#undef last_r
#define end_r %ebx
.L_fixup_out:
/* strm->avail_out = out < end ? 257 + (end - out) : 257 - (out - end)*/
movl end(%esp), end_r
cmpl out_r, end_r
jbe .L_end_is_smaller /* if (out >= end) */
subl out_r, end_r /* end -= out */
addl $257, end_r /* end += 257 */
movl end_r, avail_out_strm(strm_r)
jmp .L_done
.L_end_is_smaller:
subl end_r, out_r /* out -= end */
negl out_r /* out = -out */
addl $257, out_r /* out += 257 */
movl out_r, avail_out_strm(strm_r)
#undef end_r
#undef strm_r
#undef state_r
.L_done:
addl $local_var_size, %esp
popf
popl %ebx
popl %ebp
popl %esi
popl %edi
ret
#if defined( GAS_ELF )
/* elf info */
.type inflate_fast,@function
.size inflate_fast,.-inflate_fast
#endif
|
a1studmuffin/Cataclysm-DDA-Android
| 10,365
|
Android/jni/SDL2_image/external/zlib-1.2.8/contrib/asm686/match.S
|
/* match.S -- x86 assembly version of the zlib longest_match() function.
* Optimized for the Intel 686 chips (PPro and later).
*
* Copyright (C) 1998, 2007 Brian Raiter <breadbox@muppetlabs.com>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the author be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
#ifndef NO_UNDERLINE
#define match_init _match_init
#define longest_match _longest_match
#endif
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
#define chainlenwmask 0 /* high word: current chain len */
/* low word: s->wmask */
#define window 4 /* local copy of s->window */
#define windowbestlen 8 /* s->window + bestlen */
#define scanstart 16 /* first two bytes of string */
#define scanend 12 /* last two bytes of string */
#define scanalign 20 /* dword-misalignment of string */
#define nicematch 24 /* a good enough match size */
#define bestlen 28 /* size of best match so far */
#define scan 32 /* ptr to string wanting match */
#define LocalVarsSize (36)
/* saved ebx 36 */
/* saved edi 40 */
/* saved esi 44 */
/* saved ebp 48 */
/* return address 52 */
#define deflatestate 56 /* the function arguments */
#define curmatch 60
/* All the +zlib1222add offsets are due to the addition of fields
* in zlib in the deflate_state structure since the asm code was first written
* (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
* (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
* (if you compile with zlib 1.2.2.2 or later, use "zlib1222add equ 8").
*/
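/* One simple way to sanity-check these offsets against the deflate.h you are
 * actually building with is a tiny helper in the spirit of the print_depl()
 * sketch shipped with zlib's gvmat64 port (a sketch only; the field names
 * below assume a zlib 1.2.2.2-or-later deflate_state):
 *
 *   #include <stddef.h>
 *   #include <stdio.h>
 *   #include "deflate.h"
 *   int main(void)
 *   {
 *       printf("dsWSize     = %u\n", (unsigned)offsetof(deflate_state, w_size));
 *       printf("dsWMask     = %u\n", (unsigned)offsetof(deflate_state, w_mask));
 *       printf("dsNiceMatch = %u\n", (unsigned)offsetof(deflate_state, nice_match));
 *       return 0;
 *   }
 */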
#define zlib1222add (8)
#define dsWSize (36+zlib1222add)
#define dsWMask (44+zlib1222add)
#define dsWindow (48+zlib1222add)
#define dsPrev (56+zlib1222add)
#define dsMatchLen (88+zlib1222add)
#define dsPrevMatch (92+zlib1222add)
#define dsStrStart (100+zlib1222add)
#define dsMatchStart (104+zlib1222add)
#define dsLookahead (108+zlib1222add)
#define dsPrevLen (112+zlib1222add)
#define dsMaxChainLen (116+zlib1222add)
#define dsGoodMatch (132+zlib1222add)
#define dsNiceMatch (136+zlib1222add)
.file "match.S"
.globl match_init, longest_match
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
.cfi_sections .debug_frame
longest_match:
.cfi_startproc
/* Save registers that the compiler may be using, and adjust %esp to */
/* make room for our stack frame. */
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset ebp, -8
pushl %edi
.cfi_def_cfa_offset 12
pushl %esi
.cfi_def_cfa_offset 16
pushl %ebx
.cfi_def_cfa_offset 20
subl $LocalVarsSize, %esp
.cfi_def_cfa_offset LocalVarsSize+20
/* Retrieve the function arguments. %ecx will hold cur_match */
/* throughout the entire function. %edx will hold the pointer to the */
/* deflate_state structure during the function's setup (before */
/* entering the main loop). */
movl deflatestate(%esp), %edx
movl curmatch(%esp), %ecx
/* uInt wmask = s->w_mask; */
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/* chain_length >>= 2; */
/* } */
movl dsPrevLen(%edx), %eax
movl dsGoodMatch(%edx), %ebx
cmpl %ebx, %eax
movl dsWMask(%edx), %eax
movl dsMaxChainLen(%edx), %ebx
jl LastMatchGood
shrl $2, %ebx
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the wmask */
/* value, which it will always accompany. */
decl %ebx
shll $16, %ebx
orl %eax, %ebx
movl %ebx, chainlenwmask(%esp)
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch(%edx), %eax
movl dsLookahead(%edx), %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, nicematch(%esp)
/* register Bytef *scan = s->window + s->strstart; */
movl dsWindow(%edx), %esi
movl %esi, window(%esp)
movl dsStrStart(%edx), %ebp
lea (%esi,%ebp), %edi
movl %edi, scan(%esp)
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
movl %edi, %eax
negl %eax
andl $3, %eax
movl %eax, scanalign(%esp)
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/* s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize(%edx), %eax
subl $MIN_LOOKAHEAD, %eax
subl %eax, %ebp
jg LimitPositive
xorl %ebp, %ebp
LimitPositive:
/* int best_len = s->prev_length; */
movl dsPrevLen(%edx), %eax
movl %eax, bestlen(%esp)
/* Store the sum of s->window + best_len in %esi, and in windowbestlen(%esp). */
addl %eax, %esi
movl %esi, windowbestlen(%esp)
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* Posf *prev = s->prev; */
movzwl (%edi), %ebx
movl %ebx, scanstart(%esp)
movzwl -1(%edi,%eax), %ebx
movl %ebx, scanend(%esp)
movl dsPrev(%edx), %edi
/* Jump into the main loop. */
movl chainlenwmask(%esp), %edx
jmp LoopEntry
.balign 16
/* do {
* match = s->window + cur_match;
* if (*(ushf*)(match+best_len-1) != scan_end ||
* *(ushf*)match != scan_start) continue;
* [...]
* } while ((cur_match = prev[cur_match & wmask]) > limit
* && --chain_length != 0);
*
* Here is the inner loop of the function. The function will spend the
* majority of its time in this loop, and majority of that time will
* be spent in the first ten instructions.
*
* Within this loop:
* %ebx = scanend
* %ecx = curmatch
* %edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
* %esi = windowbestlen - i.e., (window + bestlen)
* %edi = prev
* %ebp = limit
*/
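/* In C terms, the packed counter in %edx behaves roughly like this
 * (a sketch only, not actual zlib code):
 *
 *   cur_match = prev[cur_match & (chainlenwmask & 0xffff)];
 *   if (cur_match <= limit) break;
 *   chainlenwmask -= 0x10000;            -- i.e. --chain_length in the high word
 *   if ((int)chainlenwmask < 0) break;   -- sign flag used instead of zero flag
 */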
LookupLoop:
andl %edx, %ecx
movzwl (%edi,%ecx,2), %ecx
cmpl %ebp, %ecx
jbe LeaveNow
subl $0x00010000, %edx
js LeaveNow
LoopEntry: movzwl -1(%esi,%ecx), %eax
cmpl %ebx, %eax
jnz LookupLoop
movl window(%esp), %eax
movzwl (%eax,%ecx), %eax
cmpl scanstart(%esp), %eax
jnz LookupLoop
/* Store the current value of chainlen. */
movl %edx, chainlenwmask(%esp)
/* Point %edi to the string under scrutiny, and %esi to the string we */
/* are hoping to match it up with. In actuality, %esi and %edi are */
/* both pointed (MAX_MATCH_8 + scanalign) bytes ahead, and %edx is */
/* initialized to -MAX_MATCH_8. */
movl window(%esp), %esi
movl scan(%esp), %edi
addl %ecx, %esi
movl scanalign(%esp), %eax
movl $(-MAX_MATCH_8), %edx
lea MAX_MATCH_8(%edi,%eax), %edi
lea MAX_MATCH_8(%esi,%eax), %esi
/* Test the strings for equality, 8 bytes at a time. At the end,
* adjust %edx so that it is offset to the exact byte that mismatched.
*
* We already know at this point that the first three bytes of the
* strings match each other, and they can be safely passed over before
* starting the compare loop. So what this code does is skip over 0-3
* bytes, as much as necessary in order to dword-align the %edi
* pointer. (%esi will still be misaligned three times out of four.)
*
* It should be confessed that this loop usually does not represent
* much of the total running time. Replacing it with a more
* straightforward "rep cmpsb" would not drastically degrade
* performance.
*/
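/* Roughly, in C (a sketch only: off starts at -MAX_MATCH_8 and both pointers
 * are biased MAX_MATCH_8 + scanalign bytes ahead, so off reaching 0 means all
 * MAX_MATCH_8 bytes matched):
 *
 *   do {
 *       if (*(unsigned *)(a + off)     ^ *(unsigned *)(b + off))     break;
 *       if (*(unsigned *)(a + off + 4) ^ *(unsigned *)(b + off + 4)) { off += 4; break; }
 *   } while ((off += 8) != 0);
 */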
LoopCmps:
movl (%esi,%edx), %eax
xorl (%edi,%edx), %eax
jnz LeaveLoopCmps
movl 4(%esi,%edx), %eax
xorl 4(%edi,%edx), %eax
jnz LeaveLoopCmps4
addl $8, %edx
jnz LoopCmps
jmp LenMaximum
LeaveLoopCmps4: addl $4, %edx
LeaveLoopCmps: testl $0x0000FFFF, %eax
jnz LenLower
addl $2, %edx
shrl $16, %eax
LenLower: subb $1, %al
adcl $0, %edx
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%edi,%edx), %eax
movl scan(%esp), %edi
subl %edi, %eax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
cmpl %ebx, %eax
jg LongerMatch
movl windowbestlen(%esp), %esi
movl dsPrev(%edx), %edi
movl scanend(%esp), %ebx
movl chainlenwmask(%esp), %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch: movl nicematch(%esp), %ebx
movl %eax, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
cmpl %ebx, %eax
jge LeaveNow
movl window(%esp), %esi
addl %eax, %esi
movl %esi, windowbestlen(%esp)
movzwl -1(%edi,%eax), %ebx
movl dsPrev(%edx), %edi
movl %ebx, scanend(%esp)
movl chainlenwmask(%esp), %edx
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum: movl deflatestate(%esp), %edx
movl $MAX_MATCH, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
movl dsLookahead(%edx), %eax
cmpl %eax, %ebx
jg LookaheadRet
movl %ebx, %eax
LookaheadRet:
/* Restore the stack and return from whence we came. */
addl $LocalVarsSize, %esp
.cfi_def_cfa_offset 20
popl %ebx
.cfi_def_cfa_offset 16
popl %esi
.cfi_def_cfa_offset 12
popl %edi
.cfi_def_cfa_offset 8
popl %ebp
.cfi_def_cfa_offset 4
.cfi_endproc
match_init: ret
|
a1studmuffin/Cataclysm-DDA-Android
| 15,839
|
Android/jni/SDL2_image/external/zlib-1.2.8/contrib/gcc_gvmat64/gvmat64.S
|
/*
;uInt longest_match_x64(
; deflate_state *s,
; IPos cur_match); // current match
; gvmat64.S -- Asm portion of the optimized longest_match for 64-bit x86_64
; (AMD64 on Athlon 64, Opteron, Phenom
; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7)
; this file is a translation of gvmat64.asm to GCC 4.x syntax (for Linux, Mac XCode)
; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.
;
; File written by Gilles Vollant, by converting to assembly the longest_match
; from Jean-loup Gailly in deflate.c of zLib and infoZip zip,
; and by taking inspiration from asm686 with masm, the optimised assembly code
; from Brian Raiter, written in 1998
;
; This software is provided 'as-is', without any express or implied
; warranty. In no event will the authors be held liable for any damages
; arising from the use of this software.
;
; Permission is granted to anyone to use this software for any purpose,
; including commercial applications, and to alter it and redistribute it
; freely, subject to the following restrictions:
;
; 1. The origin of this software must not be misrepresented; you must not
; claim that you wrote the original software. If you use this software
; in a product, an acknowledgment in the product documentation would be
; appreciated but is not required.
; 2. Altered source versions must be plainly marked as such, and must not be
; misrepresented as being the original software
; 3. This notice may not be removed or altered from any source distribution.
;
; http://www.zlib.net
; http://www.winimage.com/zLibDll
; http://www.muppetlabs.com/~breadbox/software/assembly.html
;
; to compile this file for zLib, I use option:
; gcc -c -arch x86_64 gvmat64.S
;uInt longest_match(s, cur_match)
; deflate_state *s;
; IPos cur_match; // current match /
;
; with XCode for Mac, I had strange errors with some jumps in intel syntax;
; this is why BEFORE_JMP and AFTER_JMP are used
*/
#define BEFORE_JMP .att_syntax
#define AFTER_JMP .intel_syntax noprefix
#ifndef NO_UNDERLINE
# define match_init _match_init
# define longest_match _longest_match
#endif
.intel_syntax noprefix
.globl match_init, longest_match
.text
longest_match:
#define LocalVarsSize 96
/*
; registers used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12
; free registers : r14,r15
; register that can be saved : rsp
*/
#define chainlenwmask (rsp + 8 - LocalVarsSize)
#define nicematch (rsp + 16 - LocalVarsSize)
#define save_rdi (rsp + 24 - LocalVarsSize)
#define save_rsi (rsp + 32 - LocalVarsSize)
#define save_rbx (rsp + 40 - LocalVarsSize)
#define save_rbp (rsp + 48 - LocalVarsSize)
#define save_r12 (rsp + 56 - LocalVarsSize)
#define save_r13 (rsp + 64 - LocalVarsSize)
#define save_r14 (rsp + 72 - LocalVarsSize)
#define save_r15 (rsp + 80 - LocalVarsSize)
/*
; all the +4 offsets are due to the addition of pending_buf_size in zlib's
; deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, remove the +4).
; Note : these values are good with an 8-byte boundary packed structure
*/
#define MAX_MATCH 258
#define MIN_MATCH 3
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/*
;;; Offsets for fields in the deflate_state structure. These numbers
;;; are calculated from the definition of deflate_state, with the
;;; assumption that the compiler will dword-align the fields. (Thus,
;;; changing the definition of deflate_state could easily cause this
;;; program to crash horribly, without so much as a warning at
;;; compile time. Sigh.)
; all the +zlib1222add offsets are due to the addition of fields
; in zlib in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
; (if you compile with zlib 1.2.2.2 or later, use "zlib1222add equ 8").
*/
/* you can check the structure offset by running
#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"
void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));
printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/
#define dsWSize 68
#define dsWMask 76
#define dsWindow 80
#define dsPrev 96
#define dsMatchLen 144
#define dsPrevMatch 148
#define dsStrStart 156
#define dsMatchStart 160
#define dsLookahead 164
#define dsPrevLen 168
#define dsMaxChainLen 172
#define dsGoodMatch 188
#define dsNiceMatch 192
#define window_size [ rcx + dsWSize]
#define WMask [ rcx + dsWMask]
#define window_ad [ rcx + dsWindow]
#define prev_ad [ rcx + dsPrev]
#define strstart [ rcx + dsStrStart]
#define match_start [ rcx + dsMatchStart]
#define Lookahead [ rcx + dsLookahead] //; 0ffffffffh on infozip
#define prev_length [ rcx + dsPrevLen]
#define max_chain_length [ rcx + dsMaxChainLen]
#define good_match [ rcx + dsGoodMatch]
#define nice_match [ rcx + dsNiceMatch]
/*
; windows:
; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match)
; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and
; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp
;
; All registers must be preserved across the call, except for
; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.
;
; gcc on macosx-linux:
; see http://www.x86-64.org/documentation/abi-0.99.pdf
; param 1 in rdi, param 2 in rsi
; rbx, rsp, rbp, r12 to r15 must be preserved
;;; Save registers that the compiler may be using, and adjust esp to
;;; make room for our stack frame.
;;; Retrieve the function arguments. r8d will hold cur_match
;;; throughout the entire function. edx will hold the pointer to the
;;; deflate_state structure during the function's setup (before
;;; entering the main loop.
; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match)
; mac: param 1 in rdi, param 2 rsi
; this clear high 32 bits of r8, which can be garbage in both r8 and rdx
*/
mov [save_rbx],rbx
mov [save_rbp],rbp
mov rcx,rdi
mov r8d,esi
mov [save_r12],r12
mov [save_r13],r13
mov [save_r14],r14
mov [save_r15],r15
//;;; uInt wmask = s->w_mask;
//;;; unsigned chain_length = s->max_chain_length;
//;;; if (s->prev_length >= s->good_match) {
//;;; chain_length >>= 2;
//;;; }
mov edi, prev_length
mov esi, good_match
mov eax, WMask
mov ebx, max_chain_length
cmp edi, esi
jl LastMatchGood
shr ebx, 2
LastMatchGood:
//;;; chainlen is decremented once beforehand so that the function can
//;;; use the sign flag instead of the zero flag for the exit test.
//;;; It is then shifted into the high word, to make room for the wmask
//;;; value, which it will always accompany.
dec ebx
shl ebx, 16
or ebx, eax
//;;; on zlib only
//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
mov eax, nice_match
mov [chainlenwmask], ebx
mov r10d, Lookahead
cmp r10d, eax
cmovnl r10d, eax
mov [nicematch],r10d
//;;; register Bytef *scan = s->window + s->strstart;
mov r10, window_ad
mov ebp, strstart
lea r13, [r10 + rbp]
//;;; Determine how many bytes the scan ptr is off from being
//;;; dword-aligned.
mov r9,r13
neg r13
and r13,3
//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
//;;; s->strstart - (IPos)MAX_DIST(s) : NIL;
mov eax, window_size
sub eax, MIN_LOOKAHEAD
xor edi,edi
sub ebp, eax
mov r11d, prev_length
cmovng ebp,edi
//;;; int best_len = s->prev_length;
//;;; Store the sum of s->window + best_len in rsi (kept only in a register here).
lea rsi,[r10+r11]
//;;; register ush scan_start = *(ushf*)scan;
//;;; register ush scan_end = *(ushf*)(scan+best_len-1);
//;;; Posf *prev = s->prev;
movzx r12d,word ptr [r9]
movzx ebx, word ptr [r9 + r11 - 1]
mov rdi, prev_ad
//;;; Jump into the main loop.
mov edx, [chainlenwmask]
cmp bx,word ptr [rsi + r8 - 1]
jz LookupLoopIsZero
LookupLoop1:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
jbe LeaveNow
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry1:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jz LookupLoopIsZero
AFTER_JMP
LookupLoop2:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry2:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jz LookupLoopIsZero
AFTER_JMP
LookupLoop4:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry4:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jnz LookupLoop1
jmp LookupLoopIsZero
AFTER_JMP
/*
;;; do {
;;; match = s->window + cur_match;
;;; if (*(ushf*)(match+best_len-1) != scan_end ||
;;; *(ushf*)match != scan_start) continue;
;;; [...]
;;; } while ((cur_match = prev[cur_match & wmask]) > limit
;;; && --chain_length != 0);
;;;
;;; Here is the inner loop of the function. The function will spend the
;;; majority of its time in this loop, and majority of that time will
;;; be spent in the first ten instructions.
;;;
;;; Within this loop:
;;; ebx = scanend
;;; r8d = curmatch
;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
;;; esi = windowbestlen - i.e., (window + bestlen)
;;; edi = prev
;;; ebp = limit
*/
.balign 16
LookupLoop:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jnz LookupLoop1
AFTER_JMP
LookupLoopIsZero:
cmp r12w, word ptr [r10 + r8]
BEFORE_JMP
jnz LookupLoop1
AFTER_JMP
//;;; Store the current value of chainlen.
mov [chainlenwmask], edx
/*
;;; Point rdi to the string under scrutiny, and rsi to the string we
;;; are hoping to match it up with. In actuality, rsi and rdi are
;;; both pointed (MAX_MATCH_8 + scanalign) bytes ahead, and rdx is
;;; initialized to -MAX_MATCH_8.
*/
lea rsi,[r8+r10]
mov rdx, 0xfffffffffffffef8 //; -(MAX_MATCH_8)
lea rsi, [rsi + r13 + 0x0108] //;MAX_MATCH_8]
lea rdi, [r9 + r13 + 0x0108] //;MAX_MATCH_8]
prefetcht1 [rsi+rdx]
prefetcht1 [rdi+rdx]
/*
;;; Test the strings for equality, 8 bytes at a time. At the end,
;;; adjust rdx so that it is offset to the exact byte that mismatched.
;;;
;;; We already know at this point that the first three bytes of the
;;; strings match each other, and they can be safely passed over before
;;; starting the compare loop. So what this code does is skip over 0-3
;;; bytes, as much as necessary in order to dword-align the edi
;;; pointer. (rsi will still be misaligned three times out of four.)
;;;
;;; It should be confessed that this loop usually does not represent
;;; much of the total running time. Replacing it with a more
;;; straightforward "rep cmpsb" would not drastically degrade
;;; performance.
*/
LoopCmps:
mov rax, [rsi + rdx]
xor rax, [rdi + rdx]
jnz LeaveLoopCmps
mov rax, [rsi + rdx + 8]
xor rax, [rdi + rdx + 8]
jnz LeaveLoopCmps8
mov rax, [rsi + rdx + 8+8]
xor rax, [rdi + rdx + 8+8]
jnz LeaveLoopCmps16
add rdx,8+8+8
BEFORE_JMP
jnz LoopCmps
jmp LenMaximum
AFTER_JMP
LeaveLoopCmps16: add rdx,8
LeaveLoopCmps8: add rdx,8
LeaveLoopCmps:
test eax, 0x0000FFFF
jnz LenLower
test eax,0xffffffff
jnz LenLower32
add rdx,4
shr rax,32
or ax,ax
BEFORE_JMP
jnz LenLower
AFTER_JMP
LenLower32:
shr eax,16
add rdx,2
LenLower:
sub al, 1
adc rdx, 0
//;;; Calculate the length of the match. If it is longer than MAX_MATCH,
//;;; then automatically accept it as the best possible match and leave.
lea rax, [rdi + rdx]
sub rax, r9
cmp eax, MAX_MATCH
BEFORE_JMP
jge LenMaximum
AFTER_JMP
/*
;;; If the length of the match is not longer than the best match we
;;; have so far, then forget it and return to the lookup loop.
;///////////////////////////////////
*/
cmp eax, r11d
jg LongerMatch
lea rsi,[r10+r11]
mov rdi, prev_ad
mov edx, [chainlenwmask]
BEFORE_JMP
jmp LookupLoop
AFTER_JMP
/*
;;; s->match_start = cur_match;
;;; best_len = len;
;;; if (len >= nice_match) break;
;;; scan_end = *(ushf*)(scan+best_len-1);
*/
LongerMatch:
mov r11d, eax
mov match_start, r8d
cmp eax, [nicematch]
BEFORE_JMP
jge LeaveNow
AFTER_JMP
lea rsi,[r10+rax]
movzx ebx, word ptr [r9 + rax - 1]
mov rdi, prev_ad
mov edx, [chainlenwmask]
BEFORE_JMP
jmp LookupLoop
AFTER_JMP
//;;; Accept the current string, with the maximum possible length.
LenMaximum:
mov r11d,MAX_MATCH
mov match_start, r8d
//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
//;;; return s->lookahead;
LeaveNow:
mov eax, Lookahead
cmp r11d, eax
cmovng eax, r11d
//;;; Restore the stack and return from whence we came.
// mov rsi,[save_rsi]
// mov rdi,[save_rdi]
mov rbx,[save_rbx]
mov rbp,[save_rbp]
mov r12,[save_r12]
mov r13,[save_r13]
mov r14,[save_r14]
mov r15,[save_r15]
ret 0
//; please don't remove this string !
//; You can freely use gvmat64 in any free or commercial app
//; but it is far better not to remove the string in the binary!
// db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0
match_init:
ret 0
|
a1studmuffin/Cataclysm-DDA-Android
| 12,418
|
Android/jni/SDL2_image/external/zlib-1.2.8/contrib/amd64/amd64-match.S
|
/*
* match.S -- optimized version of longest_match()
* based on the similar work by Gilles Vollant, and Brian Raiter, written 1998
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the BSD License. Use by owners of Che Guevara
* paraphernalia is prohibited, where possible, and highly discouraged
* elsewhere.
*/
#ifndef NO_UNDERLINE
# define match_init _match_init
# define longest_match _longest_match
#endif
#define scanend ebx
#define scanendw bx
#define chainlenwmask edx /* high word: current chain len low word: s->wmask */
#define curmatch rsi
#define curmatchd esi
#define windowbestlen r8
#define scanalign r9
#define scanalignd r9d
#define window r10
#define bestlen r11
#define bestlend r11d
#define scanstart r12d
#define scanstartw r12w
#define scan r13
#define nicematch r14d
#define limit r15
#define limitd r15d
#define prev rcx
/*
* The 258 is a "magic number, not a parameter -- changing it
* breaks the hell loose
*/
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
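/* MAX_MATCH_8 rounds MAX_MATCH (258) up to a multiple of 8 (264), so the
 * compare loop below can always work in whole 8-byte chunks. */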
/* stack frame offsets */
#define LocalVarsSize (112)
#define _chainlenwmask ( 8-LocalVarsSize)(%rsp)
#define _windowbestlen (16-LocalVarsSize)(%rsp)
#define save_r14 (24-LocalVarsSize)(%rsp)
#define save_rsi (32-LocalVarsSize)(%rsp)
#define save_rbx (40-LocalVarsSize)(%rsp)
#define save_r12 (56-LocalVarsSize)(%rsp)
#define save_r13 (64-LocalVarsSize)(%rsp)
#define save_r15 (80-LocalVarsSize)(%rsp)
.globl match_init, longest_match
/*
* On AMD64 the first argument of a function (in our case -- the pointer to
* deflate_state structure) is passed in %rdi, hence our offsets below are
* all off of that.
*/
/* you can check the structure offset by running
#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"
void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));
printf("#define dsWSize (%3u)(%%rdi)\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask (%3u)(%%rdi)\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow (%3u)(%%rdi)\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev (%3u)(%%rdi)\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen (%3u)(%%rdi)\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart (%3u)(%%rdi)\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart (%3u)(%%rdi)\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead (%3u)(%%rdi)\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen (%3u)(%%rdi)\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen (%3u)(%%rdi)\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch (%3u)(%%rdi)\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/
/*
to compile for XCode 3.2 on MacOSX x86_64
- run "gcc -g -c -DXCODE_MAC_X64_STRUCTURE amd64-match.S"
*/
#ifndef CURRENT_LINX_XCODE_MAC_X64_STRUCTURE
#define dsWSize ( 68)(%rdi)
#define dsWMask ( 76)(%rdi)
#define dsWindow ( 80)(%rdi)
#define dsPrev ( 96)(%rdi)
#define dsMatchLen (144)(%rdi)
#define dsPrevMatch (148)(%rdi)
#define dsStrStart (156)(%rdi)
#define dsMatchStart (160)(%rdi)
#define dsLookahead (164)(%rdi)
#define dsPrevLen (168)(%rdi)
#define dsMaxChainLen (172)(%rdi)
#define dsGoodMatch (188)(%rdi)
#define dsNiceMatch (192)(%rdi)
#else
#ifndef STRUCT_OFFSET
# define STRUCT_OFFSET (0)
#endif
#define dsWSize ( 56 + STRUCT_OFFSET)(%rdi)
#define dsWMask ( 64 + STRUCT_OFFSET)(%rdi)
#define dsWindow ( 72 + STRUCT_OFFSET)(%rdi)
#define dsPrev ( 88 + STRUCT_OFFSET)(%rdi)
#define dsMatchLen (136 + STRUCT_OFFSET)(%rdi)
#define dsPrevMatch (140 + STRUCT_OFFSET)(%rdi)
#define dsStrStart (148 + STRUCT_OFFSET)(%rdi)
#define dsMatchStart (152 + STRUCT_OFFSET)(%rdi)
#define dsLookahead (156 + STRUCT_OFFSET)(%rdi)
#define dsPrevLen (160 + STRUCT_OFFSET)(%rdi)
#define dsMaxChainLen (164 + STRUCT_OFFSET)(%rdi)
#define dsGoodMatch (180 + STRUCT_OFFSET)(%rdi)
#define dsNiceMatch (184 + STRUCT_OFFSET)(%rdi)
#endif
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
longest_match:
/*
* Retrieve the function arguments. %curmatch will hold cur_match
* throughout the entire function (passed via rsi on amd64).
* rdi will hold the pointer to the deflate_state (first arg on amd64)
*/
mov %rsi, save_rsi
mov %rbx, save_rbx
mov %r12, save_r12
mov %r13, save_r13
mov %r14, save_r14
mov %r15, save_r15
/* uInt wmask = s->w_mask; */
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/* chain_length >>= 2; */
/* } */
movl dsPrevLen, %eax
movl dsGoodMatch, %ebx
cmpl %ebx, %eax
movl dsWMask, %eax
movl dsMaxChainLen, %chainlenwmask
jl LastMatchGood
shrl $2, %chainlenwmask
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the wmask */
/* value, which it will always accompany. */
decl %chainlenwmask
shll $16, %chainlenwmask
orl %eax, %chainlenwmask
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch, %eax
movl dsLookahead, %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, %nicematch
/* register Bytef *scan = s->window + s->strstart; */
mov dsWindow, %window
movl dsStrStart, %limitd
lea (%limit, %window), %scan
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
mov %scan, %scanalign
negl %scanalignd
andl $3, %scanalignd
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/* s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize, %eax
subl $MIN_LOOKAHEAD, %eax
xorl %ecx, %ecx
subl %eax, %limitd
cmovng %ecx, %limitd
/* int best_len = s->prev_length; */
movl dsPrevLen, %bestlend
/* Store the sum of s->window + best_len in %windowbestlen locally, and in memory. */
lea (%window, %bestlen), %windowbestlen
mov %windowbestlen, _windowbestlen
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* Posf *prev = s->prev; */
movzwl (%scan), %scanstart
movzwl -1(%scan, %bestlen), %scanend
mov dsPrev, %prev
/* Jump into the main loop. */
movl %chainlenwmask, _chainlenwmask
jmp LoopEntry
.balign 16
/* do {
* match = s->window + cur_match;
* if (*(ushf*)(match+best_len-1) != scan_end ||
* *(ushf*)match != scan_start) continue;
* [...]
* } while ((cur_match = prev[cur_match & wmask]) > limit
* && --chain_length != 0);
*
* Here is the inner loop of the function. The function will spend the
* majority of its time in this loop, and majority of that time will
* be spent in the first ten instructions.
*/
LookupLoop:
andl %chainlenwmask, %curmatchd
movzwl (%prev, %curmatch, 2), %curmatchd
cmpl %limitd, %curmatchd
jbe LeaveNow
subl $0x00010000, %chainlenwmask
js LeaveNow
LoopEntry: cmpw -1(%windowbestlen, %curmatch), %scanendw
jne LookupLoop
cmpw %scanstartw, (%window, %curmatch)
jne LookupLoop
/* Store the current value of chainlen. */
movl %chainlenwmask, _chainlenwmask
/* Point %prev (reused here) at the string under scrutiny, and */
/* %windowbestlen at the candidate match. In actuality, both are */
/* pointed (MAX_MATCH_8 + scanalign) bytes ahead, and %rdx is */
/* initialized to -MAX_MATCH_8. */
mov $(-MAX_MATCH_8), %rdx
lea (%curmatch, %window), %windowbestlen
lea MAX_MATCH_8(%windowbestlen, %scanalign), %windowbestlen
lea MAX_MATCH_8(%scan, %scanalign), %prev
/* the prefetching below makes very little difference... */
prefetcht1 (%windowbestlen, %rdx)
prefetcht1 (%prev, %rdx)
/*
* Test the strings for equality, 8 bytes at a time. At the end,
* adjust %rdx so that it is offset to the exact byte that mismatched.
*
* It should be confessed that this loop usually does not represent
* much of the total running time. Replacing it with a more
* straightforward "rep cmpsb" would not drastically degrade
* performance -- unrolling it, for example, makes no difference.
*/
#undef USE_SSE /* works, but is 6-7% slower than non-SSE... */
LoopCmps:
#ifdef USE_SSE
/* Preload the SSE registers */
movdqu (%windowbestlen, %rdx), %xmm1
movdqu (%prev, %rdx), %xmm2
pcmpeqb %xmm2, %xmm1
movdqu 16(%windowbestlen, %rdx), %xmm3
movdqu 16(%prev, %rdx), %xmm4
pcmpeqb %xmm4, %xmm3
movdqu 32(%windowbestlen, %rdx), %xmm5
movdqu 32(%prev, %rdx), %xmm6
pcmpeqb %xmm6, %xmm5
movdqu 48(%windowbestlen, %rdx), %xmm7
movdqu 48(%prev, %rdx), %xmm8
pcmpeqb %xmm8, %xmm7
/* Check the comparisons' results */
pmovmskb %xmm1, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
/* this is the only iteration of the loop with a possibility of having
   incremented rdx by 0x108 (each loop iteration adds 16*4 = 0x40,
   and (0x40*4)+8 = 0x108) */
add $8, %rdx
jz LenMaximum
add $8, %rdx
pmovmskb %xmm3, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
add $16, %rdx
pmovmskb %xmm5, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
add $16, %rdx
pmovmskb %xmm7, %rax
notw %ax
bsfw %ax, %ax
jnz LeaveLoopCmps
add $16, %rdx
jmp LoopCmps
LeaveLoopCmps: add %rax, %rdx
#else
mov (%windowbestlen, %rdx), %rax
xor (%prev, %rdx), %rax
jnz LeaveLoopCmps
mov 8(%windowbestlen, %rdx), %rax
xor 8(%prev, %rdx), %rax
jnz LeaveLoopCmps8
mov 16(%windowbestlen, %rdx), %rax
xor 16(%prev, %rdx), %rax
jnz LeaveLoopCmps16
add $24, %rdx
jnz LoopCmps
jmp LenMaximum
# if 0
/*
* This three-liner is tantalizingly simple, but bsf is a slow instruction,
* and the complicated alternative down below is quite a bit faster. Sad...
*/
LeaveLoopCmps: bsf %rax, %rax /* find the first non-zero bit */
shrl $3, %eax /* divide by 8 to get the byte */
add %rax, %rdx
# else
LeaveLoopCmps16:
add $8, %rdx
LeaveLoopCmps8:
add $8, %rdx
LeaveLoopCmps: testl $0xFFFFFFFF, %eax /* Check the first 4 bytes */
jnz Check16
add $4, %rdx
shr $32, %rax
Check16: testw $0xFFFF, %ax
jnz LenLower
add $2, %rdx
shrl $16, %eax
LenLower: subb $1, %al
adc $0, %rdx
# endif
#endif
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%prev, %rdx), %rax
sub %scan, %rax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
cmpl %bestlend, %eax
jg LongerMatch
mov _windowbestlen, %windowbestlen
mov dsPrev, %prev
movl _chainlenwmask, %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch:
movl %eax, %bestlend
movl %curmatchd, dsMatchStart
cmpl %nicematch, %eax
jge LeaveNow
lea (%window, %bestlen), %windowbestlen
mov %windowbestlen, _windowbestlen
movzwl -1(%scan, %rax), %scanend
mov dsPrev, %prev
movl _chainlenwmask, %chainlenwmask
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum:
movl $MAX_MATCH, %bestlend
movl %curmatchd, dsMatchStart
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl dsLookahead, %eax
cmpl %eax, %bestlend
cmovngl %bestlend, %eax
LookaheadRet:
/* Restore the registers and return from whence we came. */
mov save_rsi, %rsi
mov save_rbx, %rbx
mov save_r12, %r12
mov save_r13, %r13
mov save_r14, %r14
mov save_r15, %r15
ret
match_init: ret
|
a1studmuffin/Cataclysm-DDA-Android
| 7,724
|
Android/jni/SDL2_image/external/libpng-1.6.2/arm/filter_neon.S
|
/* filter_neon.S - NEON optimised filter functions
*
* Copyright (c) 2011 Glenn Randers-Pehrson
* Written by Mans Rullgard, 2011.
* Last changed in libpng 1.5.7 [December 15, 2011]
*
* This code is released under the libpng license.
* For conditions of distribution and use, see the disclaimer
* and license in png.h
*/
/* This is required to get the symbol renames, which are #defines, and also
* includes the value of PNG_FILTER_OPTIMIZATIONS.
*/
#define PNG_VERSION_INFO_ONLY
#include "../pngpriv.h"
#if defined(PNG_FILTER_OPTIMIZATIONS) && defined(__arm__) && \
defined(__ARM_NEON__)
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits /* mark stack as non-executable */
#endif
#ifdef __ELF__
# define ELF
#else
# define ELF @
#endif
.arch armv7-a
.fpu neon
.macro func name, export=0
.macro endfunc
ELF .size \name, . - \name
.endfunc
.purgem endfunc
.endm
.text
.if \export
.global \name
.endif
ELF .type \name, STT_FUNC
.func \name
\name:
.endm
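/* Each "func name, export=1" invocation below opens a function: it emits the
 * .global/.type/.func directives and the label, and defines a one-shot
 * endfunc macro that adds the matching ELF .size directive and then purges
 * itself, so every routine is bracketed by a func/endfunc pair. */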
func png_read_filter_row_sub4_neon, export=1
ldr r3, [r0, #4] @ rowbytes
vmov.i8 d3, #0
1:
vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
vadd.u8 d0, d3, d4
vadd.u8 d1, d0, d5
vadd.u8 d2, d1, d6
vadd.u8 d3, d2, d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
subs r3, r3, #16
bgt 1b
bx lr
endfunc
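/* For reference, the scalar filter vectorised above is PNG's Sub filter,
 * recon[i] = raw[i] + recon[i - bpp] with bpp = 4; the running d3 register
 * carries the previously reconstructed pixel across each 16-byte block. */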
func png_read_filter_row_sub3_neon, export=1
ldr r3, [r0, #4] @ rowbytes
vmov.i8 d3, #0
mov r0, r1
mov r2, #3
mov r12, #12
vld1.8 {q11}, [r0], r12
1:
vext.8 d5, d22, d23, #3
vadd.u8 d0, d3, d22
vext.8 d6, d22, d23, #6
vadd.u8 d1, d0, d5
vext.8 d7, d23, d23, #1
vld1.8 {q11}, [r0], r12
vst1.32 {d0[0]}, [r1,:32], r2
vadd.u8 d2, d1, d6
vst1.32 {d1[0]}, [r1], r2
vadd.u8 d3, d2, d7
vst1.32 {d2[0]}, [r1], r2
vst1.32 {d3[0]}, [r1], r2
subs r3, r3, #12
bgt 1b
bx lr
endfunc
func png_read_filter_row_up_neon, export=1
ldr r3, [r0, #4] @ rowbytes
1:
vld1.8 {q0}, [r1,:128]
vld1.8 {q1}, [r2,:128]!
vadd.u8 q0, q0, q1
vst1.8 {q0}, [r1,:128]!
subs r3, r3, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_avg4_neon, export=1
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
1:
vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
vld4.32 {d16[],d17[],d18[],d19[]},[r2,:128]!
vhadd.u8 d0, d3, d16
vadd.u8 d0, d0, d4
vhadd.u8 d1, d0, d17
vadd.u8 d1, d1, d5
vhadd.u8 d2, d1, d18
vadd.u8 d2, d2, d6
vhadd.u8 d3, d2, d19
vadd.u8 d3, d3, d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
subs r12, r12, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_avg3_neon, export=1
push {r4,lr}
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
mov r0, r1
mov r4, #3
mov lr, #12
vld1.8 {q11}, [r0], lr
1:
vld1.8 {q10}, [r2], lr
vext.8 d5, d22, d23, #3
vhadd.u8 d0, d3, d20
vext.8 d17, d20, d21, #3
vadd.u8 d0, d0, d22
vext.8 d6, d22, d23, #6
vhadd.u8 d1, d0, d17
vext.8 d18, d20, d21, #6
vadd.u8 d1, d1, d5
vext.8 d7, d23, d23, #1
vld1.8 {q11}, [r0], lr
vst1.32 {d0[0]}, [r1,:32], r4
vhadd.u8 d2, d1, d18
vst1.32 {d1[0]}, [r1], r4
vext.8 d19, d21, d21, #1
vadd.u8 d2, d2, d6
vhadd.u8 d3, d2, d19
vst1.32 {d2[0]}, [r1], r4
vadd.u8 d3, d3, d7
vst1.32 {d3[0]}, [r1], r4
subs r12, r12, #12
bgt 1b
pop {r4,pc}
endfunc
.macro paeth rx, ra, rb, rc
vaddl.u8 q12, \ra, \rb @ a + b
vaddl.u8 q15, \rc, \rc @ 2*c
vabdl.u8 q13, \rb, \rc @ pa
vabdl.u8 q14, \ra, \rc @ pb
vabd.u16 q15, q12, q15 @ pc
vcle.u16 q12, q13, q14 @ pa <= pb
vcle.u16 q13, q13, q15 @ pa <= pc
vcle.u16 q14, q14, q15 @ pb <= pc
vand q12, q12, q13 @ pa <= pb && pa <= pc
vmovn.u16 d28, q14
vmovn.u16 \rx, q12
vbsl d28, \rb, \rc
vbsl \rx, \ra, d28
.endm
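/* For reference, the scalar Paeth predictor this macro computes branchlessly
 * (PNG specification; a = left, b = above, c = upper-left):
 *   p  = a + b - c
 *   pa = abs(p - a)   which is abs(b - c)
 *   pb = abs(p - b)   which is abs(a - c)
 *   pc = abs(p - c)   which is abs(a + b - 2*c)
 *   pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c
 */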
func png_read_filter_row_paeth4_neon, export=1
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
vmov.i8 d20, #0
1:
vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
vld4.32 {d16[],d17[],d18[],d19[]},[r2,:128]!
paeth d0, d3, d16, d20
vadd.u8 d0, d0, d4
paeth d1, d0, d17, d16
vadd.u8 d1, d1, d5
paeth d2, d1, d18, d17
vadd.u8 d2, d2, d6
paeth d3, d2, d19, d18
vmov d20, d19
vadd.u8 d3, d3, d7
vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
subs r12, r12, #16
bgt 1b
bx lr
endfunc
func png_read_filter_row_paeth3_neon, export=1
push {r4,lr}
ldr r12, [r0, #4] @ rowbytes
vmov.i8 d3, #0
vmov.i8 d4, #0
mov r0, r1
mov r4, #3
mov lr, #12
vld1.8 {q11}, [r0], lr
1:
vld1.8 {q10}, [r2], lr
paeth d0, d3, d20, d4
vext.8 d5, d22, d23, #3
vadd.u8 d0, d0, d22
vext.8 d17, d20, d21, #3
paeth d1, d0, d17, d20
vst1.32 {d0[0]}, [r1,:32], r4
vext.8 d6, d22, d23, #6
vadd.u8 d1, d1, d5
vext.8 d18, d20, d21, #6
paeth d2, d1, d18, d17
vext.8 d7, d23, d23, #1
vld1.8 {q11}, [r0], lr
vst1.32 {d1[0]}, [r1], r4
vadd.u8 d2, d2, d6
vext.8 d19, d21, d21, #1
paeth d3, d2, d19, d18
vst1.32 {d2[0]}, [r1], r4
vmov d4, d19
vadd.u8 d3, d3, d7
vst1.32 {d3[0]}, [r1], r4
subs r12, r12, #12
bgt 1b
pop {r4,pc}
endfunc
#endif /* FILTER_OPTIMIZATIONS && __arm__ && __ARM_NEON__ */
|
a1studmuffin/Cataclysm-DDA-Android
| 21,579
|
Android/jni/SDL2_mixer/external/smpeg2-2.0.0/video/mmxidct_asm.S
|
#if defined(i386) && defined(USE_MMX)
/*
* the input data is transposed and each 16-bit element in the 8x8 matrix
* is left aligned:
* for example in 11...1110000 format
* If the iDCT is for an I macroblock, then 0.5 needs to be added to the
* DC component (element[0][0] of the matrix)
*/
/* extrn re_matrix */
.data
.align 16
.type preSC,@object
preSC: .short 16384,22725,21407,19266,16384,12873,8867,4520
.short 22725,31521,29692,26722,22725,17855,12299,6270
.short 21407,29692,27969,25172,21407,16819,11585,5906
.short 19266,26722,25172,22654,19266,15137,10426,5315
.short 16384,22725,21407,19266,16384,12873,8867,4520
.short 12873,17855,16819,15137,25746,20228,13933,7103
.short 17734,24598,23170,20853,17734,13933,9597,4892
.short 18081,25080,23624,21261,18081,14206,9785,4988
.size preSC,128
.align 8
.type x0005000200010001,@object
.size x0005000200010001,8
x0005000200010001:
.long 0x00010001,0x00050002
.align 8
.type x5a825a825a825a82,@object
.size x5a825a825a825a82,8
x5a825a825a825a82:
.long 0x5a825a82, 0x5a825a82
.align 8
.type x539f539f539f539f,@object
.size x539f539f539f539f,8
x539f539f539f539f:
.long 0x539f539f,0x539f539f
.align 8
.type x4546454645464546,@object
.size x4546454645464546,8
x4546454645464546:
.long 0x45464546,0x45464546
.align 8
.type x61f861f861f861f8,@object
.size x61f861f861f861f8,8
x61f861f861f861f8:
.long 0x61f861f8,0x61f861f8
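/*
 * The four 16-bit patterns above are the fixed-point multipliers used with
 * pmulhw (which keeps the upper 16 bits of each 16x16 product), so they act
 * as scale factors: 0x5a82 = 23170 (~0.7071 * 32768, i.e. cos(pi/4)),
 * 0x539f = 21407, 0x4546 = 17734 and 0x61f8 = 25080, matching the
 * "23170 ->", "21407->", "17734->" and "25080->" annotations in the code below.
 */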
.align 8
.type scratch1,@object
.size scratch1,8
scratch1:
.long 0,0
.align 8
.type scratch3,@object
.size scratch3,8
scratch3:
.long 0,0
.align 8
.type scratch5,@object
.size scratch5,8
scratch5:
.long 0,0
.align 8
.type scratch7,@object
.size scratch7,8
scratch7:
.long 0,0
.type x0,@object
.size x0,8
x0:
.long 0,0
.align 8
.text
.align 4
#ifdef __PIC__
# undef __i686 /* gcc define gets in our way */
# define MUNG(sym) sym ## @GOTOFF(%ebx)
# define INIT_PIC() \
call __i686.get_pc_thunk.bx ; \
addl $_GLOBAL_OFFSET_TABLE_, %ebx
#else
# define MUNG(sym) sym
# define INIT_PIC()
#endif
.globl IDCT_mmx
.type IDCT_mmx,@function
IDCT_mmx:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
INIT_PIC()
movl 8(%ebp),%esi /* source matrix */
leal MUNG(preSC), %ecx
/* column 0: even part
* use V4, V12, V0, V8 to produce V22..V25
*/
movq 8*12(%ecx), %mm0 /* maybe the first mul can be done together */
/* with the dequantization in iHuff module */
pmulhw 8*12(%esi), %mm0 /* V12 */
movq 8*4(%ecx), %mm1
pmulhw 8*4(%esi), %mm1 /* V4 */
movq (%ecx), %mm3
psraw $1, %mm0 /* t64=t66 */
pmulhw (%esi), %mm3 /* V0 */
movq 8*8(%ecx), %mm5 /* duplicate V4 */
movq %mm1, %mm2 /* added 11/1/96 */
pmulhw 8*8(%esi),%mm5 /* V8 */
psubsw %mm0, %mm1 /* V16 */
pmulhw MUNG(x5a825a825a825a82), %mm1 /* 23170 ->V18 */
paddsw %mm0, %mm2 /* V17 */
movq %mm2, %mm0 /* duplicate V17 */
psraw $1, %mm2 /* t75=t82 */
psraw $2, %mm0 /* t72 */
movq %mm3, %mm4 /* duplicate V0 */
paddsw %mm5, %mm3 /* V19 */
psubsw %mm5, %mm4 /* V20 ;mm5 free */
/* moved from the block below */
movq 8*10(%ecx), %mm7
psraw $1, %mm3 /* t74=t81 */
movq %mm3, %mm6 /* duplicate t74=t81 */
psraw $2, %mm4 /* t77=t79 */
psubsw %mm0, %mm1 /* V21 ; mm0 free */
paddsw %mm2, %mm3 /* V22 */
movq %mm1, %mm5 /* duplicate V21 */
paddsw %mm4, %mm1 /* V23 */
movq %mm3, 8*4(%esi) /* V22 */
psubsw %mm5, %mm4 /* V24; mm5 free */
movq %mm1, 8*12(%esi) /* V23 */
psubsw %mm2, %mm6 /* V25; mm2 free */
movq %mm4, (%esi) /* V24 */
/* keep mm6 alive all along the next block */
/* movq %mm6, 8*8(%esi) V25 */
/* column 0: odd part
* use V2, V6, V10, V14 to produce V31, V39, V40, V41
*/
/* moved above: movq 8*10(%ecx), %mm7 */
pmulhw 8*10(%esi), %mm7 /* V10 */
movq 8*6(%ecx), %mm0
pmulhw 8*6(%esi), %mm0 /* V6 */
movq 8*2(%ecx), %mm5
movq %mm7, %mm3 /* duplicate V10 */
pmulhw 8*2(%esi), %mm5 /* V2 */
movq 8*14(%ecx), %mm4
psubsw %mm0, %mm7 /* V26 */
pmulhw 8*14(%esi), %mm4 /* V14 */
paddsw %mm0, %mm3 /* V29 ; free mm0 */
movq %mm7, %mm1 /* duplicate V26 */
psraw $1, %mm3 /* t91=t94 */
pmulhw MUNG(x539f539f539f539f),%mm7 /* V33 */
psraw $1, %mm1 /* t96 */
movq %mm5, %mm0 /* duplicate V2 */
psraw $2, %mm4 /* t85=t87 */
paddsw %mm4,%mm5 /* V27 */
psubsw %mm4, %mm0 /* V28 ; free mm4 */
movq %mm0, %mm2 /* duplicate V28 */
psraw $1, %mm5 /* t90=t93 */
pmulhw MUNG(x4546454645464546),%mm0 /* V35 */
psraw $1, %mm2 /* t97 */
movq %mm5, %mm4 /* duplicate t90=t93 */
psubsw %mm2, %mm1 /* V32 ; free mm2 */
pmulhw MUNG(x61f861f861f861f8),%mm1 /* V36 */
psllw $1, %mm7 /* t107 */
paddsw %mm3, %mm5 /* V31 */
psubsw %mm3, %mm4 /* V30 ; free mm3 */
pmulhw MUNG(x5a825a825a825a82),%mm4 /* V34 */
nop
psubsw %mm1, %mm0 /* V38 */
psubsw %mm7, %mm1 /* V37 ; free mm7 */
psllw $1, %mm1 /* t114 */
/* move from the next block */
movq %mm6, %mm3 /* duplicate V25 */
/* move from the next block */
movq 8*4(%esi), %mm7 /* V22 */
psllw $1, %mm0 /* t110 */
psubsw %mm5, %mm0 /* V39 (mm5 needed for next block) */
psllw $2, %mm4 /* t112 */
/* moved from the next block */
movq 8*12(%esi), %mm2 /* V23 */
psubsw %mm0, %mm4 /* V40 */
paddsw %mm4, %mm1 /* V41; free mm0 */
/* moved from the next block */
psllw $1, %mm2 /* t117=t125 */
/* column 0: output butterfly */
/* moved above:
* movq %mm6, %mm3 duplicate V25
* movq 8*4(%esi), %mm7 V22
* movq 8*12(%esi), %mm2 V23
* psllw $1, %mm2 t117=t125
*/
psubsw %mm1, %mm6 /* tm6 */
paddsw %mm1, %mm3 /* tm8; free mm1 */
movq %mm7, %mm1 /* duplicate V22 */
paddsw %mm5, %mm7 /* tm0 */
movq %mm3, 8*8(%esi) /* tm8; free mm3 */
psubsw %mm5, %mm1 /* tm14; free mm5 */
movq %mm6, 8*6(%esi) /* tm6; free mm6 */
movq %mm2, %mm3 /* duplicate t117=t125 */
movq (%esi), %mm6 /* V24 */
paddsw %mm0, %mm2 /* tm2 */
movq %mm7, (%esi) /* tm0; free mm7 */
psubsw %mm0, %mm3 /* tm12; free mm0 */
movq %mm1, 8*14(%esi) /* tm14; free mm1 */
psllw $1, %mm6 /* t119=t123 */
movq %mm2, 8*2(%esi) /* tm2; free mm2 */
movq %mm6, %mm0 /* duplicate t119=t123 */
movq %mm3, 8*12(%esi) /* tm12; free mm3 */
paddsw %mm4, %mm6 /* tm4 */
/* moved from next block */
movq 8*5(%ecx), %mm1
psubsw %mm4, %mm0 /* tm10; free mm4 */
/* moved from next block */
pmulhw 8*5(%esi), %mm1 /* V5 */
movq %mm6, 8*4(%esi) /* tm4; free mm6 */
movq %mm0, 8*10(%esi) /* tm10; free mm0 */
/* column 1: even part
* use V5, V13, V1, V9 to produce V56..V59
*/
/* moved to prev block:
* movq 8*5(%ecx), %mm1
* pmulhw 8*5(%esi), %mm1 V5
*/
movq 8*13(%ecx), %mm7
psllw $1, %mm1 /* t128=t130 */
pmulhw 8*13(%esi), %mm7 /* V13 */
movq %mm1, %mm2 /* duplicate t128=t130 */
movq 8(%ecx), %mm3
pmulhw 8(%esi), %mm3 /* V1 */
movq 8*9(%ecx), %mm5
psubsw %mm7, %mm1 /* V50 */
pmulhw 8*9(%esi), %mm5 /* V9 */
paddsw %mm7, %mm2 /* V51 */
pmulhw MUNG(x5a825a825a825a82), %mm1 /* 23170 ->V52 */
movq %mm2, %mm6 /* duplicate V51 */
psraw $1, %mm2 /* t138=t144 */
movq %mm3, %mm4 /* duplicate V1 */
psraw $2, %mm6 /* t136 */
paddsw %mm5, %mm3 /* V53 */
psubsw %mm5, %mm4 /* V54 ;mm5 free */
movq %mm3, %mm7 /* duplicate V53 */
/* moved from next block */
movq 8*11(%ecx), %mm0
psraw $1, %mm4 /* t140=t142 */
psubsw %mm6, %mm1 /* V55 ; mm6 free */
paddsw %mm2, %mm3 /* V56 */
movq %mm4, %mm5 /* duplicate t140=t142 */
paddsw %mm1, %mm4 /* V57 */
movq %mm3, 8*5(%esi) /* V56 */
psubsw %mm1, %mm5 /* V58; mm1 free */
movq %mm4, 8*13(%esi) /* V57 */
psubsw %mm2, %mm7 /* V59; mm2 free */
movq %mm5, 8*9(%esi) /* V58 */
/* keep mm7 alive all along the next block
* movq %mm7, 8(%esi) V59
* moved above
* movq 8*11(%ecx), %mm0
*/
pmulhw 8*11(%esi), %mm0 /* V11 */
movq 8*7(%ecx), %mm6
pmulhw 8*7(%esi), %mm6 /* V7 */
movq 8*15(%ecx), %mm4
movq %mm0, %mm3 /* duplicate V11 */
pmulhw 8*15(%esi), %mm4 /* V15 */
movq 8*3(%ecx), %mm5
psllw $1, %mm6 /* t146=t152 */
pmulhw 8*3(%esi), %mm5 /* V3 */
paddsw %mm6, %mm0 /* V63 */
/* note that V15 computation has a correction step:
* this is a 'magic' constant that rebiases the results to be closer to the
* expected result. this magic constant can be refined to reduce the error
* even more by doing the correction step in a later stage when the number
* is actually multiplied by 16
*/
paddw MUNG(x0005000200010001), %mm4
psubsw %mm6, %mm3 /* V60 ; free mm6 */
psraw $1, %mm0 /* t154=t156 */
movq %mm3, %mm1 /* duplicate V60 */
pmulhw MUNG(x539f539f539f539f), %mm1 /* V67 */
movq %mm5, %mm6 /* duplicate V3 */
psraw $2, %mm4 /* t148=t150 */
paddsw %mm4, %mm5 /* V61 */
psubsw %mm4, %mm6 /* V62 ; free mm4 */
movq %mm5, %mm4 /* duplicate V61 */
psllw $1, %mm1 /* t169 */
paddsw %mm0, %mm5 /* V65 -> result */
psubsw %mm0, %mm4 /* V64 ; free mm0 */
pmulhw MUNG(x5a825a825a825a82), %mm4 /* V68 */
psraw $1, %mm3 /* t158 */
psubsw %mm6, %mm3 /* V66 */
movq %mm5, %mm2 /* duplicate V65 */
pmulhw MUNG(x61f861f861f861f8), %mm3 /* V70 */
psllw $1, %mm6 /* t165 */
pmulhw MUNG(x4546454645464546), %mm6 /* V69 */
psraw $1, %mm2 /* t172 */
/* moved from next block */
movq 8*5(%esi), %mm0 /* V56 */
psllw $1, %mm4 /* t174 */
/* moved from next block */
psraw $1, %mm0 /* t177=t188 */
nop
psubsw %mm3, %mm6 /* V72 */
psubsw %mm1, %mm3 /* V71 ; free mm1 */
psubsw %mm2, %mm6 /* V73 ; free mm2 */
/* moved from next block */
psraw $1, %mm5 /* t178=t189 */
psubsw %mm6, %mm4 /* V74 */
/* moved from next block */
movq %mm0, %mm1 /* duplicate t177=t188 */
paddsw %mm4, %mm3 /* V75 */
/* moved from next block */
paddsw %mm5, %mm0 /* tm1 */
/* location
* 5 - V56
* 13 - V57
* 9 - V58
* X - V59, mm7
* X - V65, mm5
* X - V73, mm6
* X - V74, mm4
* X - V75, mm3
* free mm0, mm1 & mm2
* moved above
* movq 8*5(%esi), %mm0 V56
* psllw $1, %mm0 t177=t188 ! new !!
* psllw $1, %mm5 t178=t189 ! new !!
* movq %mm0, %mm1 duplicate t177=t188
* paddsw %mm5, %mm0 tm1
*/
movq 8*13(%esi), %mm2 /* V57 */
psubsw %mm5, %mm1 /* tm15; free mm5 */
movq %mm0, 8(%esi) /* tm1; free mm0 */
psraw $1, %mm7 /* t182=t184 ! new !! */
/* save the store as used directly in the transpose
* movq %mm1, 120(%esi) tm15; free mm1
*/
movq %mm7, %mm5 /* duplicate t182=t184 */
psubsw %mm3, %mm7 /* tm7 */
paddsw %mm3, %mm5 /* tm9; free mm3 */
movq 8*9(%esi), %mm0 /* V58 */
movq %mm2, %mm3 /* duplicate V57 */
movq %mm7, 8*7(%esi) /* tm7; free mm7 */
psubsw %mm6, %mm3 /* tm13 */
paddsw %mm6, %mm2 /* tm3 ; free mm6 */
/* moved up from the transpose */
movq %mm3, %mm7
/* moved up from the transpose */
punpcklwd %mm1, %mm3
movq %mm0, %mm6 /* duplicate V58 */
movq %mm2, 8*3(%esi) /* tm3; free mm2 */
paddsw %mm4, %mm0 /* tm5 */
psubsw %mm4, %mm6 /* tm11; free mm4 */
/* moved up from the transpose */
punpckhwd %mm1, %mm7
movq %mm0, 8*5(%esi) /* tm5; free mm0 */
/* moved up from the transpose */
movq %mm5, %mm2
/* transpose - M4 part
* --------- ---------
* | M1 | M2 | | M1'| M3'|
* --------- --> ---------
* | M3 | M4 | | M2'| M4'|
* --------- ---------
* Two alternatives: use full mmword approach so the following code can be
* scheduled before the transpose is done without stores, or use the faster
* half mmword stores (when possible)
*/
movd %mm3, 8*9+4(%esi) /* MS part of tmt9 */
punpcklwd %mm6, %mm5
movd %mm7, 8*13+4(%esi) /* MS part of tmt13 */
punpckhwd %mm6, %mm2
movd %mm5, 8*9(%esi) /* LS part of tmt9 */
punpckhdq %mm3, %mm5 /* free mm3 */
movd %mm2, 8*13(%esi) /* LS part of tmt13 */
punpckhdq %mm7, %mm2 /* free mm7 */
/* moved up from the M3 transpose */
movq 8*8(%esi), %mm0
/* moved up from the M3 transpose */
movq 8*10(%esi), %mm1
/* moved up from the M3 transpose */
movq %mm0, %mm3
/* shuffle the rest of the data, and write it with 2 mmword writes */
movq %mm5, 8*11(%esi) /* tmt11 */
/* moved up from the M3 transpose */
punpcklwd %mm1, %mm0
movq %mm2, 8*15(%esi) /* tmt15 */
/* moved up from the M3 transpose */
punpckhwd %mm1, %mm3
/* transpose - M3 part
* moved up to previous code section
* movq 8*8(%esi), %mm0
* movq 8*10(%esi), %mm1
* movq %mm0, %mm3
* punpcklwd %mm1, %mm0
* punpckhwd %mm1, %mm3
*/
movq 8*12(%esi), %mm6
movq 8*14(%esi), %mm4
movq %mm6, %mm2
/* shuffle the data and write the lower parts of the transposed in 4 dwords */
punpcklwd %mm4, %mm6
movq %mm0, %mm1
punpckhdq %mm6, %mm1
movq %mm3, %mm7
punpckhwd %mm4, %mm2 /* free mm4 */
punpckldq %mm6, %mm0 /* free mm6 */
/* moved from next block */
movq 8*13(%esi), %mm4 /* tmt13 */
punpckldq %mm2, %mm3
punpckhdq %mm2, %mm7 /* free mm2 */
/* moved from next block */
movq %mm3, %mm5 /* duplicate tmt5 */
/* column 1: even part (after transpose)
* moved above
* movq %mm3, %mm5 duplicate tmt5
* movq 8*13(%esi), %mm4 tmt13
*/
psubsw %mm4, %mm3 /* V134 */
pmulhw MUNG(x5a825a825a825a82), %mm3 /* 23170 ->V136 */
movq 8*9(%esi), %mm6 /* tmt9 */
paddsw %mm4, %mm5 /* V135 ; mm4 free */
movq %mm0, %mm4 /* duplicate tmt1 */
paddsw %mm6, %mm0 /* V137 */
psubsw %mm6, %mm4 /* V138 ; mm6 free */
psllw $2, %mm3 /* t290 */
psubsw %mm5, %mm3 /* V139 */
movq %mm0, %mm6 /* duplicate V137 */
paddsw %mm5, %mm0 /* V140 */
movq %mm4, %mm2 /* duplicate V138 */
paddsw %mm3, %mm2 /* V141 */
psubsw %mm3, %mm4 /* V142 ; mm3 free */
movq %mm0, 8*9(%esi) /* V140 */
psubsw %mm5, %mm6 /* V143 ; mm5 free */
/* moved from next block */
movq 8*11(%esi), %mm0 /* tmt11 */
movq %mm2, 8*13(%esi) /* V141 */
/* moved from next block */
movq %mm0, %mm2 /* duplicate tmt11 */
/* column 1: odd part (after transpose) */
/* moved up to the prev block
* movq 8*11(%esi), %mm0 tmt11
* movq %mm0, %mm2 duplicate tmt11
*/
movq 8*15(%esi), %mm5 /* tmt15 */
psubsw %mm7, %mm0 /* V144 */
movq %mm0, %mm3 /* duplicate V144 */
paddsw %mm7, %mm2 /* V147 ; free mm7 */
pmulhw MUNG(x539f539f539f539f), %mm0 /* 21407-> V151 */
movq %mm1, %mm7 /* duplicate tmt3 */
paddsw %mm5, %mm7 /* V145 */
psubsw %mm5, %mm1 /* V146 ; free mm5 */
psubsw %mm1, %mm3 /* V150 */
movq %mm7, %mm5 /* duplicate V145 */
pmulhw MUNG(x4546454645464546), %mm1 /* 17734-> V153 */
psubsw %mm2, %mm5 /* V148 */
pmulhw MUNG(x61f861f861f861f8), %mm3 /* 25080-> V154 */
psllw $2, %mm0 /* t311 */
pmulhw MUNG(x5a825a825a825a82), %mm5 /* 23170-> V152 */
paddsw %mm2, %mm7 /* V149 ; free mm2 */
psllw $1, %mm1 /* t313 */
nop /* without the nop - freeze here for one clock */
movq %mm3, %mm2 /* duplicate V154 */
psubsw %mm0, %mm3 /* V155 ; free mm0 */
psubsw %mm2, %mm1 /* V156 ; free mm2 */
/* moved from the next block */
movq %mm6, %mm2 /* duplicate V143 */
/* moved from the next block */
movq 8*13(%esi), %mm0 /* V141 */
psllw $1, %mm1 /* t315 */
psubsw %mm7, %mm1 /* V157 (keep V149) */
psllw $2, %mm5 /* t317 */
psubsw %mm1, %mm5 /* V158 */
psllw $1, %mm3 /* t319 */
paddsw %mm5, %mm3 /* V159 */
/* column 1: output butterfly (after transform)
* moved to the prev block
* movq %mm6, %mm2 duplicate V143
* movq 8*13(%esi), %mm0 V141
*/
psubsw %mm3, %mm2 /* V163 */
paddsw %mm3, %mm6 /* V164 ; free mm3 */
movq %mm4, %mm3 /* duplicate V142 */
psubsw %mm5, %mm4 /* V165 ; free mm5 */
movq %mm2, MUNG(scratch7) /* out7 */
psraw $4, %mm6
psraw $4, %mm4
paddsw %mm5, %mm3 /* V162 */
movq 8*9(%esi), %mm2 /* V140 */
movq %mm0, %mm5 /* duplicate V141 */
	/* in order not to percolate this line up,
* we read 72(%esi) very near to this location
*/
movq %mm6, 8*9(%esi) /* out9 */
paddsw %mm1, %mm0 /* V161 */
movq %mm3, MUNG(scratch5) /* out5 */
psubsw %mm1, %mm5 /* V166 ; free mm1 */
movq %mm4, 8*11(%esi) /* out11 */
psraw $4, %mm5
movq %mm0, MUNG(scratch3) /* out3 */
movq %mm2, %mm4 /* duplicate V140 */
movq %mm5, 8*13(%esi) /* out13 */
paddsw %mm7, %mm2 /* V160 */
/* moved from the next block */
movq 8(%esi), %mm0
psubsw %mm7, %mm4 /* V167 ; free mm7 */
/* moved from the next block */
movq 8*3(%esi), %mm7
psraw $4, %mm4
movq %mm2, MUNG(scratch1) /* out1 */
/* moved from the next block */
movq %mm0, %mm1
movq %mm4, 8*15(%esi) /* out15 */
/* moved from the next block */
punpcklwd %mm7, %mm0
/* transpose - M2 parts
* moved up to the prev block
* movq 8(%esi), %mm0
* movq 8*3(%esi), %mm7
* movq %mm0, %mm1
* punpcklwd %mm7, %mm0
*/
movq 8*5(%esi), %mm5
punpckhwd %mm7, %mm1
movq 8*7(%esi), %mm4
movq %mm5, %mm3
	/* shuffle the data and write the lower parts of the transposed in 4 dwords */
movd %mm0, 8*8(%esi) /* LS part of tmt8 */
punpcklwd %mm4, %mm5
movd %mm1, 8*12(%esi) /* LS part of tmt12 */
punpckhwd %mm4, %mm3
movd %mm5, 8*8+4(%esi) /* MS part of tmt8 */
punpckhdq %mm5, %mm0 /* tmt10 */
movd %mm3, 8*12+4(%esi) /* MS part of tmt12 */
punpckhdq %mm3, %mm1 /* tmt14 */
/* transpose - M1 parts */
movq (%esi), %mm7
movq 8*2(%esi), %mm2
movq %mm7, %mm6
movq 8*4(%esi), %mm5
punpcklwd %mm2, %mm7
movq 8*6(%esi), %mm4
punpckhwd %mm2, %mm6 /* free mm2 */
movq %mm5, %mm3
punpcklwd %mm4, %mm5
punpckhwd %mm4, %mm3 /* free mm4 */
movq %mm7, %mm2
movq %mm6, %mm4
punpckldq %mm5, %mm7 /* tmt0 */
punpckhdq %mm5, %mm2 /* tmt2 ; free mm5 */
/* shuffle the rest of the data, and write it with 2 mmword writes */
punpckldq %mm3, %mm6 /* tmt4 */
/* moved from next block */
movq %mm2, %mm5 /* duplicate tmt2 */
punpckhdq %mm3, %mm4 /* tmt6 ; free mm3 */
/* moved from next block */
movq %mm0, %mm3 /* duplicate tmt10 */
/* column 0: odd part (after transpose)
*moved up to prev block
* movq %mm0, %mm3 duplicate tmt10
* movq %mm2, %mm5 duplicate tmt2
*/
psubsw %mm4, %mm0 /* V110 */
paddsw %mm4, %mm3 /* V113 ; free mm4 */
movq %mm0, %mm4 /* duplicate V110 */
paddsw %mm1, %mm2 /* V111 */
pmulhw MUNG(x539f539f539f539f), %mm0 /* 21407-> V117 */
psubsw %mm1, %mm5 /* V112 ; free mm1 */
psubsw %mm5, %mm4 /* V116 */
movq %mm2, %mm1 /* duplicate V111 */
pmulhw MUNG(x4546454645464546), %mm5 /* 17734-> V119 */
psubsw %mm3, %mm2 /* V114 */
pmulhw MUNG(x61f861f861f861f8), %mm4 /* 25080-> V120 */
paddsw %mm3, %mm1 /* V115 ; free mm3 */
pmulhw MUNG(x5a825a825a825a82), %mm2 /* 23170-> V118 */
psllw $2, %mm0 /* t266 */
movq %mm1, (%esi) /* save V115 */
psllw $1, %mm5 /* t268 */
psubsw %mm4, %mm5 /* V122 */
psubsw %mm0, %mm4 /* V121 ; free mm0 */
psllw $1, %mm5 /* t270 */
psubsw %mm1, %mm5 /* V123 ; free mm1 */
psllw $2, %mm2 /* t272 */
psubsw %mm5, %mm2 /* V124 (keep V123) */
psllw $1, %mm4 /* t274 */
movq %mm5, 8*2(%esi) /* save V123 ; free mm5 */
paddsw %mm2, %mm4 /* V125 (keep V124) */
/* column 0: even part (after transpose) */
movq 8*12(%esi), %mm0 /* tmt12 */
movq %mm6, %mm3 /* duplicate tmt4 */
psubsw %mm0, %mm6 /* V100 */
paddsw %mm0, %mm3 /* V101 ; free mm0 */
pmulhw MUNG(x5a825a825a825a82), %mm6 /* 23170 ->V102 */
movq %mm7, %mm5 /* duplicate tmt0 */
movq 8*8(%esi), %mm1 /* tmt8 */
paddsw %mm1, %mm7 /* V103 */
psubsw %mm1, %mm5 /* V104 ; free mm1 */
movq %mm7, %mm0 /* duplicate V103 */
psllw $2, %mm6 /* t245 */
paddsw %mm3, %mm7 /* V106 */
movq %mm5, %mm1 /* duplicate V104 */
psubsw %mm3, %mm6 /* V105 */
psubsw %mm3, %mm0 /* V109; free mm3 */
paddsw %mm6, %mm5 /* V107 */
psubsw %mm6, %mm1 /* V108 ; free mm6 */
/* column 0: output butterfly (after transform) */
movq %mm1, %mm3 /* duplicate V108 */
paddsw %mm2, %mm1 /* out4 */
psraw $4, %mm1
psubsw %mm2, %mm3 /* out10 ; free mm2 */
psraw $4, %mm3
movq %mm0, %mm6 /* duplicate V109 */
movq %mm1, 8*4(%esi) /* out4 ; free mm1 */
psubsw %mm4, %mm0 /* out6 */
movq %mm3, 8*10(%esi) /* out10 ; free mm3 */
psraw $4, %mm0
paddsw %mm4, %mm6 /* out8 ; free mm4 */
movq %mm7, %mm1 /* duplicate V106 */
movq %mm0, 8*6(%esi) /* out6 ; free mm0 */
psraw $4, %mm6
movq (%esi), %mm4 /* V115 */
movq %mm6, 8*8(%esi) /* out8 ; free mm6 */
movq %mm5, %mm2 /* duplicate V107 */
movq 8*2(%esi), %mm3 /* V123 */
paddsw %mm4, %mm7 /* out0 */
/* moved up from next block */
movq MUNG(scratch3), %mm0
psraw $4, %mm7
/* moved up from next block */
movq MUNG(scratch5), %mm6
psubsw %mm4, %mm1 /* out14 ; free mm4 */
paddsw %mm3, %mm5 /* out2 */
psraw $4, %mm1
movq %mm7, (%esi) /* out0 ; free mm7 */
psraw $4, %mm5
movq %mm1, 8*14(%esi) /* out14 ; free mm1 */
psubsw %mm3, %mm2 /* out12 ; free mm3 */
movq %mm5, 8*2(%esi) /* out2 ; free mm5 */
psraw $4, %mm2
/* moved up to the prev block */
movq MUNG(scratch7), %mm4
/* moved up to the prev block */
psraw $4, %mm0
movq %mm2, 8*12(%esi) /* out12 ; free mm2 */
/* moved up to the prev block */
psraw $4, %mm6
/* move back the data to its correct place
* moved up to the prev block
* movq MUNG(scratch3), %mm0
* movq MUNG(scratch5), %mm6
* movq MUNG(scratch7), %mm4
* psraw $4, %mm0
* psraw $4, %mm6
*/
movq MUNG(scratch1), %mm1
psraw $4, %mm4
movq %mm0, 8*3(%esi) /* out3 */
psraw $4, %mm1
movq %mm6, 8*5(%esi) /* out5 */
movq %mm4, 8*7(%esi) /* out7 */
movq %mm1, 8(%esi) /* out1 */
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
movl %ebp,%esp
popl %ebp
ret
.Lfe1:
.size IDCT_mmx,.Lfe1-IDCT_mmx
#ifdef __PIC__
.section .gnu.linkonce.t.__i686.get_pc_thunk.bx,"ax",@progbits
.globl __i686.get_pc_thunk.bx
.hidden __i686.get_pc_thunk.bx
.type __i686.get_pc_thunk.bx,@function
__i686.get_pc_thunk.bx:
movl (%esp), %ebx
ret
#endif
#endif /* i386 && USE_MMX */
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif
|
a1studmuffin/Cataclysm-DDA-Android
| 9,649
|
Android/jni/SDL2_mixer/external/flac-1.2.1/src/libFLAC/ppc/as/lpc_asm.s
|
; libFLAC - Free Lossless Audio Codec library
; Copyright (C) 2004,2005,2006,2007 Josh Coalson
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;
; - Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
;
; - Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in the
; documentation and/or other materials provided with the distribution.
;
; - Neither the name of the Xiph.org Foundation nor the names of its
; contributors may be used to endorse or promote products derived from
; this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.text
.align 2
.globl _FLAC__lpc_restore_signal_asm_ppc_altivec_16
.globl _FLAC__lpc_restore_signal_asm_ppc_altivec_16_order8
_FLAC__lpc_restore_signal_asm_ppc_altivec_16:
; r3: residual[]
; r4: data_len
; r5: qlp_coeff[]
; r6: order
; r7: lp_quantization
; r8: data[]
; see src/libFLAC/lpc.c:FLAC__lpc_restore_signal()
	; this is a PowerPC/AltiVec assembly version which requires bps<=16 (or actual
; bps<=15 for mid-side coding, since that uses an extra bit)
	; it should be fast; the inner loop is unrolled (it takes no more than
; 3*(order%4) instructions, all of which are arithmetic), and all of the
; coefficients and all relevant history stay in registers, so the outer loop
; has only one load from memory (the residual)
; I have not yet run this through simg4, so there may be some avoidable stalls,
; and there may be a somewhat more clever way to do the outer loop
; the branch mechanism may prevent dynamic loading; I still need to examine
; this issue, and there may be a more elegant method
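	; for reference, the scalar loop this routine implements is roughly (paraphrasing
	; the C routine referenced above, types elided):
	;	for (i = 0; i < data_len; i++) {
	;		sum = 0;
	;		for (j = 0; j < order; j++)
	;			sum += qlp_coeff[j] * data[i - j - 1];
	;		data[i] = residual[i] + (sum >> lp_quantization);
	;	}
	; the vector code keeps qlp_coeff[] (reversed and aligned) in v0-v7 and the most
	; recent history samples in v8-v15, so the loop body only needs the residual load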
stmw r31,-4(r1)
addi r9,r1,-28
li r31,0xf
andc r9,r9,r31 ; for quadword-aligned stack data
slwi r6,r6,2 ; adjust for word size
slwi r4,r4,2
add r4,r4,r8 ; r4 = data+data_len
mfspr r0,256 ; cache old vrsave
addis r31,0,hi16(0xfffffc00)
ori r31,r31,lo16(0xfffffc00)
mtspr 256,r31 ; declare VRs in vrsave
cmplw cr0,r8,r4 ; i<data_len
bc 4,0,L1400
; load coefficients into v0-v7 and initial history into v8-v15
li r31,0xf
and r31,r8,r31 ; r31: data%4
li r11,16
subf r31,r31,r11 ; r31: 4-(data%4)
slwi r31,r31,3 ; convert to bits for vsro
li r10,-4
stw r31,-4(r9)
lvewx v0,r10,r9
vspltisb v18,-1
vsro v18,v18,v0 ; v18: mask vector
li r31,0x8
lvsl v0,0,r31
vsldoi v0,v0,v0,12
li r31,0xc
lvsl v1,0,r31
vspltisb v2,0
vspltisb v3,-1
vmrglw v2,v2,v3
vsel v0,v1,v0,v2 ; v0: reversal permutation vector
add r10,r5,r6
lvsl v17,0,r5 ; v17: coefficient alignment permutation vector
vperm v17,v17,v17,v0 ; v17: reversal coefficient alignment permutation vector
mr r11,r8
lvsl v16,0,r11 ; v16: history alignment permutation vector
lvx v0,0,r5
addi r5,r5,16
lvx v1,0,r5
vperm v0,v0,v1,v17
lvx v8,0,r11
addi r11,r11,-16
lvx v9,0,r11
vperm v8,v9,v8,v16
cmplw cr0,r5,r10
bc 12,0,L1101
vand v0,v0,v18
addis r31,0,hi16(L1307)
ori r31,r31,lo16(L1307)
b L1199
L1101:
addi r5,r5,16
lvx v2,0,r5
vperm v1,v1,v2,v17
addi r11,r11,-16
lvx v10,0,r11
vperm v9,v10,v9,v16
cmplw cr0,r5,r10
bc 12,0,L1102
vand v1,v1,v18
addis r31,0,hi16(L1306)
ori r31,r31,lo16(L1306)
b L1199
L1102:
addi r5,r5,16
lvx v3,0,r5
vperm v2,v2,v3,v17
addi r11,r11,-16
lvx v11,0,r11
vperm v10,v11,v10,v16
cmplw cr0,r5,r10
bc 12,0,L1103
vand v2,v2,v18
addis r31,0,hi16(L1305)
ori r31,r31,lo16(L1305)
b L1199
L1103:
addi r5,r5,16
lvx v4,0,r5
vperm v3,v3,v4,v17
addi r11,r11,-16
lvx v12,0,r11
vperm v11,v12,v11,v16
cmplw cr0,r5,r10
bc 12,0,L1104
vand v3,v3,v18
addis r31,0,hi16(L1304)
ori r31,r31,lo16(L1304)
b L1199
L1104:
addi r5,r5,16
lvx v5,0,r5
vperm v4,v4,v5,v17
addi r11,r11,-16
lvx v13,0,r11
vperm v12,v13,v12,v16
cmplw cr0,r5,r10
bc 12,0,L1105
vand v4,v4,v18
addis r31,0,hi16(L1303)
ori r31,r31,lo16(L1303)
b L1199
L1105:
addi r5,r5,16
lvx v6,0,r5
vperm v5,v5,v6,v17
addi r11,r11,-16
lvx v14,0,r11
vperm v13,v14,v13,v16
cmplw cr0,r5,r10
bc 12,0,L1106
vand v5,v5,v18
addis r31,0,hi16(L1302)
ori r31,r31,lo16(L1302)
b L1199
L1106:
addi r5,r5,16
lvx v7,0,r5
vperm v6,v6,v7,v17
addi r11,r11,-16
lvx v15,0,r11
vperm v14,v15,v14,v16
cmplw cr0,r5,r10
bc 12,0,L1107
vand v6,v6,v18
addis r31,0,hi16(L1301)
ori r31,r31,lo16(L1301)
b L1199
L1107:
addi r5,r5,16
lvx v19,0,r5
vperm v7,v7,v19,v17
addi r11,r11,-16
lvx v19,0,r11
vperm v15,v19,v15,v16
vand v7,v7,v18
addis r31,0,hi16(L1300)
ori r31,r31,lo16(L1300)
L1199:
mtctr r31
; set up invariant vectors
vspltish v16,0 ; v16: zero vector
li r10,-12
lvsr v17,r10,r8 ; v17: result shift vector
lvsl v18,r10,r3 ; v18: residual shift back vector
li r10,-4
stw r7,-4(r9)
lvewx v19,r10,r9 ; v19: lp_quantization vector
L1200:
vmulosh v20,v0,v8 ; v20: sum vector
bcctr 20,0
L1300:
vmulosh v21,v7,v15
vsldoi v15,v15,v14,4 ; increment history
vaddsws v20,v20,v21
L1301:
vmulosh v21,v6,v14
vsldoi v14,v14,v13,4
vaddsws v20,v20,v21
L1302:
vmulosh v21,v5,v13
vsldoi v13,v13,v12,4
vaddsws v20,v20,v21
L1303:
vmulosh v21,v4,v12
vsldoi v12,v12,v11,4
vaddsws v20,v20,v21
L1304:
vmulosh v21,v3,v11
vsldoi v11,v11,v10,4
vaddsws v20,v20,v21
L1305:
vmulosh v21,v2,v10
vsldoi v10,v10,v9,4
vaddsws v20,v20,v21
L1306:
vmulosh v21,v1,v9
vsldoi v9,v9,v8,4
vaddsws v20,v20,v21
L1307:
vsumsws v20,v20,v16 ; v20[3]: sum
vsraw v20,v20,v19 ; v20[3]: sum >> lp_quantization
lvewx v21,0,r3 ; v21[n]: *residual
vperm v21,v21,v21,v18 ; v21[3]: *residual
vaddsws v20,v21,v20 ; v20[3]: *residual + (sum >> lp_quantization)
vsldoi v18,v18,v18,4 ; increment shift vector
vperm v21,v20,v20,v17 ; v21[n]: shift for storage
vsldoi v17,v17,v17,12 ; increment shift vector
stvewx v21,0,r8
vsldoi v20,v20,v20,12
vsldoi v8,v8,v20,4 ; insert value onto history
addi r3,r3,4
addi r8,r8,4
cmplw cr0,r8,r4 ; i<data_len
bc 12,0,L1200
L1400:
mtspr 256,r0 ; restore old vrsave
lmw r31,-4(r1)
blr
_FLAC__lpc_restore_signal_asm_ppc_altivec_16_order8:
; r3: residual[]
; r4: data_len
; r5: qlp_coeff[]
; r6: order
; r7: lp_quantization
; r8: data[]
; see _FLAC__lpc_restore_signal_asm_ppc_altivec_16() above
; this version assumes order<=8; it uses fewer vector registers, which should
; save time in context switches, and has less code, which may improve
; instruction caching
stmw r31,-4(r1)
addi r9,r1,-28
li r31,0xf
andc r9,r9,r31 ; for quadword-aligned stack data
slwi r6,r6,2 ; adjust for word size
slwi r4,r4,2
add r4,r4,r8 ; r4 = data+data_len
mfspr r0,256 ; cache old vrsave
addis r31,0,hi16(0xffc00000)
ori r31,r31,lo16(0xffc00000)
mtspr 256,r31 ; declare VRs in vrsave
cmplw cr0,r8,r4 ; i<data_len
bc 4,0,L2400
; load coefficients into v0-v1 and initial history into v2-v3
li r31,0xf
and r31,r8,r31 ; r31: data%4
li r11,16
subf r31,r31,r11 ; r31: 4-(data%4)
slwi r31,r31,3 ; convert to bits for vsro
li r10,-4
stw r31,-4(r9)
lvewx v0,r10,r9
vspltisb v6,-1
vsro v6,v6,v0 ; v6: mask vector
li r31,0x8
lvsl v0,0,r31
vsldoi v0,v0,v0,12
li r31,0xc
lvsl v1,0,r31
vspltisb v2,0
vspltisb v3,-1
vmrglw v2,v2,v3
vsel v0,v1,v0,v2 ; v0: reversal permutation vector
add r10,r5,r6
lvsl v5,0,r5 ; v5: coefficient alignment permutation vector
vperm v5,v5,v5,v0 ; v5: reversal coefficient alignment permutation vector
mr r11,r8
lvsl v4,0,r11 ; v4: history alignment permutation vector
lvx v0,0,r5
addi r5,r5,16
lvx v1,0,r5
vperm v0,v0,v1,v5
lvx v2,0,r11
addi r11,r11,-16
lvx v3,0,r11
vperm v2,v3,v2,v4
cmplw cr0,r5,r10
bc 12,0,L2101
vand v0,v0,v6
addis r31,0,hi16(L2301)
ori r31,r31,lo16(L2301)
b L2199
L2101:
addi r5,r5,16
lvx v7,0,r5
vperm v1,v1,v7,v5
addi r11,r11,-16
lvx v7,0,r11
vperm v3,v7,v3,v4
vand v1,v1,v6
addis r31,0,hi16(L2300)
ori r31,r31,lo16(L2300)
L2199:
mtctr r31
; set up invariant vectors
vspltish v4,0 ; v4: zero vector
li r10,-12
lvsr v5,r10,r8 ; v5: result shift vector
lvsl v6,r10,r3 ; v6: residual shift back vector
li r10,-4
stw r7,-4(r9)
lvewx v7,r10,r9 ; v7: lp_quantization vector
L2200:
vmulosh v8,v0,v2 ; v8: sum vector
bcctr 20,0
L2300:
vmulosh v9,v1,v3
vsldoi v3,v3,v2,4
vaddsws v8,v8,v9
L2301:
vsumsws v8,v8,v4 ; v8[3]: sum
vsraw v8,v8,v7 ; v8[3]: sum >> lp_quantization
lvewx v9,0,r3 ; v9[n]: *residual
vperm v9,v9,v9,v6 ; v9[3]: *residual
vaddsws v8,v9,v8 ; v8[3]: *residual + (sum >> lp_quantization)
vsldoi v6,v6,v6,4 ; increment shift vector
vperm v9,v8,v8,v5 ; v9[n]: shift for storage
vsldoi v5,v5,v5,12 ; increment shift vector
stvewx v9,0,r8
vsldoi v8,v8,v8,12
vsldoi v2,v2,v8,4 ; insert value onto history
addi r3,r3,4
addi r8,r8,4
cmplw cr0,r8,r4 ; i<data_len
bc 12,0,L2200
L2400:
mtspr 256,r0 ; restore old vrsave
lmw r31,-4(r1)
blr
|
a1studmuffin/Cataclysm-DDA-Android
| 9,638
|
Android/jni/SDL2_mixer/external/flac-1.2.1/src/libFLAC/ppc/gas/lpc_asm.s
|
# libFLAC - Free Lossless Audio Codec library
# Copyright (C) 2004,2005,2006,2007 Josh Coalson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# - Neither the name of the Xiph.org Foundation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.text
.align 2
.globl _FLAC__lpc_restore_signal_asm_ppc_altivec_16
.type _FLAC__lpc_restore_signal_asm_ppc_altivec_16, @function
.globl _FLAC__lpc_restore_signal_asm_ppc_altivec_16_order8
.type _FLAC__lpc_restore_signal_asm_ppc_altivec_16_order8, @function
_FLAC__lpc_restore_signal_asm_ppc_altivec_16:
# r3: residual[]
# r4: data_len
# r5: qlp_coeff[]
# r6: order
# r7: lp_quantization
# r8: data[]
# see src/libFLAC/lpc.c:FLAC__lpc_restore_signal()
	# this is a PowerPC/AltiVec assembly version which requires bps<=16 (or actual
# bps<=15 for mid-side coding, since that uses an extra bit)
	# it should be fast; the inner loop is unrolled (it takes no more than
# 3*(order%4) instructions, all of which are arithmetic), and all of the
# coefficients and all relevant history stay in registers, so the outer loop
# has only one load from memory (the residual)
# I have not yet run this through simg4, so there may be some avoidable stalls,
# and there may be a somewhat more clever way to do the outer loop
# the branch mechanism may prevent dynamic loading; I still need to examine
# this issue, and there may be a more elegant method
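	# in scalar terms each output sample is computed as
	#	data[i] = residual[i] + ((sum over j of qlp_coeff[j] * data[i-j-1]) >> lp_quantization)
	# with the reversed coefficients kept in v0-v7 and the running history in v8-v15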
stmw r31,-4(r1)
addi r9,r1,-28
li r31,0xf
andc r9,r9,r31 # for quadword-aligned stack data
slwi r6,r6,2 # adjust for word size
slwi r4,r4,2
add r4,r4,r8 # r4 = data+data_len
mfspr r0,256 # cache old vrsave
addis r31,0,0xffff
ori r31,r31,0xfc00
mtspr 256,r31 # declare VRs in vrsave
cmplw cr0,r8,r4 # i<data_len
bc 4,0,L1400
# load coefficients into v0-v7 and initial history into v8-v15
li r31,0xf
and r31,r8,r31 # r31: data%4
li r11,16
subf r31,r31,r11 # r31: 4-(data%4)
slwi r31,r31,3 # convert to bits for vsro
li r10,-4
stw r31,-4(r9)
lvewx v0,r10,r9
vspltisb v18,-1
vsro v18,v18,v0 # v18: mask vector
li r31,0x8
lvsl v0,0,r31
vsldoi v0,v0,v0,12
li r31,0xc
lvsl v1,0,r31
vspltisb v2,0
vspltisb v3,-1
vmrglw v2,v2,v3
vsel v0,v1,v0,v2 # v0: reversal permutation vector
add r10,r5,r6
lvsl v17,0,r5 # v17: coefficient alignment permutation vector
vperm v17,v17,v17,v0 # v17: reversal coefficient alignment permutation vector
mr r11,r8
lvsl v16,0,r11 # v16: history alignment permutation vector
lvx v0,0,r5
addi r5,r5,16
lvx v1,0,r5
vperm v0,v0,v1,v17
lvx v8,0,r11
addi r11,r11,-16
lvx v9,0,r11
vperm v8,v9,v8,v16
cmplw cr0,r5,r10
bc 12,0,L1101
vand v0,v0,v18
addis r31,0,L1307@ha
ori r31,r31,L1307@l
b L1199
L1101:
addi r5,r5,16
lvx v2,0,r5
vperm v1,v1,v2,v17
addi r11,r11,-16
lvx v10,0,r11
vperm v9,v10,v9,v16
cmplw cr0,r5,r10
bc 12,0,L1102
vand v1,v1,v18
addis r31,0,L1306@ha
ori r31,r31,L1306@l
b L1199
L1102:
addi r5,r5,16
lvx v3,0,r5
vperm v2,v2,v3,v17
addi r11,r11,-16
lvx v11,0,r11
vperm v10,v11,v10,v16
cmplw cr0,r5,r10
bc 12,0,L1103
vand v2,v2,v18
lis r31,L1305@ha
la r31,L1305@l(r31)
b L1199
L1103:
addi r5,r5,16
lvx v4,0,r5
vperm v3,v3,v4,v17
addi r11,r11,-16
lvx v12,0,r11
vperm v11,v12,v11,v16
cmplw cr0,r5,r10
bc 12,0,L1104
vand v3,v3,v18
lis r31,L1304@ha
la r31,L1304@l(r31)
b L1199
L1104:
addi r5,r5,16
lvx v5,0,r5
vperm v4,v4,v5,v17
addi r11,r11,-16
lvx v13,0,r11
vperm v12,v13,v12,v16
cmplw cr0,r5,r10
bc 12,0,L1105
vand v4,v4,v18
lis r31,L1303@ha
la r31,L1303@l(r31)
b L1199
L1105:
addi r5,r5,16
lvx v6,0,r5
vperm v5,v5,v6,v17
addi r11,r11,-16
lvx v14,0,r11
vperm v13,v14,v13,v16
cmplw cr0,r5,r10
bc 12,0,L1106
vand v5,v5,v18
lis r31,L1302@ha
la r31,L1302@l(r31)
b L1199
L1106:
addi r5,r5,16
lvx v7,0,r5
vperm v6,v6,v7,v17
addi r11,r11,-16
lvx v15,0,r11
vperm v14,v15,v14,v16
cmplw cr0,r5,r10
bc 12,0,L1107
vand v6,v6,v18
lis r31,L1301@ha
la r31,L1301@l(r31)
b L1199
L1107:
addi r5,r5,16
lvx v19,0,r5
vperm v7,v7,v19,v17
addi r11,r11,-16
lvx v19,0,r11
vperm v15,v19,v15,v16
vand v7,v7,v18
lis r31,L1300@ha
la r31,L1300@l(r31)
L1199:
mtctr r31
# set up invariant vectors
vspltish v16,0 # v16: zero vector
li r10,-12
lvsr v17,r10,r8 # v17: result shift vector
lvsl v18,r10,r3 # v18: residual shift back vector
li r10,-4
stw r7,-4(r9)
lvewx v19,r10,r9 # v19: lp_quantization vector
L1200:
vmulosh v20,v0,v8 # v20: sum vector
bcctr 20,0
L1300:
vmulosh v21,v7,v15
vsldoi v15,v15,v14,4 # increment history
vaddsws v20,v20,v21
L1301:
vmulosh v21,v6,v14
vsldoi v14,v14,v13,4
vaddsws v20,v20,v21
L1302:
vmulosh v21,v5,v13
vsldoi v13,v13,v12,4
vaddsws v20,v20,v21
L1303:
vmulosh v21,v4,v12
vsldoi v12,v12,v11,4
vaddsws v20,v20,v21
L1304:
vmulosh v21,v3,v11
vsldoi v11,v11,v10,4
vaddsws v20,v20,v21
L1305:
vmulosh v21,v2,v10
vsldoi v10,v10,v9,4
vaddsws v20,v20,v21
L1306:
vmulosh v21,v1,v9
vsldoi v9,v9,v8,4
vaddsws v20,v20,v21
L1307:
vsumsws v20,v20,v16 # v20[3]: sum
vsraw v20,v20,v19 # v20[3]: sum >> lp_quantization
lvewx v21,0,r3 # v21[n]: *residual
vperm v21,v21,v21,v18 # v21[3]: *residual
vaddsws v20,v21,v20 # v20[3]: *residual + (sum >> lp_quantization)
vsldoi v18,v18,v18,4 # increment shift vector
vperm v21,v20,v20,v17 # v21[n]: shift for storage
vsldoi v17,v17,v17,12 # increment shift vector
stvewx v21,0,r8
vsldoi v20,v20,v20,12
vsldoi v8,v8,v20,4 # insert value onto history
addi r3,r3,4
addi r8,r8,4
cmplw cr0,r8,r4 # i<data_len
bc 12,0,L1200
L1400:
mtspr 256,r0 # restore old vrsave
lmw r31,-4(r1)
blr
_FLAC__lpc_restore_signal_asm_ppc_altivec_16_order8:
# r3: residual[]
# r4: data_len
# r5: qlp_coeff[]
# r6: order
# r7: lp_quantization
# r8: data[]
# see _FLAC__lpc_restore_signal_asm_ppc_altivec_16() above
# this version assumes order<=8; it uses fewer vector registers, which should
# save time in context switches, and has less code, which may improve
# instruction caching
stmw r31,-4(r1)
addi r9,r1,-28
li r31,0xf
andc r9,r9,r31 # for quadword-aligned stack data
slwi r6,r6,2 # adjust for word size
slwi r4,r4,2
add r4,r4,r8 # r4 = data+data_len
mfspr r0,256 # cache old vrsave
addis r31,0,0xffc0
ori r31,r31,0x0000
mtspr 256,r31 # declare VRs in vrsave
cmplw cr0,r8,r4 # i<data_len
bc 4,0,L2400
# load coefficients into v0-v1 and initial history into v2-v3
li r31,0xf
and r31,r8,r31 # r31: data%4
li r11,16
subf r31,r31,r11 # r31: 4-(data%4)
slwi r31,r31,3 # convert to bits for vsro
li r10,-4
stw r31,-4(r9)
lvewx v0,r10,r9
vspltisb v6,-1
vsro v6,v6,v0 # v6: mask vector
li r31,0x8
lvsl v0,0,r31
vsldoi v0,v0,v0,12
li r31,0xc
lvsl v1,0,r31
vspltisb v2,0
vspltisb v3,-1
vmrglw v2,v2,v3
vsel v0,v1,v0,v2 # v0: reversal permutation vector
add r10,r5,r6
lvsl v5,0,r5 # v5: coefficient alignment permutation vector
vperm v5,v5,v5,v0 # v5: reversal coefficient alignment permutation vector
mr r11,r8
lvsl v4,0,r11 # v4: history alignment permutation vector
lvx v0,0,r5
addi r5,r5,16
lvx v1,0,r5
vperm v0,v0,v1,v5
lvx v2,0,r11
addi r11,r11,-16
lvx v3,0,r11
vperm v2,v3,v2,v4
cmplw cr0,r5,r10
bc 12,0,L2101
vand v0,v0,v6
lis r31,L2301@ha
la r31,L2301@l(r31)
b L2199
L2101:
addi r5,r5,16
lvx v7,0,r5
vperm v1,v1,v7,v5
addi r11,r11,-16
lvx v7,0,r11
vperm v3,v7,v3,v4
vand v1,v1,v6
lis r31,L2300@ha
la r31,L2300@l(r31)
L2199:
mtctr r31
# set up invariant vectors
vspltish v4,0 # v4: zero vector
li r10,-12
lvsr v5,r10,r8 # v5: result shift vector
lvsl v6,r10,r3 # v6: residual shift back vector
li r10,-4
stw r7,-4(r9)
lvewx v7,r10,r9 # v7: lp_quantization vector
L2200:
vmulosh v8,v0,v2 # v8: sum vector
bcctr 20,0
L2300:
vmulosh v9,v1,v3
vsldoi v3,v3,v2,4
vaddsws v8,v8,v9
L2301:
vsumsws v8,v8,v4 # v8[3]: sum
vsraw v8,v8,v7 # v8[3]: sum >> lp_quantization
lvewx v9,0,r3 # v9[n]: *residual
vperm v9,v9,v9,v6 # v9[3]: *residual
vaddsws v8,v9,v8 # v8[3]: *residual + (sum >> lp_quantization)
vsldoi v6,v6,v6,4 # increment shift vector
vperm v9,v8,v8,v5 # v9[n]: shift for storage
vsldoi v5,v5,v5,12 # increment shift vector
stvewx v9,0,r8
vsldoi v8,v8,v8,12
vsldoi v2,v2,v8,4 # insert value onto history
addi r3,r3,4
addi r8,r8,4
cmplw cr0,r8,r4 # i<data_len
bc 12,0,L2200
L2400:
mtspr 256,r0 # restore old vrsave
lmw r31,-4(r1)
blr
|
a3f/bareDOOM
| 1,422
|
arch/sandbox/board/stickypage.S
|
/* SPDX-License-Identifier: GPL-2.0 */
.globl stickypage;
stickypage:
/* nvmem */ .org 0x300
.byte 0x01
/* env */ .org 0x400
.byte 0x79, 0xba, 0x8f, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x69, 0x9c, 0x7f, 0x00, 0x00, 0x00, 0x00
/* state */ .org 0xC00
.byte 0xf3, 0xfd, 0x54, 0x23, 0x18, 0x00, 0x00, 0x00, 0xa6, 0x86, 0x3b, 0xaa, 0x00, 0x00, 0x08, 0x00
.byte 0x19, 0x70, 0x3d, 0xbb, 0x64, 0x89, 0x3b, 0x31, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0xf3, 0xfd, 0x54, 0x23, 0x18, 0x00, 0x00, 0x00, 0xa6, 0x86, 0x3b, 0xaa, 0x00, 0x00, 0x08, 0x00
.byte 0x19, 0x70, 0x3d, 0xbb, 0x64, 0x89, 0x3b, 0x31, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
.byte 0xf3, 0xfd, 0x54, 0x23, 0x18, 0x00, 0x00, 0x00, 0xa6, 0x86, 0x3b, 0xaa, 0x00, 0x00, 0x08, 0x00
.byte 0x19, 0x70, 0x3d, 0xbb, 0x64, 0x89, 0x3b, 0x31, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00
.fill 4096-(.-stickypage), 1, 0
.size stickypage, 4096
|
a3f/bareDOOM
| 1,824
|
arch/openrisc/lib/ashrdi3.S
|
/*
* (C) Copyright 2012 - Franck JULLIEN <elec4fun@gmail.com>
*
* Extracted from gcc generated assembly.
*
* Extended precision shifts.
*
* R3/R4 (MSW, LSW) has 64 bit value
* R5 has shift count
* result in R11/R12
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
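/*
 * In C terms this is roughly the arithmetic (signed) shift right of a 64-bit
 * value, sketched with MSW/LSW as above:
 *
 *	0 < count < 32:	LSW = (LSW >> count) | (MSW << (32 - count));
 *			MSW = MSW >> count;		(arithmetic)
 *	count >= 32:	LSW = MSW >> (count - 32);	(arithmetic)
 *			MSW = MSW >> 31;		(all sign bits)
 */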
.globl __ashrdi3
__ashrdi3:
l.sfeqi r5,0x0 /* if count = 0, go out */
l.bf out
l.addi r6,r0,0x20 /* r6 = 32 */
l.sub r6,r6,r5 /* r6 = 32 - count */
l.sfgtsi r6,0x0 /* if count >= 32 */
l.bnf more_than_32 /* branch to more_than_32 */
l.nop 0x0
less_than_32:
l.sll r6,r3,r6 /* r6 gets the bits moved from MSW to LSW */
l.srl r4,r4,r5 /* shift LSW */
l.sra r5,r3,r5 /* shift MSW to r5 */
l.or r4,r6,r4 /* LSW gets bits shifted from MSW */
l.ori r3,r5,0x0 /* r3 = MSW */
out:
l.ori r11,r3,0x0
l.jr r9
l.ori r12,r4,0x0
more_than_32:
l.srai r5,r3,0x1f /* r5 = MSW sign extended */
l.sub r4,r0,r6 /* r4 = -r6, the number of bits above 32 */
l.sra r4,r3,r4 /* LSW gets bits shifted from MSB */
l.j out /* go out */
l.ori r3,r5,0x0 /* r3 = MSW */
|
a3f/bareDOOM
| 1,066
|
arch/openrisc/lib/setjmp.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
/* int setjmp (jmp_buf); */
ENTRY(setjmp)
l.sw 0(r3), r1
l.sw 4(r3), r2
l.sw 8(r3), r9
l.sw 12(r3), r10
l.sw 16(r3), r14
l.sw 20(r3), r16
l.sw 24(r3), r18
l.sw 28(r3), r20
l.sw 32(r3), r22
l.sw 36(r3), r24
l.sw 40(r3), r26
l.sw 44(r3), r28
l.sw 48(r3), r30
l.jr r9
l.movhi r11, 0x0
END(setjmp)
/* volatile void longjmp (jmp_buf, int); */
ENTRY(longjmp)
l.lwz r1, 0(r3)
l.lwz r2, 4(r3)
	/* if r4 is 0, something is wrong, so set it to 1 */
l.sfeqi r4, 0x0
l.bnf 1f /* r4 != 0, longjmp value sensible */
l.nop
l.ori r4, r0, 0x1 /* make nonzero */
1:
l.lwz r9, 8(r3)
l.lwz r10, 12(r3)
l.lwz r14, 16(r3)
l.lwz r16, 20(r3)
l.lwz r18, 24(r3)
l.lwz r20, 28(r3)
l.lwz r22, 32(r3)
l.lwz r24, 36(r3)
l.lwz r26, 40(r3)
l.lwz r28, 44(r3)
l.lwz r30, 48(r3)
l.jr r9
l.addi r11, r4, 0x0
END(longjmp)
/* int initjmp(jmp_buf jmp, void __noreturn (*func)(void), void *stack_top); */
ENTRY(initjmp)
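	/* setjmp above saves r1 (stack pointer) at offset 0 and r9 (return address)
	 * at offset 8, so plant the new stack top and entry point in those slots */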
l.sw 8(r3), r4
l.sw 0(r3), r5
l.jr r9
l.movhi r11, 0x0
END(initjmp)
|
a3f/bareDOOM
| 1,174
|
arch/openrisc/lib/lshrdi3.S
|
/*
* (C) Copyright 2011 - Franck JULLIEN <elec4fun@gmail.com>
*
* Extracted from gcc generated assembly.
*
* Extended precision shifts.
*
* R3/R4 (MSW, LSW) has 64 bit value
* R5 has shift count
* result in R11/R12
*
*/
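/*
 * In C terms this is roughly the logical (unsigned) shift right of a 64-bit value:
 * for count < 32 the bits leaving the MSW are OR-ed into the LSW; for count >= 32
 * the LSW becomes MSW >> (count - 32) and the MSW becomes 0.
 */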
.globl __lshrdi3
__lshrdi3:
l.sfeqi r5,0x0
l.bf out /* if count = 0, go out */
l.addi r6,r0,0x20 /* r6 = 32 */
l.sub r6,r6,r5 /* r6 = 32 - count */
l.sfgtsi r6,0x0 /* if count >= 32 */
l.bnf more_than_32 /* branch to more_than_32 */
l.nop 0x0
less_than_32:
l.sll r6,r3,r6 /* r6 gets the bits moved from MSW to LSW */
l.srl r4,r4,r5 /* shift LSW */
l.srl r3,r3,r5 /* shift MSW */
l.or r4,r6,r4 /* LSW gets bits shifted from MSW */
out:
l.ori r11,r3,0x0
l.jr r9
l.ori r12,r4,0x0
more_than_32:
l.sub r4,r0,r6 /* r4 = -r6, the number of bits above 32 */
l.srl r4,r3,r4 /* LSW = MSW >> r4 */
l.j out /* go out */
l.addi r3,r0,0x0 /* MSW = 0 */
|
a3f/bareDOOM
| 1,173
|
arch/openrisc/lib/ashldi3.S
|
/*
* (C) Copyright 2011 - Franck JULLIEN <elec4fun@gmail.com>
*
* Extracted from gcc generated assembly.
*
* Extended precision shifts.
*
* R3/R4 (MSW, LSW) has 64 bit value
* R5 has shift count
* result in R11/R12
*
*/
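/*
 * In C terms this is roughly the shift left of a 64-bit value: for count < 32 the
 * bits leaving the LSW are OR-ed into the MSW; for count >= 32 the MSW becomes
 * LSW << (count - 32) and the LSW becomes 0.
 */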
.globl __ashldi3
__ashldi3:
l.sfeqi r5,0x0
l.bf out /* if count = 0, go out */
l.addi r6,r0,0x20 /* r6 = 32 */
l.sub r6,r6,r5 /* r6 = 32 - count */
l.sfgtsi r6,0x0 /* if count >= 32 */
l.bnf more_than_32 /* branch to more_than_32 */
l.nop 0x0
less_than_32:
l.srl r6,r4,r6 /* r6 gets the bits moved from LSW to MSW */
l.sll r3,r3,r5 /* shift MSW */
l.sll r4,r4,r5 /* shift LSW */
l.or r3,r6,r3 /* MSW gets bits shifted from LSW */
out:
l.ori r11,r3,0x0
l.jr r9
l.ori r12,r4,0x0
more_than_32:
l.sub r3,r0,r6 /* r3 = -r6, the number of bits above 32 */
l.sll r3,r4,r3 /* MSW = LSW << r3 */
l.j out /* go out */
l.addi r4,r0,0x0 /* LSW = 0 */
|
a3f/bareDOOM
| 1,310
|
arch/openrisc/lib/muldi3.S
|
/*
* (C) Copyright 2011 - Franck JULLIEN <elec4fun@gmail.com>
*
* Extracted from gcc generated assembly.
*
 * Multiply two quads (64-bit values). Below is an illustration of what is going on:
*
* | r3 | r4 |
* | r5 | r6 |
* --------------------
* | r4 * r6 |
* | r3 * r6 | | +
* | r5 * r4 | | +
* | r3 * r5 | | | +
* ------------------------------------------- =
* | 64 bits result |
*
*/
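/*
 * In C terms (a sketch): with a = r3:r4 and b = r5:r6 as MSW:LSW pairs, the code
 * computes the low 64 bits of the product:
 *
 *	result = (uint64_t)r4 * r6
 *	       + ((uint64_t)(uint32_t)(r3 * r6) << 32)
 *	       + ((uint64_t)(uint32_t)(r4 * r5) << 32);
 *
 * the r3 * r5 term in the picture above only affects bits 64 and up, so it is
 * never computed.
 */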
.globl __muldi3
__muldi3:
	/* start with the full 64-bit product of the two LSWs (r4 * r6) */
l.andi r7,r4,0xffff
l.srli r8,r4,0x10
l.andi r11,r6,0xffff
l.srli r12,r6,0x10
l.mul r13,r11,r7
l.mul r11,r11,r8
l.mul r7,r12,r7
l.srli r15,r13,0x10
l.add r7,r7,r15
l.add r7,r11,r7
l.sfleu r11,r7
l.bf no_carry
l.mul r8,r12,r8
l.movhi r15,0x1
l.add r8,r8,r15
no_carry:
/* Now compute r3 * r6 */
l.mul r6,r6,r3
/* and r4 * r5 */
l.mul r4,r4,r5
	/* finally add the previous partial results and put the result in r11:r12 */
l.srli r3,r7,0x10
l.slli r7,r7,0x10
l.andi r13,r13,0xffff
l.add r8,r8,r3
l.add r11,r4,r6
l.add r12,r7,r13
l.add r11,r11,r8
l.jr r9
l.nop
|
a3f/bareDOOM
| 1,411
|
arch/openrisc/cpu/barebox.lds.S
|
/*
* barebox - barebox.lds.S
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <config.h>
#include <asm-generic/barebox.lds.h>
OUTPUT_FORMAT("elf32-or1k", "elf32-or1k", "elf32-or1k")
ENTRY(__reset)
__DYNAMIC = 0;
MEMORY
{
vectors : ORIGIN = 0, LENGTH = 0x2000
ram : ORIGIN = TEXT_BASE,
LENGTH = BAREBOX_RESERVED_SIZE
}
SECTIONS
{
.vectors :
{
*(.vectors)
} > vectors
. = ALIGN(4);
__start = .;
.text : AT (__start) {
_stext = .;
*(.text)
_etext = .;
*(.lit)
*(.shdata)
_endtext = .;
} > ram
. = ALIGN(4);
.rodata : {
*(.rodata);
*(.rodata.*)
*(.bbenv.rodata.*)
RO_DATA_SECTION
} > ram
__etext = .; /* End of text and rodata section */
. = ALIGN(4);
.data : {
sdata = .;
_sdata = .;
*(.data)
edata = .;
_edata = .;
} > ram
. = ALIGN(4);
.bss :
{
__bss_start = .;
_bss_start = .;
*(.shbss)
*(.bss)
*(COMMON)
_bss_end = .;
__bss_stop = .;
} > ram
__end = .;
}
|
a3f/bareDOOM
| 6,638
|
arch/openrisc/cpu/start.S
|
/*
* (C) Copyright 2011, Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
* (C) Copyright 2011, Julius Baxter <julius@opencores.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <config.h>
#include <asm-generic/memory_layout.h>
#include <asm/spr-defs.h>
#define EXCEPTION_STACK_SIZE (128+128)
#define HANDLE_EXCEPTION \
l.addi r1, r1, -EXCEPTION_STACK_SIZE ;\
l.sw 0x1c(r1), r9 ;\
l.jal _exception_handler ;\
l.nop ;\
l.lwz r9, 0x1c(r1) ;\
l.addi r1, r1, EXCEPTION_STACK_SIZE ;\
l.rfe ;\
l.nop
.section .vectors, "ax"
.global __reset
/* reset */
.org 0x100
__reset:
/* there is no guarantee r0 is hardwired to zero, clear it here */
l.movhi r0, 0x0
/* reset stack and frame pointers */
l.movhi r1, 0x0
l.movhi r2, 0x0
/* set supervisor mode */
l.ori r3,r0,SPR_SR_SM
l.mtspr r0,r3,SPR_SR
l.jal _cur
l.nop
_cur:
l.ori r8, r9, 0 /* Get _cur current address */
l.movhi r3, hi(_cur)
l.ori r3, r3, lo(_cur)
l.sfeq r8, r3 /* If we are running at the linked address */
	l.bf _no_vector_reloc /* there is no need for relocation */
l.sub r8, r8, r3
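	/* r8 now holds the load offset (runtime address minus linked address of _cur);
	 * the l.sub above sits in the branch delay slot, so it executes either way */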
l.mfspr r4, r0, SPR_CPUCFGR
l.andi r4, r4, SPR_CPUCFGR_EVBARP /* Exception Vector Base Address Register present ? */
l.sfnei r4,0
l.bnf _reloc_vectors
l.movhi r5, 0 /* Destination */
l.mfspr r4, r0, SPR_EVBAR
l.add r5, r5, r4
_reloc_vectors:
/* Relocate vectors*/
l.movhi r6, hi(__start) /* Length */
l.ori r6, r6, lo(__start)
l.ori r3, r8, 0
.L_relocvectors:
l.lwz r7, 0(r3)
l.sw 0(r5), r7
l.addi r5, r5, 4
l.sfeq r5, r6
l.bnf .L_relocvectors
l.addi r3, r3, 4
_no_vector_reloc:
/* Relocate barebox */
l.movhi r3,hi(__start) /* source start offset */
l.ori r3,r3,lo(__start)
l.add r3,r8,r3
l.movhi r4,hi(_stext) /* dest start address */
l.ori r4,r4,lo(_stext)
l.movhi r5,hi(__end) /* dest end address */
l.ori r5,r5,lo(__end)
.L_reloc:
l.lwz r6,0(r3)
l.sw 0(r4),r6
l.addi r3,r3,4
l.sfltu r4,r5
l.bf .L_reloc
l.addi r4,r4,4 /*delay slot */
/* JUMP TO RELOC ADDR */
l.movhi r4, hi(_start)
l.ori r4, r4, lo(_start)
l.jr r4
l.nop
/* bus error */
.org 0x200
HANDLE_EXCEPTION
/* data page fault */
.org 0x300
HANDLE_EXCEPTION
/* instruction page fault */
.org 0x400
HANDLE_EXCEPTION
/* tick timer */
.org 0x500
HANDLE_EXCEPTION
/* alignment */
.org 0x600
HANDLE_EXCEPTION
/* illegal instruction */
.org 0x700
HANDLE_EXCEPTION
/* external interrupt */
.org 0x800
HANDLE_EXCEPTION
/* D-TLB miss */
.org 0x900
HANDLE_EXCEPTION
/* I-TLB miss */
.org 0xa00
HANDLE_EXCEPTION
/* range */
.org 0xb00
HANDLE_EXCEPTION
/* system call */
.org 0xc00
HANDLE_EXCEPTION
/* floating point */
.org 0xd00
HANDLE_EXCEPTION
/* trap */
.org 0xe00
HANDLE_EXCEPTION
/* reserved */
.org 0xf00
HANDLE_EXCEPTION
/* reserved */
.org 0x1100
HANDLE_EXCEPTION
/* reserved */
.org 0x1200
HANDLE_EXCEPTION
/* reserved */
.org 0x1300
HANDLE_EXCEPTION
/* reserved */
.org 0x1400
HANDLE_EXCEPTION
/* reserved */
.org 0x1500
HANDLE_EXCEPTION
/* reserved */
.org 0x1600
HANDLE_EXCEPTION
/* reserved */
.org 0x1700
HANDLE_EXCEPTION
/* reserved */
.org 0x1800
HANDLE_EXCEPTION
/* reserved */
.org 0x1900
HANDLE_EXCEPTION
/* reserved */
.org 0x1a00
HANDLE_EXCEPTION
/* reserved */
.org 0x1b00
HANDLE_EXCEPTION
/* reserved */
.org 0x1c00
HANDLE_EXCEPTION
/* reserved */
.org 0x1d00
HANDLE_EXCEPTION
/* reserved */
.org 0x1e00
HANDLE_EXCEPTION
/* reserved */
.org 0x1f00
HANDLE_EXCEPTION
/* Startup routine */
.text
.global _start
_start:
/* Init stack and frame pointers */
l.movhi r1, hi(STACK_BASE)
l.ori r1, r1, lo(STACK_BASE)
l.or r2, r0, r1
/* clear BSS segments */
l.movhi r4, hi(_bss_start)
l.ori r4, r4, lo(_bss_start)
l.movhi r5, hi(_bss_end)
l.ori r5, r5, lo(_bss_end)
.L_clear_bss:
l.sw 0(r4), r0
l.sfltu r4,r5
l.bf .L_clear_bss
l.addi r4,r4,4
/* Reset registers before jumping to board_init */
l.andi r3, r0, 0
l.andi r4, r0, 0
l.andi r5, r0, 0
l.andi r6, r0, 0
l.andi r7, r0, 0
l.andi r8, r0, 0
l.andi r9, r0, 0
l.andi r10, r0, 0
l.andi r11, r0, 0
l.andi r12, r0, 0
l.andi r13, r0, 0
l.andi r14, r0, 0
l.andi r15, r0, 0
l.andi r17, r0, 0
l.andi r18, r0, 0
l.andi r19, r0, 0
l.andi r20, r0, 0
l.andi r21, r0, 0
l.andi r22, r0, 0
l.andi r23, r0, 0
l.andi r24, r0, 0
l.andi r25, r0, 0
l.andi r26, r0, 0
l.andi r27, r0, 0
l.andi r28, r0, 0
l.andi r29, r0, 0
l.andi r30, r0, 0
l.andi r31, r0, 0
l.j openrisc_start_barebox
l.nop
.size _start, .-_start
/*
* Store state onto stack and call the real exception handler
*/
.section .text
.extern exception_handler
.type _exception_handler,@function
_exception_handler:
/* Store state (r9 already saved)*/
l.sw 0x00(r1), r2
l.sw 0x04(r1), r3
l.sw 0x08(r1), r4
l.sw 0x0c(r1), r5
l.sw 0x10(r1), r6
l.sw 0x14(r1), r7
l.sw 0x18(r1), r8
l.sw 0x20(r1), r10
l.sw 0x24(r1), r11
l.sw 0x28(r1), r12
l.sw 0x2c(r1), r13
l.sw 0x30(r1), r14
l.sw 0x34(r1), r15
l.sw 0x38(r1), r16
l.sw 0x3c(r1), r17
l.sw 0x40(r1), r18
l.sw 0x44(r1), r19
l.sw 0x48(r1), r20
l.sw 0x4c(r1), r21
l.sw 0x50(r1), r22
l.sw 0x54(r1), r23
l.sw 0x58(r1), r24
l.sw 0x5c(r1), r25
l.sw 0x60(r1), r26
l.sw 0x64(r1), r27
l.sw 0x68(r1), r28
l.sw 0x6c(r1), r29
l.sw 0x70(r1), r30
l.sw 0x74(r1), r31
/* Save return address */
l.or r14, r0, r9
/* Call exception handler with the link address as argument */
l.jal exception_handler
l.or r3, r0, r14
/* Load return address */
l.or r9, r0, r14
/* Restore state */
l.lwz r2, 0x00(r1)
l.lwz r3, 0x04(r1)
l.lwz r4, 0x08(r1)
l.lwz r5, 0x0c(r1)
l.lwz r6, 0x10(r1)
l.lwz r7, 0x14(r1)
l.lwz r8, 0x18(r1)
l.lwz r10, 0x20(r1)
l.lwz r11, 0x24(r1)
l.lwz r12, 0x28(r1)
l.lwz r13, 0x2c(r1)
l.lwz r14, 0x30(r1)
l.lwz r15, 0x34(r1)
l.lwz r16, 0x38(r1)
l.lwz r17, 0x3c(r1)
l.lwz r18, 0x40(r1)
l.lwz r19, 0x44(r1)
l.lwz r20, 0x48(r1)
l.lwz r21, 0x4c(r1)
l.lwz r22, 0x50(r1)
l.lwz r23, 0x54(r1)
l.lwz r24, 0x58(r1)
l.lwz r25, 0x5c(r1)
l.lwz r26, 0x60(r1)
l.lwz r27, 0x64(r1)
l.lwz r28, 0x68(r1)
l.lwz r29, 0x6c(r1)
l.lwz r30, 0x70(r1)
l.lwz r31, 0x74(r1)
l.jr r9
l.nop
|
a3f/bareDOOM
| 1,499
|
arch/riscv/lib/barebox.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Antony Pavlov <antonynpavlov@gmail.com>
*
* This file is part of barebox.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm-generic/barebox.lds.h>
OUTPUT_ARCH(riscv)
ENTRY(start)
#ifdef CONFIG_64BIT
OUTPUT_FORMAT("elf64-littleriscv")
#else
OUTPUT_FORMAT("elf32-littleriscv")
#endif
SECTIONS
{
. = 0x0;
.image_start : { *(.__image_start) }
. = ALIGN(4);
._text : { *(._text) }
.text :
{
_stext = .;
*(.text_entry*)
__bare_init_start = .;
*(.text_bare_init*)
__bare_init_end = .;
__exceptions_start = .;
KEEP(*(.text_exceptions*))
__exceptions_stop = .;
*(.text*)
}
BAREBOX_BARE_INIT_SIZE
. = ALIGN(4);
.rodata : {
*(.rodata*)
RO_DATA_SECTION
}
_etext = .; /* End of text and rodata section */
_sdata = .;
. = ALIGN(4);
.data : { *(.data*) }
.barebox_imd : { BAREBOX_IMD }
/DISCARD/ : { *(.rela.plt*) }
.rela.dyn : {
__rel_dyn_start = .;
*(.rel*)
__rel_dyn_end = .;
}
.dynsym : {
__dynsym_start = .;
*(.dynsym)
__dynsym_end = .;
}
_edata = .;
.image_end : { *(.__image_end) }
. = ALIGN(4);
.__bss_start : { *(.__bss_start) }
.bss : { *(.bss*) }
.__bss_stop : { *(.__bss_stop) }
_end = .;
_barebox_image_size = __bss_start;
}
|
a3f/bareDOOM
| 2,147
|
arch/riscv/lib/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Regents of the University of California
*/
#include <linux/linkage.h>
#include <asm/asm.h>
/* void *memcpy(void *, const void *, size_t) */
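/*
 * Descriptive note on the strategy: sizes below 128 bytes, or source and
 * destination with mismatched low-order bits, fall through to the simple tail
 * loops; otherwise the head is copied byte-wise up to an SZREG boundary, the bulk
 * is moved 16 registers per iteration, and the remainder is finished word- or
 * byte-wise.
 */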
ENTRY(__memcpy)
WEAK(memcpy)
move t6, a0 /* Preserve return value */
/* Defer to byte-oriented copy for small sizes */
sltiu a3, a2, 128
bnez a3, 4f
/* Use word-oriented copy only if low-order bits match */
andi a3, t6, SZREG-1
andi a4, a1, SZREG-1
bne a3, a4, 4f
beqz a3, 2f /* Skip if already aligned */
/*
* Round to nearest double word-aligned address
* greater than or equal to start address
*/
andi a3, a1, ~(SZREG-1)
addi a3, a3, SZREG
/* Handle initial misalignment */
sub a4, a3, a1
1:
lb a5, 0(a1)
addi a1, a1, 1
sb a5, 0(t6)
addi t6, t6, 1
bltu a1, a3, 1b
sub a2, a2, a4 /* Update count */
2:
andi a4, a2, ~((16*SZREG)-1)
beqz a4, 4f
add a3, a1, a4
3:
REG_L a4, 0(a1)
REG_L a5, SZREG(a1)
REG_L a6, 2*SZREG(a1)
REG_L a7, 3*SZREG(a1)
REG_L t0, 4*SZREG(a1)
REG_L t1, 5*SZREG(a1)
REG_L t2, 6*SZREG(a1)
REG_L t3, 7*SZREG(a1)
REG_L t4, 8*SZREG(a1)
REG_L t5, 9*SZREG(a1)
REG_S a4, 0(t6)
REG_S a5, SZREG(t6)
REG_S a6, 2*SZREG(t6)
REG_S a7, 3*SZREG(t6)
REG_S t0, 4*SZREG(t6)
REG_S t1, 5*SZREG(t6)
REG_S t2, 6*SZREG(t6)
REG_S t3, 7*SZREG(t6)
REG_S t4, 8*SZREG(t6)
REG_S t5, 9*SZREG(t6)
REG_L a4, 10*SZREG(a1)
REG_L a5, 11*SZREG(a1)
REG_L a6, 12*SZREG(a1)
REG_L a7, 13*SZREG(a1)
REG_L t0, 14*SZREG(a1)
REG_L t1, 15*SZREG(a1)
addi a1, a1, 16*SZREG
REG_S a4, 10*SZREG(t6)
REG_S a5, 11*SZREG(t6)
REG_S a6, 12*SZREG(t6)
REG_S a7, 13*SZREG(t6)
REG_S t0, 14*SZREG(t6)
REG_S t1, 15*SZREG(t6)
addi t6, t6, 16*SZREG
bltu a1, a3, 3b
andi a2, a2, (16*SZREG)-1 /* Update count */
4:
/* Handle trailing misalignment */
beqz a2, 6f
add a3, a1, a2
/* Use word-oriented copy if co-aligned to word boundary */
or a5, a1, t6
or a5, a5, a3
andi a5, a5, 3
bnez a5, 5f
7:
lw a4, 0(a1)
addi a1, a1, 4
sw a4, 0(t6)
addi t6, t6, 4
bltu a1, a3, 7b
ret
5:
lb a4, 0(a1)
addi a1, a1, 1
sb a4, 0(t6)
addi t6, t6, 1
bltu a1, a3, 5b
6:
ret
END(__memcpy)
|
a3f/bareDOOM
| 1,343
|
arch/riscv/lib/memmove.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
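/* void *memmove(void *, const void *, size_t) */
/*
 * Descriptive note: when the destination lies below the source the copy runs
 * forward, otherwise backward from the end; both directions move whole words
 * first (count/4 iterations) and finish the remaining count%4 bytes one at a time.
 */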
ENTRY(__memmove)
WEAK(memmove)
move t0, a0
move t1, a1
beq a0, a1, exit_memcpy
beqz a2, exit_memcpy
srli t2, a2, 0x2
slt t3, a0, a1
beqz t3, do_reverse
andi a2, a2, 0x3
li t4, 1
beqz t2, byte_copy
word_copy:
lw t3, 0(a1)
addi t2, t2, -1
addi a1, a1, 4
sw t3, 0(a0)
addi a0, a0, 4
bnez t2, word_copy
beqz a2, exit_memcpy
j byte_copy
do_reverse:
add a0, a0, a2
add a1, a1, a2
andi a2, a2, 0x3
li t4, -1
beqz t2, reverse_byte_copy
reverse_word_copy:
addi a1, a1, -4
addi t2, t2, -1
lw t3, 0(a1)
addi a0, a0, -4
sw t3, 0(a0)
bnez t2, reverse_word_copy
beqz a2, exit_memcpy
reverse_byte_copy:
addi a0, a0, -1
addi a1, a1, -1
byte_copy:
lb t3, 0(a1)
addi a2, a2, -1
sb t3, 0(a0)
add a1, a1, t4
add a0, a0, t4
bnez a2, byte_copy
exit_memcpy:
move a0, t0
move a1, t1
ret
END(__memmove)
|
a3f/bareDOOM
| 1,433
|
arch/riscv/lib/pbl.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SPDX-FileCopyrightText: 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix */
#include <linux/sizes.h>
#include <asm-generic/barebox.lds.h>
#include <asm-generic/memory_layout.h>
OUTPUT_ARCH(riscv)
#ifdef CONFIG_64BIT
OUTPUT_FORMAT("elf64-littleriscv")
#else
OUTPUT_FORMAT("elf32-littleriscv")
#endif
SECTIONS
{
. = 0x0;
.image_start : { *(.__image_start) }
. = ALIGN(4);
._text : { *(._text) }
.text :
{
_stext = .;
*(.text_head_entry*)
__bare_init_start = .;
*(.text_bare_init*)
__bare_init_end = .;
*(.text*)
}
BAREBOX_BARE_INIT_SIZE
BAREBOX_PBL_SIZE
. = ALIGN(4);
.rodata : { *(.rodata*) }
.barebox_imd : { BAREBOX_IMD }
_etext = .; /* End of text and rodata section */
.data : { *(.data*) }
__shasum_start = .;
.shasum : {
KEEP(*(.shasum))
}
__shasum_end = .;
/DISCARD/ : { *(.rela.plt*) }
.rela.dyn : {
__rel_dyn_start = .;
*(.rela*)
__rel_dyn_end = .;
}
.dynsym : {
__dynsym_start = .;
*(.dynsym)
__dynsym_end = .;
}
pbl_code_size = .;
.__bss_start : { *(.__bss_start) }
.bss : { *(.bss*) }
.__bss_stop : { *(.__bss_stop) }
_end = .;
pbl_memory_size = .;
. = ALIGN(4);
__piggydata_start = .;
.piggydata : {
*(.piggydata)
}
__piggydata_end = .;
.image_end : { KEEP(*(.__image_end)) }
pbl_image_size = .;
_barebox_image_size = __image_end;
_barebox_pbl_size = __bss_start;
}
|
a3f/bareDOOM
| 1,276
|
arch/riscv/lib/setupc.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: Copyright (c) 2021 Ahmad Fatoum, Pengutronix */
#include <linux/linkage.h>
#include <asm/sections.h>
#include <asm/asm.h>
/*
* setup_c: clear bss
*/
.section .text.setup_c
ENTRY(setup_c)
lla a0, __bss_start
li a1, 0
lla a2, __bss_stop
sub a2, a2, a0
j __memset
ENDPROC(setup_c)
/*
* void relocate_to_adr(unsigned long targetadr)
*
* Copy binary to targetadr, relocate code and continue
* executing at new address.
*/
.section .text.relocate_to_adr
ENTRY(relocate_to_adr)
/* a0: target address */
addi sp, sp, -SZREG * 2
lla a1, _text /* a1: source address */
/* adjust return address */
sub ra, ra, a1 /* sub address where we are actually running */
add ra, ra, a0 /* add address where we are going to run */
REG_S ra, (SZREG * 2)(sp)
beq a0, a1, copied /* skip if already at new address */
lla a2, copied
sub a2, a2, a1
add a2, a2, a0
REG_S a2, (SZREG * 1)(sp)
lla a2, __bss_start
sub a2, a2, a1 /* a2: size */
jal __memcpy
jal sync_caches_for_execution
REG_L a0, (SZREG * 1)(sp)
jr a0 /* jump to relocated address */
copied:
REG_L ra, (SZREG * 2)(sp)
addi sp, sp, SZREG * 2
j relocate_to_current_adr /* relocate binary */
ENDPROC(relocate_to_adr)
|
a3f/bareDOOM
| 2,363
|
arch/riscv/lib/memset.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Regents of the University of California
*/
#include <linux/linkage.h>
#include <asm/asm.h>
/* void *memset(void *, int, size_t) */
ENTRY(__memset)
WEAK(memset)
move t0, a0 /* Preserve return value */
/* Defer to byte-oriented fill for small sizes */
sltiu a3, a2, 16
bnez a3, 4f
/*
* Round to nearest XLEN-aligned address
* greater than or equal to start address
*/
addi a3, t0, SZREG-1
andi a3, a3, ~(SZREG-1)
beq a3, t0, 2f /* Skip if already aligned */
/* Handle initial misalignment */
sub a4, a3, t0
1:
sb a1, 0(t0)
addi t0, t0, 1
bltu t0, a3, 1b
sub a2, a2, a4 /* Update count */
2: /* Duff's device with 32 XLEN stores per iteration */
/* Broadcast value into all bytes */
andi a1, a1, 0xff
slli a3, a1, 8
or a1, a3, a1
slli a3, a1, 16
or a1, a3, a1
#ifdef CONFIG_64BIT
slli a3, a1, 32
or a1, a3, a1
#endif
/* Calculate end address */
andi a4, a2, ~(SZREG-1)
add a3, t0, a4
andi a4, a4, 31*SZREG /* Calculate remainder */
beqz a4, 3f /* Shortcut if no remainder */
neg a4, a4
addi a4, a4, 32*SZREG /* Calculate initial offset */
/* Adjust start address with offset */
sub t0, t0, a4
/* Jump into loop body */
/* Assumes 32-bit instruction lengths */
la a5, 3f
#ifdef CONFIG_64BIT
srli a4, a4, 1
#endif
add a5, a5, a4
jr a5
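	/*
	 * Descriptive note: a4 is the number of bytes the first, partial pass must
	 * skip; each store below is one 4-byte instruction covering SZREG bytes, so
	 * the byte count is converted into an instruction offset (halved above when
	 * SZREG is 8) and added to the address of label 3, entering the unrolled
	 * block part-way through.
	 */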
3:
REG_S a1, 0(t0)
REG_S a1, SZREG(t0)
REG_S a1, 2*SZREG(t0)
REG_S a1, 3*SZREG(t0)
REG_S a1, 4*SZREG(t0)
REG_S a1, 5*SZREG(t0)
REG_S a1, 6*SZREG(t0)
REG_S a1, 7*SZREG(t0)
REG_S a1, 8*SZREG(t0)
REG_S a1, 9*SZREG(t0)
REG_S a1, 10*SZREG(t0)
REG_S a1, 11*SZREG(t0)
REG_S a1, 12*SZREG(t0)
REG_S a1, 13*SZREG(t0)
REG_S a1, 14*SZREG(t0)
REG_S a1, 15*SZREG(t0)
REG_S a1, 16*SZREG(t0)
REG_S a1, 17*SZREG(t0)
REG_S a1, 18*SZREG(t0)
REG_S a1, 19*SZREG(t0)
REG_S a1, 20*SZREG(t0)
REG_S a1, 21*SZREG(t0)
REG_S a1, 22*SZREG(t0)
REG_S a1, 23*SZREG(t0)
REG_S a1, 24*SZREG(t0)
REG_S a1, 25*SZREG(t0)
REG_S a1, 26*SZREG(t0)
REG_S a1, 27*SZREG(t0)
REG_S a1, 28*SZREG(t0)
REG_S a1, 29*SZREG(t0)
REG_S a1, 30*SZREG(t0)
REG_S a1, 31*SZREG(t0)
addi t0, t0, 32*SZREG
bltu t0, a3, 3b
andi a2, a2, SZREG-1 /* Update count */
4:
/* Handle trailing misalignment */
beqz a2, 6f
add a3, t0, a2
5:
sb a1, 0(t0)
addi t0, t0, 1
bltu t0, a3, 5b
6:
ret
END(__memset)
|
a3f/bareDOOM
| 1,314
|
arch/arm/mach-omap/auxcr.S
|
/*
* Copyright (c) 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/linkage.h>
#include <asm/unified.h>
.arm
ENTRY(setup_auxcr)
mov r12, #0x3
mrc p15, 0, r0, c1, c0, 1
orr r0, r0, #0x10 @ Enable ASA
orr r0, r0, #1 << 5 @ Enable L1NEON
.word 0xE1600070 @ SMC
mov r12, #0x2
mrc p15, 1, r0, c9, c0, 2
@ Set PLD_FWD bit in L2AUXCR (Cortex-A8 erratum 725233 workaround)
orr r0, r0, #1 << 27
.word 0xE1600070 @ SMC
bx lr
ENDPROC(setup_auxcr)
.arm
ENTRY(omap3_gp_romcode_call)
push {r4-r12, lr} @ Save all registers from ROM code!
mov r12, r0 @ Copy the Service ID in R12
mov r0, r1 @ Copy parameter to R0
mcr p15, 0, r0, c7, c10, 4 @ DSB
mcr p15, 0, r0, c7, c10, 5 @ DMB
.word 0xe1600070 @ SMC #0 to enter monitor - hand assembled
@ because we use -march=armv5
pop {r4-r12, pc}
ENDPROC(omap3_gp_romcode_call)
|
a3f/bareDOOM
| 1,541
|
arch/arm/mach-ep93xx/lowlevel_init.S
|
/*
* Low-level initialization for EP93xx
*
* Copyright (C) 2009 Matthias Kaehlcke <matthias@kaehlcke.net>
*
* Copyright (C) 2006 Dominic Rath <Dominic.Rath@gmx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/sizes.h>
#include <mach/ep93xx-regs.h>
#include <asm/barebox-arm-head.h>
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
bl arm_cpu_lowlevel_init
/* Turn on both LEDs */
bl red_LED_on
bl green_LED_on
/* Configure flash wait states before we switch to the PLL */
bl flash_cfg
/* Set up PLL */
bl pll_cfg
/* Turn off the Green LED and leave the Red LED on */
bl green_LED_off
/* Setup SDRAM */
bl sdram_cfg
/* Turn on Green LED, Turn off the Red LED */
bl green_LED_on
bl red_LED_off
/* switch to async mode */
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #0xc0000000
mcr p15, 0, r0, c1, c0, 0
/*
* FIXME: This is suitable for the edb9301, the
* only ep93xx board we have in our defconfigs.
* Other boards need different values here.
*/
mov r0, #0x05000000
mov r1, #SZ_8M
mov r2, #0
b barebox_arm_entry
|
a3f/bareDOOM
| 8,577
|
arch/arm/mach-samsung/lowlevel-s3c24x0.S
|
/*
* (C) Copyright 2009
* Juergen Beisert <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <config.h>
#include <mach/s3c-iomap.h>
.section ".text_bare_init.s3c24x0_disable_wd","ax"
/*
* Disable the watchdog, else it continues to bark
*/
.globl s3c24x0_disable_wd
s3c24x0_disable_wd:
ldr r0, =S3C_WATCHDOG_BASE
mov r1, #0x0
str r1, [r0]
mov pc, lr
/*
* S3C2410 PLL configuration
* -------------------------
*
* Basic frequency calculation
*
* m * REFclk s = SDIV
* PLLclk = ------------ p = PDIV + 2
* p * 2^s m = MDIV + 8
*
* After reset the PLL of the s3c2410 processor uses:
*
* MPLL UPLL
* MDIV 0x5c 0x28
* PDIV 0x08 0x08
* SDIV 0x0 0x0
*
* 100 * 12MHz 1200MHz
* MPLLclk = ------------- = -------- = 120MHz
* 10 * 2^0 10
*
* 48 * 12MHz 576MHz
* UPLLclk = ------------- = -------- = 57,6MHz
* 10 * 2^0 10
*
* Note: Do not use "r10" here in this code
*/
#ifdef CONFIG_S3C_PLL_INIT
.section ".text_bare_init.s3c24x0_pll_init","ax"
.globl s3c24x0_pll_init
s3c24x0_pll_init:
mov r0, #S3C_CLOCK_POWER_BASE
/* configure internal clock ratio */
mov r1, #BOARD_SPECIFIC_CLKDIVN
str r1, [r0, #20]
/* enable all devices on this chip */
mov r1, #0xFFFFFFF0
str r1, [r0, #12]
/* ??????? */
#ifdef CONFIG_CPU_S3C2440
mov r1, #0xFFFFFFFF
#endif
#ifdef CONFIG_CPU_S3C2410
mov r1, #0x00FFFFFF
#endif
str r1, [r0, #0]
#ifdef CONFIG_CPU_S3C2440
/*
* Most of the time HDIVN is not 0, so we must use the
* asynchronous bus mode (refer datasheet "Clock and Power Management")
*/
mrc p15, 0, r1, c1, c0, 0
orr r1, r1, #0xc0000000
mcr p15, 0, r1, c1, c0, 0
#endif
/* configure UPLL */
ldr r1, =BOARD_SPECIFIC_UPLL
str r1, [r0, #8]
nop
nop
nop
nop
nop
nop
nop
nop
/* configure MPLL */
ldr r1, =BOARD_SPECIFIC_MPLL
str r1, [r0, #4]
nop
nop
nop
nop
nop
nop
nop
nop
mov pc, lr
#endif
/**
@page dev_s3c24xx_pll_handling PLL clock handling
To control the speed of your machine the PLLs must be reconfigured after reset.
For example, the S3C2410 CPU wakes up after reset at 120MHz main PLL speed,
shared with all other system-on-chip components. Most of the time this
configuration is too slow for the CPU and too fast for the other components.
PLL reprogramming can be done in a machine-specific manner very early when
the CONFIG_S3C_PLL_INIT and CONFIG_MACH_HAS_LOWLEVEL_INIT symbols are
defined. In this case the board must provide a board_init_lowlevel() assembler
function and call the s3c24x0_pll_init() assembler function from it.
If s3c24x0_pll_init() is called, a few further symbols must be defined to
set up the correct values for the machine.
Define in the machine specific config.h the following symbols:
- S3C24XX_CLOCK_REFERENCE with the frequency in Hz of your reference crystal.
- BOARD_SPECIFIC_CLKDIVN with the value for the main clock ratio register (CLKDIVN)
- BOARD_SPECIFIC_MPLL with the value for the main PLL setup register
- BOARD_SPECIFIC_UPLL with the value for the USB PLL setup register
@note Valid values for the PLL settings can be found in the CPU manual.
@par Background: PLL frequency calculation for the S3C2410 CPU (both PLLs) and S3C2440 (UPLL only)
@f[
f_{PLL} = \frac{m * f_{Ref}}{p * 2^s}
@f]
With m = MDIV + 8, p = PDIV + 2 and s = SDIV.
@par Background: PLL frequency calculation for the S3C2440 CPU (MPLL only)
@f[
f_{PLL} = \frac{2 * m * f_{Ref}}{p * 2^s}
@f]
With m = MDIV + 8, p = PDIV + 2 and s = SDIV.
@note This routine can be used for the S3C2410 and the S3C2440 CPU.
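As a quick cross-check of the formulas above, here is a minimal C sketch (a
hypothetical helper, not part of barebox) that computes the S3C2410-style PLL
output frequency from the raw MDIV/PDIV/SDIV register fields:
@code
/* Hypothetical helper: S3C2410-style PLL output frequency in Hz.
 * mdiv/pdiv/sdiv are the raw register fields, fref is the crystal in Hz. */
static unsigned long s3c24x0_pll_hz(unsigned int mdiv, unsigned int pdiv,
				    unsigned int sdiv, unsigned long fref)
{
	unsigned long long m = mdiv + 8;
	unsigned long long p = pdiv + 2;

	return (unsigned long)((m * fref) / (p << sdiv));
}

/* Example: the reset defaults MDIV=0x5c, PDIV=0x08, SDIV=0 with a 12MHz
 * crystal give (0x5c + 8) * 12000000 / ((0x08 + 2) << 0) = 120MHz. */
@endcode
For the S3C2440 MPLL the result has to be doubled, as noted above.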
*/
/* ----------------------------------------------------------------------- */
#ifdef CONFIG_S3C_SDRAM_INIT
.section ".text_bare_init.s3c24x0_sdram_init","ax"
.globl s3c24x0_sdram_init
s3c24x0_sdram_init:
adr r0, SDRAMDATA /* get the current relative address of the table */
mov r1, #S3C_MEMCTL_BASE
mov r2, #6 /* the table contains 6 more entries after BWSCON */
ldr r3, [r0], #4 /* write BWSCON first */
str r3, [r1], #0x1c /* post-increment points r1 at the bank6 registers */
/*
* Initializing the SDRAM controller is very simple:
* Just write some useful values into the SDRAM controller.
*/
0: ldr r3, [r0], #4
str r3, [r1], #4
subs r2, r2, #1
bne 0b
mov pc, lr
SDRAMDATA:
.word BOARD_SPECIFIC_BWSCON
.word BOARD_SPECIFIC_BANKCON6
.word BOARD_SPECIFIC_BANKCON7
.word BOARD_SPECIFIC_REFRESH
.word BOARD_SPECIFIC_BANKSIZE
.word BOARD_SPECIFIC_MRSRB6
.word BOARD_SPECIFIC_MRSRB7
#endif
/**
@page dev_s3c24xx_sdram_handling SDRAM controller initialisation
The SDRAM controller is very simple and its initialisation requires only a
few steps; barebox provides a generic routine for them.
Enable CONFIG_S3C_SDRAM_INIT and CONFIG_MACH_HAS_LOWLEVEL_INIT to be able
to call the generic s3c24x0_sdram_init() assembler function from within the
machine-specific board_init_lowlevel() assembler function.
To use the s3c24x0_sdram_init() assembler function, a few symbols must be
defined to set up correct values for the machine (see the example after the
following list). Define the following symbols in the machine-specific config.h:
- BOARD_SPECIFIC_BWSCON with the values for SDRAM banks 6 and 7
- BOARD_SPECIFIC_BANKCON6 with the value for the BANKCON6 register
- BOARD_SPECIFIC_BANKCON7 with the value for the BANKCON7 register
- BOARD_SPECIFIC_REFRESH with the value for the REFRESH register
- BOARD_SPECIFIC_BANKSIZE with the value for the BANKSIZE register
- BOARD_SPECIFIC_MRSRB6 with the value for the MRSRB6 register
- BOARD_SPECIFIC_MRSRB7 with the value for the MRSRB7 register
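A minimal sketch of such a config.h fragment is shown below. The values are
illustrative placeholders only and must be derived from the memory layout and
the SDRAM datasheet of the actual board:
@code
/* Illustrative placeholders only -- not valid for any real board. */
#define BOARD_SPECIFIC_BWSCON		0x22000000
#define BOARD_SPECIFIC_BANKCON6		0x00018005
#define BOARD_SPECIFIC_BANKCON7		0x00018005
#define BOARD_SPECIFIC_REFRESH		0x008404f3
#define BOARD_SPECIFIC_BANKSIZE		0x000000b1
#define BOARD_SPECIFIC_MRSRB6		0x00000030
#define BOARD_SPECIFIC_MRSRB7		0x00000030
@endcode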
*/
/* ----------------------------------------------------------------------- */
#ifdef CONFIG_S3C_NAND_BOOT
.section ".text_bare_init.s3c24x0_nand_boot","ax"
.globl s3c24x0_nand_boot
s3c24x0_nand_boot:
/*
* In the case of NOR boot we are running from the same address space.
* Detect this case to handle it correctly.
*/
mov r1, #S3C_MEMCTL_BASE
ldr r3, [r1]
and r3, r3, #0x6
cmp r3, #0x0 /* check for NAND case */
beq 2f
mov pc, lr /* NOR case: nothing to do here */
2: ldr sp, =_text /* set up a temporary stack in SDRAM */
/*
 * We still run at a location we are not linked to, but we keep running
 * from the internal SRAM, as this may speed up the boot.
*/
push {lr}
bl nand_boot
pop {lr}
/*
* Adjust the return address to the correct address in SDRAM
*/
ldr r1, =_text
add lr, lr, r1
mov pc, lr
#endif
/**
@page dev_s3c24xx_nandboot_handling Booting from NAND
To be able to boot from NAND memory only, enable the S3C24x0 NAND driver. Also
enable CONFIG_S3C_NAND_BOOT and CONFIG_MACH_HAS_LOWLEVEL_INIT to be
able to call the s3c24x0_nand_boot() assembler routine from within the
machine-specific board_init_lowlevel() assembler function.
@note This routine assumes an already working SDRAM controller and
an initialized stack pointer.
@note Basically this routine runs from inside the internal SRAM. After loading
the whole barebox image from the NAND flash memory into the SDRAM it adjusts
the link register to the final SDRAM address and returns.
@note In NAND boot mode, ECC is not checked, so the first x KBytes used
by barebox must be free of bit errors.
Because the code that loads the whole barebox from NAND must fit into the
first 4kiB of the barebox image, the shrunken NAND driver is very
minimalistic. The NAND access timing is set up in a safe manner, which means
the slowest possible values are used. If you want to increase the speed,
define BOARD_DEFAULT_NAND_TIMING to a valid setting for the NFCONF register
and add it to your board-specific config.h. Refer to the S3C24x0 datasheet
for further details. The macro #CALC_NFCONF_TIMING could help to calculate
the register setting in a hardware-independent manner.
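A hedged example of such an override is shown below; the numeric value is a
placeholder only and has to be calculated for the actual NAND chip and HCLK
(for instance with the #CALC_NFCONF_TIMING macro mentioned above):
@code
/* Placeholder only -- calculate a real NFCONF timing value for your board. */
#define BOARD_DEFAULT_NAND_TIMING	0x0333
@endcode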
@note The regular NAND driver uses a platform data structure to define the
NAND access timings.
@note It's still possible to boot this image from NOR memory. If this routine
detects that it is running from NOR instead of the internal SRAM, it skips any
loading and returns immediately.
*/
|
a3f/bareDOOM
| 2,241
|
arch/arm/mach-pxa/sleep.S
|
/*
 * Low-level PXA250/210 sleep/wakeup support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
*
* Adapted for PXA by Nicolas Pitre:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/pxa2xx-regs.h>
#define MDREFR_KDIV 0x200a4000 // all banks
#define CCCR_SLEEP 0x00000107 // L=7 2N=2 A=0 PPDIS=0 CPDIS=0
#define UNCACHED_PHYS_0 0
.text
#if (defined CONFIG_ARCH_PXA27X || defined CONFIG_ARCH_PXA25X)
/*
 * pxa_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
*/
ENTRY(pxa_suspend)
@ Put the processor to sleep
@ (also workaround for sighting 28071)
@ prepare value for sleep mode
mov r1, r0 @ sleep mode
@ Intel PXA270 Specification Update notes problems sleeping
@ with core operating above 91 MHz
@ (see Errata 50, ...processor does not exit from sleep...)
ldr r6, =CCCR
ldr r8, [r6] @ keep original value for resume
ldr r7, =CCCR_SLEEP @ prepare CCCR sleep value
mov r0, #0x2 @ prepare value for CLKCFG
@ align execution to a cache line
b pxa_cpu_do_suspend
#endif
.ltorg
.align 5
pxa_cpu_do_suspend:
@ All needed values are now in registers.
@ These last instructions should be in cache
@ initiate the frequency change...
str r7, [r6]
mcr p14, 0, r0, c6, c0, 0
@ restore the original cpu speed value for resume
str r8, [r6]
@ need 6 13-MHz cycles before changing PWRMODE
@ just set frequency to 91-MHz... 6*91/13 = 42
mov r0, #42
10: subs r0, r0, #1
bne 10b
@ Do not reorder...
@ Intel PXA270 Specification Update notes problems performing
@ external accesses after SDRAM is put in self-refresh mode
@ (see Errata 39 ...hangs when entering self-refresh mode)
@ enter sleep mode
mcr p14, 0, r1, c7, c0, 0 @ PWRMODE
20: b 20b @ loop waiting for sleep
/*
 * pxa3xx_suspend() - forces CPU into sleep state
*/
ENTRY(pxa3xx_suspend)
mcr p14, 0, r0, c7, c0, 0 @ enter sleep
20: b 20b @ waiting for sleep
|
a3f/bareDOOM
| 13,320
|
arch/arm/crypto/sha1-armv4-large.S
|
#define __ARM_ARCH__ __LINUX_ARM_ARCH__
@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ sha1_block procedure for ARMv4.
@
@ January 2007.
@ Size/performance trade-off
@ ====================================================================
@ impl		size in bytes	comp cycles[*]	measured performance
@ ====================================================================
@ thumb		304		3212		4420
@ armv4-small	392/+29%	1958/+64%	2250/+96%
@ armv4-compact	740/+89%	1552/+26%	1840/+22%
@ armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
@ full unroll	~5100/+260%	~1260/+4%	~1300/+5%
@ ====================================================================
@ thumb = same as 'small' but in Thumb instructions[**] and
@ with recurring code in two private functions;
@ small = detached Xload/update, loops are folded;
@ compact = detached Xload/update, 5x unroll;
@ large = interleaved Xload/update, 5x unroll;
@ full unroll = interleaved Xload/update, full unroll, estimated[!];
@
@ [*] Manually counted instructions in "grand" loop body. Measured
@ performance is affected by prologue and epilogue overhead,
@ i-cache availability, branch penalties, etc.
@ [**] While each Thumb instruction is half the size, they are not as
@ diverse as ARM ones: e.g., there are only two arithmetic
@ instructions with 3 arguments, no [fixed] rotate, and addressing
@ modes are limited. As a result it takes more instructions to do
@ the same job in Thumb, therefore the code is never twice as
@ small and is always slower.
@ [***] This is also ~35% better than compiler-generated code. A dual-
@ issue Cortex-A8 core was measured to process an input block in
@ ~990 cycles.
@ August 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 13% improvement on
@ Cortex A8 core and in absolute terms ~870 cycles per input block
@ [or 13.6 cycles per byte].
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.
#include <linux/linkage.h>
.text
.align 2
ENTRY(sha1_block_data_order)
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
ldr r8,.LK_00_19
mov r14,sp
sub sp,sp,#15*4
mov r5,r5,ror#30
mov r6,r6,ror#30
mov r7,r7,ror#30 @ [6]
.L_00_15:
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r6,r8,r6,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r4,r5 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r6,r8,r6,ror#2 @ E+=K_00_19
eor r10,r4,r5 @ F_xx_xx
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r3,r10,ror#2
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r5,r8,r5,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r3,r4 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r5,r8,r5,ror#2 @ E+=K_00_19
eor r10,r3,r4 @ F_xx_xx
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r7,r10,ror#2
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r4,r8,r4,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r7,r3 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r4,r8,r4,ror#2 @ E+=K_00_19
eor r10,r7,r3 @ F_xx_xx
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r6,r10,ror#2
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r3,r8,r3,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r6,r7 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r3,r8,r3,ror#2 @ E+=K_00_19
eor r10,r6,r7 @ F_xx_xx
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r5,r10,ror#2
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
sub sp,sp,#25*4
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
ldrb r9,[r1,#3]
ldrb r11,[r1,#1]
add r7,r8,r7,ror#2 @ E+=K_00_19
ldrb r12,[r1],#4
orr r9,r9,r10,lsl#8
eor r10,r5,r6 @ F_xx_xx
orr r9,r9,r11,lsl#16
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
orr r9,r9,r12,lsl#24
#else
ldr r9,[r1],#4 @ handles unaligned
add r7,r8,r7,ror#2 @ E+=K_00_19
eor r10,r5,r6 @ F_xx_xx
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
rev r9,r9 @ byte swap
#endif
#endif
and r10,r4,r10,ror#2
add r7,r7,r9 @ E+=X[i]
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r7,r7,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
add r6,r6,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
add r5,r5,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
add r4,r4,r10 @ E+=F_00_19(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
add r3,r3,r10 @ E+=F_00_19(B,C,D)
ldr r8,.LK_20_39 @ [+15+16*4]
cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
ARM( teq r14,sp ) @ preserve carry
THUMB( mov r11,sp )
THUMB( teq r14,r11 ) @ preserve carry
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
ldr r8,.LK_40_59
sub sp,sp,#20*4 @ [+2]
.L_40_59:
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r7,r8,r7,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r5,r6 @ F_xx_xx
mov r9,r9,ror#31
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r4,r10,ror#2 @ F_xx_xx
and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r6,r8,r6,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r4,r5 @ F_xx_xx
mov r9,r9,ror#31
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r3,r10,ror#2 @ F_xx_xx
and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r5,r8,r5,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r3,r4 @ F_xx_xx
mov r9,r9,ror#31
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r7,r10,ror#2 @ F_xx_xx
and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r4,r8,r4,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r7,r3 @ F_xx_xx
mov r9,r9,ror#31
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r6,r10,ror#2 @ F_xx_xx
and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
ldr r9,[r14,#15*4]
ldr r10,[r14,#13*4]
ldr r11,[r14,#7*4]
add r3,r8,r3,ror#2 @ E+=K_xx_xx
ldr r12,[r14,#2*4]
eor r9,r9,r10
eor r11,r11,r12 @ 1 cycle stall
eor r10,r6,r7 @ F_xx_xx
mov r9,r9,ror#31
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
and r10,r5,r10,ror#2 @ F_xx_xx
and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
cmp r14,sp
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
sub sp,sp,#20*4
cmp sp,#0 @ set carry to denote 60_79
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
.L_done:
add sp,sp,#80*4 @ "deallocate" stack frame
ldmia r0,{r8,r9,r10,r11,r12}
add r3,r8,r3
add r4,r9,r4
add r5,r10,r5,ror#2
add r6,r11,r6,ror#2
add r7,r12,r7,ror#2
stmia r0,{r3,r4,r5,r6,r7}
teq r1,r2
bne .Lloop @ [+18], total 1307
ldmia sp!,{r4-r12,pc}
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
ENDPROC(sha1_block_data_order)
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align 2
|
a3f/bareDOOM
| 2,099
|
arch/arm/lib/pbl.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SPDX-FileCopyrightText: 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix */
#include <linux/sizes.h>
#include <asm-generic/barebox.lds.h>
#include <asm-generic/memory_layout.h>
#ifdef CONFIG_PBL_RELOCATABLE
#define BASE 0x0
#else
#define BASE (TEXT_BASE - SZ_2M)
#endif
#ifdef CONFIG_CPU_32
OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
#else
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
#endif
SECTIONS
{
. = BASE;
.image_start : { *(.__image_start) }
PRE_IMAGE
. = ALIGN(4);
._text : { *(._text) }
.text :
{
_stext = .;
*(.text_head_entry*)
__bare_init_start = .;
*(.text_bare_init*)
__bare_init_end = .;
*(.text*)
}
/* Discard unwind sections if enabled in barebox */
/DISCARD/ : { *(.ARM.ex*) }
BAREBOX_BARE_INIT_SIZE
BAREBOX_PBL_SIZE
. = ALIGN(4);
.rodata : { *(.rodata*) }
.barebox_imd : { BAREBOX_IMD }
_etext = .; /* End of text and rodata section */
. = ALIGN(4);
.data : { *(.data*) }
. = ALIGN(4);
__shasum_start = .;
.shasum : {
KEEP(*(.shasum))
}
__shasum_end = .;
.rel_dyn_start : { *(.__rel_dyn_start) }
#ifdef CONFIG_CPU_32
.rel.dyn : { *(.rel*) }
#else
.rela.dyn : { *(.rela*) }
#endif
.rel_dyn_end : { *(.__rel_dyn_end) }
.__dynsym_start : { *(.__dynsym_start) }
.dynsym : { *(.dynsym) }
.__dynsym_end : { *(.__dynsym_end) }
pbl_code_size = . - BASE;
. = ALIGN(4);
.__bss_start : { *(.__bss_start) }
.bss : { *(.bss*) }
.__bss_stop : { *(.__bss_stop) }
_end = .;
pbl_memory_size = . - BASE;
#if defined(CONFIG_CPU_64) && defined(CONFIG_HABV4)
. = ALIGN(0x1000);
__csf_start = .;
.hab_csf : {
BYTE(0x5a);
. += + 0x1fff;
} = 0x5a
__csf_end = .;
#endif /* CONFIG_CPU_64 && CONFIG_HABV4 */
. = ALIGN(4);
__piggydata_start = .;
.piggydata : {
*(.piggydata)
}
__piggydata_end = .;
.image_end : { KEEP(*(.__image_end)) }
pbl_image_size = . - BASE;
_barebox_image_size = __image_end - BASE;
_barebox_pbl_size = __bss_start - BASE;
}
|
a3f/bareDOOM
| 2,463
|
arch/arm/cpu/hyp.S
|
#include <linux/linkage.h>
#include <asm/system.h>
#include <asm/opcodes-virt.h>
#include <init.h>
.arch_extension sec
.arch_extension virt
__BARE_INIT
.data
.align 2
ENTRY(__boot_cpu_mode)
.long 0
.text
ENTRY(__hyp_install)
mrs r12, cpsr
and r12, r12, #MODE_MASK
@ Save the initial CPU state
adr r0, .L__boot_cpu_mode_offset
ldr r1, [r0]
str r12, [r0, r1]
cmp r12, #HYP_MODE
movne pc, lr @ give up if the CPU is not in HYP mode
@ Now install the hypervisor stub:
adr r12, __hyp_vectors
mcr p15, 4, r12, c12, c0, 0 @ set hypervisor vector base (HVBAR)
@ Disable all traps, so we don't get any nasty surprise
mov r12, #0
mcr p15, 4, r12, c1, c1, 0 @ HCR
mcr p15, 4, r12, c1, c1, 2 @ HCPTR
mcr p15, 4, r12, c1, c1, 3 @ HSTR
THUMB( orr r12, #(1 << 30) ) @ HSCTLR.TE
mcr p15, 4, r12, c1, c0, 0 @ HSCTLR
mrc p15, 4, r12, c1, c1, 1 @ HDCR
and r12, #0x1f @ Preserve HPMN
mcr p15, 4, r12, c1, c1, 1 @ HDCR
@ Make sure NS-SVC is initialised appropriately
mrc p15, 0, r12, c1, c0, 0 @ SCTLR
orr r12, #(1 << 5) @ CP15 barriers enabled
bic r12, #(3 << 7) @ Clear SED/ITD for v8 (RES0 for v7)
bic r12, #(3 << 19) @ WXN and UWXN disabled
mcr p15, 0, r12, c1, c0, 0 @ SCTLR
mrc p15, 0, r12, c0, c0, 0 @ MIDR
mcr p15, 4, r12, c0, c0, 0 @ VPIDR
mrc p15, 0, r12, c0, c0, 5 @ MPIDR
mcr p15, 4, r12, c0, c0, 5 @ VMPIDR
bx lr
ENDPROC(__hyp_install)
ENTRY(armv7_hyp_install)
mov r2, lr
bl __hyp_install
/* set the cpu to SVC32 mode, mask irq and fiq */
mrs r12, cpsr
eor r12, r12, #HYP_MODE
tst r12, #MODE_MASK
bic r12, r12, #MODE_MASK
orr r12, r12, #(PSR_I_BIT | PSR_F_BIT | SVC_MODE)
THUMB( orr r12, r12, #PSR_T_BIT )
bne 1f
orr r12, r12, #PSR_A_BIT
adr lr, 2f
msr spsr_cxsf, r12
__MSR_ELR_HYP(14)
__ERET
1: msr cpsr_c, r12
2:
mov pc, r2
ENDPROC(armv7_hyp_install)
ENTRY(armv7_switch_to_hyp)
mov r0, lr
mov r1, sp @ save SVC copy of LR and SP
isb
hvc #0 @ for older asm: .byte 0x70, 0x00, 0x40, 0xe1
mov sp, r1
mov lr, r0 @ restore SVC copy of LR and SP
bx lr
ENDPROC(armv7_switch_to_hyp)
.align 2
.L__boot_cpu_mode_offset:
.long __boot_cpu_mode - .
/* The HYP trap is crafted to match armv7_switch_to_hyp() */
__hyp_do_trap:
mov lr, r0
mov sp, r1
bx lr
ENDPROC(__hyp_do_trap)
.align 5
__hyp_vectors:
__hyp_reset: W(b) .
__hyp_und: W(b) .
__hyp_svc: W(b) .
__hyp_pabort: W(b) .
__hyp_dabort: W(b) .
__hyp_trap: W(b) __hyp_do_trap
__hyp_irq: W(b) .
__hyp_fiq: W(b) .
ENDPROC(__hyp_vectors)
|
a3f/bareDOOM
| 2,775
|
arch/arm/cpu/exceptions_64.S
|
/*
* (C) Copyright 2013
* David Feng <fenghua@phytium.com.cn>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <asm/ptrace.h>
#include <asm/assembler64.h>
#include <linux/linkage.h>
/*
* Enter Exception.
* This will save the processor state that is ELR/X0~X30
* to the stack frame.
*/
.macro exception_entry
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
stp x17, x18, [sp, #-16]!
stp x15, x16, [sp, #-16]!
stp x13, x14, [sp, #-16]!
stp x11, x12, [sp, #-16]!
stp x9, x10, [sp, #-16]!
stp x7, x8, [sp, #-16]!
stp x5, x6, [sp, #-16]!
stp x3, x4, [sp, #-16]!
stp x1, x2, [sp, #-16]!
switch_el x11, 3f, 2f, 1f
3: mrs x1, esr_el3
mrs x2, elr_el3
mrs x3, far_el3
b 0f
2: mrs x1, esr_el2
mrs x2, elr_el2
mrs x3, far_el2
b 0f
1: mrs x1, esr_el1
mrs x2, elr_el1
mrs x3, far_el1
0:
stp x2, x0, [sp, #-16]!
mov x0, sp
.endm
/*
* Exception vectors.
*/
.align 11
.globl vectors
vectors:
.align 7
b _do_bad_sync /* Current EL Synchronous Thread */
.align 7
b _do_bad_irq /* Current EL IRQ Thread */
.align 7
b _do_bad_fiq /* Current EL FIQ Thread */
.align 7
b _do_bad_error /* Current EL Error Thread */
.align 7
b _do_sync /* Current EL Synchronous Handler */
.align 7
b _do_irq /* Current EL IRQ Handler */
.align 7
b _do_fiq /* Current EL FIQ Handler */
.align 7
b _do_error /* Current EL Error Handler */
_do_bad_sync:
exception_entry
bl do_bad_sync
b exception_exit
_do_bad_irq:
exception_entry
bl do_bad_irq
b exception_exit
_do_bad_fiq:
exception_entry
bl do_bad_fiq
b exception_exit
_do_bad_error:
exception_entry
bl do_bad_error
b exception_exit
_do_sync:
exception_entry
mov x2, x3
bl do_sync
b exception_exit
_do_irq:
exception_entry
bl do_irq
b exception_exit
_do_fiq:
exception_entry
bl do_fiq
b exception_exit
_do_error:
exception_entry
bl do_error
b exception_exit
exception_exit:
ldp x2, x0, [sp],#16
switch_el x11, 3f, 2f, 1f
3: msr elr_el3, x2
b 0f
2: msr elr_el2, x2
b 0f
1: msr elr_el1, x2
0:
ldp x1, x2, [sp],#16
ldp x3, x4, [sp],#16
ldp x5, x6, [sp],#16
ldp x7, x8, [sp],#16
ldp x9, x10, [sp],#16
ldp x11, x12, [sp],#16
ldp x13, x14, [sp],#16
ldp x15, x16, [sp],#16
ldp x17, x18, [sp],#16
ldp x19, x20, [sp],#16
ldp x21, x22, [sp],#16
ldp x23, x24, [sp],#16
ldp x25, x26, [sp],#16
ldp x27, x28, [sp],#16
ldp x29, x30, [sp],#16
eret
.section .data
.align 4
.global arm_ignore_data_abort
arm_ignore_data_abort:
.word 0 /* When != 0 data aborts are ignored */
.global arm_data_abort_occurred
arm_data_abort_occurred:
.word 0 /* set != 0 by the data abort handler */
abort_stack:
.space 8
|
a3f/bareDOOM
| 4,145
|
arch/arm/cpu/exceptions.S
|
#include <config.h>
#include <linux/linkage.h>
#include <asm-generic/memory_layout.h>
/*
*************************************************************************
*
* Interrupt handling
*
*************************************************************************
*/
@
@ IRQ stack frame.
@
#define S_FRAME_SIZE 72
#define S_OLD_R0 68
#define S_PSR 64
#define S_PC 60
#define S_LR 56
#define S_SP 52
#define S_IP 48
#define S_FP 44
#define S_R10 40
#define S_R9 36
#define S_R8 32
#define S_R7 28
#define S_R6 24
#define S_R5 20
#define S_R4 16
#define S_R3 12
#define S_R2 8
#define S_R1 4
#define S_R0 0
#define MODE_SVC 0x13
/*
* use bad_save_user_regs for abort/prefetch/undef/swi ...
* use irq_save_user_regs / irq_restore_user_regs for IRQ/FIQ handling
*/
.macro bad_save_user_regs
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0-r12
ldr r2, =abort_stack
ldmia r2, {r2 - r3} @ get pc, cpsr
add r0, sp, #S_FRAME_SIZE @ restore sp_SVC
add r5, sp, #S_SP
mov r1, lr
stmia r5, {r0 - r3} @ save sp_SVC, lr_SVC, pc, cpsr
mov r0, sp
.endm
.macro get_bad_stack
ldr r13, =abort_stack
str lr, [r13] @ save caller lr / spsr
mrs lr, spsr
str lr, [r13, #4]
mov r13, #MODE_SVC @ prepare SVC-Mode
@ msr spsr_c, r13
msr spsr, r13
mov lr, pc
movs pc, lr
.endm
.macro try_data_abort
ldr r13, =arm_ignore_data_abort @ check try mode
ldr r13, [r13]
cmp r13, #0
beq do_abort_\@
ldr r13, =arm_data_abort_occurred
str r13, [r13]
mrs r13, spsr @ read saved CPSR
tst r13, #1<<5 @ check Thumb mode
subeq lr, #4 @ next ARM instr
subne lr, #6 @ next Thumb instr
movs pc, lr
do_abort_\@:
.endm
/*
* exception handlers
*/
.section ".text","ax"
.arm
.align 5
undefined_instruction:
get_bad_stack
bad_save_user_regs
bl do_undefined_instruction
.align 5
software_interrupt:
get_bad_stack
bad_save_user_regs
bl do_software_interrupt
.align 5
prefetch_abort:
get_bad_stack
bad_save_user_regs
bl do_prefetch_abort
.align 5
data_abort:
try_data_abort
get_bad_stack
bad_save_user_regs
bl do_data_abort
.align 5
irq:
get_bad_stack
bad_save_user_regs
bl do_irq
.align 5
fiq:
get_bad_stack
bad_save_user_regs
bl do_fiq
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_ARM_EXCEPTIONS)
/*
* With relocatable binary support the runtime exception vectors do not match
* the addresses in the binary. We have to fix them up during runtime
*/
ENTRY(arm_fixup_vectors)
ldr r0, =undefined_instruction
ldr r1, =_undefined_instruction
str r0, [r1]
ldr r0, =software_interrupt
ldr r1, =_software_interrupt
str r0, [r1]
ldr r0, =prefetch_abort
ldr r1, =_prefetch_abort
str r0, [r1]
ldr r0, =data_abort
ldr r1, =_data_abort
str r0, [r1]
ldr r0, =irq
ldr r1, =_irq
str r0, [r1]
ldr r0, =fiq
ldr r1, =_fiq
str r0, [r1]
bx lr
ENDPROC(arm_fixup_vectors)
#endif
.section .text_exceptions
.globl extable
extable:
1: b 1b /* barebox_arm_reset_vector */
#ifdef CONFIG_ARM_EXCEPTIONS
ldr pc, _undefined_instruction /* undefined instruction */
ldr pc, _software_interrupt /* software interrupt (SWI) */
ldr pc, _prefetch_abort /* prefetch abort */
ldr pc, _data_abort /* data abort */
1: b 1b /* (reserved) */
ldr pc, _irq /* irq (interrupt) */
ldr pc, _fiq /* fiq (fast interrupt) */
.globl _undefined_instruction
_undefined_instruction: .word undefined_instruction
.globl _software_interrupt
_software_interrupt: .word software_interrupt
.globl _prefetch_abort
_prefetch_abort: .word prefetch_abort
.globl _data_abort
_data_abort: .word data_abort
.globl _irq
_irq: .word irq
.globl _fiq
_fiq: .word fiq
#else
1: b 1b /* undefined instruction */
1: b 1b /* software interrupt (SWI) */
1: b 1b /* prefetch abort */
1: b 1b /* data abort */
1: b 1b /* (reserved) */
1: b 1b /* irq (interrupt) */
1: b 1b /* fiq (fast interrupt) */
#endif
.section .data
.align 4
.global arm_ignore_data_abort
arm_ignore_data_abort:
.word 0 /* When != 0 data aborts are ignored */
.global arm_data_abort_occurred
arm_data_abort_occurred:
.word 0 /* set != 0 by the data abort handler */
abort_stack:
.space 8
|
a3f/bareDOOM
| 3,665
|
arch/arm/cpu/cache-armv6.S
|
#include <linux/linkage.h>
#include <init.h>
#define HARVARD_CACHE
#define CACHE_LINE_SIZE 32
#define D_CACHE_LINE_SIZE 32
.section .text.v6_mmu_cache_on
ENTRY(v6_mmu_cache_on)
mov r12, lr
#ifdef CONFIG_MMU
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mov pc, r12
ENDPROC(v6_mmu_cache_on)
__common_mmu_cache_on:
orr r0, r0, #0x000d @ Write buffer, mmu
b 1f
.align 5 @ cache line aligned
1: mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back to
sub pc, lr, r0, lsr #32 @ properly flush pipeline
.section .text.v6_mmu_cache_off
ENTRY(v6_mmu_cache_off)
#ifdef CONFIG_MMU
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
#endif
mov pc, lr
.section .text.v6_mmu_cache_flush
ENTRY(v6_mmu_cache_flush)
mov r1, #0
mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mov pc, lr
ENDPROC(v6_mmu_cache_flush)
/*
* v6_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
.section .text.v6_dma_inv_range
ENTRY(v6_dma_inv_range)
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
#else
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
ENDPROC(v6_dma_inv_range)
/*
* v6_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
.section .text.v6_dma_clean_range
ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcr p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
ENDPROC(v6_dma_clean_range)
/*
* v6_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
.section .text.v6_dma_flush_range
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
ENDPROC(v6_dma_flush_range)
|
a3f/bareDOOM
| 3,032
|
arch/arm/cpu/cache-armv5.S
|
#include <linux/linkage.h>
#include <init.h>
#define CACHE_DLINESIZE 32
.section .text.v5_mmu_cache_on
ENTRY(v5_mmu_cache_on)
mov r12, lr
#ifdef CONFIG_MMU
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mov pc, r12
ENDPROC(v5_mmu_cache_on)
__common_mmu_cache_on:
orr r0, r0, #0x000d @ Write buffer, mmu
b 1f
.align 5 @ cache line aligned
1: mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back to
sub pc, lr, r0, lsr #32 @ properly flush pipeline
.section .text.v5_mmu_cache_off
ENTRY(v5_mmu_cache_off)
#ifdef CONFIG_MMU
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
#endif
mov pc, lr
ENDPROC(v5_mmu_cache_off)
.section .text.v5_mmu_cache_flush
ENTRY(v5_mmu_cache_flush)
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
bne 1b
mcr p15, 0, r0, c7, c5, 0 @ flush I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENDPROC(v5_mmu_cache_flush)
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
.section .text.v5_dma_inv_range
ENTRY(v5_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
.section .text.v5_dma_clean_range
ENTRY(v5_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.section .text.v5_dma_flush_range
ENTRY(v5_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
|
a3f/bareDOOM
| 2,288
|
arch/arm/cpu/lowlevel.S
|
#include <linux/linkage.h>
#include <init.h>
#include <asm/system.h>
#include <asm/opcodes-virt.h>
.section ".text_bare_init_","ax"
ENTRY(arm_cpu_lowlevel_init)
/* save lr, since it may be banked away with a processor mode change */
mov r2, lr
#ifdef CONFIG_CPU_32v7
/* careful: the hyp install corrupts r0 and r1 */
bl __hyp_install
#endif
/* set the cpu to SVC32 mode, mask irq and fiq */
mrs r12, cpsr
eor r12, r12, #HYP_MODE
tst r12, #MODE_MASK
bic r12, r12, #MODE_MASK
orr r12, r12, #(PSR_I_BIT | PSR_F_BIT | SVC_MODE)
THUMB( orr r12, r12, #PSR_T_BIT )
bne 1f
orr r12, r12, #PSR_A_BIT
adr lr, 2f
msr spsr_cxsf, r12
__MSR_ELR_HYP(14)
__ERET
1: msr cpsr_c, r12
2:
#if __LINUX_ARM_ARCH__ >= 6
/*
* ICIALLU: Invalidate all instruction caches to PoU,
* includes flushing of branch predictors.
* Even if the i-cache is off it might contain stale entries
* that are better discarded before enabling the cache.
 * Architecturally this is even possible after a cold reset.
*/
mcr p15, 0, r12, c7, c5, 0
/* DSB, ensure completion of the invalidation */
mcr p15, 0, r12, c7, c10, 4
/*
* ISB, ensure instruction fetch path is in sync.
* Note that the ARM Architecture Reference Manual, ARMv7-A and ARMv7-R
* edition (ARM DDI 0406C.c) doesn't define this instruction in the
* ARMv6 part (D12.7.10). It only has: "Support of additional
* operations is IMPLEMENTATION DEFINED".
* But an earlier version of the ARMARM (ARM DDI 0100I) does define it
* as "Flush prefetch buffer (PrefetchFlush)".
*/
mcr p15, 0, r12, c7, c5, 4
#endif
/* disable MMU stuff and data/unified caches */
mrc p15, 0, r12, c1, c0, 0 /* SCTLR */
bic r12, r12, #(CR_M | CR_C | CR_B)
bic r12, r12, #(CR_S | CR_R | CR_V)
#ifndef CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND
/* enable instruction cache */
orr r12, r12, #CR_I
#endif
#if __LINUX_ARM_ARCH__ >= 6
orr r12, r12, #CR_U
bic r12, r12, #CR_A
#else
orr r12, r12, #CR_A
#endif
#ifdef __ARMEB__
orr r12, r12, #CR_B
#endif
mcr p15, 0, r12, c1, c0, 0 /* SCTLR */
mov pc, r2
ENDPROC(arm_cpu_lowlevel_init)
ENTRY(cortex_a7_lowlevel_init)
mrc p15, 0, r12, c1, c0, 1
orr r12, r12, #(1 << 6) /* Enable SMP for cortex-a7 to make caches work */
mcr p15, 0, r12, c1, c0, 1
mov pc, lr
ENDPROC(cortex_a7_lowlevel_init)
|
a3f/bareDOOM
| 1,471
|
arch/arm/cpu/setupc_64.S
|
#include <linux/linkage.h>
#include <asm/sections.h>
.section .text.setupc
/*
* setup_c: clear bss
*/
ENTRY(setup_c)
mov x15, x30
ldr x0, =__bss_start
mov x1, #0
ldr x2, =__bss_stop
sub x2, x2, x0
bl memset /* clear bss */
mov x30, x15
ret
ENDPROC(setup_c)
/*
* void relocate_to_adr(unsigned long targetadr)
*
* Copy binary to targetadr, relocate code and continue
* executing at new address.
*/
.section .text.relocate_to_adr
/* x0: target address */
#ifdef __PBL__
ENTRY(relocate_to_adr_full)
ldr x2, =__piggydata_end
b 1f
#endif
ENTRY(relocate_to_adr)
ldr x2, =__bss_start
b 1f
1:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
mov x19, x30
mov x21, x0
bl get_runtime_offset
mov x5, x0
ldr x0, =_text
mov x20, x0
add x1, x0, x5 /* x1: from address */
cmp x1, x21 /* already at correct address? */
beq 1f /* yes, skip copy to new address */
sub x2, x2, x0 /* x2: size */
mov x0, x21 /* x0: target */
/* adjust return address */
sub x19, x19, x1 /* sub address where we are actually running */
add x19, x19, x0 /* add address where we are going to run */
bl memcpy /* copy binary */
bl sync_caches_for_execution
mov x0,#0
ic ivau, x0 /* flush icache */
ldr x0,=1f
sub x0, x0, x20
add x0, x0, x21
br x0 /* jump to relocated address */
1:
bl relocate_to_current_adr /* relocate binary */
mov x30, x19
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
ENDPROC(relocate_to_adr)
|
a3f/bareDOOM
| 1,207
|
arch/arm/cpu/smccc-call.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2015 Linaro Limited */
#include <linux/linkage.h>
#include <asm/unwind.h>
.arch_extension sec
.arch_extension virt
.arm
/*
* Wrap c macros in asm macros to delay expansion until after the
* SMCCC asm macro is expanded.
*/
.macro SMCCC_SMC
smc #0
.endm
.macro SMCCC_HVC
hvc #0
.endm
.macro SMCCC instr
UNWIND( .fnstart)
mov r12, sp
push {r4-r7}
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
UNWIND( .fnend)
.endm
/*
* void __smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(__arm_smccc_smc)
/*
* void __smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(__arm_smccc_hvc)
|
a3f/bareDOOM
| 1,827
|
arch/arm/cpu/setupc.S
|
#include <linux/linkage.h>
#include <asm/sections.h>
.section .text.setupc
/*
* setup_c: copy binary to link address, clear bss and
* continue executing at new address.
*
* This function does not return to the address it is
* called from, but to the same location in the copied
* binary.
*/
ENTRY(setup_c)
push {r4, r5}
mov r5, lr
bl get_runtime_offset
subs r4, r0, #0
beq 1f /* skip memcpy if already at correct address */
ldr r0,=_text
ldr r2,=__bss_start
sub r2, r2, r0
add r1, r0, r4
bl __memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
1: ldr r0, =__bss_start
mov r1, #0
ldr r2, =__bss_stop
sub r2, r2, r0
bl __memset /* clear bss */
bl sync_caches_for_execution
sub lr, r5, r4 /* adjust return address to new location */
pop {r4, r5}
mov pc, lr
ENDPROC(setup_c)
/*
* void relocate_to_adr(unsigned long targetadr)
*
* Copy binary to targetadr, relocate code and continue
* executing at new address.
*/
.section .text.relocate_to_adr
ENTRY(relocate_to_adr)
/* r0: target address */
push {r3, r4, r5, r6, r7, r8}
mov r7, lr
mov r6, r0
bl get_runtime_offset
mov r5, r0
ldr r8, =_text
add r1, r8, r5 /* r1: from address */
cmp r1, r6 /* already at correct address? */
beq 1f /* yes, skip copy to new address */
ldr r2, =__bss_start
sub r2, r2, r8 /* r2: size */
mov r0, r6 /* r0: target */
/* adjust return address */
sub r7, r7, r1 /* sub address where we are actually running */
add r7, r7, r0 /* add address where we are going to run */
bl __memcpy /* copy binary */
bl sync_caches_for_execution
ldr r0,=1f
sub r0, r0, r8
add r0, r0, r6
mov pc, r0 /* jump to relocated address */
1:
bl relocate_to_current_adr /* relocate binary */
mov lr, r7
pop {r3, r4, r5, r6, r7, r8}
mov pc, lr
ENDPROC(relocate_to_adr)
|
a3f/bareDOOM
| 4,170
|
arch/arm/cpu/cache-armv8.S
|
/*
* (C) Copyright 2013
* David Feng <fenghua@phytium.com.cn>
*
* This file is based on sample code from ARMv8 ARM.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <linux/linkage.h>
#include <init.h>
/*
* void v8_flush_dcache_level(level)
*
* clean and invalidate one level cache.
*
* x0: cache level
* x1: 0 flush & invalidate, 1 invalidate only
* x2~x9: clobbered
*/
.section .text.v8_flush_dcache_level
ENTRY(v8_flush_dcache_level)
lsl x12, x0, #1
msr csselr_el1, x12 /* select cache level */
isb /* sync change of csselr_el1 */
mrs x6, ccsidr_el1 /* read the new ccsidr_el1 */
and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
add x2, x2, #4 /* x2 <- log2(cache line size) */
mov x3, #0x3ff
and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
clz w5, w3 /* bit position of #ways */
mov x4, #0x7fff
and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
/* x12 <- cache level << 1 */
/* x2 <- line length offset */
/* x3 <- number of cache ways - 1 */
/* x4 <- number of cache sets - 1 */
/* x5 <- bit position of #ways */
loop_set:
mov x6, x3 /* x6 <- working copy of #ways */
loop_way:
lsl x7, x6, x5
orr x9, x12, x7 /* map way and level to cisw value */
lsl x7, x4, x2
orr x9, x9, x7 /* map set number to cisw value */
tbz w1, #0, 1f
dc isw, x9
b 2f
1: dc cisw, x9 /* clean & invalidate by set/way */
2: subs x6, x6, #1 /* decrement the way */
b.ge loop_way
subs x4, x4, #1 /* decrement the set */
b.ge loop_set
ret
ENDPROC(v8_flush_dcache_level)
/*
* void v8_flush_dcache_all(int invalidate_only)
*
* x0: 0 flush & invalidate, 1 invalidate only
*
* clean and invalidate all data cache by SET/WAY.
*/
.section .text.v8_dcache_all
ENTRY(v8_dcache_all)
mov x1, x0
dsb sy
mrs x10, clidr_el1 /* read clidr_el1 */
lsr x11, x10, #24
and x11, x11, #0x7 /* x11 <- loc */
cbz x11, finished /* if loc is 0, exit */
mov x15, x30
mov x0, #0 /* start flush at cache level 0 */
/* x0 <- cache level */
/* x10 <- clidr_el1 */
/* x11 <- loc */
/* x15 <- return address */
loop_level:
lsl x12, x0, #1
add x12, x12, x0 /* x12 <- tripled cache level */
lsr x12, x10, x12
and x12, x12, #7 /* x12 <- cache type */
cmp x12, #2
b.lt skip /* skip if no cache or icache */
bl v8_flush_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
add x0, x0, #1 /* increment cache level */
cmp x11, x0
b.gt loop_level
mov x0, #0
msr csselr_el1, x0 /* restore csselr_el1 */
dsb sy
isb
mov x30, x15
finished:
ret
ENDPROC(v8_dcache_all)
.section .text.v8_flush_dcache_all
ENTRY(v8_flush_dcache_all)
mov x16, x30
mov x0, #0
bl v8_dcache_all
mov x30, x16
ret
ENDPROC(v8_flush_dcache_all)
.section .text.v8_invalidate_dcache_all
ENTRY(v8_invalidate_dcache_all)
mov x16, x30
mov x0, #0x1
bl v8_dcache_all
mov x30, x16
ret
ENDPROC(v8_invalidate_dcache_all)
/*
* void v8_flush_dcache_range(start, end)
*
* clean & invalidate data cache in the range
*
* x0: start address
* x1: end address
*/
.section .text.v8_flush_dcache_range
ENTRY(v8_flush_dcache_range)
mrs x3, ctr_el0
lsr x3, x3, #16
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: dc civac, x0 /* clean & invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
ENDPROC(v8_flush_dcache_range)
.section .text.v8_inv_dcache_range
ENTRY(v8_inv_dcache_range)
mrs x3, ctr_el0
lsr x3, x3, #16
and x3, x3, #0xf
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: dc ivac, x0 /* invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
ENDPROC(v8_inv_dcache_range)
/*
* void v8_invalidate_icache_all(void)
*
* invalidate all tlb entries.
*/
.section .text.v8_invalidate_icache_all
ENTRY(v8_invalidate_icache_all)
ic ialluis
isb sy
ret
ENDPROC(v8_invalidate_icache_all)
.section .text.v8_flush_l3_cache
ENTRY(v8_flush_l3_cache)
mov x0, #0 /* return status as success */
ret
ENDPROC(v8_flush_l3_cache)
.weak v8_flush_l3_cache
|
a3f/bareDOOM
| 1,151
|
arch/arm/cpu/smccc-call_64.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2015 Linaro Limited */
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
.macro SMCCC instr
.cfi_startproc
\instr #0
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
ldr x4, [sp, #8]
cbz x4, 1f /* no quirk structure */
ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
b.ne 1f
str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
1: ret
.cfi_endproc
.endm
/*
* void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_smc)
SMCCC smc
ENDPROC(__arm_smccc_smc)
/*
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
ENTRY(__arm_smccc_hvc)
SMCCC hvc
ENDPROC(__arm_smccc_hvc)
|
a3f/bareDOOM
| 4,084
|
arch/arm/cpu/sm_as.S
|
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm-generic/memory_layout.h>
#include <asm/secure.h>
#include <asm/system.h>
.arch_extension sec
.arch_extension virt
.section ".text","ax"
.arm
.align 5
.globl secure_monitor_init_vectors
secure_monitor_init_vectors:
1: b 1b /* reset */
1: b 1b /* undefined instruction */
b secure_monitor_init /* software interrupt (SWI) */
1: b 1b /* prefetch abort */
1: b 1b /* data abort */
1: b 1b /* (reserved) */
1: b 1b /* irq (interrupt) */
1: b 1b /* fiq (fast interrupt) */
#define CPUID_ARM_GENTIMER_MASK (0xF << CPUID_ARM_GENTIMER_SHIFT)
#define CPUID_ARM_GENTIMER_SHIFT 16
#define CPUID_ARM_VIRT_MASK (0xF << CPUID_ARM_VIRT_SHIFT)
#define CPUID_ARM_VIRT_SHIFT 12
.macro is_cpu_virt_capable tmp
mrc p15, 0, \tmp, c0, c1, 1 @ read ID_PFR1
and \tmp, \tmp, #CPUID_ARM_VIRT_MASK @ mask virtualization bits
cmp \tmp, #(1 << CPUID_ARM_VIRT_SHIFT)
.endm
@ Requires dense and single-cluster CPU ID space
ENTRY(psci_get_cpu_id)
mrc p15, 0, r0, c0, c0, 5 /* read MPIDR */
and r0, r0, #0xff /* return CPU ID in cluster */
bx lr
ENDPROC(psci_get_cpu_id)
ENTRY(secure_monitor_stack_setup)
mrc p15, 0, r0, c0, c0, 5 /* read MPIDR */
and r0, r0, #0xff /* CPU ID => r0 */
@ stack top = __secure_stack_end - (cpuid << ARM_SECURE_STACK_SHIFT)
ldr r1, =__secure_stack_end
sub r0, r1, r0, LSL #ARM_SECURE_STACK_SHIFT
sub r0, r0, #4 @ Save space for target PC
mov sp, r0
bx lr
ENDPROC(secure_monitor_stack_setup)
secure_monitor_init:
mov r3, lr
bl secure_monitor_stack_setup
push {r4-r7}
mov r7, r3
ldr r5, =secure_monitor_vectors @ Switch MVBAR to secure_monitor_vectors
mcr p15, 0, r5, c12, c0, 1
isb
#ifdef CONFIG_MMU
mrc p15, 0, r5, c1, c0, 0
tst r5, #CR_M
beq 1f
bl __mmu_cache_off
1:
#endif
mrc p15, 0, r5, c1, c1, 0 @ read SCR
bic r5, r5, #0x4a @ clear IRQ, EA, nET bits
orr r5, r5, #0x31 @ enable NS, AW, FW bits
@ FIQ preserved for secure mode
mov r6, #SVC_MODE @ default mode is SVC
is_cpu_virt_capable r4
orreq r5, r5, #0x100 @ allow HVC instruction
mcr p15, 0, r5, c1, c1, 0 @ write SCR (with NS bit set)
isb
mrceq p15, 0, r0, c12, c0, 1 @ get MVBAR value
mcreq p15, 4, r0, c12, c0, 0 @ write HVBAR
bne 1f
@ Reset CNTVOFF to 0 before leaving monitor mode
mrc p15, 0, r4, c0, c1, 1 @ read ID_PFR1
ands r4, r4, #CPUID_ARM_GENTIMER_MASK @ test arch timer bits
movne r4, #0
mcrrne p15, 4, r4, r4, c14 @ Reset CNTVOFF to zero
1:
mov lr, r7
mov ip, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT) @ Set A, I and F
tst lr, #1 @ Check for Thumb PC
orrne ip, ip, #PSR_T_BIT @ Set T if Thumb
orr ip, ip, r6 @ Slot target mode in
msr spsr_cxfs, ip @ Set full SPSR
pop {r4-r7}
movs pc, lr @ ERET to non-secure
.align 5
secure_monitor_vectors:
1: b 1b /* reset */
1: b 1b /* undefined instruction */
b secure_monitor /* software interrupt (SWI) */
1: b 1b /* prefetch abort */
1: b 1b /* data abort */
1: b hyp_trap /* (reserved) */
1: b 1b /* irq (interrupt) */
1: b 1b /* fiq (fast interrupt) */
secure_monitor:
push {r4-r7,lr}
@ Switch to secure mode
mrc p15, 0, r7, c1, c1, 0
bic r4, r7, #1
mcr p15, 0, r4, c1, c1, 0
isb
/* r0-r6: Arguments */
sub sp, sp, #4*4 @ allocate result structure on stack
mov r12, sp
push {r4-r6, r12}
#ifdef CONFIG_ARM_PSCI
bl psci_entry
#endif
pop {r4-r6, r12}
ldm r12, {r0-r3}
add sp, sp, #4*4
/* r0-r3: results, r4-r14: preserved */
@ back to non-secure
mcr p15, 0, r7, c1, c1, 0
pop {r4-r7, lr}
movs pc, lr
hyp_trap:
mrs lr, elr_hyp @ for older asm: .byte 0x00, 0xe3, 0x0e, 0xe1
mov pc, lr @ do not switch modes, but
@ return to caller
ENTRY(psci_cpu_entry)
mrc p15, 0, r0, c1, c0, 1 @ ACTLR
orr r0, r0, #(1 << 6) @ Set SMP bit
mcr p15, 0, r0, c1, c0, 1 @ ACTLR
bl secure_monitor_stack_setup
#ifdef CONFIG_ARM_PSCI
bl psci_cpu_entry_c
#endif
ENDPROC(psci_cpu_entry)
|
a3f/bareDOOM
| 3,736
|
arch/arm/cpu/cache-armv4.S
|
#include <linux/linkage.h>
#include <init.h>
#define CACHE_DLINESIZE 32
.section .text.v4_mmu_cache_on
ENTRY(v4_mmu_cache_on)
mov r12, lr
#ifdef CONFIG_MMU
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mov pc, r12
ENDPROC(v4_mmu_cache_on)
__common_mmu_cache_on:
orr r0, r0, #0x000d @ Write buffer, mmu
b 1f
.align 5 @ cache line aligned
1: mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back to
sub pc, lr, r0, lsr #32 @ properly flush pipeline
.section .text.v4_mmu_cache_off
ENTRY(v4_mmu_cache_off)
#ifdef CONFIG_MMU
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
#endif
mov pc, lr
ENDPROC(v4_mmu_cache_off)
.section .text.v4_mmu_cache_flush
ENTRY(v4_mmu_cache_flush)
stmfd sp!, {r6, r11, lr}
mrc p15, 0, r6, c0, c0 @ get processor ID
mov r2, #64*1024 @ default: 32K dcache size (*2)
mov r11, #32 @ default: 32 byte line size
mrc p15, 0, r3, c0, c0, 1 @ read cache type
teq r3, r6 @ cache ID register present?
beq no_cache_id
mov r1, r3, lsr #18
and r1, r1, #7
mov r2, #1024
mov r2, r2, lsl r1 @ base dcache size *2
tst r3, #1 << 14 @ test M bit
addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1
mov r3, r3, lsr #12
and r3, r3, #3
mov r11, #8
mov r11, r11, lsl r3 @ cache line size in bytes
no_cache_id:
mov r1, pc
bic r1, r1, #63 @ align to longest cache line
add r2, r1, r2
1:
ldr r3, [r1], r11 @ s/w flush D cache
teq r1, r2
bne 1b
mcr p15, 0, r1, c7, c5, 0 @ flush I cache
mcr p15, 0, r1, c7, c6, 0 @ flush D cache
mcr p15, 0, r1, c7, c10, 4 @ drain WB
ldmfd sp!, {r6, r11, pc}
ENDPROC(v4_mmu_cache_flush)
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
.section .text.v4_dma_inv_range
ENTRY(v4_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
.section .text.v4_dma_clean_range
ENTRY(v4_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.section .text.v4_dma_flush_range
ENTRY(v4_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
|
a3f/bareDOOM
| 6,090
|
arch/arm/cpu/cache-armv7.S
|
#include <linux/linkage.h>
#include <init.h>
.section .text.v7_mmu_cache_on
ENTRY(v7_mmu_cache_on)
stmfd sp!, {r11, lr}
mov r12, lr
#ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
mov r0, #0
dsb @ drain write buffer
tst r11, #0xf @ VMSA
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
orrne r0, r0, #1 @ MMU enabled
#endif
isb
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0, #0
isb
ldmfd sp!, {r11, pc}
ENDPROC(v7_mmu_cache_on)
.section .text.v7_mmu_cache_off
ENTRY(v7_mmu_cache_off)
/* although 'r12' is an eabi scratch register which does
not need to be restored, save it to ensure an 8-byte
stack alignment */
stmfd sp!, {r4-r12, lr}
mrc p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
bic r0, r0, #0x000d
#else
bic r0, r0, #0x000c
#endif
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
bl v7_mmu_cache_flush
mov r0, #0
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
#endif
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
dsb
isb
ldmfd sp!, {r4-r12, pc}
ENDPROC(v7_mmu_cache_off)
.section .text.v7_mmu_cache_flush_invalidate
ENTRY(v7_mmu_cache_invalidate)
mov r0, #1
b __v7_mmu_cache_flush_invalidate
ENDPROC(v7_mmu_cache_invalidate)
ENTRY(v7_mmu_cache_flush)
mov r0, #0
b __v7_mmu_cache_flush_invalidate
ENDPROC(v7_mmu_cache_flush)
ENTRY(__v7_mmu_cache_flush_invalidate)
dmb
mrc p15, 0, r12, c0, c1, 5 @ read ID_MMFR1
tst r12, #0xf << 16 @ hierarchical cache (ARMv7)
mov r12, #0
beq hierarchical
mcr p15, 0, r12, c7, c14, 0 @ clean+invalidate D
b iflush
hierarchical:
stmfd sp!, {r4-r11}
mov r8, r0
dmb
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
beq finished @ if loc is 0, then no need to clean
cmp r8, #0
THUMB( ite eq )
moveq r12, #0
subne r12, r3, #2 @ start invalidate at outermost cache level
loop1:
add r2, r12, r12, lsr #1 @ work out 3x current cache level
mov r1, r0, lsr r2 @ extract cache type bits from clidr
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
mcr p15, 2, r12, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sync the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 @ find maximum way number (ways - 1)
clz r5, r4 @ find bit position of way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 @ extract maximum set index
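@ Example: for a 32 KB, 4-way cache with 64-byte lines, r2 = 6 and
@ r5 = 30, so each set/way operand below encodes
@ (way << 30) | (set << 6) | (level << 1).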
loop2:
mov r9, r4 @ create working copy of max way size
loop3:
ARM( orr r11, r12, r9, lsl r5 ) @ factor way and cache number into r11
ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
THUMB( lsl r6, r9, r5 )
THUMB( orr r11, r12, r6 ) @ factor way and cache number into r11
THUMB( lsl r6, r7, r2 )
THUMB( orr r11, r11, r6 ) @ factor index number into r11
cmp r8, #0
THUMB( ite eq )
mcreq p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
mcrne p15, 0, r11, c7, c6, 2 @ invalidate by set/way
subs r9, r9, #1 @ decrement the way
bge loop3
subs r7, r7, #1 @ decrement the index
bge loop2
skip:
cmp r8, #0
bne inval_check
add r12, r12, #2 @ increment cache number
cmp r3, r12
b loop_end_check
inval_check:
cmp r12, #0
sub r12, r12, #2 @ decrement cache number
loop_end_check:
dsb @ work-around Cortex-A7 erratum 814220
bgt loop1
finished:
ldmfd sp!, {r4-r11}
mov r12, #0 @ switch back to cache level 0
mcr p15, 2, r12, c0, c0, 0 @ select current cache level in cssr
iflush:
dsb
mcr p15, 0, r12, c7, c5, 0 @ invalidate I+BTB
dsb
isb
mov pc, lr
ENDPROC(__v7_mmu_cache_flush_invalidate)
/*
* cache_line_size - get the cache line size from the CSIDR register
* (available on ARMv7+). It assumes that the CSSR register was configured
* to access the L1 data cache CSIDR.
*/
.macro dcache_line_size, reg, tmp
mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR
and \tmp, \tmp, #7 @ cache line size encoding
mov \reg, #16 @ size offset
mov \reg, \reg, lsl \tmp @ actual cache line size
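@ e.g. a CSIDR LineSize encoding of 2 gives 16 << 2 = 64-byte lines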
.endm
/*
* v7_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
.section .text.v7_dma_inv_range
ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1:
mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb
mov pc, lr
ENDPROC(v7_dma_inv_range)
/*
* v7_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
.section .text.v7_dma_clean_range
ENTRY(v7_dma_clean_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb
mov pc, lr
ENDPROC(v7_dma_clean_range)
/*
* v7_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
.section .text.v7_dma_flush_range
ENTRY(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb
mov pc, lr
ENDPROC(v7_dma_flush_range)
|
a3f/bareDOOM
| 1,048
|
arch/arm/cpu/board-dt-2nd-aarch64.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/linkage.h>
#include <asm/barebox-arm64.h>
#include <asm/image.h>
#define IMAGE_FLAGS \
(ARM64_IMAGE_FLAG_PAGE_SIZE_4K << ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT) | \
(ARM64_IMAGE_FLAG_PHYS_BASE << ARM64_IMAGE_FLAG_PHYS_BASE_SHIFT)
.section .text_head_entry_start_dt_2nd
ENTRY("start_dt_2nd")
adr x1, 0 /* code0 */
b 2f /* code1 */
.xword 0x80000 /* Image load offset */
.xword _barebox_image_size /* Effective Image size */
.xword IMAGE_FLAGS /* Kernel flags */
.xword 0 /* reserved */
.xword 0 /* reserved */
.xword 0 /* reserved */
.ascii ARM64_IMAGE_MAGIC /* magic number */
.int 0 /* reserved (PE-COFF offset) */
.asciz "barebox" /* unused for now */
2:
mov sp, x1
/* Stack now grows into the 0x80000 image load offset specified
* above. This is more than enough until FDT /memory is decoded.
*/
b dt_2nd_aarch64
ENTRY_PROC_END(start_dt_2nd)
|
a3f/bareDOOM
| 1,371
|
arch/arm/lib64/barebox.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SPDX-FileCopyrightText: 2000-2004 Wolfgang Denk <wd@denx.de>, DENX Software Engineering */
#include <asm-generic/barebox.lds.h>
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(start)
SECTIONS
{
#ifdef CONFIG_RELOCATABLE
. = 0x0;
#else
. = TEXT_BASE;
#endif
.image_start : { *(.__image_start) }
#ifndef CONFIG_PBL_IMAGE
PRE_IMAGE
#endif
. = ALIGN(4);
._text : { *(._text) }
.text :
{
_stext = .;
*(.text_entry*)
__bare_init_start = .;
*(.text_bare_init*)
__bare_init_end = .;
__exceptions_start = .;
KEEP(*(.text_exceptions*))
__exceptions_stop = .;
*(.text*)
}
BAREBOX_BARE_INIT_SIZE
. = ALIGN(4);
.rodata : {
*(.rodata*)
RO_DATA_SECTION
}
_etext = .; /* End of text and rodata section */
_sdata = .;
. = ALIGN(4);
.data : { *(.data*) }
.barebox_imd : { BAREBOX_IMD }
.rel_dyn_start : { *(.__rel_dyn_start) }
.rela.dyn : { *(.rela*) }
.rel_dyn_end : { *(.__rel_dyn_end) }
.__dynsym_start : { *(.__dynsym_start) }
.dynsym : { *(.dynsym) }
.__dynsym_end : { *(.__dynsym_end) }
_edata = .;
.image_end : { *(.__image_end) }
. = ALIGN(4);
.__bss_start : { *(.__bss_start) }
.bss : { *(.bss*) }
.__bss_stop : { *(.__bss_stop) }
_end = .;
_barebox_image_size = __bss_start - TEXT_BASE;
}
|
a3f/bareDOOM
| 4,107
|
arch/arm/lib64/copy_template.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2013 ARM Ltd. */
/* SPDX-FileCopyrightText: 2013 Linaro */
/*
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*/
/*
* Copy a buffer from src to dest (alignment handled by the hardware)
*
* Parameters:
* x0 - dest
* x1 - src
* x2 - n
* Returns:
* x0 - dest
*/
dstin .req x0
src .req x1
count .req x2
tmp1 .req x3
tmp1w .req w3
tmp2 .req x4
tmp2w .req w4
dst .req x6
A_l .req x7
A_h .req x8
B_l .req x9
B_h .req x10
C_l .req x11
C_h .req x12
D_l .req x13
D_h .req x14
mov dst, dstin
cmp count, #16
/* When the length is less than 16, the accesses are not aligned. */
b.lo .Ltiny15
neg tmp2, src
ands tmp2, tmp2, #15/* Bytes to reach alignment. */
b.eq .LSrcAligned
sub count, count, tmp2
/*
* Copy the leading memory data from src to dst in increasing
* address order. This way, the risk of overwriting the source
* data is eliminated when the distance between src and dst is
* less than 16. The memory accesses here are aligned.
*/
tbz tmp2, #0, 1f
ldrb1 tmp1w, src, #1
strb1 tmp1w, dst, #1
1:
tbz tmp2, #1, 2f
ldrh1 tmp1w, src, #2
strh1 tmp1w, dst, #2
2:
tbz tmp2, #2, 3f
ldr1 tmp1w, src, #4
str1 tmp1w, dst, #4
3:
tbz tmp2, #3, .LSrcAligned
ldr1 tmp1, src, #8
str1 tmp1, dst, #8
.LSrcAligned:
cmp count, #64
b.ge .Lcpy_over64
/*
* Deal with small copies quickly by dropping straight into the
* exit block.
*/
.Ltail63:
/*
* Copy up to 48 bytes of data. At this point we only need the
* bottom 6 bits of count to be accurate.
*/
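/*
* In other words: count & 0x30 == 0x30 falls through all three stp
* pairs below (48 bytes), 0x20 branches to 1: (32 bytes), 0x10
* branches to 2: (16 bytes), and 0 skips ahead to .Ltiny15.
*/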
ands tmp1, count, #0x30
b.eq .Ltiny15
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
ldp1 A_l, A_h, src, #16
stp1 A_l, A_h, dst, #16
1:
ldp1 A_l, A_h, src, #16
stp1 A_l, A_h, dst, #16
2:
ldp1 A_l, A_h, src, #16
stp1 A_l, A_h, dst, #16
.Ltiny15:
/*
* Prefer to break one ldp/stp into several loads/stores that access
* memory in increasing address order, rather than loading/storing 16
* bytes from (src-16) to (dst-16) and moving src back to an aligned
* address, as the original cortex memcpy does. If the original
* process were kept here, memmove would need to satisfy the
* precondition that src is at least 16 bytes above dst, otherwise
* some source data would be overwritten when memmove calls memcpy
* directly. To keep memmove simple and decouple memcpy from memmove,
* the original process was dropped.
*/
tbz count, #3, 1f
ldr1 tmp1, src, #8
str1 tmp1, dst, #8
1:
tbz count, #2, 2f
ldr1 tmp1w, src, #4
str1 tmp1w, dst, #4
2:
tbz count, #1, 3f
ldrh1 tmp1w, src, #2
strh1 tmp1w, dst, #2
3:
tbz count, #0, .Lexitfunc
ldrb1 tmp1w, src, #1
strb1 tmp1w, dst, #1
b .Lexitfunc
.Lcpy_over64:
subs count, count, #128
b.ge .Lcpy_body_large
/*
* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail.
*/
ldp1 A_l, A_h, src, #16
stp1 A_l, A_h, dst, #16
ldp1 B_l, B_h, src, #16
ldp1 C_l, C_h, src, #16
stp1 B_l, B_h, dst, #16
stp1 C_l, C_h, dst, #16
ldp1 D_l, D_h, src, #16
stp1 D_l, D_h, dst, #16
tst count, #0x3f
b.ne .Ltail63
b .Lexitfunc
/*
* Critical loop. Start at a new cache line boundary. Assuming
* 64 bytes per line this ensures the entire loop is in one line.
*/
.Lcpy_body_large:
/* pre-get 64 bytes data. */
ldp1 A_l, A_h, src, #16
ldp1 B_l, B_h, src, #16
ldp1 C_l, C_h, src, #16
ldp1 D_l, D_h, src, #16
1:
/*
* Interleave the load of the next 64-byte block with the store of
* the previously loaded 64 bytes.
*/
stp1 A_l, A_h, dst, #16
ldp1 A_l, A_h, src, #16
stp1 B_l, B_h, dst, #16
ldp1 B_l, B_h, src, #16
stp1 C_l, C_h, dst, #16
ldp1 C_l, C_h, src, #16
stp1 D_l, D_h, dst, #16
ldp1 D_l, D_h, src, #16
subs count, count, #64
b.ge 1b
stp1 A_l, A_h, dst, #16
stp1 B_l, B_h, dst, #16
stp1 C_l, C_h, dst, #16
stp1 D_l, D_h, dst, #16
tst count, #0x3f
b.ne .Ltail63
.Lexitfunc:
|
a3f/bareDOOM
| 1,246
|
arch/arm/lib64/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2013 ARM Ltd. */
/* SPDX-FileCopyrightText: 2013 Linaro */
/*
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Copy a buffer from src to dest (alignment handled by the hardware)
*
* Parameters:
* x0 - dest
* x1 - src
* x2 - n
* Returns:
* x0 - dest
*/
.macro ldrb1 ptr, regB, val
ldrb \ptr, [\regB], \val
.endm
.macro strb1 ptr, regB, val
strb \ptr, [\regB], \val
.endm
.macro ldrh1 ptr, regB, val
ldrh \ptr, [\regB], \val
.endm
.macro strh1 ptr, regB, val
strh \ptr, [\regB], \val
.endm
.macro ldr1 ptr, regB, val
ldr \ptr, [\regB], \val
.endm
.macro str1 ptr, regB, val
str \ptr, [\regB], \val
.endm
.macro ldp1 ptr, regB, regC, val
ldp \ptr, \regB, [\regC], \val
.endm
.macro stp1 ptr, regB, regC, val
stp \ptr, \regB, [\regC], \val
.endm
.weak __arch_memcpy
ENTRY(__arch_memcpy)
#include "copy_template.S"
ret
ENDPROC(__arch_memcpy)
|
a3f/bareDOOM
| 1,044
|
arch/arm/lib64/setjmp.S
|
/* SPDX-License-Identifier: GPL-2.0+ */
/* SPDX-FileCopyrightText: 2017 Theobroma Systems Design und Consulting GmbH */
#include <config.h>
#include <linux/linkage.h>
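/* jmp_buf layout used below: x19..x30 at offsets 0..88, sp at 96. */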
.pushsection .text.setjmp, "ax"
ENTRY(setjmp)
/* Preserve all callee-saved registers and the SP */
stp x19, x20, [x0,#0]
stp x21, x22, [x0,#16]
stp x23, x24, [x0,#32]
stp x25, x26, [x0,#48]
stp x27, x28, [x0,#64]
stp x29, x30, [x0,#80]
mov x2, sp
str x2, [x0, #96]
mov x0, #0
ret
ENDPROC(setjmp)
.popsection
.pushsection .text.longjmp, "ax"
ENTRY(longjmp)
ldp x19, x20, [x0,#0]
ldp x21, x22, [x0,#16]
ldp x23, x24, [x0,#32]
ldp x25, x26, [x0,#48]
ldp x27, x28, [x0,#64]
ldp x29, x30, [x0,#80]
ldr x2, [x0,#96]
mov sp, x2
/* Move the return value in place, but return 1 if passed 0. */
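/* adds sets the Z flag when x1 is zero; csinc then yields xzr + 1 = 1. */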
adds x0, xzr, x1
csinc x0, x0, xzr, ne
ret
ENDPROC(longjmp)
.popsection
.pushsection .text.initjmp, "ax"
ENTRY(initjmp)
str x2, [x0, #96] /* stack pointer */
str x1, [x0, #88] /* return address */
mov x0, #0
ret
ENDPROC(initjmp)
.popsection
|
a3f/bareDOOM
| 4,464
|
arch/arm/lib64/memset.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2013 ARM Ltd. */
/* SPDX-FileCopyrightText: 2013 Linaro */
/*
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Fill in the buffer with character c (alignment handled by the hardware)
*
* Parameters:
* x0 - buf
* x1 - c
* x2 - n
* Returns:
* x0 - buf
*/
dstin .req x0
val .req w1
count .req x2
tmp1 .req x3
tmp1w .req w3
tmp2 .req x4
tmp2w .req w4
zva_len_x .req x5
zva_len .req w5
zva_bits_x .req x6
A_l .req x7
A_lw .req w7
dst .req x8
tmp3w .req w9
tmp3 .req x9
.weak memset
ENTRY(__arch_memset)
mov dst, dstin /* Preserve return value. */
and A_lw, val, #255
orr A_lw, A_lw, A_lw, lsl #8
orr A_lw, A_lw, A_lw, lsl #16
orr A_l, A_l, A_l, lsl #32
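/* e.g. c = 0xab replicates to A_l = 0xabababababababab */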
cmp count, #15
b.hi .Lover16_proc
/* All stores may be unaligned. */
tbz count, #3, 1f
str A_l, [dst], #8
1:
tbz count, #2, 2f
str A_lw, [dst], #4
2:
tbz count, #1, 3f
strh A_lw, [dst], #2
3:
tbz count, #0, 4f
strb A_lw, [dst]
4:
ret
.Lover16_proc:
/* Check whether the start address is 16-byte aligned. */
neg tmp2, dst
ands tmp2, tmp2, #15
b.eq .Laligned
/*
* The count is at least 16, so we can use stp to store the first 16 bytes,
* then advance dst to a 16-byte boundary. This leaves the current
* memory address aligned.
*/
stp A_l, A_l, [dst] /* unaligned store */
/* make dst aligned */
sub count, count, tmp2
add dst, dst, tmp2
.Laligned:
cbz A_l, .Lzero_mem
.Ltail_maybe_long:
cmp count, #64
b.ge .Lnot_short
.Ltail63:
ands tmp1, count, #0x30
b.eq 3f
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
stp A_l, A_l, [dst], #16
1:
stp A_l, A_l, [dst], #16
2:
stp A_l, A_l, [dst], #16
/*
* The remaining length is less than 16; use stp to write the last 16 bytes.
* This causes some bytes to be written twice and the access may be unaligned.
*/
3:
ands count, count, #15
cbz count, 4f
add dst, dst, count
stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */
4:
ret
/*
* Critical loop. Start at a new cache line boundary. Assuming
* 64 bytes per line, this ensures the entire loop is in one line.
*/
.Lnot_short:
sub dst, dst, #16/* Pre-bias. */
sub count, count, #64
1:
stp A_l, A_l, [dst, #16]
stp A_l, A_l, [dst, #32]
stp A_l, A_l, [dst, #48]
stp A_l, A_l, [dst, #64]!
subs count, count, #64
b.ge 1b
tst count, #0x3f
add dst, dst, #16
b.ne .Ltail63
.Lexitfunc:
ret
/*
* For zeroing memory, check to see if we can use the ZVA feature to
* zero entire 'cache' lines.
*/
.Lzero_mem:
cmp count, #63
b.le .Ltail63
/*
* For zeroing small amounts of memory, it's not worth setting up
* the line-clear code.
*/
cmp count, #128
b.lt .Lnot_short /* fall through only when count is at least 128 bytes */
mrs tmp1, dczid_el0
tbnz tmp1, #4, .Lnot_short
mov tmp3w, #4
and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
lsl zva_len, tmp3w, zva_len
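/* Example: DCZID_EL0.BS = 4 gives 4 << 4 = 64-byte ZVA blocks. */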
ands tmp3w, zva_len, #63
/*
* ensure the zva_len is not less than 64.
* It is not meaningful to use ZVA if the block size is less than 64.
*/
b.ne .Lnot_short
.Lzero_by_line:
/*
* Compute how far we need to go to become suitably aligned. We're
* already at quad-word alignment.
*/
cmp count, zva_len_x
b.lt .Lnot_short /* Not enough to reach alignment. */
sub zva_bits_x, zva_len_x, #1
neg tmp2, dst
ands tmp2, tmp2, zva_bits_x
b.eq 2f /* Already aligned. */
/* Not aligned, check that there's enough to zero after alignment. */
sub tmp1, count, tmp2
/*
* Guarantee that the length remaining for ZVA is at least 64 bytes,
* so the zeroing loop after 2f does not run past the end of the range.
*/
cmp tmp1, #64
ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
b.lt .Lnot_short
/*
* We know that there's at least 64 bytes to zero and that it's safe
* to overrun by 64 bytes.
*/
mov count, tmp1
1:
stp A_l, A_l, [dst]
stp A_l, A_l, [dst, #16]
stp A_l, A_l, [dst, #32]
subs tmp2, tmp2, #64
stp A_l, A_l, [dst, #48]
add dst, dst, #64
b.ge 1b
/* We've overrun a bit, so adjust dst downwards.*/
add dst, dst, tmp2
2:
sub count, count, zva_len_x
3:
dc zva, dst
add dst, dst, zva_len_x
subs count, count, zva_len_x
b.ge 3b
ands count, count, zva_bits_x
b.ne .Ltail_maybe_long
ret
ENDPROC(__arch_memset)
|
a3f/bareDOOM
| 1,873
|
arch/arm/lib32/barebox.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SPDX-FileCopyrightText: 2000-2004 Wolfgang Denk <wd@denx.de>, DENX Software Engineering */
#include <asm-generic/barebox.lds.h>
#include <asm/secure.h>
OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(start)
SECTIONS
{
#ifdef CONFIG_RELOCATABLE
. = 0x0;
#else
. = TEXT_BASE;
#endif
.image_start : { *(.__image_start) }
#ifndef CONFIG_PBL_IMAGE
PRE_IMAGE
#endif
. = ALIGN(4);
._text : { *(._text) }
.text :
{
_stext = .;
*(.text_entry*)
__bare_init_start = .;
*(.text_bare_init*)
__bare_init_end = .;
. = ALIGN(0x20);
__exceptions_start = .;
KEEP(*(.text_exceptions*))
__exceptions_stop = .;
*(.text*)
}
BAREBOX_BARE_INIT_SIZE
. = ALIGN(4);
.rodata : {
*(.rodata*)
RO_DATA_SECTION
}
#ifdef CONFIG_ARM_UNWIND
/*
* Stack unwinding tables
*/
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
*(.ARM.exidx*)
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
*(.ARM.extab*)
__stop_unwind_tab = .;
}
#endif
_etext = .; /* End of text and rodata section */
_sdata = .;
. = ALIGN(4);
.data : { *(.data*)
CONSTRUCTORS
}
.barebox_imd : { BAREBOX_IMD }
. = .;
.rel_dyn_start : { *(.__rel_dyn_start) }
.rel.dyn : { *(.rel*) }
.rel_dyn_end : { *(.__rel_dyn_end) }
.__dynsym_start : { *(.__dynsym_start) }
.dynsym : { *(.dynsym) }
.__dynsym_end : { *(.__dynsym_end) }
_edata = .;
.image_end : { *(.__image_end) }
. = ALIGN(4);
.__bss_start : { *(.__bss_start) }
.bss : { *(.bss*) }
.__bss_stop : { *(.__bss_stop) }
#ifdef CONFIG_ARM_SECURE_MONITOR
. = ALIGN(16);
__secure_stack_start = .;
. = . + (ARM_SECURE_MAX_CPU << ARM_SECURE_STACK_SHIFT);
__secure_stack_end = .;
__secure_end = .;
#endif
_end = .;
_barebox_image_size = __bss_start - TEXT_BASE;
}
|
a3f/bareDOOM
| 5,914
|
arch/arm/lib32/copy_template.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2005 MontaVista Software, Inc (Nicolas Pitre) */
/*
* linux/arch/arm/lib/copy_template.s
*
* Code template for optimized memory copy functions
*/
/*
* Theory of operation
* -------------------
*
* This file provides the core code for a forward memory copy used in
* the implementation of memcpy(), copy_to_user() and copy_from_user().
*
* The including file must define the following accessor macros
* according to the need of the given function:
*
* ldr1w ptr reg abort
*
* This loads one word from 'ptr', stores it in 'reg' and increments
* 'ptr' to the next word. The 'abort' argument is used for fixup tables.
*
* ldr4w ptr reg1 reg2 reg3 reg4 abort
* ldr8w ptr, reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
*
* This loads four or eight words starting from 'ptr', stores them
* in provided registers and increments 'ptr' past those words.
* The 'abort' argument is used for fixup tables.
*
* ldr1b ptr reg cond abort
*
* Similar to ldr1w, but it loads a byte and increments 'ptr' one byte.
* It also must apply the condition code if provided, otherwise the
* "al" condition is assumed by default.
*
* str1w ptr reg abort
* str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
* str1b ptr reg cond abort
*
* Same as their ldr* counterparts, but data is stored to 'ptr' location
* rather than being loaded.
*
* enter reg1 reg2
*
* Preserve the provided registers on the stack plus any additional
* data as needed by the implementation including this code. Called
* upon code entry.
*
* exit reg1 reg2
*
* Restore registers with the values previously saved with the
* 'enter' macro. Called upon code termination.
*
* LDR1W_SHIFT
* STR1W_SHIFT
*
* Correction to be applied to the "ip" register when branching into
* the ldr1w or str1w instructions (some of these macros may expand to
* more than one 32bit instruction in Thumb-2)
*/
enter r4, lr
subs r2, r2, #4
blt 8f
ands ip, r0, #3
PLD( pld [r1, #0] )
bne 9f
ands ip, r1, #3
bne 10f
1: subs r2, r2, #(28)
stmfd sp!, {r5 - r8}
blt 5f
CALGN( ands ip, r0, #31 )
CALGN( rsb r3, ip, #32 )
CALGN( sbcnes r4, r3, r2 ) @ C is always set here
CALGN( bcs 2f )
CALGN( adr r4, 6f )
CALGN( subs r2, r2, r3 ) @ C gets set
CALGN( add pc, r4, ip )
PLD( pld [r1, #0] )
2: PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
PLD( blt 4f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
3: PLD( pld [r1, #124] )
4: ldr8w r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
subs r2, r2, #32
str8w r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
bge 3b
PLD( cmn r2, #96 )
PLD( bge 4b )
5: ands ip, r2, #28
rsb ip, ip, #32
#if LDR1W_SHIFT > 0
lsl ip, ip, #LDR1W_SHIFT
#endif
addne pc, pc, ip @ C is always clear here
b 7f
6:
.rept (1 << LDR1W_SHIFT)
W(nop)
.endr
ldr1w r1, r3, abort=20f
ldr1w r1, r4, abort=20f
ldr1w r1, r5, abort=20f
ldr1w r1, r6, abort=20f
ldr1w r1, r7, abort=20f
ldr1w r1, r8, abort=20f
ldr1w r1, lr, abort=20f
#if LDR1W_SHIFT < STR1W_SHIFT
lsl ip, ip, #STR1W_SHIFT - LDR1W_SHIFT
#elif LDR1W_SHIFT > STR1W_SHIFT
lsr ip, ip, #LDR1W_SHIFT - STR1W_SHIFT
#endif
add pc, pc, ip
nop
.rept (1 << STR1W_SHIFT)
W(nop)
.endr
str1w r0, r3, abort=20f
str1w r0, r4, abort=20f
str1w r0, r5, abort=20f
str1w r0, r6, abort=20f
str1w r0, r7, abort=20f
str1w r0, r8, abort=20f
str1w r0, lr, abort=20f
CALGN( bcs 2b )
7: ldmfd sp!, {r5 - r8}
8: movs r2, r2, lsl #31
ldr1b r1, r3, ne, abort=21f
ldr1b r1, r4, cs, abort=21f
ldr1b r1, ip, cs, abort=21f
str1b r0, r3, ne, abort=21f
str1b r0, r4, cs, abort=21f
str1b r0, ip, cs, abort=21f
exit r4, pc
9: rsb ip, ip, #4
cmp ip, #2
ldr1b r1, r3, gt, abort=21f
ldr1b r1, r4, ge, abort=21f
ldr1b r1, lr, abort=21f
str1b r0, r3, gt, abort=21f
str1b r0, r4, ge, abort=21f
subs r2, r2, ip
str1b r0, lr, abort=21f
blt 8b
ands ip, r1, #3
beq 1b
10: bic r1, r1, #3
cmp ip, #2
ldr1w r1, lr, abort=21f
beq 17f
bgt 18f
.macro forward_copy_shift pull push
subs r2, r2, #28
blt 14f
CALGN( ands ip, r0, #31 )
CALGN( rsb ip, ip, #32 )
CALGN( sbcnes r4, ip, r2 ) @ C is always set here
CALGN( subcc r2, r2, ip )
CALGN( bcc 15f )
11: stmfd sp!, {r5 - r9}
PLD( pld [r1, #0] )
PLD( subs r2, r2, #96 )
PLD( pld [r1, #28] )
PLD( blt 13f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
12: PLD( pld [r1, #124] )
13: ldr4w r1, r4, r5, r6, r7, abort=19f
mov r3, lr, pull #\pull
subs r2, r2, #32
ldr4w r1, r8, r9, ip, lr, abort=19f
orr r3, r3, r4, push #\push
mov r4, r4, pull #\pull
orr r4, r4, r5, push #\push
mov r5, r5, pull #\pull
orr r5, r5, r6, push #\push
mov r6, r6, pull #\pull
orr r6, r6, r7, push #\push
mov r7, r7, pull #\pull
orr r7, r7, r8, push #\push
mov r8, r8, pull #\pull
orr r8, r8, r9, push #\push
mov r9, r9, pull #\pull
orr r9, r9, ip, push #\push
mov ip, ip, pull #\pull
orr ip, ip, lr, push #\push
str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
bge 12b
PLD( cmn r2, #96 )
PLD( bge 13b )
ldmfd sp!, {r5 - r9}
14: ands ip, r2, #28
beq 16f
15: mov r3, lr, pull #\pull
ldr1w r1, lr, abort=21f
subs ip, ip, #4
orr r3, r3, lr, push #\push
str1w r0, r3, abort=21f
bgt 15b
CALGN( cmp r2, #0 )
CALGN( bge 11b )
16: sub r1, r1, #(\push / 8)
b 8b
.endm
forward_copy_shift pull=8 push=24
17: forward_copy_shift pull=16 push=16
18: forward_copy_shift pull=24 push=8
/*
* Abort preamble and completion macros.
* If a fixup handler is required then those macros must surround it.
* It is assumed that the fixup code will handle the private part of
* the exit macro.
*/
.macro copy_abort_preamble
19: ldmfd sp!, {r5 - r9}
b 21f
20: ldmfd sp!, {r5 - r8}
21:
.endm
.macro copy_abort_end
ldmfd sp!, {r4, pc}
.endm
|
a3f/bareDOOM
| 1,221
|
arch/arm/lib32/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 2005 MontaVista Software, Inc (Nicolas Pitre) */
/*
* linux/arch/arm/lib/memcpy.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
.macro ldr1w ptr reg abort
W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
.endm
.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro ldr1b ptr reg cond=al abort
ldr\cond\()b \reg, [\ptr], #1
.endm
.macro str1w ptr reg abort
W(str) \reg, [\ptr], #4
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
.endm
.macro str1b ptr reg cond=al abort
str\cond\()b \reg, [\ptr], #1
.endm
.macro enter reg1 reg2
stmdb sp!, {r0, \reg1, \reg2}
.endm
.macro exit reg1 reg2
ldmfd sp!, {r0, \reg1, \reg2}
.endm
.text
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
.weak memcpy
ENTRY(memcpy)
ENTRY(__memcpy)
#include "copy_template.S"
ENDPROC(__memcpy)
ENDPROC(memcpy)
|
a3f/bareDOOM
| 1,458
|
arch/arm/lib32/ashrdi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
*/
#include <linux/linkage.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
.section .text.__ashrdi3
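/*
* Illustration, shift amount r2 = 4: r3 = r2 - 32 is negative (MI), so
* al = (al >> 4) | (ah << 28) and ah = ah >> 4 (arithmetic). For
* r2 >= 32 the PL path yields al = ah >> (r2 - 32) instead.
*/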
ENTRY(__ashrdi3)
ENTRY(__aeabi_lasr)
subs r3, r2, #32
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, asr r3
ARM( orrmi al, al, ah, lsl ip )
THUMB( lslmi r3, ah, ip )
THUMB( orrmi al, al, r3 )
mov ah, ah, asr r2
mov pc, lr
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
|
a3f/bareDOOM
| 1,564
|
arch/arm/lib32/io-writesb.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/io-writesb.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
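@ outword stores the four bytes of \rd to the port at [r0] in memory
@ (ascending address) order, on either endianness.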
.macro outword, rd
#ifndef __ARMEB__
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
#else
mov lr, \rd, lsr #24
strb lr, [r0]
mov lr, \rd, lsr #16
strb lr, [r0]
mov lr, \rd, lsr #8
strb lr, [r0]
strb \rd, [r0]
#endif
.endm
.section .text.writesb
.Loutsb_align: rsb ip, ip, #4
cmp ip, r2
movgt ip, r2
cmp ip, #2
ldrb r3, [r1], #1
strb r3, [r0]
ldrgeb r3, [r1], #1
strgeb r3, [r0]
ldrgtb r3, [r1], #1
strgtb r3, [r0]
subs r2, r2, ip
bne .Loutsb_aligned
ENTRY(writesb)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
ands ip, r1, #3
bne .Loutsb_align
.Loutsb_aligned:
stmfd sp!, {r4, r5, lr}
subs r2, r2, #16
bmi .Loutsb_no_16
.Loutsb_16_lp: ldmia r1!, {r3, r4, r5, ip}
outword r3
outword r4
outword r5
outword ip
subs r2, r2, #16
bpl .Loutsb_16_lp
tst r2, #15
ldmeqfd sp!, {r4, r5, pc}
.Loutsb_no_16: tst r2, #8
beq .Loutsb_no_8
ldmia r1!, {r3, r4}
outword r3
outword r4
.Loutsb_no_8: tst r2, #4
beq .Loutsb_no_4
ldr r3, [r1], #4
outword r3
.Loutsb_no_4: ands r2, r2, #3
ldmeqfd sp!, {r4, r5, pc}
cmp r2, #2
ldrb r3, [r1], #1
strb r3, [r0]
ldrgeb r3, [r1], #1
strgeb r3, [r0]
ldrgtb r3, [r1]
strgtb r3, [r0]
ldmfd sp!, {r4, r5, pc}
ENDPROC(writesb)
|