repo_id | size (bytes) | file_path | content |
|---|---|---|---|
sigongzi/riscv-isa-vector | 1,183 | tests/corpus/rv32zbkb.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
ror x0, x0, x0 # 60005033
ror x4, x13, x27 # 61b6d233
ror x31, x31, x31 # 61ffdfb3
rol x0, x0, x0 # 60001033
rol x4, x13, x27 # 61b69233
rol x31, x31, x31 # 61ff9fb3
rori x0, x0, 0 # 60005013
rori x4, x13, 27 # 61b6d213
rori x31, x31, 31 # 61ffdf93
andn x0, x0, x0 # 40007033
andn x4, x13, x27 # 41b6f233
andn x31, x31, x31 # 41ffffb3
orn x0, x0, x0 # 40006033
orn x4, x13, x27 # 41b6e233
orn x31, x31, x31 # 41ffefb3
xnor x0, x0, x0 # 40004033
xnor x4, x13, x27 # 41b6c233
xnor x31, x31, x31 # 41ffcfb3
pack x0, x0, x0 # 08004033
pack x4, x13, x27 # 09b6c233
pack x31, x31, x31 # 09ffcfb3
packh x0, x0, x0 # 08007033
packh x4, x13, x27 # 09b6f233
packh x31, x31, x31 # 09ffffb3
brev8 x0, x0 # 68705013
brev8 x4, x27 # 687dd213
brev8 x31, x31 # 687fdf93
rev8 x0, x0 # 69805013
rev8 x4, x27 # 698dd213
rev8 x31, x31 # 698fdf93
zip x0, x0 # 08f01013
zip x4, x27 # 08fd9213
zip x31, x31 # 08ff9f93
unzip x0, x0 # 08f05013
unzip x4, x27 # 08fdd213
unzip x31, x31 # 08ffdf93
|
sigongzi/riscv-isa-vector | 5,155 | tests/corpus/rv32i.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
lui x0, 0 # 00000037
lui x1, 123 # 0007b0b7
lui x31, 1048575 # ffffffb7
auipc x0, 0 # 00000017
auipc x1, 123 # 0007b097
auipc x31, 1048575 # ffffff97
jal x0, 0 # 0000006f
jal x1, -2 # fffff0ef
jal x31, 10 # 00a00fef
jal x31, 1048574 # 7fffffef
jalr x0, 0(x0) # 00000067
jalr x1, -2(x31) # ffef80e7
jalr x5, 10(x27) # 00ad82e7
jalr x31, 2047(x31) # 7fff8fe7
beq x0, x0, 0 # 00000063
beq x1, x31, -2 # fff08fe3
beq x5, x27, 10 # 01b28563
beq x31, x31, 2046 # 7fff8f63
bne x0, x0, 0 # 00001063
bne x1, x31, -2 # fff09fe3
bne x5, x27, 10 # 01b29563
bne x31, x31, 2046 # 7fff9f63
blt x0, x0, 0 # 00004063
blt x1, x31, -2 # fff0cfe3
blt x5, x27, 10 # 01b2c563
blt x31, x31, 2046 # 7fffcf63
bge x0, x0, 0 # 00005063
bge x1, x31, -2 # fff0dfe3
bge x5, x27, 10 # 01b2d563
bge x31, x31, 2046 # 7fffdf63
bltu x0, x0, 0 # 00006063
bltu x1, x31, -2 # fff0efe3
bltu x5, x27, 10 # 01b2e563
bltu x31, x31, 2046 # 7fffef63
bgeu x0, x0, 0 # 00007063
bgeu x1, x31, -2 # fff0ffe3
bgeu x5, x27, 10 # 01b2f563
bgeu x31, x31, 2046 # 7fffff63
lb x0, 0(x0) # 00000003
lb x1, -2(x31) # ffef8083
lb x5, 10(x27) # 00ad8283
lb x31, 2047(x31) # 7fff8f83
lh x0, 0(x0) # 00001003
lh x1, -2(x31) # ffef9083
lh x5, 10(x27) # 00ad9283
lh x31, 2047(x31) # 7fff9f83
lw x0, 0(x0) # 00002003
lw x1, -2(x31) # ffefa083
lw x5, 10(x27) # 00ada283
lw x31, 2047(x31) # 7fffaf83
lbu x0, 0(x0) # 00004003
lbu x1, -2(x31) # ffefc083
lbu x5, 10(x27) # 00adc283
lbu x31, 2047(x31) # 7fffcf83
lhu x0, 0(x0) # 00005003
lhu x1, -2(x31) # ffefd083
lhu x5, 10(x27) # 00add283
lhu x31, 2047(x31) # 7fffdf83
sb x0, 0(x0) # 00000023
sb x1, -2(x31) # fe1f8f23
sb x5, 10(x27) # 005d8523
sb x31, 2047(x31) # 7fff8fa3
sh x0, 0(x0) # 00001023
sh x1, -2(x31) # fe1f9f23
sh x5, 10(x27) # 005d9523
sh x31, 2047(x31) # 7fff9fa3
sw x0, 0(x0) # 00002023
sw x1, -2(x31) # fe1faf23
sw x5, 10(x27) # 005da523
sw x31, 2047(x31) # 7fffafa3
addi x0, x0, 0 # 00000013
addi x1, x31, -2 # ffef8093
addi x5, x27, 10 # 00ad8293
addi x31, x31, 2047 # 7fff8f93
slti x0, x0, 0 # 00002013
slti x1, x31, -2 # ffefa093
slti x5, x27, 10 # 00ada293
slti x31, x31, 2047 # 7fffaf93
sltiu x0, x0, 0 # 00003013
sltiu x1, x31, -2 # ffefb093
sltiu x5, x27, 10 # 00adb293
sltiu x31, x31, 2047 # 7fffbf93
xori x0, x0, 0 # 00004013
xori x1, x31, -2 # ffefc093
xori x5, x27, 10 # 00adc293
xori x31, x31, 2047 # 7fffcf93
ori x0, x0, 0 # 00006013
ori x1, x31, -2 # ffefe093
ori x5, x27, 10 # 00ade293
ori x31, x31, 2047 # 7fffef93
andi x0, x0, 0 # 00007013
andi x1, x31, -2 # ffeff093
andi x5, x27, 10 # 00adf293
andi x31, x31, 2047 # 7fffff93
slli x0, x0, 0 # 00001013
slli x5, x27, 10 # 00ad9293
slli x31, x31, 31 # 01ff9f93
srli x0, x0, 0 # 00005013
srli x5, x27, 10 # 00add293
srli x31, x31, 31 # 01ffdf93
srai x0, x0, 0 # 40005013
srai x5, x27, 10 # 40add293
srai x31, x31, 31 # 41ffdf93
add x0, x0, x0 # 00000033
add x1, x31, x13 # 00df80b3
add x5, x27, x31 # 01fd82b3
add x31, x31, x31 # 01ff8fb3
sub x0, x0, x0 # 40000033
sub x1, x31, x13 # 40df80b3
sub x5, x27, x31 # 41fd82b3
sub x31, x31, x31 # 41ff8fb3
sll x0, x0, x0 # 00001033
sll x1, x31, x13 # 00df90b3
sll x5, x27, x31 # 01fd92b3
sll x31, x31, x31 # 01ff9fb3
slt x0, x0, x0 # 00002033
slt x1, x31, x13 # 00dfa0b3
slt x5, x27, x31 # 01fda2b3
slt x31, x31, x31 # 01ffafb3
sltu x0, x0, x0 # 00003033
sltu x1, x31, x13 # 00dfb0b3
sltu x5, x27, x31 # 01fdb2b3
sltu x31, x31, x31 # 01ffbfb3
xor x0, x0, x0 # 00004033
xor x1, x31, x13 # 00dfc0b3
xor x5, x27, x31 # 01fdc2b3
xor x31, x31, x31 # 01ffcfb3
srl x0, x0, x0 # 00005033
srl x1, x31, x13 # 00dfd0b3
srl x5, x27, x31 # 01fdd2b3
srl x31, x31, x31 # 01ffdfb3
sra x0, x0, x0 # 40005033
sra x1, x31, x13 # 40dfd0b3
sra x5, x27, x31 # 41fdd2b3
sra x31, x31, x31 # 41ffdfb3
or x0, x0, x0 # 00006033
or x1, x31, x13 # 00dfe0b3
or x5, x27, x31 # 01fde2b3
or x31, x31, x31 # 01ffefb3
and x0, x0, x0 # 00007033
and x1, x31, x13 # 00dff0b3
and x5, x27, x31 # 01fdf2b3
and x31, x31, x31 # 01ffffb3
fence r, r # 0220000f
fence w, w # 0110000f
fence w, r # 0120000f
fence r, w # 0210000f
fence r, rw # 0230000f
fence rw, w # 0310000f
fence rw, rw # 0330000f
fence o, r # 0420000f
fence r, o # 0240000f
fence i, r # 0820000f
fence r, i # 0280000f
fence io, r # 0c20000f
fence r, io # 02c0000f
fence iorw, iorw # 0ff0000f
ecall # 00000073
ebreak # 00100073
|
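The rv32i corpus above pairs each instruction with its expected 32-bit encoding in the trailing comment. Those encodings can be cross-checked by packing the instruction fields by hand; below is a minimal sketch (not part of the corpus — the function name is illustrative) that reproduces the `addi x5, x27, 10 # 00ad8293` entry using the RV32I I-type layout.

```rust
// I-type layout: imm[11:0] | rs1 | funct3 | rd | opcode (low 7 bits).
fn encode_i_type(imm: i32, rs1: u32, funct3: u32, rd: u32, opcode: u32) -> u32 {
    ((imm as u32 & 0xfff) << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | opcode
}

fn main() {
    // addi uses opcode 0b0010011 (OP-IMM) and funct3 0b000.
    let insn = encode_i_type(10, 27, 0b000, 5, 0b0010011);
    assert_eq!(insn, 0x00ad8293); // matches "addi x5, x27, 10" above
    println!("{insn:08x}");
}
```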
sigongzi/riscv-isa-vector | 3,343 | tests/corpus/rv32d.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
fld f0, 0(x0) # 00003007
fld f13, -45(x7) # fd33b687
fld f4, 612(x23) # 264bb207
fld f15, 2047(x12) # 7ff63787
fld f25, -2048(x16) # 80083c87
fld f31, -1(x31) # ffffbf87
fsd f0, 0(x0) # 00003027
fsd f13, -45(x7) # fcd3b9a7
fsd f4, 612(x23) # 264bb227
fsd f15, 2047(x12) # 7ef63fa7
fsd f25, -2048(x16) # 81983027
fsd f31, -1(x31) # ffffbfa7
fmadd.d f0, f0, f0, f0 # 02007043
fmadd.d f13, f25, f3, f18 # 923cf6c3
fmadd.d f31, f31, f31, f31 # fbffffc3
fmsub.d f0, f0, f0, f0 # 02007047
fmsub.d f13, f25, f3, f18 # 923cf6c7
fmsub.d f31, f31, f31, f31 # fbffffc7
fnmsub.d f0, f0, f0, f0 # 0200704b
fnmsub.d f13, f25, f3, f18 # 923cf6cb
fnmsub.d f31, f31, f31, f31 # fbffffcb
fnmadd.d f0, f0, f0, f0 # 0200704f
fnmadd.d f13, f25, f3, f18 # 923cf6cf
fnmadd.d f31, f31, f31, f31 # fbffffcf
fadd.d f0, f0, f0 # 02007053
fadd.d f13, f26, f3 # 023d76d3
fadd.d f31, f31, f31 # 03ffffd3
fsub.d f0, f0, f0 # 0a007053
fsub.d f13, f26, f3 # 0a3d76d3
fsub.d f31, f31, f31 # 0bffffd3
fmul.d f0, f0, f0 # 12007053
fmul.d f13, f26, f3 # 123d76d3
fmul.d f31, f31, f31 # 13ffffd3
fdiv.d f0, f0, f0 # 1a007053
fdiv.d f13, f26, f3 # 1a3d76d3
fdiv.d f31, f31, f31 # 1bffffd3
fsqrt.d f0, f0 # 5a007053
fsqrt.d f13, f26 # 5a0d76d3
fsqrt.d f31, f31 # 5a0fffd3
fsgnj.d f0, f0, f0 # 22000053
fsgnj.d f13, f26, f3 # 223d06d3
fsgnj.d f31, f31, f31 # 23ff8fd3
fsgnjn.d f0, f0, f0 # 22001053
fsgnjn.d f13, f26, f3 # 223d16d3
fsgnjn.d f31, f31, f31 # 23ff9fd3
fsgnjx.d f0, f0, f0 # 22002053
fsgnjx.d f13, f26, f3 # 223d26d3
fsgnjx.d f31, f31, f31 # 23ffafd3
fmin.d f0, f0, f0 # 2a000053
fmin.d f13, f26, f3 # 2a3d06d3
fmin.d f31, f31, f31 # 2bff8fd3
fmax.d f0, f0, f0 # 2a001053
fmax.d f13, f26, f3 # 2a3d16d3
fmax.d f31, f31, f31 # 2bff9fd3
fcvt.s.d f0, f0 # 40107053
fcvt.s.d f12, f23 # 401bf653
fcvt.s.d f31, f31 # 401fffd3
fcvt.d.s f0, f0 # 42007053
fcvt.d.s f12, f23 # 420bf653
fcvt.d.s f31, f31 # 420fffd3
feq.d x0, f0, f0 # a2002053
feq.d x13, f26, f3 # a23d26d3
feq.d x31, f31, f31 # a3ffafd3
flt.d x0, f0, f0 # a2001053
flt.d x13, f26, f3 # a23d16d3
flt.d x31, f31, f31 # a3ff9fd3
fle.d x0, f0, f0 # a2000053
fle.d x13, f26, f3 # a23d06d3
fle.d x31, f31, f31 # a3ff8fd3
fclass.d x0, f0 # e2001053
fclass.d x12, f23 # e20b9653
fclass.d x31, f31 # e20f9fd3
fcvt.w.d x0, f0 # c2007053
fcvt.w.d x12, f23 # c20bf653
fcvt.w.d x31, f31 # c20fffd3
fcvt.wu.d x0, f0 # c2107053
fcvt.wu.d x12, f23 # c21bf653
fcvt.wu.d x31, f31 # c21fffd3
fcvt.d.w f0, x0 # d2007053
fcvt.d.w f12, x23 # d20bf653
fcvt.d.w f31, x31 # d20fffd3
fcvt.d.wu f0, x0 # d2107053
fcvt.d.wu f12, x23 # d21bf653
fcvt.d.wu f31, x31 # d21fffd3
|
sigongzi/riscv-isa-vector | 1,882 | tests/corpus/rv64i.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
lwu x0, 0(x0) # 00006003
lwu x1, -2(x31) # ffefe083
lwu x31, -203(x5) # f352ef83
lwu x31, 2047(x31) # 7fffef83
lwu x31, -2048(x31) # 800fef83
ld x0, 0(x0) # 00003003
ld x1, -2(x31) # ffefb083
ld x31, -203(x5) # f352bf83
ld x31, 2047(x31) # 7fffbf83
ld x31, -2048(x31) # 800fbf83
sd x0, 0(x0) # 00003023
sd x1, -2(x31) # fe1fbf23
sd x31, -203(x5) # f3f2baa3
sd x31, 2047(x31) # 7fffbfa3
sd x31, -2048(x31) # 81ffb023
slli x0, x0, 0 # 00001013
slli x1, x13, 27 # 01b69093
slli x16, x23, 31 # 01fb9813
slli x31, x31, 63 # 03ff9f93
srli x0, x0, 0 # 00005013
srli x1, x13, 27 # 01b6d093
srli x16, x23, 31 # 01fbd813
srli x31, x31, 63 # 03ffdf93
srai x0, x0, 0 # 40005013
srai x1, x13, 27 # 41b6d093
srai x16, x23, 31 # 41fbd813
srai x31, x31, 63 # 43ffdf93
addiw x0, x0, 0 # 0000001b
addiw x5, x27, 24 # 018d829b
addiw x31, x31, 2047 # 7fff8f9b
addiw x31, x31, -2048 # 800f8f9b
slliw x0, x0, 0 # 0000101b
slliw x5, x13, 27 # 01b6929b
slliw x31, x31, 31 # 01ff9f9b
srliw x0, x0, 0 # 0000501b
srliw x5, x13, 27 # 01b6d29b
srliw x31, x31, 31 # 01ffdf9b
sraiw x0, x0, 0 # 4000501b
sraiw x5, x13, 27 # 41b6d29b
sraiw x31, x31, 31 # 41ffdf9b
addw x0, x0, x0 # 0000003b
addw x4, x13, x27 # 01b6823b
addw x31, x31, x31 # 01ff8fbb
subw x0, x0, x0 # 4000003b
subw x4, x13, x27 # 41b6823b
subw x31, x31, x31 # 41ff8fbb
sllw x0, x0, x0 # 0000103b
sllw x4, x13, x27 # 01b6923b
sllw x31, x31, x31 # 01ff9fbb
srlw x0, x0, x0 # 0000503b
srlw x4, x13, x27 # 01b6d23b
srlw x31, x31, x31 # 01ffdfbb
sraw x0, x0, x0 # 4000503b
sraw x4, x13, x27 # 41b6d23b
sraw x31, x31, x31 # 41ffdfbb
|
sigongzi/riscv-isa-vector | 3,343 | tests/corpus/rv32f.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
flw f0, 0(x0) # 00002007
flw f13, -45(x7) # fd33a687
flw f4, 612(x23) # 264ba207
flw f15, 2047(x12) # 7ff62787
flw f25, -2048(x16) # 80082c87
flw f31, -1(x31) # ffffaf87
fsw f0, 0(x0) # 00002027
fsw f13, -45(x7) # fcd3a9a7
fsw f4, 612(x23) # 264ba227
fsw f15, 2047(x12) # 7ef62fa7
fsw f25, -2048(x16) # 81982027
fsw f31, -1(x31) # ffffafa7
fmadd.s f0, f0, f0, f0 # 00007043
fmadd.s f13, f25, f3, f18 # 903cf6c3
fmadd.s f31, f31, f31, f31 # f9ffffc3
fmsub.s f0, f0, f0, f0 # 00007047
fmsub.s f13, f25, f3, f18 # 903cf6c7
fmsub.s f31, f31, f31, f31 # f9ffffc7
fnmsub.s f0, f0, f0, f0 # 0000704b
fnmsub.s f13, f25, f3, f18 # 903cf6cb
fnmsub.s f31, f31, f31, f31 # f9ffffcb
fnmadd.s f0, f0, f0, f0 # 0000704f
fnmadd.s f13, f25, f3, f18 # 903cf6cf
fnmadd.s f31, f31, f31, f31 # f9ffffcf
fadd.s f0, f0, f0 # 00007053
fadd.s f13, f26, f3 # 003d76d3
fadd.s f31, f31, f31 # 01ffffd3
fsub.s f0, f0, f0 # 08007053
fsub.s f13, f26, f3 # 083d76d3
fsub.s f31, f31, f31 # 09ffffd3
fmul.s f0, f0, f0 # 10007053
fmul.s f13, f26, f3 # 103d76d3
fmul.s f31, f31, f31 # 11ffffd3
fdiv.s f0, f0, f0 # 18007053
fdiv.s f13, f26, f3 # 183d76d3
fdiv.s f31, f31, f31 # 19ffffd3
fsqrt.s f0, f0 # 58007053
fsqrt.s f13, f26 # 580d76d3
fsqrt.s f31, f31 # 580fffd3
fsgnj.s f0, f0, f0 # 20000053
fsgnj.s f13, f26, f3 # 203d06d3
fsgnj.s f31, f31, f31 # 21ff8fd3
fsgnjn.s f0, f0, f0 # 20001053
fsgnjn.s f13, f26, f3 # 203d16d3
fsgnjn.s f31, f31, f31 # 21ff9fd3
fsgnjx.s f0, f0, f0 # 20002053
fsgnjx.s f13, f26, f3 # 203d26d3
fsgnjx.s f31, f31, f31 # 21ffafd3
fmin.s f0, f0, f0 # 28000053
fmin.s f13, f26, f3 # 283d06d3
fmin.s f31, f31, f31 # 29ff8fd3
fmax.s f0, f0, f0 # 28001053
fmax.s f13, f26, f3 # 283d16d3
fmax.s f31, f31, f31 # 29ff9fd3
fcvt.w.s x0, f0 # c0007053
fcvt.w.s x12, f23 # c00bf653
fcvt.w.s x31, f31 # c00fffd3
fcvt.wu.s x0, f0 # c0107053
fcvt.wu.s x12, f23 # c01bf653
fcvt.wu.s x31, f31 # c01fffd3
fmv.x.w x0, f0 # e0000053
fmv.x.w x12, f23 # e00b8653
fmv.x.w x31, f31 # e00f8fd3
feq.s x0, f0, f0 # a0002053
feq.s x13, f26, f3 # a03d26d3
feq.s x31, f31, f31 # a1ffafd3
flt.s x0, f0, f0 # a0001053
flt.s x13, f26, f3 # a03d16d3
flt.s x31, f31, f31 # a1ff9fd3
fle.s x0, f0, f0 # a0000053
fle.s x13, f26, f3 # a03d06d3
fle.s x31, f31, f31 # a1ff8fd3
fclass.s x0, f0 # e0001053
fclass.s x12, f23 # e00b9653
fclass.s x31, f31 # e00f9fd3
fcvt.s.w f0, x0 # d0007053
fcvt.s.w f12, x23 # d00bf653
fcvt.s.w f31, x31 # d00fffd3
fcvt.s.wu f0, x0 # d0107053
fcvt.s.wu f12, x23 # d01bf653
fcvt.s.wu f31, x31 # d01fffd3
fmv.w.x f0, x0 # f0000053
fmv.w.x f12, x23 # f00b8653
fmv.w.x f31, x31 # f00f8fd3
|
sigongzi/riscv-isa-vector | 4,045 | tests/corpus/rv32zfh.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
flh f0, 0(x0) # 00001007
flh f13, -45(x7) # fd339687
flh f4, 612(x23) # 264b9207
flh f15, 2047(x12) # 7ff61787
flh f25, -2048(x16) # 80081c87
flh f31, -1(x31) # ffff9f87
fsh f0, 0(x0) # 00001027
fsh f13, -45(x7) # fcd399a7
fsh f4, 612(x23) # 264b9227
fsh f15, 2047(x12) # 7ef61fa7
fsh f25, -2048(x16) # 81981027
fsh f31, -1(x31) # ffff9fa7
fmadd.h f0, f0, f0, f0 # 04007043
fmadd.h f13, f25, f3, f18 # 943cf6c3
fmadd.h f31, f31, f31, f31 # fdffffc3
fmsub.h f0, f0, f0, f0 # 04007047
fmsub.h f13, f25, f3, f18 # 943cf6c7
fmsub.h f31, f31, f31, f31 # fdffffc7
fnmsub.h f0, f0, f0, f0 # 0400704b
fnmsub.h f13, f25, f3, f18 # 943cf6cb
fnmsub.h f31, f31, f31, f31 # fdffffcb
fnmadd.h f0, f0, f0, f0 # 0400704f
fnmadd.h f13, f25, f3, f18 # 943cf6cf
fnmadd.h f31, f31, f31, f31 # fdffffcf
fadd.h f0, f0, f0 # 04007053
fadd.h f13, f26, f3 # 043d76d3
fadd.h f31, f31, f31 # 05ffffd3
fsub.h f0, f0, f0 # 0c007053
fsub.h f13, f26, f3 # 0c3d76d3
fsub.h f31, f31, f31 # 0dffffd3
fmul.h f0, f0, f0 # 14007053
fmul.h f13, f26, f3 # 143d76d3
fmul.h f31, f31, f31 # 15ffffd3
fdiv.h f0, f0, f0 # 1c007053
fdiv.h f13, f26, f3 # 1c3d76d3
fdiv.h f31, f31, f31 # 1dffffd3
fsqrt.h f0, f0 # 5c007053
fsqrt.h f13, f26 # 5c0d76d3
fsqrt.h f31, f31 # 5c0fffd3
fsgnj.h f0, f0, f0 # 24000053
fsgnj.h f13, f26, f3 # 243d06d3
fsgnj.h f31, f31, f31 # 25ff8fd3
fsgnjn.h f0, f0, f0 # 24001053
fsgnjn.h f13, f26, f3 # 243d16d3
fsgnjn.h f31, f31, f31 # 25ff9fd3
fsgnjx.h f0, f0, f0 # 24002053
fsgnjx.h f13, f26, f3 # 243d26d3
fsgnjx.h f31, f31, f31 # 25ffafd3
fmin.h f0, f0, f0 # 2c000053
fmin.h f13, f26, f3 # 2c3d06d3
fmin.h f31, f31, f31 # 2dff8fd3
fmax.h f0, f0, f0 # 2c001053
fmax.h f13, f26, f3 # 2c3d16d3
fmax.h f31, f31, f31 # 2dff9fd3
fcvt.s.h f0, f0 # 40200053
fcvt.s.h f12, f23 # 402b8653
fcvt.s.h f31, f31 # 402f8fd3
fcvt.h.s f0, f0 # 44007053
fcvt.h.s f12, f23 # 440bf653
fcvt.h.s f31, f31 # 440fffd3
fcvt.d.h f0, f0 # 42200053
fcvt.d.h f12, f23 # 422b8653
fcvt.d.h f31, f31 # 422f8fd3
fcvt.h.d f0, f0 # 44107053
fcvt.h.d f12, f23 # 441bf653
fcvt.h.d f31, f31 # 441fffd3
fcvt.q.h f0, f0 # 46200053
fcvt.q.h f12, f23 # 462b8653
fcvt.q.h f31, f31 # 462fffd3
fcvt.h.q f0, f0 # 44300053
fcvt.h.q f12, f23 # 443b8653
fcvt.h.q f31, f31 # 443f8fd3
feq.h x0, f0, f0 # a4002053
feq.h x13, f26, f3 # a43d26d3
feq.h x31, f31, f31 # a5ffafd3
flt.h x0, f0, f0 # a4001053
flt.h x13, f26, f3 # a43d16d3
flt.h x31, f31, f31 # a5ff9fd3
fle.h x0, f0, f0 # a4000053
fle.h x13, f26, f3 # a43d06d3
fle.h x31, f31, f31 # a5ff8fd3
fclass.h x0, f0 # e4001053
fclass.h x12, f23 # e40b9653
fclass.h x31, f31 # e40f9fd3
fcvt.w.h x0, f0 # c4007053
fcvt.w.h x12, f23 # c40bf653
fcvt.w.h x31, f31 # c40fffd3
fcvt.wu.h x0, f0 # c4107053
fcvt.wu.h x12, f23 # c41bf653
fcvt.wu.h x31, f31 # c41fffd3
fmv.x.h x0, f0 # e4000053
fmv.x.h x12, f23 # e40b8653
fmv.x.h x31, f31 # e40f8fd3
fcvt.h.w f0, x0 # d4007053
fcvt.h.w f12, x23 # d40bf653
fcvt.h.w f31, x31 # d40fffd3
fcvt.h.wu f0, x0 # d4107053
fcvt.h.wu f12, x23 # d41bf653
fcvt.h.wu f31, x31 # d41fffd3
fmv.h.x f0, x0 # f4000053
fmv.h.x f12, x23 # f40b8653
fmv.h.x f31, x31 # f40f8fd3
|
sigongzi/riscv-isa-vector | 3,577 | tests/corpus/rv32q.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
flq f0, 0(x0) # 00004007
flq f13, -45(x7) # fd33c687
flq f4, 612(x23) # 264bc207
flq f15, 2047(x12) # 7ff64787
flq f25, -2048(x16) # 80084c87
flq f31, -1(x31) # ffffcf87
fsq f0, 0(x0) # 00004027
fsq f13, -45(x7) # fcd3c9a7
fsq f4, 612(x23) # 264bc227
fsq f15, 2047(x12) # 7ef64fa7
fsq f25, -2048(x16) # 81984027
fsq f31, -1(x31) # ffffcfa7
fmadd.q f0, f0, f0, f0 # 06007043
fmadd.q f13, f25, f3, f18 # 963cf6c3
fmadd.q f31, f31, f31, f31 # ffffffc3
fmsub.q f0, f0, f0, f0 # 06007047
fmsub.q f13, f25, f3, f18 # 963cf6c7
fmsub.q f31, f31, f31, f31 # ffffffc7
fnmsub.q f0, f0, f0, f0 # 0600704b
fnmsub.q f13, f25, f3, f18 # 963cf6cb
fnmsub.q f31, f31, f31, f31 # ffffffcb
fnmadd.q f0, f0, f0, f0 # 0600704f
fnmadd.q f13, f25, f3, f18 # 963cf6cf
fnmadd.q f31, f31, f31, f31 # ffffffcf
fadd.q f0, f0, f0 # 06007053
fadd.q f13, f26, f3 # 063d76d3
fadd.q f31, f31, f31 # 07ffffd3
fsub.q f0, f0, f0 # 0e007053
fsub.q f13, f26, f3 # 0e3d76d3
fsub.q f31, f31, f31 # 0fffffd3
fmul.q f0, f0, f0 # 16007053
fmul.q f13, f26, f3 # 163d76d3
fmul.q f31, f31, f31 # 17ffffd3
fdiv.q f0, f0, f0 # 1e007053
fdiv.q f13, f26, f3 # 1e3d76d3
fdiv.q f31, f31, f31 # 1fffffd3
fsqrt.q f0, f0 # 5e007053
fsqrt.q f13, f26 # 5e0d76d3
fsqrt.q f31, f31 # 5e0fffd3
fsgnj.q f0, f0, f0 # 26000053
fsgnj.q f13, f26, f3 # 263d06d3
fsgnj.q f31, f31, f31 # 27ff8fd3
fsgnjn.q f0, f0, f0 # 26001053
fsgnjn.q f13, f26, f3 # 263d16d3
fsgnjn.q f31, f31, f31 # 27ff9fd3
fsgnjx.q f0, f0, f0 # 26002053
fsgnjx.q f13, f26, f3 # 263d26d3
fsgnjx.q f31, f31, f31 # 27ffafd3
fmin.q f0, f0, f0 # 2e000053
fmin.q f13, f26, f3 # 2e3d06d3
fmin.q f31, f31, f31 # 2fff8fd3
fmax.q f0, f0, f0 # 2e001053
fmax.q f13, f26, f3 # 2e3d16d3
fmax.q f31, f31, f31 # 2fff9fd3
fcvt.s.q f0, f0 # 40307053
fcvt.s.q f12, f23 # 403bf653
fcvt.s.q f31, f31 # 403fffd3
fcvt.q.s f0, f0 # 46007053
fcvt.q.s f12, f23 # 460bf653
fcvt.q.s f31, f31 # 460fffd3
fcvt.d.q f0, f0 # 42307053
fcvt.d.q f12, f23 # 423bf653
fcvt.d.q f31, f31 # 423fffd3
fcvt.q.d f0, f0 # 46107053
fcvt.q.d f12, f23 # 461bf653
fcvt.q.d f31, f31 # 461fffd3
feq.q x0, f0, f0 # a6002053
feq.q x13, f26, f3 # a63d26d3
feq.q x31, f31, f31 # a7ffafd3
flt.q x0, f0, f0 # a6001053
flt.q x13, f26, f3 # a63d16d3
flt.q x31, f31, f31 # a7ff9fd3
fle.q x0, f0, f0 # a6000053
fle.q x13, f26, f3 # a63d06d3
fle.q x31, f31, f31 # a7ff8fd3
fclass.q x0, f0 # e6001053
fclass.q x12, f23 # e60b9653
fclass.q x31, f31 # e60f9fd3
fcvt.w.q x0, f0 # c6007053
fcvt.w.q x12, f23 # c60bf653
fcvt.w.q x31, f31 # c60fffd3
fcvt.wu.q x0, f0 # c6107053
fcvt.wu.q x12, f23 # c61bf653
fcvt.wu.q x31, f31 # c61fffd3
fcvt.q.w f0, x0 # d6007053
fcvt.q.w f12, x23 # d60bf653
fcvt.q.w f31, x31 # d60fffd3
fcvt.q.wu f0, x0 # d6107053
fcvt.q.wu f12, x23 # d61bf653
fcvt.q.wu f31, x31 # d61fffd3
|
sigongzi/riscv-isa-vector | 1,687 | tests/corpus/rv32zbb.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
andn x0, x0, x0 # 40007033
andn x3, x14, x27 # 41b771b3
andn x31, x31, x31 # 41ffffb3
orn x0, x0, x0 # 40006033
orn x3, x14, x27 # 41b761b3
orn x31, x31, x31 # 41ffefb3
xnor x0, x0, x0 # 40004033
xnor x3, x14, x27 # 41b741b3
xnor x31, x31, x31 # 41ffcfb3
clz x0, x0 # 60001013
clz x3, x26 # 600d1193
clz x31, x31 # 600f9f93
ctz x0, x0 # 60101013
ctz x3, x26 # 601d1193
ctz x31, x31 # 601f9f93
cpop x0, x0 # 60201013
cpop x3, x26 # 602d1193
cpop x31, x31 # 602f9f93
max x0, x0, x0 # 0a006033
max x3, x14, x27 # 0bb761b3
max x31, x31, x31 # 0bffefb3
maxu x0, x0, x0 # 0a007033
maxu x3, x14, x27 # 0bb771b3
maxu x31, x31, x31 # 0bffffb3
min x0, x0, x0 # 0a004033
min x3, x14, x27 # 0bb741b3
min x31, x31, x31 # 0bffcfb3
minu x0, x0, x0 # 0a005033
minu x3, x14, x27 # 0bb751b3
minu x31, x31, x31 # 0bffdfb3
sext.b x0, x0 # 60401013
sext.b x3, x26 # 604d1193
sext.b x31, x31 # 604f9f93
sext.h x0, x0 # 60501013
sext.h x3, x26 # 605d1193
sext.h x31, x31 # 605f9f93
zext.h x0, x0 # 08004033
zext.h x3, x26 # 080d41b3
zext.h x31, x31 # 080fcfb3
rol x0, x0, x0 # 60001033
rol x3, x14, x27 # 61b711b3
rol x31, x31, x31 # 61ff9fb3
ror x0, x0, x0 # 60005033
ror x3, x14, x27 # 61b751b3
ror x31, x31, x31 # 61ffdfb3
rori x0, x0, 0 # 60005013
rori x3, x14, 27 # 61b75193
rori x31, x31, 31 # 61ffdf93
orc.b x0, x0 # 28705013
orc.b x3, x26 # 287d5193
orc.b x31, x31 # 287fdf93
rev8 x0, x0 # 69805013
rev8 x3, x26 # 698d5193
rev8 x31, x31 # 698fdf93
|
sigongzi/riscv-isa-vector | 1,288 | tests/corpus/rv64a.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
lr.d x0, (x0) # 1000302f
lr.d x4, (x17) # 1008b22f
lr.d x31, (x31) # 100fbfaf
sc.d x0, x0, (x0) # 1800302f
sc.d x4, x13, (x27) # 18ddb22f
sc.d x31, x31, (x31) # 19ffbfaf
amoswap.d x0, x0, (x0) # 0800302f
amoswap.d x4, x13, (x27) # 08ddb22f
amoswap.d x31, x31, (x31) # 09ffbfaf
amoadd.d x0, x0, (x0) # 0000302f
amoadd.d x4, x13, (x27) # 00ddb22f
amoadd.d x31, x31, (x31) # 01ffbfaf
amoxor.d x0, x0, (x0) # 2000302f
amoxor.d x4, x13, (x27) # 20ddb22f
amoxor.d x31, x31, (x31) # 21ffbfaf
amoand.d x0, x0, (x0) # 6000302f
amoand.d x4, x13, (x27) # 60ddb22f
amoand.d x31, x31, (x31) # 61ffbfaf
amoor.d x0, x0, (x0) # 4000302f
amoor.d x4, x13, (x27) # 40ddb22f
amoor.d x31, x31, (x31) # 41ffbfaf
amomin.d x0, x0, (x0) # 8000302f
amomin.d x4, x13, (x27) # 80ddb22f
amomin.d x31, x31, (x31) # 81ffbfaf
amomax.d x0, x0, (x0) # a000302f
amomax.d x4, x13, (x27) # a0ddb22f
amomax.d x31, x31, (x31) # a1ffbfaf
amominu.d x0, x0, (x0) # c000302f
amominu.d x4, x13, (x27) # c0ddb22f
amominu.d x31, x31, (x31) # c1ffbfaf
amomaxu.d x0, x0, (x0) # e000302f
amomaxu.d x4, x13, (x27) # e0ddb22f
amomaxu.d x31, x31, (x31) # e1ffbfaf
|
Simon4290/gardenner | 1,600 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
__restore:
# case1: start running app by __restore
# case2: back to U after handling trap
# mv sp, a0
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
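The `__alltraps` prologue above reserves a 34*8-byte frame: slots 0-31 for the general-purpose registers (with the user sp stored into slot 2 and tp/slot 4 skipped), slot 32 for sstatus, and slot 33 for sepc. A minimal sketch of the Rust-side `TrapContext` this layout implies, assuming `#[repr(C)]` and plain `usize` fields (names and types here are illustrative, not taken from the source):

```rust
// Hypothetical struct matching the 34*8-byte frame built by __alltraps.
#[repr(C)]
pub struct TrapContext {
    pub x: [usize; 32], // x0..x31; x[2] holds the saved user sp, x[0]/x[4] are unused
    pub sstatus: usize, // saved sstatus CSR, offset 32*8
    pub sepc: usize,    // saved sepc CSR, offset 33*8
}
```

With this layout, the `mv a0, sp` before `call trap_handler` passes the frame itself as the `&mut TrapContext` argument mentioned in the comments.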
sigongzi/riscv-isa-vector | 3,107 | tests/corpus/rv32c.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
c.lwsp x1, 0(x2) # 4082
c.lwsp x9, 44(x2) # 54b2
c.lwsp x31, 128(x2) # 4f8a
c.swsp x1, 0(x2) # c006
c.swsp x9, 44(x2) # d626
c.swsp x31, 128(x2) # c17e
c.lw x8, 0(x8) # 4000
c.lw x10, 36(x13) # 52c8
c.lw x15, 64(x15) # 43bc
c.sw x8, 0(x8) # c000
c.sw x10, 36(x13) # d2c8
c.sw x15, 64(x15) # c3bc
c.addi x1, 1 # 0085
c.addi x13, 27 # 06ed
c.addi x31, 31 # 0ffd
c.addi x31, -32 # 1f81
c.j 0 # a001
c.j 484 # a2d5
c.j -486 # bd29
c.j 2046 # affd
c.j -2048 # b001
c.jal 0 # 2001
c.jal 484 # 22d5
c.jal -486 # 3d29
c.jal 2046 # 2ffd
c.jal -2048 # 3001
c.jr x1 # 8082
c.jr x27 # 8d82
c.jr x31 # 8f82
c.jalr x1 # 9082
c.jalr x27 # 9d82
c.jalr x31 # 9f82
c.beqz x8, 0 # c001
c.beqz x9, 42 # c48d
c.beqz x10, -44 # d971
c.beqz x15, 254 # cffd
c.beqz x15, -256 # d381
c.bnez x8, 0 # e001
c.bnez x9, 42 # e48d
c.bnez x10, -44 # f971
c.bnez x15, 254 # effd
c.bnez x15, -256 # f381
c.li x1, 0 # 4081
c.li x13, 27 # 46ed
c.li x17, -27 # 5895
c.li x31, 31 # 4ffd
c.li x31, -32 # 5f81
c.lui x1, 1 # 6085
c.lui x13, 27 # 66ed
c.lui x31, 31 # 6ffd
c.lui x31, 1048575 # 7ffd
c.addi x1, 0 # 0081
c.addi x13, 13 # 06b5
c.addi x17, -13 # 18cd
c.addi x31, 31 # 0ffd
c.addi x31, -32 # 1f81
c.addi16sp x2, 16 # 6141
c.addi16sp x2, 48 # 6145
c.addi16sp x2, -512 # 7101
c.addi16sp x2, 496 # 617d
c.addi4spn x8, x2, 4 # 0040
c.addi4spn x12, x2, 248 # 19b0
c.addi4spn x15, x2, 1020 # 1ffc
c.slli x1, 1 # 0086
c.slli x13, 27 # 06ee
c.slli x31, 31 # 0ffe
c.srli x8, 1 # 8005
c.srli x9, 16 # 80c1
c.srli x15, 31 # 83fd
c.srai x8, 1 # 8405
c.srai x9, 16 # 84c1
c.srai x15, 31 # 87fd
c.andi x8, 0 # 8801
c.andi x10, 5 # 8915
c.andi x12, -5 # 9a6d
c.andi x15, 31 # 8bfd
c.andi x15, -32 # 9b81
c.mv x1, x1 # 8086
c.mv x5, x17 # 82c6
c.mv x31, x31 # 8ffe
c.add x1, x1 # 9086
c.add x5, x17 # 92c6
c.add x31, x31 # 9ffe
c.or x8, x8 # 8c41
c.or x12, x13 # 8e55
c.or x15, x15 # 8fdd
c.xor x8, x8 # 8c21
c.xor x12, x13 # 8e35
c.xor x15, x15 # 8fbd
c.sub x8, x8 # 8c01
c.sub x12, x13 # 8e15
c.sub x15, x15 # 8f9d
unimp # 0000
c.nop # 0001
c.ebreak # 9002
|
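The compressed (RVC) entries above are 16-bit encodings. As with the 32-bit corpus, they can be cross-checked by hand; a minimal sketch for the CI-format `c.addi` lines (the helper name is illustrative, not part of the corpus):

```rust
// CI layout for c.addi: funct3=000 | imm[5] | rd | imm[4:0] | op=01.
fn encode_c_addi(rd: u32, imm: i32) -> u16 {
    let imm = (imm & 0x3f) as u32; // 6-bit two's-complement immediate
    (((imm >> 5) << 12) | (rd << 7) | ((imm & 0x1f) << 2) | 0b01) as u16
}

fn main() {
    assert_eq!(encode_c_addi(13, 27), 0x06ed);  // c.addi x13, 27
    assert_eq!(encode_c_addi(31, -32), 0x1f81); // c.addi x31, -32
}
```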
sigongzi/riscv-isa-vector | 1,288 | tests/corpus/rv32a.s | # Copyright James Wainwright
#
# SPDX-License-Identifier: MPL-2.0
lr.w x0, (x0) # 1000202f
lr.w x4, (x17) # 1008a22f
lr.w x31, (x31) # 100fafaf
sc.w x0, x0, (x0) # 1800202f
sc.w x4, x13, (x27) # 18dda22f
sc.w x31, x31, (x31) # 19ffafaf
amoswap.w x0, x0, (x0) # 0800202f
amoswap.w x4, x13, (x27) # 08dda22f
amoswap.w x31, x31, (x31) # 09ffafaf
amoadd.w x0, x0, (x0) # 0000202f
amoadd.w x4, x13, (x27) # 00dda22f
amoadd.w x31, x31, (x31) # 01ffafaf
amoxor.w x0, x0, (x0) # 2000202f
amoxor.w x4, x13, (x27) # 20dda22f
amoxor.w x31, x31, (x31) # 21ffafaf
amoand.w x0, x0, (x0) # 6000202f
amoand.w x4, x13, (x27) # 60dda22f
amoand.w x31, x31, (x31) # 61ffafaf
amoor.w x0, x0, (x0) # 4000202f
amoor.w x4, x13, (x27) # 40dda22f
amoor.w x31, x31, (x31) # 41ffafaf
amomin.w x0, x0, (x0) # 8000202f
amomin.w x4, x13, (x27) # 80dda22f
amomin.w x31, x31, (x31) # 81ffafaf
amomax.w x0, x0, (x0) # a000202f
amomax.w x4, x13, (x27) # a0dda22f
amomax.w x31, x31, (x31) # a1ffafaf
amominu.w x0, x0, (x0) # c000202f
amominu.w x4, x13, (x27) # c0dda22f
amominu.w x31, x31, (x31) # c1ffafaf
amomaxu.w x0, x0, (x0) # e000202f
amomaxu.w x4, x13, (x27) # e0dda22f
amomaxu.w x31, x31, (x31) # e1ffafaf
|
skanehira/rust-greenthread | 4,445 | src/arch/aarch64/switch.s | // ============================================================================
// AArch64 (ARM64) context-switch implementation
// ============================================================================
//
// This file implements the low-level assembly that switches the CPU's
// execution state between coroutines.
//
// # What is a context switch?
//
// It is the act of saving one program execution state (registers, stack
// pointer, and so on) and switching to another. This is what makes multiple
// coroutines appear to run at the same time.
//
// # AArch64 registers
//
// AArch64 has 31 general-purpose registers (x0-x30) plus the stack pointer (SP):
// - x0-x7   : argument registers (function arguments and return values), caller-saved
// - x8      : indirect result register, caller-saved
// - x9-x15  : temporary registers, caller-saved
// - x16-x17 : intra-procedure-call scratch registers (IP0/IP1), caller-saved
// - x18     : platform register (reserved)
// - x19-x28 : general-purpose registers (callee-saved) <- must be saved
// - x29     : frame pointer (FP) <- must be saved
// - x30     : link register (LR) <- must be saved
// - SP      : stack pointer <- must be saved
//
// Caller-saved registers:
// - may be clobbered across a function call
// - the caller is responsible for preserving them if it needs their values
// - can be used freely inside switch_context (x0-x18)
//
// ============================================================================
// On macOS, C function names are prefixed with an underscore
.global _switch_context
// Align the function (.p2align 2 = a 4-byte boundary, the AArch64
// instruction-alignment requirement)
.p2align 2
// ============================================================================
// switch_context(from: *mut Context, to: *mut Context)
//
// Arguments:
// x0 = from: pointer to the slot where the current context will be saved
// x1 = to:   pointer to the context to switch to
//
// Flow:
// 1. Save the current register state into the memory 'from' points to
// 2. Restore the new register state from the memory 'to' points to
// 3. Begin executing the new context via the ret instruction
// ============================================================================
_switch_context:
// ========================================================================
// 1. Save the current context
// ========================================================================
// stp: Store Pair - store two registers to consecutive memory
// Form: stp reg1, reg2, [base, #offset]
// Save x19, x20 (Context struct offsets 0 and 8)
stp x19, x20, [x0, #0] // mem[x0+0] = x19, mem[x0+8] = x20
// Save x21, x22 (offsets 16, 24)
stp x21, x22, [x0, #16] // mem[x0+16] = x21, mem[x0+24] = x22
// Save x23, x24 (offsets 32, 40)
stp x23, x24, [x0, #32] // mem[x0+32] = x23, mem[x0+40] = x24
// Save x25, x26 (offsets 48, 56)
stp x25, x26, [x0, #48] // mem[x0+48] = x25, mem[x0+56] = x26
// Save x27, x28 (offsets 64, 72)
stp x27, x28, [x0, #64] // mem[x0+64] = x27, mem[x0+72] = x28
// Save x29 (FP) and x30 (LR) (offsets 80, 88)
stp x29, x30, [x0, #80] // mem[x0+80] = x29, mem[x0+88] = x30
// Save the stack pointer.
// SP is a special register and cannot be stored to memory directly with str.
// On AArch64, SP has the following constraints:
// 1. SP is a dedicated register, separate from the general-purpose
//    registers (x0-x30)
// 2. it cannot be used directly as the data operand of str/ldr
// 3. it can be used directly only in some arithmetic instructions
//    (e.g. add sp, sp, #16)
// It therefore has to be copied into a general-purpose register first.
//
// Why it is safe to use x2 here:
// - x2 is defined as a temporary register
// - in the AArch64 ABI, x0-x18 are caller-saved registers
// - this function does not need to preserve x2's original value
// - after the context switch the caller does not rely on x2 either,
//   since caller-saved registers carry no guarantees across the call
mov x2, sp // x2 = sp (mov supports copying SP into a general register)
str x2, [x0, #96] // mem[x0+96] = x2 (= sp)
// ========================================================================
// 2. Restore the new context
// ========================================================================
// ldp: Load Pair - load two registers from consecutive memory
// Form: ldp reg1, reg2, [base, #offset]
// Restore x19, x20
ldp x19, x20, [x1, #0] // x19 = mem[x1+0], x20 = mem[x1+8]
// Restore x21, x22
ldp x21, x22, [x1, #16] // x21 = mem[x1+16], x22 = mem[x1+24]
// Restore x23, x24
ldp x23, x24, [x1, #32] // x23 = mem[x1+32], x24 = mem[x1+40]
// Restore x25, x26
ldp x25, x26, [x1, #48] // x25 = mem[x1+48], x26 = mem[x1+56]
// Restore x27, x28
ldp x27, x28, [x1, #64] // x27 = mem[x1+64], x28 = mem[x1+72]
// Restore x29 (FP) and x30 (LR)
ldp x29, x30, [x1, #80] // x29 = mem[x1+80], x30 = mem[x1+88]
// Restore the stack pointer
ldr x2, [x1, #96] // x2 = mem[x1+96]
mov sp, x2 // sp = x2
// ========================================================================
// 3. Begin executing the new context
// ========================================================================
// ret: jump to the address held in x30 (LR).
// When the new context runs for the first time, x30 holds the address of
// its entry function, so execution starts there. When the context has run
// before, execution resumes right after its previous switch_context call.
ret |
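The offsets used by `switch_context` (x19/x20 at 0/8 through sp at 96) imply a 13-word, 104-byte context record on the Rust side. A minimal sketch, assuming `#[repr(C)]` layout; the struct and field names are illustrative, not taken from this repository:

```rust
// Hypothetical Context matching the offsets in switch.s.
#[repr(C)]
#[derive(Default)]
pub struct Context {
    pub x19_x28: [u64; 10], // callee-saved registers, offsets 0..=72
    pub fp: u64,            // x29, offset 80
    pub lr: u64,            // x30, offset 88; holds the entry point on first switch
    pub sp: u64,            // stack pointer, offset 96
}

extern "C" {
    // Defined in switch.s; the leading underscore is macOS symbol
    // mangling, so the linker-visible name here is switch_context.
    fn switch_context(from: *mut Context, to: *mut Context);
}
```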
skydder/mnasm | 3,924 | test/t18.s | @(
fn parse(tokenizer) {
let code = "";
let lhs = read_macro(tokenizer);
asm_skip_space(tokenizer);
let mid = asm_next_token(tokenizer);
match mid {
case "?" {
asm_skip_space(tokenizer);
let rhs = read_macro(tokenizer);
code += "cmp(" + lhs + ", " + rhs + ")";
}
case "=" {
asm_skip_space(tokenizer);
let rhs = read_macro(tokenizer);
code += "mov(" + lhs + ", " + rhs + ")";
}
case "+" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = read_macro(tokenizer);
code += "add(" + lhs + ", " + rhs + ")";
} else {
print("error1");
}
}
case "-" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = read_macro(tokenizer);
code += "sub(" + lhs + ", " + rhs + ")";
} else {
print("error");
}
}
case "|" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = read_macro(tokenizer);
code += "or(" + lhs + ", " + rhs + ")";
} else {
print("error");
}
}
}
eval(code);
}
fn read_macro(tokenizer) {
let code = "";
match asm_peek_token(tokenizer) {
case "`" {
asm_next_token(tokenizer);
code += "`";
}
case "@" {
asm_next_token(tokenizer);
code += "@";
}
}
let label = asm_parse(Operand, tokenizer);
if is_none(label) {
print("error");
} else {
code += label;
}
eval(code);
}
fn main() {
output += parse(asm_tokenizer(input));
print(output);
}
)
macro if(cond, then, else) {
`cond
!jne(.else)
`then
!jmp(.end)
<else>
`else
<end>
}
macro print(len, str,) {
@[rax = 1]
@[rdi = 1]
@[rsi = `str]
@[rdx = `len]
!syscall()
}
macro exit(code,) {
@[rax = 60]
@[rdi = `code]
!syscall()
}
macro divide1(a, b,) {
@[rax = `a]
cqo()
@[rdi = `b]
div(rdi)
}
macro divide2(a, b,) {
@divide1(`a)(`b)
}
macro divide3(a, b,) {
@divide2(`a)(`b)
}
macro divide(a, b,) {
@divide3(`a)(`b)
}
macro for(init, cond, inc, loop,) {
`init
<start>
`cond
!je(.end)
`loop
`inc
!jmp(.start)
<end>
#`cond
}
macro l(lhs, rhs,) {
cmp(`lhs, `rhs)
#!setl(al)
nasm("setl al")
movsx(rax, al)
cmp(rax, 0)
}
<fizz:global:.data> {
db("fizz")
db(10)#5
}
<buzz:global:.data> {
db("buzz")
db(10)#5
}
<fizzbuzz:global:.data> {
db("fizzbuzz")
db(10)#9
}
<num:global:.data> {
db("num")
db(10)#4
}
<_start:global:.text> {
let(counter, r8) #r8:counter
@for(@[@counter=1])(@l(@counter)(40))(@[@counter+=1]) {
let(is_mul3, r9)
@divide(@counter)(3)
@[@is_mul3 = rdx]
let(is_mul5, r10)
@divide(@counter)(5)
@[@is_mul5 = rdx]
let(is_mul15, r11)
@[@is_mul15 = rdx]
@[@is_mul15 |= @is_mul3]
@if (@[@is_mul15 ? 0]) {
@print(9)(fizzbuzz)
}{
@if (@[@is_mul5 ? 0]) {
@print(5)(buzz)
}{
@if (@[@is_mul3 ? 0]) {
@print(5)(fizz)
}{
@print(4)(num)
}
}
}
}
@exit(0)
}
|
skydder/mnasm | 2,947 | test/t17.s | @(
fn parse(tokenizer) {
let code = "";
if asm_peek_token(tokenizer) == "@" {
code += "@";
}
let lhs = asm_parse(Operand, tokenizer);
print("lhs", lhs);
asm_skip_space(tokenizer);
let mid = asm_next_token(tokenizer);
print(mid);
if mid == "=" {
asm_skip_space(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
if asm_peek_token(tokenizer) == "`" {
asm_next_token(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
print("erroedfa");
} else {
code += "mov(" + lhs + ", `" + rhs + ")";
}
} else {
print("error");
}
} else {
code += "mov(" + lhs + ", " + rhs + ")";
}
} else if mid == "+" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
if asm_peek_token(tokenizer) == "`" {
asm_next_token(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
print("erroedfa");
} else {
code += "mov(" + lhs + ", `" + rhs + ")";
}
} else {
print("error");
}
} else {
code += "add(" + lhs + ", " + rhs + ")";
}
} else {
print("error1");
}
} else if mid == "-" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
if asm_peek_token(tokenizer) == "`" {
asm_next_token(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
print("erroedfa");
} else {
code += "mov(" + lhs + ", `" + rhs + ")";
}
} else {
print("error");
}
} else {
code += "sub(" + lhs + ", " + rhs + ")";
}
} else {
print("error");
}
} else {
print("error?");
}
eval(code);
}
fn main() {
output += parse(asm_tokenizer(input));
print(output);
}
)
<_start:global:.text> {
@[rax += 200]
@[rax = 200]
@[rax -= 300]
} |
skydder/mnasm | 1,075 | test/t15.s | let(a, rax)
let(b, helloworld)
macro if(cond, then, else,) {
`cond
jne(else)
`then
jmp(end)
<else>
`else
<end>
}
<helloworld:.data> {
db("Hello world!", 10)
}
<_start:global:.text> {
@(fn test(x) {
print("testT", x , 100);
}
fn test_while(in) {
let i = 0;
while i < len(in) {
print(i, in[i]);
i += 1;
}
let t = ["test", in, 0, 3, i];
print(t);
t = "test";
}
fn main() {
let sys = "!syscall()";
let mov = "!move()";
let tes = "!test()";
test(mov);
test_while(input);
if input == "syscall" {
output += mov;
} else if input == "move" {
output += sys;
} else {
print(input);
output += tes;
}
print(output);
}
)
mov(@a, 1);mov(rdi, 1);
mov(rsi, @b)
mov(rdx, 14)
@[syscall]
@[move]
@[adsfasd]
mov(rax, 60)
mov(rdi, 0)
!syscall()
#@if(mov(a, 1))(mov(a, 1))(mov(a, 1))
} |
skydder/mnasm | 1,134 | test/t16.s | let(a, rax)
let(b, helloworld)
macro if(cond, then, else,) {
`cond
jne(else)
`then
jmp(end)
<else>
`else
<end>
}
<helloworld:.data> {
db("Hello world!", 10)
}
<_start:global:.text> {
@(
fn tokenize(in) {
let i = 0;
let lis = [];
while i < len(in) {
if in[i] == " " {
i += 1;
} else if is_digit(in[i]) {
let num = 0;
while i < len(in) && is_digit(in[i]) {
print(i, in[i]);
num *= 10;
num += get_digit(in[i]);
i += 1;
}
lis += num;
}
}
eval(lis);
}
fn tokenize2(in) {
let tokenizer = asm_tokenizer(in);
print(asm_parse(Operand, tokenizer));
print(asm_next_token(tokenizer));
print(asm_next_token(tokenizer));
}
fn main() {
tokenize2(input);
}
)
@[rax 200 23 32]
} |
skydder/mnasm | 1,213 | test/t12.s | macro if(cond, then, else,) {
`cond
jne!(.else)
`then
jmp!(.end)
<else>
`else
<end>
}
<fizz:global:.data> {
db("fizz")
db(10)#5
}
<buzz:global:.data> {
db("buzz")
db(10)#5
}
macro print(len, str,) {
@[rax = 1]
@[rdi = 1]
@[rsi = `str]
@[rdx = `len]
syscall!()
}
macro exit(code,) {
@[rax = 60]
@[rdi = `code]
syscall!()
}
macro divide1(a, b,) {
@[rax = `a]
cqo()
@[rdi = `b]
div(rdi)
}
macro divide2(a, b,) {
@divide1(`a)(`b)
}
macro divide3(a, b,) {
@divide2(`a)(`b)
}
macro divide(a, b,) {
@divide3(`a)(`b)
}
macro for(init, cond, inc, loop,) {
`init
<start>
`cond
je!(.end)
`loop
`inc
jmp!(.start)
<end>
#`cond
}
macro l(lhs, rhs,) {
cmp(`lhs, `rhs)
setl!(al)
movsx(rax, al)
cmp(rax, 0)
}
<_start:global:.text> {
let(counter, r8) #r8:counter
@for(@[counter=1])(@l(counter)(15))(@[counter+=1]) {
@divide(counter)(3)
@if (cmp(rdx, 0)) {
@print(5)(fizz)
} {
@divide(counter)(5)
@if (cmp(rdx, 0)) {
@print(5)(buzz)
}()
}
}
@exit(0)
}
|
skydder/mnasm | 1,092 | test/t14.s | # from https://en.wikibooks.org/wiki/X86_Assembly/SSE#Arithmetic_example_using_packed_singles
# this is a test for using instructions mnasm does not support, via raw nasm pseudo-instructions
<v1:global:.data> {
!dd("1.1", "2.2", "3.3", "4.4")
}
<v2:global:.data> {
!dd("5.5", "6.6", "7.7", "8.8")
}
<v3:global:.bss> {
!resd(4)
}
<_start:global:.text>{
!movups("xmm0", "[v1]")
!movups("xmm1", "[v2]") ;#load v2 into xmm1
!addps("xmm0", "xmm1") ;#add the 4 numbers in xmm1 (from v2) to the 4 numbers in xmm0 (from v1), store in xmm0. for the first float the result will be 5.5+1.1=6.6
!mulps("xmm0", "xmm1") ;#multiply the four numbers in xmm1 (from v2, unchanged) with the results from the previous calculation (in xmm0), store in xmm0. for the first float the result will be 5.5*6.6=36.3
!subps("xmm0", "xmm1") ;#subtract the four numbers in v2 (in xmm1, still unchanged) from result from previous calculation (in xmm1). for the first float, the result will be 36.3-5.5=30.8
!movups("[v3]", "xmm0") ;#store v1 in v3
#;end program
!ret()
} |
skydder/mnasm | 4,741 | test/t13.s | @(
fn parse(tokenizer) {
let code = "";
let m = "";
if asm_peek_token(tokenizer) == "@" {
m += "@";
asm_next_token(tokenizer);
}
let lhs = m + asm_parse(Operand, tokenizer);
print(lhs);
asm_skip_space(tokenizer);
let mid = asm_next_token(tokenizer);
print(mid);
if mid == "=" {
asm_skip_space(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
print(3, rhs);
if is_none(rhs) {
if asm_peek_token(tokenizer) == "`" {
print(1);
asm_next_token(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
print("erroedfa");
} else {
code += "mov(" + lhs + ", `" + rhs + ")";
}
} else {
print("error");
}
} else {
print(2);
code += "mov(" + lhs + ", " + rhs + ")";
}
} else if mid == "+" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
if asm_peek_token(tokenizer) == "`" {
asm_next_token(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
print("erroedfa");
} else {
code += "mov(" + lhs + ", `" + rhs + ")";
}
} else {
print("error");
}
} else {
code += "add(" + lhs + ", " + rhs + ")";
}
} else {
print("error1");
}
} else if mid == "-" {
if asm_next_token(tokenizer) == "=" {
asm_skip_space(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
if asm_peek_token(tokenizer) == "`" {
asm_next_token(tokenizer);
let rhs = asm_parse(Operand, tokenizer);
if is_none(rhs) {
print("erroedfa");
} else {
code += "mov(" + lhs + ", `" + rhs + ")";
}
} else {
print("error");
}
} else {
code += "sub(" + lhs + ", " + rhs + ")";
}
} else {
print("error");
}
} else {
print("error?");
}
eval(code);
}
fn main() {
output += parse(asm_tokenizer(input));
print(output);
}
)
macro if(cond, then, else) {
`cond
!jne(.else)
`then
!jmp(.end)
<else>
`else
<end>
}
macro print(len, str,) {
@[rax = 1]
@[rdi = 1]
@[rsi = `str]
@[rdx = `len]
!syscall()
}
macro exit(code,) {
@[rax = 60]
@[rdi = `code]
!syscall()
}
macro divide1(a, b,) {
@[rax = `a]
cqo()
@[rdi = `b]
div(rdi)
}
macro divide2(a, b,) {
@divide1(`a)(`b)
}
macro divide3(a, b,) {
@divide2(`a)(`b)
}
macro divide(a, b,) {
@divide3(`a)(`b)
}
macro for(init, cond, inc, loop,) {
`init
<start>
`cond
!je(.end)
`loop
`inc
!jmp(.start)
<end>
#`cond
}
macro l(lhs, rhs,) {
cmp(`lhs, `rhs)
#!setl(al)
nasm("setl al")
movsx(rax, al)
cmp(rax, 0)
}
<fizz:global:.data> {
db("fizz")
db(10)#5
}
<buzz:global:.data> {
db("buzz")
db(10)#5
}
<fizzbuzz:global:.data> {
db("fizzbuzz")
db(10)#9
}
<num:global:.data> {
db("num")
db(10)#4
}
<_start:global:.text> {
let(counter, r8) #r8:counter
@for(@[@counter=1])(@l(@counter)(40))(@[@counter+=1]) {
let(is_mul3, r9)
let(is_mul5, r10)
let(is_mul15, r11)
@divide(@counter)(3)
@[@is_mul3 = rdx]
@divide(@counter)(5)
@[@is_mul5 = rdx]
@[@is_mul15 = rdx]
or(@is_mul15, @is_mul3)
@if (cmp(@is_mul15, 0)) {
@print(9)(fizzbuzz)
}{
@if (cmp(@is_mul5, 0)) {
@print(5)(buzz)
}{
@if (cmp(@is_mul3, 0)) {
@print(5)(fizz)
}{
@print(4)(num)
}
}
}
}
@exit(0)
}
|
skydder/mnasm | 1,072 | test/t11.s | macro if(cond, then, else,) {
`cond
jne!(.else)
`then
jmp!(.end)
<else>
`else
<end>
}
<fizz:global:.data> {
db("fizz")
db(10)#5
}
<buzz:global:.data> {
db("buzz")
db(10)#5
}
macro print(len, str,) {
@[rax = 1]
@[rdi = 1]
@[rsi = `str]
@[rdx = `len]
syscall!()
}
macro exit(code,) {
@[rax = 60]
@[rdi = `code]
syscall!()
}
macro divide(a, b,) {
@[rax = `a]
cqo()
@[rdi = `b]
div(rdi)
}
macro for(init, cond, inc, loop,) {
`init
<start>
`cond
je!(.end)
`loop
`inc
jmp!(.start)
<end>
#`cond
}
macro l(lhs, rhs,) {
cmp(`lhs, `rhs)
setl!(al)
movsx(rax, al)
cmp(rax, 0)
}
<_start:global:.text> {
let(counter, r8) #r8:counter
@for(@[counter=1])(@l(counter)(15))(@[counter+=1]) {
@divide(counter)(3)
@if (cmp(rdx, 0)) {
@print(5)(fizz)
} {
@divide(counter)(5)
@if (cmp(rdx, 0)) {
@print(5)(buzz)
}()
}
}
@exit(0)
}
|
skydder/mnasm | 1,302 | idea/ex4_abs.s | <abs:global:text> {
# -------------------
# if (rdi < 0) {
# rdi = -rdi;
# }
# return rdi
# -------------------
cmp(rdi, 0)
jl(minus)
jmp(end)
<minus:_:_> {
neg(rdi)
}
<end:_:_>
mov(rax, rdi)
ret()
}
<abs2:global:text> {
# -------------------
# if (rdi < 0) {
# rdi = -rdi;
# }
# return rdi
# -------------------
{
cmp(rdi, 0), jl(minus), jmp(end)
<minus:_:_> {
neg(rdi)
}
<end:_:_>
}
mov(rax, rdi)
ret()
}
<abs3:global:text> {
# -------------------
# if (rdi < 0) {
# rdi = -rdi;
# }
# return rdi
# -------------------
{
cmp(rdi, 0)
jl(minus)
jmp(end)
<minus:_:_> {
neg(rdi)
}
<end:_:_>
}
mov(rax, rdi)
ret()
}
// macro-def (grammar not determined)
if ($0, $1, $(cc)) {
$(code)
}
=>
{
cmp($0, $1)
j$(cc) _if
jmp(else)
<_if:_:_> {
$(code)
}
<_else:_:>
}
// the macro above might be useful
<abs4:global:text> {
# -------------------
# if (rdi < 0) {
# rdi = -rdi;
# }
# return rdi
# -------------------
if! (rdi, 0, le) {
neg(rdi)
}!
mov(rax, rdi)
ret()
} |
skydder/mnasm | 2,672 | test/fizzbuzz/fizzbuzz.s | extern printf, assert_
section .data
global GL_L_L_3
GL_L_L_3:
db 110, 117, 109, 10, 0
section .data
global GL_L_L_2
GL_L_L_2:
db 102, 105, 122, 122, 10, 0
section .data
global GL_L_L_1
GL_L_L_1:
db 98, 117, 122, 122, 10, 0
section .data
global GL_L_L_0
GL_L_L_0:
db 102, 105, 122, 122, 98, 117, 122, 122, 10, 0
section .text
global main
main:
push rbp
mov rbp, rsp
sub rsp, 16
lea rax, [rbp - 16]
push rax
mov rax, 1
pop rdi
mov dword [rdi], eax
main__N_L_L_1__L_begin_1:
mov rax, 40
push rax
lea rax, [rbp - 16]
movsx rax, dword [rax]
pop rdi
cmp eax, edi
setl al
movsx rax, al
cmp rax, 0
je main__N_L_L_1__L_end_1
lea rax, [rbp - 12]
push rax
mov rax, 0
push rax
mov rax, 3
push rax
lea rax, [rbp - 16]
movsx rax, dword [rax]
pop rdi
cdq
idiv edi
mov eax, edx
pop rdi
cmp eax, edi
sete al
movsx rax, al
pop rdi
mov dword [rdi], eax
lea rax, [rbp - 8]
push rax
mov rax, 0
push rax
mov rax, 5
push rax
lea rax, [rbp - 16]
movsx rax, dword [rax]
pop rdi
cdq
idiv edi
mov eax, edx
pop rdi
cmp eax, edi
sete al
movsx rax, al
pop rdi
mov dword [rdi], eax
lea rax, [rbp - 4]
push rax
lea rax, [rbp - 8]
movsx rax, dword [rax]
push rax
lea rax, [rbp - 12]
movsx rax, dword [rax]
pop rdi
and eax, edi
pop rdi
mov dword [rdi], eax
lea rax, [rbp - 4]
movsx rax, dword [rax]
cmp rax, 0
je main__N_L_L_1__N_L_L_23__L_else_2
mov rax, GL_L_L_0
push rax
pop rdi
mov rax, 0
call printf
jmp main__N_L_L_1__N_L_L_23__L_end_2
main__N_L_L_1__N_L_L_23__L_else_2:
lea rax, [rbp - 8]
movsx rax, dword [rax]
cmp rax, 0
je main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_else_3
mov rax, GL_L_L_1
push rax
pop rdi
mov rax, 0
call printf
jmp main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_end_3
main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_else_3:
lea rax, [rbp - 12]
movsx rax, dword [rax]
cmp rax, 0
je main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_else_3__N_L_L_31__L_else_4
mov rax, GL_L_L_2
push rax
pop rdi
mov rax, 0
call printf
jmp main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_else_3__N_L_L_31__L_end_4
main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_else_3__N_L_L_31__L_else_4:
mov rax, GL_L_L_3
push rax
pop rdi
mov rax, 0
call printf
main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_else_3__N_L_L_31__L_end_4:
main__N_L_L_1__N_L_L_23__L_else_2__N_L_L_27__L_end_3:
main__N_L_L_1__N_L_L_23__L_end_2:
lea rax, [rbp - 16]
push rax
mov rax, 1
push rax
lea rax, [rbp - 16]
movsx rax, dword [rax]
pop rdi
add eax, edi
pop rdi
mov dword [rdi], eax
jmp main__N_L_L_1__L_begin_1
main__N_L_L_1__L_end_1:
main__L_return_main:
mov rsp, rbp
pop rbp
ret
extern printf |
skywong14/ACore2025 | 1,894 | os/src/trap/trap.s | # this file is from rCore-ch4
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
# 0xfffffffffffff000
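# TrapContext layout below (8-byte slots): x0..x31 at offsets 0..31,
# sstatus = 32, sepc = 33, kernel_satp = 34, kernel_sp = 35, trap_handler = 36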
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# allocate a TrapContext on kernel stack
#addi sp, sp, -34*8 ATTENTION we don't need to move sp
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
# 0xfffffffffffff062
__restore:
# @para a0: *TrapContext in user space(Constant)
# @para a1: user space satp_token
# switch to user space
csrw satp, a1
sfence.vma
    # save *TrapContext in sscratch (we'll use it in __alltraps)
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space; start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
Sober7135/rost | 4,227 | os/src/trap/trap.S | .altmacro
.macro SAVE n
sd x\n, \n * 8(sp)
.endm
.macro LOAD n
ld x\n, \n * 8(sp)
.endm
.section .text.trampoline
.global __alltraps
.global __restore
.align 2
__alltraps:
    #! Attention! In this trampoline design the trap context is stored in user space.
    # sscratch holds a pointer to the trap context; sp holds the user stack pointer.
    # swap user space pointer (originally in sscratch) and user stack pointer (originally in sp)
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # In ch4 we allocate a whole page frame for the trap context, and sp already points at it, so we don't need to decrement sp
SAVE 1 # x1 is ra
# skip x2(sp), stack pointer, store it later
SAVE 3 # x3 is gp (global pointer)
# skip x4(tp), thread pointer, user will not use it
.set n, 5
.rept 27
SAVE %n
.set n, n + 1
.endr
    # store the user stack pointer (currently in sscratch) into the trap context
csrr t0, sscratch
csrr t1, sstatus
csrr t2, sepc
sd t0, 2*8(sp)
sd t1, 32*8(sp)
sd t2, 33*8(sp)
# load kernel satp
ld t0, 34*8(sp)
# load trap_handler address
ld t1, 36*8(sp)
# load kernel sp
ld sp, 35*8(sp)
# switch kernel space
csrw satp, t0
sfence.vma
    // Why not simply `call trap_handler`?
    // TL;DR: because we execute this code at a virtual address.
    // AFAIK, the offset a `call` encodes is fixed at link time from the distance between the call site and trap_handler, and is added to pc at run time.
    // But __alltraps is mapped into the trampoline page, so at run time the pc holds a virtual address different from the one the linker assumed...
    // > The nature of the problem can be summarized as follows: the virtual address where a jump instruction is actually executed is different from the address where this instruction was set when performing backend code generation and linking in the compiler/assembler/linker, producing the final machine code.
jr t1
__restore:
# a0: *TrapContext in user space, a1: user satp
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# start restore
ld t0, 32*8(sp)
ld t1, 33*8(sp)
# ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
# csrw sscratch, t2
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD %n
.set n, n + 1
.endr
# let sp point to user stack
ld sp, 2*8(sp)
sret
# .altmacro
# .macro SAVE_GP n
# sd x\n, \n*8(sp)
# .endm
# .macro LOAD_GP n
# ld x\n, \n*8(sp)
# .endm
# .section .text.trampoline
# .globl __alltraps
# .globl __restore
# .align 2
# __alltraps:
# csrrw sp, sscratch, sp
# # now sp->*TrapContext in user space, sscratch->user stack
# # save other general purpose registers
# sd x1, 1*8(sp)
# # skip sp(x2), we will save it later
# sd x3, 3*8(sp)
# # skip tp(x4), application does not use it
# # save x5~x31
# .set n, 5
# .rept 27
# SAVE_GP %n
# .set n, n+1
# .endr
# # we can use t0/t1/t2 freely, because they have been saved in TrapContext
# csrr t0, sstatus
# csrr t1, sepc
# sd t0, 32*8(sp)
# sd t1, 33*8(sp)
# # read user stack from sscratch and save it in TrapContext
# csrr t2, sscratch
# sd t2, 2*8(sp)
# # load kernel_satp into t0
# ld t0, 34*8(sp)
# # load trap_handler into t1
# ld t1, 36*8(sp)
# # move to kernel_sp
# ld sp, 35*8(sp)
# # switch to kernel space
# csrw satp, t0
# sfence.vma
# # jump to trap_handler
# jr t1
# __restore:
# # a0: *TrapContext in user space(Constant); a1: user space token
# # switch to user space
# csrw satp, a1
# sfence.vma
# csrw sscratch, a0
# mv sp, a0
# # now sp points to TrapContext in user space, start restoring based on it
# # restore sstatus/sepc
# ld t0, 32*8(sp)
# ld t1, 33*8(sp)
# csrw sstatus, t0
# csrw sepc, t1
# # restore general purpose registers except x0/sp/tp
# ld x1, 1*8(sp)
# ld x3, 3*8(sp)
# .set n, 5
# .rept 27
# LOAD_GP %n
# .set n, n+1
# .endr
# # back to user stack
# ld sp, 2*8(sp)
# sret
|
Sober7135/rvos | 4,227 | os/src/trap/trap.S | .altmacro
.macro SAVE n
sd x\n, \n * 8(sp)
.endm
.macro LOAD n
ld x\n, \n * 8(sp)
.endm
.section .text.trampoline
.global __alltraps
.global __restore
.align 2
__alltraps:
    #! Attention! In this trampoline design the trap context is stored in user space.
    # sscratch holds a pointer to the trap context; sp holds the user stack pointer.
    # swap user space pointer (originally in sscratch) and user stack pointer (originally in sp)
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # In ch4 we allocate a whole page frame for the trap context, and sp already points at it, so we don't need to decrement sp
SAVE 1 # x1 is ra
# skip x2(sp), stack pointer, store it later
SAVE 3 # x3 is gp (global pointer)
# skip x4(tp), thread pointer, user will not use it
.set n, 5
.rept 27
SAVE %n
.set n, n + 1
.endr
    # store the user stack pointer (currently in sscratch) into the trap context
csrr t0, sscratch
csrr t1, sstatus
csrr t2, sepc
sd t0, 2*8(sp)
sd t1, 32*8(sp)
sd t2, 33*8(sp)
# load kernel satp
ld t0, 34*8(sp)
# load trap_handler address
ld t1, 36*8(sp)
# load kernel sp
ld sp, 35*8(sp)
# switch kernel space
csrw satp, t0
sfence.vma
    // Why not simply `call trap_handler`?
    // TL;DR: because we execute this code at a virtual address.
    // AFAIK, the offset a `call` encodes is fixed at link time from the distance between the call site and trap_handler, and is added to pc at run time.
    // But __alltraps is mapped into the trampoline page, so at run time the pc holds a virtual address different from the one the linker assumed...
    // > The nature of the problem can be summarized as follows: the virtual address where a jump instruction is actually executed is different from the address where this instruction was set when performing backend code generation and linking in the compiler/assembler/linker, producing the final machine code.
jr t1
__restore:
# a0: *TrapContext in user space, a1: user satp
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# start restore
ld t0, 32*8(sp)
ld t1, 33*8(sp)
# ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
# csrw sscratch, t2
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD %n
.set n, n + 1
.endr
# let sp point to user stack
ld sp, 2*8(sp)
sret
# .altmacro
# .macro SAVE_GP n
# sd x\n, \n*8(sp)
# .endm
# .macro LOAD_GP n
# ld x\n, \n*8(sp)
# .endm
# .section .text.trampoline
# .globl __alltraps
# .globl __restore
# .align 2
# __alltraps:
# csrrw sp, sscratch, sp
# # now sp->*TrapContext in user space, sscratch->user stack
# # save other general purpose registers
# sd x1, 1*8(sp)
# # skip sp(x2), we will save it later
# sd x3, 3*8(sp)
# # skip tp(x4), application does not use it
# # save x5~x31
# .set n, 5
# .rept 27
# SAVE_GP %n
# .set n, n+1
# .endr
# # we can use t0/t1/t2 freely, because they have been saved in TrapContext
# csrr t0, sstatus
# csrr t1, sepc
# sd t0, 32*8(sp)
# sd t1, 33*8(sp)
# # read user stack from sscratch and save it in TrapContext
# csrr t2, sscratch
# sd t2, 2*8(sp)
# # load kernel_satp into t0
# ld t0, 34*8(sp)
# # load trap_handler into t1
# ld t1, 36*8(sp)
# # move to kernel_sp
# ld sp, 35*8(sp)
# # switch to kernel space
# csrw satp, t0
# sfence.vma
# # jump to trap_handler
# jr t1
# __restore:
# # a0: *TrapContext in user space(Constant); a1: user space token
# # switch to user space
# csrw satp, a1
# sfence.vma
# csrw sscratch, a0
# mv sp, a0
# # now sp points to TrapContext in user space, start restoring based on it
# # restore sstatus/sepc
# ld t0, 32*8(sp)
# ld t1, 33*8(sp)
# csrw sstatus, t0
# csrw sepc, t1
# # restore general purpose registers except x0/sp/tp
# ld x1, 1*8(sp)
# ld x3, 3*8(sp)
# .set n, 5
# .rept 27
# LOAD_GP %n
# .set n, n+1
# .endr
# # back to user stack
# ld sp, 2*8(sp)
# sret
|
soc-hub-fi/headsail-tvm | 5,304 | src/runtime/hexagon/profiler/lwp_handler.S | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
Lightweight profiling handler to record processor cycles in a buffer
(pointed to by __lwp_buffer_ptr) for a given invocation of the handler. To keep the
buffer size within a reasonable limit, we only record data for the first 100
invocations of the handler for a given loop or function ID (passed in the R0 register).
The buffer size wouldn't be a concern if only loops with siblings were getting
profiled. However, since the instrumentation provides several different profiling
options, this approach ensures that they all function as expected. We use a second
buffer (pointed to by __lwp_counter) to keep count of the calls made to the
lwp_handler function for each function/loop.
Brief explanation of all the global variables used:
1) __lwp_counter : Pointer to the buffer that keeps count of the number of times the handler
is called for a given ID. To reduce the complexity of the handler, __lwp_counter is
indexed using the ID itself.
2) __lwp_buffer_ptr : Pointer to the buffer that records the loop/function ID, processor cycles
and return address of the handler. The return address is used to reconstruct the call graph
(loop-nest) to make it easier to analyze the profiling data.
3) __lwp_buffer_size : Size of the buffer
4) __lwp_buffer_count : Offset into main lwp buffer where data for the current handler
invocation needs to be written.
NOTE: The handler function saves and restores R0-R5 registers which are caller saved registers
on Hexagon and should be handled at the callsite. However, to reduce the codegen impact
of the handler calls on the caller functions, we decided to move this part into the
handler itself.
*/
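// Record layout per buffered invocation (four int32 words, matching the
// stores below): +0 return address (r31), +4 loop/function ID (r0),
// +8 cycle-count low word, +12 cycle-count high word.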
.text
.globl lwp_handler
.falign
.type lwp_handler,@function
lwp_handler:
{ allocframe(#24) // Allocate 24 bytes on the stack to save R0-R5 registers
memd(r29+#-16) = r5:4 // Save R5,R4
}
{
memd(r29+#8) = r3:2 // Save R3,R2
memd(r29+#0) = r1:0 // Save R1, R0
r2 = add(pc,##_GLOBAL_OFFSET_TABLE_@PCREL) // Get GOT address
}
{
r5 = memw(r2+##__lwp_counter@GOT) // Get address of the pointer to __lwp_counter
r3 = memw(r2+##__lwp_buffer_count@GOT) // Get the address of __lwp_buffer_count
}
{
r5 = memw(r5+#0) // Get the address of __lwp_counter (address of the main lwp buffer)
r3 = memw(r3+#0) // Get the __lwp_buffer_count value (offset into the main buffer)
}
{
r4 = memw(r5+r0<<#2) // Get the handler invocation count for the ID (passed in R0)
r1 = memw(r2+##__lwp_buffer_size@GOT) // Get the address of __lwp_buffer_size
}
{
r4 = add(r4,#1) // Increment count
memw(r5+r0<<#2) = r4.new // Update count in __lwp_counter for a given ID
r1 = memw(r1+#0) // Get the buffer size
}
{
p0 = cmp.gtu(r4,#100) // Exit if count for a given ID is greater than 100
if (p0.new) jump:nt .LBB0_3
r5 = memw(r2+##__lwp_buffer_ptr@GOT) // Get address of the pointer to __lwp_buffer_ptr
}
{
r5 = memw(r5+#0) // Get address of __lwp_buffer_ptr
r2 = memw(r2+##__lwp_buffer_count@GOT) // Get address of __lwp_buffer_count
}
{
r4 = add(r3,#4) // Increment the offset by 4 since 4 int32 values are stored for each invocation
if (!cmp.gtu(r1,r4.new)) jump:t .LBB0_3 // Exit if the main lwp buffer has run out of space
}
{
r5 = addasl(r5,r3,#2) // Get the address where the data needs to be recorded
memw(r2+#0) = r4 // Save next offset into __lwp_buffer_count
}
{
memw(r5+#0) = r31 // Save return address of this function
r1:0 = C15:14 // Control registers that keep processor cycle count (64-bits)
memw(r5+#4) = r0 // Save loop/function ID
}
{
memw(r5+#12) = r1 // Save upper 32 bits
memw(r5+#8) = r0 // Save lower 32 bits
}
.falign
.LBB0_3:
{
r5:4 = memd(r29+#16) // Restore the registers from the stack
r3:2 = memd(r29+#8)
}
{
r1:0 = memd(r29+#0)
dealloc_return // Deallocate the stack and return
}
.Lfunc_end0:
.size lwp_handler, .Lfunc_end0-lwp_handler
|
sonhs99/RedOS | 1,620 | ap_bootstrap/entry.s | [ORG 0x8000]
[BITS 16]
jmp START16
align 4, db 0
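; Parameter block -- presumably patched in by the BSP before the AP is
; started: PML4 physical address, 64-bit entry point, stack base and
; per-core stack size.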
PAGE_TABLE_PTR: dd 0x00
AP_ENTRY_POINT: dq 0x00
STACK_ADDR: dd 0x00
STACK_SIZE: dd 0x00
START16:
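    ; real mode: mask interrupts, load the GDT, set CR0 (PE|MP|TS|ET|NE,
    ; plus CD in bit 30) and far-jump into the 32-bit code segment (0x18)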
cli
lgdt [GDTR]
mov eax, 0x4000003B
mov cr0, eax
jmp dword 0x18:START32
[BITS 32]
START32:
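    ; protected mode: reload data segments, set CR4 (PAE|OSFXSR|OSXMMEXCPT),
    ; enable EFER.LME|SCE via MSR 0xC0000080, load the PML4 into CR3, then
    ; set CR0.PG and far-jump to the 64-bit code segment (0x08)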
mov ax, 0x20
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
mov eax, cr4
or eax, 0x620
mov cr4, eax
mov ecx, 0xC0000080
rdmsr
or eax, 0x0101
wrmsr
mov eax, [PAGE_TABLE_PTR]
mov cr3, eax
mov eax, cr0
or eax, 0xE000000E
xor eax, 0x6000000C
mov cr0, eax
jmp 0x08:START64
align 8, db 0
[BITS 64]
START64:
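    ; long mode: each AP derives its own stack from its Local APIC ID
    ; (memory-mapped register 0xFEE00020, ID in bits 31:24):
    ; rsp = STACK_ADDR - apic_id * STACK_SIZE, then jump to AP_ENTRY_POINT
    ; with ENDLESS pushed as the fallback return address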
mov ax, 0x10
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
xor rax, rax
mov eax, [STACK_ADDR]
mov rsp, rax
mov rbp, rax
xor rax, rax
mov rbx, 0xFEE00020
mov eax, dword [rbx]
shr rax, 24
xor rbx, rbx
mov ebx, dword [STACK_SIZE]
mul rbx
sub rsp, rax
sub rbp, rax
push qword ENDLESS
mov rax, qword [AP_ENTRY_POINT]
jmp rax
ENDLESS:
jmp ENDLESS
align 8, db 0
dw 0x0000
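; GDT: selector 0x08 = 64-bit code, 0x10 = 64-bit data,
;      0x18 = 32-bit code, 0x20 = 32-bit data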
GDTR:
dw GDTEND - GDT - 1
dd GDT
GDT:
NULL:
dw 0x0000
dw 0x0000
db 0x00
db 0x00
db 0x00
db 0x00
CODE64:
dw 0xFFFF
dw 0x0000
db 0x00
db 0x9A
db 0xAF
db 0x00
DATA64:
dw 0xFFFF
dw 0x0000
db 0x00
db 0x92
db 0xAF
db 0x00
CODE32:
dw 0xFFFF
dw 0x0000
db 0x00
db 0x9A
db 0xCF
db 0x00
DATA32:
dw 0xFFFF
dw 0x0000
db 0x00
db 0x92
db 0xCF
db 0x00
GDTEND:
|
sonith17/CO_PROJECT | 1,139 | Program2.s | .data
.word 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1
base: .word 0x00000400
.text
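# Selection sort (ascending) over 16 words, assuming the data segment is
# loaded at address 1024 (0x400): x1 = base, x2 = element count, x3 = i,
# x4 = j, x5 = index of the current minimum.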
addi x1 x0 1024
addi x2 x0 0
addi x2 x2 16
selection_sort:
addi x3 x0 0
addi x4 x0 0
addi x5 x0 0
for_loop_1:
addi x6 x2 -1
bge x3 x6 end_loop_1
add x5 x0 x3
addi x4 x3 1
for_loop_2:
bge x4 x2 end_loop_2
slli x7 x4 2
add x8 x1 x7
lw x9 0(x8)
slli x10 x5 2
add x11 x1 x10
lw x12 0(x11)
if_1:
bge x9 x12 end_if_1
add x5 x0 x4
addi x4 x4 1
j for_loop_2
if_2:
beq x5 x3 end_if_2
slli x7 x3 2
add x8 x1 x7
lw x9 0(x8)
slli x10 x5 2
add x11 x1 x10
lw x12 0(x11)
sw x9 0(x11)
sw x12 0(x8)
addi x3 x3 1
j for_loop_1
end_if_2:
addi x3 x3 1
j for_loop_1
end_if_1:
addi x4 x4 1
j for_loop_2
end_loop_2:
j if_2
addi x0 x0 0
end_loop_1:
addi x0 x0 0 |
sozud/psy-q-splitter | 3,131 | splitter/test_data/SpuVmVSetUp.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel SpuVmVSetUp
/* 12338 80021B38 21308000 */ addu $a2, $a0, $zero
/* 1233C 80021B3C FFFFC230 */ andi $v0, $a2, 0xFFFF
/* 12340 80021B40 1000422C */ sltiu $v0, $v0, 0x10
/* 12344 80021B44 10004010 */ beqz $v0, .L80021B88
/* 12348 80021B48 2138A000 */ addu $a3, $a1, $zero
/* 1234C 80021B4C 00140400 */ sll $v0, $a0, 16
/* 12350 80021B50 03240200 */ sra $a0, $v0, 16
/* 12354 80021B54 0980013C */ lui $at, %hi(_svm_vab_used)
/* 12358 80021B58 21082400 */ addu $at, $at, $a0
/* 1235C 80021B5C E8782390 */ lbu $v1, %lo(_svm_vab_used)($at)
/* 12360 80021B60 01000234 */ ori $v0, $zero, 0x1
/* 12364 80021B64 23006214 */ bne $v1, $v0, .L80021BF4
/* 12368 80021B68 FFFF0224 */ addiu $v0, $zero, -0x1
/* 1236C 80021B6C 001C0500 */ sll $v1, $a1, 16
/* 12370 80021B70 0780023C */ lui $v0, %hi(kMaxPrograms)
/* 12374 80021B74 94C34284 */ lh $v0, %lo(kMaxPrograms)($v0)
/* 12378 80021B78 032C0300 */ sra $a1, $v1, 16
/* 1237C 80021B7C 2A10A200 */ slt $v0, $a1, $v0
/* 12380 80021B80 03004014 */ bnez $v0, .L80021B90
/* 12384 80021B84 80100400 */ sll $v0, $a0, 2
.L80021B88:
/* 12388 80021B88 FD860008 */ j .L80021BF4
/* 1238C 80021B8C FFFF0224 */ addiu $v0, $zero, -0x1
.L80021B90:
/* 12390 80021B90 0480013C */ lui $at, %hi(_svm_vab_vh)
/* 12394 80021B94 21082200 */ addu $at, $at, $v0
/* 12398 80021B98 14C9238C */ lw $v1, %lo(_svm_vab_vh)($at)
/* 1239C 80021B9C 0480013C */ lui $at, %hi(_svm_vab_pg)
/* 123A0 80021BA0 21082200 */ addu $at, $at, $v0
/* 123A4 80021BA4 C8C8248C */ lw $a0, %lo(_svm_vab_pg)($at)
/* 123A8 80021BA8 0480013C */ lui $at, %hi(_svm_vab_tn)
/* 123AC 80021BAC 21082200 */ addu $at, $at, $v0
/* 123B0 80021BB0 58C9228C */ lw $v0, %lo(_svm_vab_tn)($at)
/* 123B4 80021BB4 0980013C */ lui $at, %hi(_svm_cur+1)
/* 123B8 80021BB8 C97826A0 */ sb $a2, %lo(_svm_cur+1)($at)
/* 123BC 80021BBC 0980013C */ lui $at, %hi(_svm_cur+6)
/* 123C0 80021BC0 CE7827A0 */ sb $a3, %lo(_svm_cur+6)($at)
/* 123C4 80021BC4 0780013C */ lui $at, %hi(_svm_tn)
/* 123C8 80021BC8 C8CB22AC */ sw $v0, %lo(_svm_tn)($at)
/* 123CC 80021BCC 00110500 */ sll $v0, $a1, 4
/* 123D0 80021BD0 21104400 */ addu $v0, $v0, $a0
/* 123D4 80021BD4 0780013C */ lui $at, %hi(_svm_vh)
/* 123D8 80021BD8 C0C323AC */ sw $v1, %lo(_svm_vh)($at)
/* 123DC 80021BDC 0780013C */ lui $at, %hi(_svm_pg)
/* 123E0 80021BE0 B4C324AC */ sw $a0, %lo(_svm_pg)($at)
/* 123E4 80021BE4 08004390 */ lbu $v1, 0x8($v0)
/* 123E8 80021BE8 21100000 */ addu $v0, $zero, $zero
/* 123EC 80021BEC 0980013C */ lui $at, %hi(_svm_cur+7)
/* 123F0 80021BF0 CF7823A0 */ sb $v1, %lo(_svm_cur+7)($at)
.L80021BF4:
/* 123F4 80021BF4 0800E003 */ jr $ra
/* 123F8 80021BF8 00000000 */ nop
.size SpuVmVSetUp, . - SpuVmVSetUp
|
sozud/psy-q-splitter | 5,371 | splitter/test_data/SsVabTransBodyPartly.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel SsVabTransBodyPartly
/* 12080 80021880 D8FFBD27 */ addiu $sp, $sp, -0x28
/* 12084 80021884 2000B4AF */ sw $s4, 0x20($sp)
/* 12088 80021888 21A08000 */ addu $s4, $a0, $zero
/* 1208C 8002188C 1C00B3AF */ sw $s3, 0x1C($sp)
/* 12090 80021890 2198A000 */ addu $s3, $a1, $zero
/* 12094 80021894 2128C000 */ addu $a1, $a2, $zero
/* 12098 80021898 FFFFA230 */ andi $v0, $a1, 0xFFFF
/* 1209C 8002189C 1100422C */ sltiu $v0, $v0, 0x11
/* 120A0 800218A0 2400BFAF */ sw $ra, 0x24($sp)
/* 120A4 800218A4 1800B2AF */ sw $s2, 0x18($sp)
/* 120A8 800218A8 1400B1AF */ sw $s1, 0x14($sp)
/* 120AC 800218AC 21004010 */ beqz $v0, .L80021934
/* 120B0 800218B0 1000B0AF */ sw $s0, 0x10($sp)
/* 120B4 800218B4 00140600 */ sll $v0, $a2, 16
/* 120B8 800218B8 038C0200 */ sra $s1, $v0, 16
/* 120BC 800218BC 0980013C */ lui $at, %hi(_svm_vab_used)
/* 120C0 800218C0 21083100 */ addu $at, $at, $s1
/* 120C4 800218C4 E8782390 */ lbu $v1, %lo(_svm_vab_used)($at)
/* 120C8 800218C8 02000234 */ ori $v0, $zero, 0x2
/* 120CC 800218CC 19006214 */ bne $v1, $v0, .L80021934
/* 120D0 800218D0 00000000 */ nop
/* 120D4 800218D4 0380023C */ lui $v0, %hi(D_00000000)
/* 120D8 800218D8 082F428C */ lw $v0, %lo(D_00000000)($v0)
/* 120DC 800218DC 00000000 */ nop
/* 120E0 800218E0 0F004014 */ bnez $v0, .L80021920
/* 120E4 800218E4 80801100 */ sll $s0, $s1, 2
/* 120E8 800218E8 0A80013C */ lui $at, %hi(_svm_vab_total)
/* 120EC 800218EC 21083000 */ addu $at, $at, $s0
/* 120F0 800218F0 CC87228C */ lw $v0, %lo(_svm_vab_total)($at)
/* 120F4 800218F4 0380013C */ lui $at, %hi(D_00000004)
/* 120F8 800218F8 0C2F25A4 */ sh $a1, %lo(D_00000004)($at)
/* 120FC 800218FC 0380013C */ lui $at, %hi(D_00000000)
/* 12100 80021900 082F22AC */ sw $v0, %lo(D_00000000)($at)
/* 12104 80021904 FDAA000C */ jal SpuSetTransferMode
/* 12108 80021908 21200000 */ addu $a0, $zero, $zero
/* 1210C 8002190C 0A80013C */ lui $at, %hi(_svm_vab_start)
/* 12110 80021910 21083000 */ addu $at, $at, $s0
/* 12114 80021914 1088248C */ lw $a0, %lo(_svm_vab_start)($at)
/* 12118 80021918 EEAA000C */ jal SpuSetTransferStartAddr
/* 1211C 8002191C 00000000 */ nop
.L80021920:
/* 12120 80021920 0380123C */ lui $s2, %hi(D_00000004)
/* 12124 80021924 0C2F5286 */ lh $s2, %lo(D_00000004)($s2)
/* 12128 80021928 00000000 */ nop
/* 1212C 8002192C 05005112 */ beq $s2, $s1, .L80021944
/* 12130 80021930 21806002 */ addu $s0, $s3, $zero
.L80021934:
/* 12134 80021934 5BAB000C */ jal _spu_setInTransfer
/* 12138 80021938 21200000 */ addu $a0, $zero, $zero
/* 1213C 8002193C 6F860008 */ j .L800219BC
/* 12140 80021940 FFFF0224 */ addiu $v0, $zero, -0x1
.L80021944:
/* 12144 80021944 0380033C */ lui $v1, %hi(D_00000000)
/* 12148 80021948 082F638C */ lw $v1, %lo(D_00000000)($v1)
/* 1214C 8002194C 00000000 */ nop
/* 12150 80021950 2B107000 */ sltu $v0, $v1, $s0
/* 12154 80021954 02004010 */ beqz $v0, .L80021960
/* 12158 80021958 00000000 */ nop
/* 1215C 8002195C 21806000 */ addu $s0, $v1, $zero
.L80021960:
/* 12160 80021960 5BAB000C */ jal _spu_setInTransfer
/* 12164 80021964 01000434 */ ori $a0, $zero, 0x1
/* 12168 80021968 21208002 */ addu $a0, $s4, $zero
/* 1216C 8002196C 0AAB000C */ jal SpuWritePartly
/* 12170 80021970 21280002 */ addu $a1, $s0, $zero
/* 12174 80021974 0380023C */ lui $v0, %hi(D_00000000)
/* 12178 80021978 082F428C */ lw $v0, %lo(D_00000000)($v0)
/* 1217C 8002197C 00000000 */ nop
/* 12180 80021980 23105000 */ subu $v0, $v0, $s0
/* 12184 80021984 0380013C */ lui $at, %hi(D_00000000)
/* 12188 80021988 082F22AC */ sw $v0, %lo(D_00000000)($at)
/* 1218C 8002198C 0B004014 */ bnez $v0, .L800219BC
/* 12190 80021990 FEFF0224 */ addiu $v0, $zero, -0x2
/* 12194 80021994 21104002 */ addu $v0, $s2, $zero
/* 12198 80021998 FFFF0324 */ addiu $v1, $zero, -0x1
/* 1219C 8002199C 0380013C */ lui $at, %hi(D_00000004)
/* 121A0 800219A0 0C2F23A4 */ sh $v1, %lo(D_00000004)($at)
/* 121A4 800219A4 01000334 */ ori $v1, $zero, 0x1
/* 121A8 800219A8 0380013C */ lui $at, %hi(D_00000000)
/* 121AC 800219AC 082F20AC */ sw $zero, %lo(D_00000000)($at)
/* 121B0 800219B0 0980013C */ lui $at, %hi(_svm_vab_used)
/* 121B4 800219B4 21082200 */ addu $at, $at, $v0
/* 121B8 800219B8 E87823A0 */ sb $v1, %lo(_svm_vab_used)($at)
.L800219BC:
/* 121BC 800219BC 2400BF8F */ lw $ra, 0x24($sp)
/* 121C0 800219C0 2000B48F */ lw $s4, 0x20($sp)
/* 121C4 800219C4 1C00B38F */ lw $s3, 0x1C($sp)
/* 121C8 800219C8 1800B28F */ lw $s2, 0x18($sp)
/* 121CC 800219CC 1400B18F */ lw $s1, 0x14($sp)
/* 121D0 800219D0 1000B08F */ lw $s0, 0x10($sp)
/* 121D4 800219D4 2800BD27 */ addiu $sp, $sp, 0x28
/* 121D8 800219D8 0800E003 */ jr $ra
/* 121DC 800219DC 00000000 */ nop
.size SsVabTransBodyPartly, . - SsVabTransBodyPartly
|
sozud/psy-q-splitter | 13,330 | splitter/test_data/_SsInitSoundSep.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsInitSoundSep
/* 344C 80012C4C E0FFBD27 */ addiu $sp, $sp, -0x20
/* 3450 80012C50 21608000 */ addu $t4, $a0, $zero
/* 3454 80012C54 2158A000 */ addu $t3, $a1, $zero
/* 3458 80012C58 1400B1AF */ sw $s1, 0x14($sp)
/* 345C 80012C5C 21880000 */ addu $s1, $zero, $zero
/* 3460 80012C60 21400000 */ addu $t0, $zero, $zero
/* 3464 80012C64 40000A34 */ ori $t2, $zero, 0x40
/* 3468 80012C68 7F000934 */ ori $t1, $zero, 0x7F
/* 346C 80012C6C 00240400 */ sll $a0, $a0, 16
/* 3470 80012C70 83230400 */ sra $a0, $a0, 14
/* 3474 80012C74 002C0500 */ sll $a1, $a1, 16
/* 3478 80012C78 032C0500 */ sra $a1, $a1, 16
/* 347C 80012C7C 40100500 */ sll $v0, $a1, 1
/* 3480 80012C80 21104500 */ addu $v0, $v0, $a1
/* 3484 80012C84 80100200 */ sll $v0, $v0, 2
/* 3488 80012C88 23104500 */ subu $v0, $v0, $a1
/* 348C 80012C8C 80100200 */ sll $v0, $v0, 2
/* 3490 80012C90 23104500 */ subu $v0, $v0, $a1
/* 3494 80012C94 1C00BFAF */ sw $ra, 0x1C($sp)
/* 3498 80012C98 1800B2AF */ sw $s2, 0x18($sp)
/* 349C 80012C9C 1000B0AF */ sw $s0, 0x10($sp)
/* 34A0 80012CA0 0280013C */ lui $at, %hi(_ss_score)
/* 34A4 80012CA4 21082400 */ addu $at, $at, $a0
/* 34A8 80012CA8 2C29238C */ lw $v1, %lo(_ss_score)($at)
/* 34AC 80012CAC 80100200 */ sll $v0, $v0, 2
/* 34B0 80012CB0 21804300 */ addu $s0, $v0, $v1
/* 34B4 80012CB4 21180002 */ addu $v1, $s0, $zero
/* 34B8 80012CB8 01000234 */ ori $v0, $zero, 0x1
/* 34BC 80012CBC 6E0002A6 */ sh $v0, 0x6E($s0)
/* 34C0 80012CC0 7F000234 */ ori $v0, $zero, 0x7F
/* 34C4 80012CC4 100000A2 */ sb $zero, 0x10($s0)
/* 34C8 80012CC8 110000A2 */ sb $zero, 0x11($s0)
/* 34CC 80012CCC 120000A2 */ sb $zero, 0x12($s0)
/* 34D0 80012CD0 130000A2 */ sb $zero, 0x13($s0)
/* 34D4 80012CD4 140000A2 */ sb $zero, 0x14($s0)
/* 34D8 80012CD8 150000A2 */ sb $zero, 0x15($s0)
/* 34DC 80012CDC 160000A2 */ sb $zero, 0x16($s0)
/* 34E0 80012CE0 270000A2 */ sb $zero, 0x27($s0)
/* 34E4 80012CE4 280000A2 */ sb $zero, 0x28($s0)
/* 34E8 80012CE8 290000A2 */ sb $zero, 0x29($s0)
/* 34EC 80012CEC 2A0000A2 */ sb $zero, 0x2A($s0)
/* 34F0 80012CF0 2B0000A2 */ sb $zero, 0x2B($s0)
/* 34F4 80012CF4 480000A6 */ sh $zero, 0x48($s0)
/* 34F8 80012CF8 4A0000A6 */ sh $zero, 0x4A($s0)
/* 34FC 80012CFC 4C0006A6 */ sh $a2, 0x4C($s0)
/* 3500 80012D00 720000A6 */ sh $zero, 0x72($s0)
/* 3504 80012D04 7C0000AE */ sw $zero, 0x7C($s0)
/* 3508 80012D08 800000AE */ sw $zero, 0x80($s0)
/* 350C 80012D0C 840000AE */ sw $zero, 0x84($s0)
/* 3510 80012D10 880000AE */ sw $zero, 0x88($s0)
/* 3514 80012D14 A80002A6 */ sh $v0, 0xA8($s0)
/* 3518 80012D18 AA0000A6 */ sh $zero, 0xAA($s0)
.L80012D1C:
/* 351C 80012D1C 21100802 */ addu $v0, $s0, $t0
/* 3520 80012D20 17004AA0 */ sb $t2, 0x17($v0)
/* 3524 80012D24 2C0048A0 */ sb $t0, 0x2C($v0)
/* 3528 80012D28 4E0069A4 */ sh $t1, 0x4E($v1)
/* 352C 80012D2C 01000825 */ addiu $t0, $t0, 0x1
/* 3530 80012D30 10000229 */ slti $v0, $t0, 0x10
/* 3534 80012D34 F9FF4014 */ bnez $v0, .L80012D1C
/* 3538 80012D38 02006324 */ addiu $v1, $v1, 0x2
/* 353C 80012D3C 00140B00 */ sll $v0, $t3, 16
/* 3540 80012D40 16004014 */ bnez $v0, .L80012D9C
/* 3544 80012D44 040007AE */ sw $a3, 0x4($s0)
/* 3548 80012D48 0000E390 */ lbu $v1, 0x0($a3)
/* 354C 80012D4C 53000234 */ ori $v0, $zero, 0x53
/* 3550 80012D50 03006210 */ beq $v1, $v0, .L80012D60
/* 3554 80012D54 70000234 */ ori $v0, $zero, 0x70
/* 3558 80012D58 14006214 */ bne $v1, $v0, .L80012DAC
/* 355C 80012D5C 9303053C */ lui $a1, 0x393
.L80012D60:
/* 3560 80012D60 0600E224 */ addiu $v0, $a3, 0x6
/* 3564 80012D64 040002AE */ sw $v0, 0x4($s0)
/* 3568 80012D68 0500E290 */ lbu $v0, 0x5($a3)
/* 356C 80012D6C 00000000 */ nop
/* 3570 80012D70 07004010 */ beqz $v0, .L80012D90
/* 3574 80012D74 0800E224 */ addiu $v0, $a3, 0x8
/* 3578 80012D78 0180043C */ lui $a0, %hi(R_00000000)
/* 357C 80012D7C 88008424 */ addiu $a0, $a0, %lo(R_00000000)
/* 3580 80012D80 717E000C */ jal printf
/* 3584 80012D84 00000000 */ nop
/* 3588 80012D88 FC4B0008 */ j .L80012FF0
/* 358C 80012D8C FFFF0224 */ addiu $v0, $zero, -0x1
.L80012D90:
/* 3590 80012D90 040002AE */ sw $v0, 0x4($s0)
/* 3594 80012D94 6A4B0008 */ j .L80012DA8
/* 3598 80012D98 08003126 */ addiu $s1, $s1, 0x8
.L80012D9C:
/* 359C 80012D9C 0200E224 */ addiu $v0, $a3, 0x2
/* 35A0 80012DA0 040002AE */ sw $v0, 0x4($s0)
/* 35A4 80012DA4 02003126 */ addiu $s1, $s1, 0x2
.L80012DA8:
/* 35A8 80012DA8 9303053C */ lui $a1, 0x393
.L80012DAC:
/* 35AC 80012DAC 0400038E */ lw $v1, 0x4($s0)
/* 35B0 80012DB0 0087A534 */ ori $a1, $a1, 0x8700
/* 35B4 80012DB4 01006224 */ addiu $v0, $v1, 0x1
/* 35B8 80012DB8 040002AE */ sw $v0, 0x4($s0)
/* 35BC 80012DBC 00006690 */ lbu $a2, 0x0($v1)
/* 35C0 80012DC0 02006224 */ addiu $v0, $v1, 0x2
/* 35C4 80012DC4 040002AE */ sw $v0, 0x4($s0)
/* 35C8 80012DC8 01006290 */ lbu $v0, 0x1($v1)
/* 35CC 80012DCC 0400038E */ lw $v1, 0x4($s0)
/* 35D0 80012DD0 00220600 */ sll $a0, $a2, 8
/* 35D4 80012DD4 25104400 */ or $v0, $v0, $a0
/* 35D8 80012DD8 4A0002A6 */ sh $v0, 0x4A($s0)
/* 35DC 80012DDC 01006224 */ addiu $v0, $v1, 0x1
/* 35E0 80012DE0 040002AE */ sw $v0, 0x4($s0)
/* 35E4 80012DE4 00006790 */ lbu $a3, 0x0($v1)
/* 35E8 80012DE8 02006224 */ addiu $v0, $v1, 0x2
/* 35EC 80012DEC 040002AE */ sw $v0, 0x4($s0)
/* 35F0 80012DF0 01006490 */ lbu $a0, 0x1($v1)
/* 35F4 80012DF4 03006224 */ addiu $v0, $v1, 0x3
/* 35F8 80012DF8 040002AE */ sw $v0, 0x4($s0)
/* 35FC 80012DFC 02006390 */ lbu $v1, 0x2($v1)
/* 3600 80012E00 00140700 */ sll $v0, $a3, 16
/* 3604 80012E04 00220400 */ sll $a0, $a0, 8
/* 3608 80012E08 25104400 */ or $v0, $v0, $a0
/* 360C 80012E0C 25104300 */ or $v0, $v0, $v1
/* 3610 80012E10 1A00A200 */ div $zero, $a1, $v0
/* 3614 80012E14 02004014 */ bnez $v0, .L80012E20
/* 3618 80012E18 00000000 */ nop
/* 361C 80012E1C 0D000700 */ break 7
.L80012E20:
/* 3620 80012E20 FFFF0124 */ addiu $at, $zero, -0x1
/* 3624 80012E24 04004114 */ bne $v0, $at, .L80012E38
/* 3628 80012E28 0080013C */ lui $at, 0x8000
/* 362C 80012E2C 0200A114 */ bne $a1, $at, .L80012E38
/* 3630 80012E30 00000000 */ nop
/* 3634 80012E34 0D000600 */ break 6
.L80012E38:
/* 3638 80012E38 12280000 */ mflo $a1
/* 363C 80012E3C 10180000 */ mfhi $v1
/* 3640 80012E40 840002AE */ sw $v0, 0x84($s0)
/* 3644 80012E44 42100200 */ srl $v0, $v0, 1
/* 3648 80012E48 2A104300 */ slt $v0, $v0, $v1
/* 364C 80012E4C 04004010 */ beqz $v0, .L80012E60
/* 3650 80012E50 05003126 */ addiu $s1, $s1, 0x5
/* 3654 80012E54 0100A224 */ addiu $v0, $a1, 0x1
/* 3658 80012E58 994B0008 */ j .L80012E64
/* 365C 80012E5C 840002AE */ sw $v0, 0x84($s0)
.L80012E60:
/* 3660 80012E60 840005AE */ sw $a1, 0x84($s0)
.L80012E64:
/* 3664 80012E64 00240C00 */ sll $a0, $t4, 16
/* 3668 80012E68 8400028E */ lw $v0, 0x84($s0)
/* 366C 80012E6C 0400058E */ lw $a1, 0x4($s0)
/* 3670 80012E70 03240400 */ sra $a0, $a0, 16
/* 3674 80012E74 8C0002AE */ sw $v0, 0x8C($s0)
/* 3678 80012E78 0300A224 */ addiu $v0, $a1, 0x3
/* 367C 80012E7C 040002AE */ sw $v0, 0x4($s0)
/* 3680 80012E80 0200A690 */ lbu $a2, 0x2($a1)
/* 3684 80012E84 0400A224 */ addiu $v0, $a1, 0x4
/* 3688 80012E88 040002AE */ sw $v0, 0x4($s0)
/* 368C 80012E8C 0300A390 */ lbu $v1, 0x3($a1)
/* 3690 80012E90 0500A224 */ addiu $v0, $a1, 0x5
/* 3694 80012E94 040002AE */ sw $v0, 0x4($s0)
/* 3698 80012E98 0400A790 */ lbu $a3, 0x4($a1)
/* 369C 80012E9C 0600A224 */ addiu $v0, $a1, 0x6
/* 36A0 80012EA0 040002AE */ sw $v0, 0x4($s0)
/* 36A4 80012EA4 0500A590 */ lbu $a1, 0x5($a1)
/* 36A8 80012EA8 00160600 */ sll $v0, $a2, 24
/* 36AC 80012EAC 001C0300 */ sll $v1, $v1, 16
/* 36B0 80012EB0 21104300 */ addu $v0, $v0, $v1
/* 36B4 80012EB4 001A0700 */ sll $v1, $a3, 8
/* 36B8 80012EB8 21104300 */ addu $v0, $v0, $v1
/* 36BC 80012EBC 21904500 */ addu $s2, $v0, $a1
/* 36C0 80012EC0 002C0B00 */ sll $a1, $t3, 16
/* 36C4 80012EC4 7454000C */ jal _SsReadDeltaValue
/* 36C8 80012EC8 032C0500 */ sra $a1, $a1, 16
/* 36CC 80012ECC 4A000486 */ lh $a0, 0x4A($s0)
/* 36D0 80012ED0 8400038E */ lw $v1, 0x84($s0)
/* 36D4 80012ED4 00000000 */ nop
/* 36D8 80012ED8 18008300 */ mult $a0, $v1
/* 36DC 80012EDC 0400048E */ lw $a0, 0x4($s0)
/* 36E0 80012EE0 0400038E */ lw $v1, 0x4($s0)
/* 36E4 80012EE4 0C0004AE */ sw $a0, 0xC($s0)
/* 36E8 80012EE8 0280043C */ lui $a0, %hi(VBLANK_MINUS)
/* 36EC 80012EEC 2429848C */ lw $a0, %lo(VBLANK_MINUS)($a0)
/* 36F0 80012EF0 7C0002AE */ sw $v0, 0x7C($s0)
/* 36F4 80012EF4 880002AE */ sw $v0, 0x88($s0)
/* 36F8 80012EF8 080003AE */ sw $v1, 0x8($s0)
/* 36FC 80012EFC 00190400 */ sll $v1, $a0, 4
/* 3700 80012F00 23386400 */ subu $a3, $v1, $a0
/* 3704 80012F04 80280700 */ sll $a1, $a3, 2
/* 3708 80012F08 12300000 */ mflo $a2
/* 370C 80012F0C 80100600 */ sll $v0, $a2, 2
/* 3710 80012F10 21104600 */ addu $v0, $v0, $a2
/* 3714 80012F14 40100200 */ sll $v0, $v0, 1
/* 3718 80012F18 2B104500 */ sltu $v0, $v0, $a1
/* 371C 80012F1C 0F004010 */ beqz $v0, .L80012F5C
/* 3720 80012F20 06003126 */ addiu $s1, $s1, 0x6
/* 3724 80012F24 80100400 */ sll $v0, $a0, 2
/* 3728 80012F28 21104400 */ addu $v0, $v0, $a0
/* 372C 80012F2C 00190200 */ sll $v1, $v0, 4
/* 3730 80012F30 23186200 */ subu $v1, $v1, $v0
/* 3734 80012F34 C0180300 */ sll $v1, $v1, 3
/* 3738 80012F38 1B006600 */ divu $zero, $v1, $a2
/* 373C 80012F3C 0200C014 */ bnez $a2, .L80012F48
/* 3740 80012F40 00000000 */ nop
/* 3744 80012F44 0D000700 */ break 7
.L80012F48:
/* 3748 80012F48 12180000 */ mflo $v1
/* 374C 80012F4C 00000000 */ nop
/* 3750 80012F50 6E0003A6 */ sh $v1, 0x6E($s0)
/* 3754 80012F54 F94B0008 */ j .L80012FE4
/* 3758 80012F58 700003A6 */ sh $v1, 0x70($s0)
.L80012F5C:
/* 375C 80012F5C 4A000386 */ lh $v1, 0x4A($s0)
/* 3760 80012F60 8400028E */ lw $v0, 0x84($s0)
/* 3764 80012F64 00000000 */ nop
/* 3768 80012F68 18006200 */ mult $v1, $v0
/* 376C 80012F6C 12180000 */ mflo $v1
/* 3770 80012F70 80100300 */ sll $v0, $v1, 2
/* 3774 80012F74 21104300 */ addu $v0, $v0, $v1
/* 3778 80012F78 40100200 */ sll $v0, $v0, 1
/* 377C 80012F7C 1B004500 */ divu $zero, $v0, $a1
/* 3780 80012F80 0200A014 */ bnez $a1, .L80012F8C
/* 3784 80012F84 00000000 */ nop
/* 3788 80012F88 0D000700 */ break 7
.L80012F8C:
/* 378C 80012F8C 12200000 */ mflo $a0
/* 3790 80012F90 4A000386 */ lh $v1, 0x4A($s0)
/* 3794 80012F94 8400028E */ lw $v0, 0x84($s0)
/* 3798 80012F98 00000000 */ nop
/* 379C 80012F9C 18006200 */ mult $v1, $v0
/* 37A0 80012FA0 12180000 */ mflo $v1
/* 37A4 80012FA4 80100300 */ sll $v0, $v1, 2
/* 37A8 80012FA8 21104300 */ addu $v0, $v0, $v1
/* 37AC 80012FAC 40100200 */ sll $v0, $v0, 1
/* 37B0 80012FB0 1B004500 */ divu $zero, $v0, $a1
/* 37B4 80012FB4 0200A014 */ bnez $a1, .L80012FC0
/* 37B8 80012FB8 00000000 */ nop
/* 37BC 80012FBC 0D000700 */ break 7
.L80012FC0:
/* 37C0 80012FC0 10180000 */ mfhi $v1
/* 37C4 80012FC4 FFFF0224 */ addiu $v0, $zero, -0x1
/* 37C8 80012FC8 6E0002A6 */ sh $v0, 0x6E($s0)
/* 37CC 80012FCC 40100700 */ sll $v0, $a3, 1
/* 37D0 80012FD0 2B104300 */ sltu $v0, $v0, $v1
/* 37D4 80012FD4 03004010 */ beqz $v0, .L80012FE4
/* 37D8 80012FD8 700004A6 */ sh $a0, 0x70($s0)
/* 37DC 80012FDC 01008224 */ addiu $v0, $a0, 0x1
/* 37E0 80012FE0 700002A6 */ sh $v0, 0x70($s0)
.L80012FE4:
/* 37E4 80012FE4 70000396 */ lhu $v1, 0x70($s0)
/* 37E8 80012FE8 21103202 */ addu $v0, $s1, $s2
/* 37EC 80012FEC 720003A6 */ sh $v1, 0x72($s0)
.L80012FF0:
/* 37F0 80012FF0 1C00BF8F */ lw $ra, 0x1C($sp)
/* 37F4 80012FF4 1800B28F */ lw $s2, 0x18($sp)
/* 37F8 80012FF8 1400B18F */ lw $s1, 0x14($sp)
/* 37FC 80012FFC 1000B08F */ lw $s0, 0x10($sp)
/* 3800 80013000 2000BD27 */ addiu $sp, $sp, 0x20
/* 3804 80013004 0800E003 */ jr $ra
/* 3808 80013008 00000000 */ nop
.size _SsInitSoundSep, . - _SsInitSoundSep
|
sozud/psy-q-splitter | 4,580 | splitter/test_data/_SsInit.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsInit
/* 00000000 27BDFFE8 */ addiu $sp, $sp, -0x18
/* 00000004 AFB00010 */ sw $s0, 0x10($sp)
/* 00000008 AFBF0014 */ sw $ra, 0x14($sp)
/* 0000000C 0C000000 */ jal func_80000000
/* 00000010 00808021 */ addu $s0, $a0, $zero
/* 00000014 16000005 */ bnez $s0, .L0000002C
/* 00000018 00000000 */ nop
/* 0000001C 0C000000 */ jal SpuInit
/* 00000020 00000000 */ nop
/* 00000024 08000000 */ j .L00000038
/* 00000028 3C061F80 */ lui $a2, 0x1F80
.L0000002C:
/* 0000002C 0C000000 */ jal SpuInitHot
/* 00000030 00000000 */ nop
/* 00000034 3C061F80 */ lui $a2, 0x1F80
.L00000038:
/* 00000038 34C61C00 */ ori $a2, $a2, 0x1C00
/* 0000003C 00002021 */ addu $a0, $zero, $zero
/* 00000040 3C070000 */ lui $a3, %hi(D_00000000)
/* 00000044 24E70000 */ addiu $a3, $a3, %lo(D_00000000)
.L00000048:
/* 00000048 00002821 */ addu $a1, $zero, $zero
/* 0000004C 00E01821 */ addu $v1, $a3, $zero
.L00000050:
/* 00000050 94620000 */ lhu $v0, 0x0($v1)
/* 00000054 24630002 */ addiu $v1, $v1, 0x2
/* 00000058 24A50001 */ addiu $a1, $a1, 0x1
/* 0000005C A4C20000 */ sh $v0, 0x0($a2)
/* 00000060 28A20008 */ slti $v0, $a1, 0x8
/* 00000064 1440FFFA */ bnez $v0, .L00000050
/* 00000068 24C60002 */ addiu $a2, $a2, 0x2
/* 0000006C 24840001 */ addiu $a0, $a0, 0x1
/* 00000070 28820018 */ slti $v0, $a0, 0x18
/* 00000074 1440FFF4 */ bnez $v0, .L00000048
/* 00000078 00000000 */ nop
/* 0000007C 3C061F80 */ lui $a2, 0x1F80
/* 00000080 34C61D80 */ ori $a2, $a2, 0x1D80
/* 00000084 00002021 */ addu $a0, $zero, $zero
/* 00000088 3C030000 */ lui $v1, %hi(D_00000010)
/* 0000008C 24630000 */ addiu $v1, $v1, %lo(D_00000010)
.L00000090:
/* 00000090 94620000 */ lhu $v0, 0x0($v1)
/* 00000094 24630002 */ addiu $v1, $v1, 0x2
/* 00000098 24840001 */ addiu $a0, $a0, 0x1
/* 0000009C A4C20000 */ sh $v0, 0x0($a2)
/* 000000A0 28820010 */ slti $v0, $a0, 0x10
/* 000000A4 1440FFFA */ bnez $v0, .L00000090
/* 000000A8 24C60002 */ addiu $a2, $a2, 0x2
/* 000000AC 0C000000 */ jal SpuVmInit
/* 000000B0 34040018 */ ori $a0, $zero, 0x18
/* 000000B4 00002821 */ addu $a1, $zero, $zero
/* 000000B8 3C030000 */ lui $v1, 0x0
/* 000000BC 24630000 */ addiu $v1, $v1, 0x0
.L000000C0:
/* 000000C0 3404000F */ ori $a0, $zero, 0xF
/* 000000C4 2462003C */ addiu $v0, $v1, 0x3C
.L000000C8:
/* 000000C8 AC400000 */ sw $zero, 0x0($v0)
/* 000000CC 2484FFFF */ addiu $a0, $a0, -0x1
/* 000000D0 0481FFFD */ bgez $a0, .L000000C8
/* 000000D4 2442FFFC */ addiu $v0, $v0, -0x4
/* 000000D8 24A50001 */ addiu $a1, $a1, 0x1
/* 000000DC 28A20020 */ slti $v0, $a1, 0x20
/* 000000E0 1440FFF7 */ bnez $v0, .L000000C0
/* 000000E4 24630040 */ addiu $v1, $v1, 0x40
/* 000000E8 3402003C */ ori $v0, $zero, 0x3C
/* 000000EC 3C010000 */ lui $at, 0x0
/* 000000F0 AC220000 */ sw $v0, 0x0($at)
/* 000000F4 2402FFFF */ addiu $v0, $zero, -0x1
/* 000000F8 3C010000 */ lui $at, 0x0
/* 000000FC AC200000 */ sw $zero, 0x0($at)
/* 00000100 3C010000 */ lui $at, %hi(_snd_use_vsync_cb)
/* 00000104 AC200000 */ sw $zero, %lo(_snd_use_vsync_cb)($at)
/* 00000108 3C010000 */ lui $at, %hi(_snd_use_interrupt_id)
/* 0000010C AC220000 */ sw $v0, %lo(_snd_use_interrupt_id)($at)
/* 00000110 3C010000 */ lui $at, %hi(_snd_use_event)
/* 00000114 AC200000 */ sw $zero, %lo(_snd_use_event)($at)
/* 00000118 3C010000 */ lui $at, %hi(_snd_1per2)
/* 0000011C AC200000 */ sw $zero, %lo(_snd_1per2)($at)
/* 00000120 3C010000 */ lui $at, %hi(_snd_vsync_cb)
/* 00000124 AC200000 */ sw $zero, %lo(_snd_vsync_cb)($at)
/* 00000128 0C000000 */ jal GetVideoMode
/* 0000012C 00000000 */ nop
/* 00000130 3C010000 */ lui $at, %hi(_snd_video_mode)
/* 00000134 AC220000 */ sw $v0, %lo(_snd_video_mode)($at)
/* 00000138 3C010000 */ lui $at, 0x0
/* 0000013C AC200000 */ sw $zero, 0x0($at)
/* 00000140 8FBF0014 */ lw $ra, 0x14($sp)
/* 00000144 8FB00010 */ lw $s0, 0x10($sp)
/* 00000148 27BD0018 */ addiu $sp, $sp, 0x18
/* 0000014C 03E00008 */ jr $ra
/* 00000150 00000000 */ nop
.size _SsInit, . - _SsInit
|
sozud/psy-q-splitter | 9,294 | splitter/test_data/SpuVmAlloc.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel SpuVmAlloc
/* 12A0C 8002220C D8FFBD27 */ addiu $sp, $sp, -0x28
/* 12A10 80022210 2000B0AF */ sw $s0, 0x20($sp)
/* 12A14 80022214 63001034 */ ori $s0, $zero, 0x63
/* 12A18 80022218 FFFF0B34 */ ori $t3, $zero, 0xFFFF
/* 12A1C 8002221C 21500000 */ addu $t2, $zero, $zero
/* 12A20 80022220 21400000 */ addu $t0, $zero, $zero
/* 12A24 80022224 63000934 */ ori $t1, $zero, 0x63
/* 12A28 80022228 0880023C */ lui $v0, %hi(spuVmMaxVoice)
/* 12A2C 8002222C E86F4290 */ lbu $v0, %lo(spuVmMaxVoice)($v0)
/* 12A30 80022230 09800C3C */ lui $t4, %hi(_svm_cur+15)
/* 12A34 80022234 D7788C91 */ lbu $t4, %lo(_svm_cur+15)($t4)
/* 12A38 80022238 21380000 */ addu $a3, $zero, $zero
/* 12A3C 8002223C 4C004010 */ beqz $v0, .L80022370
/* 12A40 80022240 2400BFAF */ sw $ra, 0x24($sp)
/* 12A44 80022244 FF00E230 */ andi $v0, $a3, 0xFF
.L80022248:
/* 12A48 80022248 40180200 */ sll $v1, $v0, 1
/* 12A4C 8002224C 21186200 */ addu $v1, $v1, $v0
/* 12A50 80022250 80180300 */ sll $v1, $v1, 2
/* 12A54 80022254 21186200 */ addu $v1, $v1, $v0
/* 12A58 80022258 80180300 */ sll $v1, $v1, 2
/* 12A5C 8002225C 0480013C */ lui $at, %hi(_svm_voice+27)
/* 12A60 80022260 21082300 */ addu $at, $at, $v1
/* 12A64 80022264 43B82290 */ lbu $v0, %lo(_svm_voice+27)($at)
/* 12A68 80022268 00000000 */ nop
/* 12A6C 8002226C 09004014 */ bnez $v0, .L80022294
/* 12A70 80022270 FF00E230 */ andi $v0, $a3, 0xFF
/* 12A74 80022274 0480013C */ lui $at, %hi(_svm_voice+6)
/* 12A78 80022278 21082300 */ addu $at, $at, $v1
/* 12A7C 8002227C 2EB82294 */ lhu $v0, %lo(_svm_voice+6)($at)
/* 12A80 80022280 00000000 */ nop
/* 12A84 80022284 03004014 */ bnez $v0, .L80022294
/* 12A88 80022288 FF00E230 */ andi $v0, $a3, 0xFF
/* 12A8C 8002228C DC880008 */ j .L80022370
/* 12A90 80022290 2180E000 */ addu $s0, $a3, $zero
.L80022294:
/* 12A94 80022294 40180200 */ sll $v1, $v0, 1
/* 12A98 80022298 21186200 */ addu $v1, $v1, $v0
/* 12A9C 8002229C 80180300 */ sll $v1, $v1, 2
/* 12AA0 800222A0 21186200 */ addu $v1, $v1, $v0
/* 12AA4 800222A4 80180300 */ sll $v1, $v1, 2
/* 12AA8 800222A8 0480013C */ lui $at, %hi(_svm_voice+24)
/* 12AAC 800222AC 21082300 */ addu $at, $at, $v1
/* 12AB0 800222B0 40B82684 */ lh $a2, %lo(_svm_voice+24)($at)
/* 12AB4 800222B4 FFFF8431 */ andi $a0, $t4, 0xFFFF
/* 12AB8 800222B8 2A10C400 */ slt $v0, $a2, $a0
/* 12ABC 800222BC 0B004010 */ beqz $v0, .L800222EC
/* 12AC0 800222C0 2128C000 */ addu $a1, $a2, $zero
/* 12AC4 800222C4 2160A000 */ addu $t4, $a1, $zero
/* 12AC8 800222C8 2148E000 */ addu $t1, $a3, $zero
/* 12ACC 800222CC 0480013C */ lui $at, %hi(_svm_voice+6)
/* 12AD0 800222D0 21082300 */ addu $at, $at, $v1
/* 12AD4 800222D4 2EB82B94 */ lhu $t3, %lo(_svm_voice+6)($at)
/* 12AD8 800222D8 0480013C */ lui $at, %hi(_svm_voice+2)
/* 12ADC 800222DC 21082300 */ addu $at, $at, $v1
/* 12AE0 800222E0 2AB82894 */ lhu $t0, %lo(_svm_voice+2)($at)
/* 12AE4 800222E4 D5880008 */ j .L80022354
/* 12AE8 800222E8 01000A34 */ ori $t2, $zero, 0x1
.L800222EC:
/* 12AEC 800222EC 1900C414 */ bne $a2, $a0, .L80022354
/* 12AF0 800222F0 FFFF6531 */ andi $a1, $t3, 0xFFFF
/* 12AF4 800222F4 0480013C */ lui $at, %hi(_svm_voice+6)
/* 12AF8 800222F8 21082300 */ addu $at, $at, $v1
/* 12AFC 800222FC 2EB82494 */ lhu $a0, %lo(_svm_voice+6)($at)
/* 12B00 80022300 00000000 */ nop
/* 12B04 80022304 2B108500 */ sltu $v0, $a0, $a1
/* 12B08 80022308 06004010 */ beqz $v0, .L80022324
/* 12B0C 8002230C 01004A25 */ addiu $t2, $t2, 0x1
/* 12B10 80022310 0480013C */ lui $at, %hi(_svm_voice+2)
/* 12B14 80022314 21082300 */ addu $at, $at, $v1
/* 12B18 80022318 2AB82894 */ lhu $t0, %lo(_svm_voice+2)($at)
/* 12B1C 8002231C D4880008 */ j .L80022350
/* 12B20 80022320 21588000 */ addu $t3, $a0, $zero
.L80022324:
/* 12B24 80022324 0B008514 */ bne $a0, $a1, .L80022354
/* 12B28 80022328 00000000 */ nop
/* 12B2C 8002232C 0480013C */ lui $at, %hi(_svm_voice+2)
/* 12B30 80022330 21082300 */ addu $at, $at, $v1
/* 12B34 80022334 2AB82284 */ lh $v0, %lo(_svm_voice+2)($at)
/* 12B38 80022338 00000000 */ nop
/* 12B3C 8002233C 21184000 */ addu $v1, $v0, $zero
/* 12B40 80022340 2A100201 */ slt $v0, $t0, $v0
/* 12B44 80022344 03004010 */ beqz $v0, .L80022354
/* 12B48 80022348 00000000 */ nop
/* 12B4C 8002234C 21406000 */ addu $t0, $v1, $zero
.L80022350:
/* 12B50 80022350 2148E000 */ addu $t1, $a3, $zero
.L80022354:
/* 12B54 80022354 0100E724 */ addiu $a3, $a3, 0x1
/* 12B58 80022358 0880033C */ lui $v1, %hi(spuVmMaxVoice)
/* 12B5C 8002235C E86F6390 */ lbu $v1, %lo(spuVmMaxVoice)($v1)
/* 12B60 80022360 FF00E230 */ andi $v0, $a3, 0xFF
/* 12B64 80022364 2B104300 */ sltu $v0, $v0, $v1
/* 12B68 80022368 B7FF4014 */ bnez $v0, .L80022248
/* 12B6C 8002236C FF00E230 */ andi $v0, $a3, 0xFF
.L80022370:
/* 12B70 80022370 FF000332 */ andi $v1, $s0, 0xFF
/* 12B74 80022374 63000234 */ ori $v0, $zero, 0x63
/* 12B78 80022378 05006214 */ bne $v1, $v0, .L80022390
/* 12B7C 8002237C FF004231 */ andi $v0, $t2, 0xFF
/* 12B80 80022380 03004014 */ bnez $v0, .L80022390
/* 12B84 80022384 21802001 */ addu $s0, $t1, $zero
/* 12B88 80022388 0880103C */ lui $s0, %hi(spuVmMaxVoice)
/* 12B8C 8002238C E86F1092 */ lbu $s0, %lo(spuVmMaxVoice)($s0)
.L80022390:
/* 12B90 80022390 0880043C */ lui $a0, %hi(spuVmMaxVoice)
/* 12B94 80022394 E86F8490 */ lbu $a0, %lo(spuVmMaxVoice)($a0)
/* 12B98 80022398 FF000232 */ andi $v0, $s0, 0xFF
/* 12B9C 8002239C 2B104400 */ sltu $v0, $v0, $a0
/* 12BA0 800223A0 2E004010 */ beqz $v0, .L8002245C
/* 12BA4 800223A4 00000000 */ nop
/* 12BA8 800223A8 14008010 */ beqz $a0, .L800223FC
/* 12BAC 800223AC 21380000 */ addu $a3, $zero, $zero
/* 12BB0 800223B0 0480053C */ lui $a1, %hi(_svm_voice)
/* 12BB4 800223B4 28B8A524 */ addiu $a1, $a1, %lo(_svm_voice)
/* 12BB8 800223B8 FF00E330 */ andi $v1, $a3, 0xFF
.L800223BC:
/* 12BBC 800223BC 40100300 */ sll $v0, $v1, 1
/* 12BC0 800223C0 21104300 */ addu $v0, $v0, $v1
/* 12BC4 800223C4 80100200 */ sll $v0, $v0, 2
/* 12BC8 800223C8 21104300 */ addu $v0, $v0, $v1
/* 12BCC 800223CC 80100200 */ sll $v0, $v0, 2
/* 12BD0 800223D0 0100E724 */ addiu $a3, $a3, 0x1
/* 12BD4 800223D4 0480013C */ lui $at, %hi(_svm_voice+2)
/* 12BD8 800223D8 21082200 */ addu $at, $at, $v0
/* 12BDC 800223DC 2AB82394 */ lhu $v1, %lo(_svm_voice+2)($at)
/* 12BE0 800223E0 21104500 */ addu $v0, $v0, $a1
/* 12BE4 800223E4 01006324 */ addiu $v1, $v1, 0x1
/* 12BE8 800223E8 020043A4 */ sh $v1, 0x2($v0)
/* 12BEC 800223EC FF00E230 */ andi $v0, $a3, 0xFF
/* 12BF0 800223F0 2B104400 */ sltu $v0, $v0, $a0
/* 12BF4 800223F4 F1FF4014 */ bnez $v0, .L800223BC
/* 12BF8 800223F8 FF00E330 */ andi $v1, $a3, 0xFF
.L800223FC:
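/* same strength-reduced *52 voice-record indexing as above, this time for the chosen voice index in $s0 */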
/* 12BFC 800223FC FF000332 */ andi $v1, $s0, 0xFF
/* 12C00 80022400 40100300 */ sll $v0, $v1, 1
/* 12C04 80022404 21104300 */ addu $v0, $v0, $v1
/* 12C08 80022408 80100200 */ sll $v0, $v0, 2
/* 12C0C 8002240C 21104300 */ addu $v0, $v0, $v1
/* 12C10 80022410 80100200 */ sll $v0, $v0, 2
/* 12C14 80022414 0480013C */ lui $at, %hi(_svm_voice+2)
/* 12C18 80022418 21082200 */ addu $at, $at, $v0
/* 12C1C 8002241C 2AB820A4 */ sh $zero, %lo(_svm_voice+2)($at)
/* 12C20 80022420 0980033C */ lui $v1, %hi(_svm_cur+15)
/* 12C24 80022424 D7786390 */ lbu $v1, %lo(_svm_cur+15)($v1)
/* 12C28 80022428 0480013C */ lui $at, %hi(_svm_voice+24)
/* 12C2C 8002242C 21082200 */ addu $at, $at, $v0
/* 12C30 80022430 40B823A4 */ sh $v1, %lo(_svm_voice+24)($at)
/* 12C34 80022434 0480013C */ lui $at, %hi(_svm_voice+27)
/* 12C38 80022438 21082200 */ addu $at, $at, $v0
/* 12C3C 8002243C 43B82390 */ lbu $v1, %lo(_svm_voice+27)($at)
/* 12C40 80022440 02000234 */ ori $v0, $zero, 0x2
/* 12C44 80022444 06006214 */ bne $v1, $v0, .L80022460
/* 12C48 80022448 FF000232 */ andi $v0, $s0, 0xFF
/* 12C4C 8002244C FF00053C */ lui $a1, 0xFF
/* 12C50 80022450 FFFFA534 */ ori $a1, $a1, 0xFFFF
/* 12C54 80022454 1CA4000C */ jal SpuSetNoiseVoice
/* 12C58 80022458 21200000 */ addu $a0, $zero, $zero
.L8002245C:
/* 12C5C 8002245C FF000232 */ andi $v0, $s0, 0xFF
.L80022460:
/* 12C60 80022460 2400BF8F */ lw $ra, 0x24($sp)
/* 12C64 80022464 2000B08F */ lw $s0, 0x20($sp)
/* 12C68 80022468 2800BD27 */ addiu $sp, $sp, 0x28
/* 12C6C 8002246C 0800E003 */ jr $ra
/* 12C70 80022470 00000000 */ nop
.size SpuVmAlloc, . - SpuVmAlloc
|
sozud/psy-q-splitter | 1,435 | splitter/test_data/_SsUtResolveADSR.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsUtResolveADSR
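/* (annotation, inferred from the stores below: unpacks the packed ADSR1 word in $a0 and ADSR2 word in $a1 into separate 16-bit fields of the struct at $a2) */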
/* FDBC 8001F5BC 00808230 */ andi $v0, $a0, 0x8000
/* FDC0 8001F5C0 0A00C2A4 */ sh $v0, 0xA($a2)
/* FDC4 8001F5C4 0080A230 */ andi $v0, $a1, 0x8000
/* FDC8 8001F5C8 0C00C2A4 */ sh $v0, 0xC($a2)
/* FDCC 8001F5CC 0040A230 */ andi $v0, $a1, 0x4000
/* FDD0 8001F5D0 1000C2A4 */ sh $v0, 0x10($a2)
/* FDD4 8001F5D4 2000A230 */ andi $v0, $a1, 0x20
/* FDD8 8001F5D8 FFFF8330 */ andi $v1, $a0, 0xFFFF
/* FDDC 8001F5DC 0E00C2A4 */ sh $v0, 0xE($a2)
/* FDE0 8001F5E0 02120300 */ srl $v0, $v1, 8
/* FDE4 8001F5E4 7F004230 */ andi $v0, $v0, 0x7F
/* FDE8 8001F5E8 02190300 */ srl $v1, $v1, 4
/* FDEC 8001F5EC 0F006330 */ andi $v1, $v1, 0xF
/* FDF0 8001F5F0 0F008430 */ andi $a0, $a0, 0xF
/* FDF4 8001F5F4 0000C2A4 */ sh $v0, 0x0($a2)
/* FDF8 8001F5F8 82110500 */ srl $v0, $a1, 6
/* FDFC 8001F5FC 7F004230 */ andi $v0, $v0, 0x7F
/* FE00 8001F600 1F00A530 */ andi $a1, $a1, 0x1F
/* FE04 8001F604 0200C3A4 */ sh $v1, 0x2($a2)
/* FE08 8001F608 0400C4A4 */ sh $a0, 0x4($a2)
/* FE0C 8001F60C 0600C2A4 */ sh $v0, 0x6($a2)
/* FE10 8001F610 0800E003 */ jr $ra
/* FE14 8001F614 0800C5A4 */ sh $a1, 0x8($a2)
.size _SsUtResolveADSR, . - _SsUtResolveADSR
|
sozud/psy-q-splitter | 10,308 | splitter/test_data/_SsSndCrescendo.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsSndCrescendo
/* 102A8 8001FAA8 C0FFBD27 */ addiu $sp, $sp, -0x40
/* 102AC 8001FAAC 21388000 */ addu $a3, $a0, $zero
/* 102B0 8001FAB0 00140700 */ sll $v0, $a3, 16
/* 102B4 8001FAB4 0980033C */ lui $v1, %hi(_ss_score)
/* 102B8 8001FAB8 9C7C6324 */ addiu $v1, $v1, %lo(_ss_score)
/* 102BC 8001FABC 83130200 */ sra $v0, $v0, 14
/* 102C0 8001FAC0 3000B2AF */ sw $s2, 0x30($sp)
/* 102C4 8001FAC4 21904300 */ addu $s2, $v0, $v1
/* 102C8 8001FAC8 001C0500 */ sll $v1, $a1, 16
/* 102CC 8001FACC 031C0300 */ sra $v1, $v1, 16
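/* the shifts/subtracts below compute $v1 * 43; the later sll-by-2 scales that to a 172 (0xAC) byte stride, apparently the per-sequence record size hanging off _ss_score */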
/* 102D0 8001FAD0 40100300 */ sll $v0, $v1, 1
/* 102D4 8001FAD4 21104300 */ addu $v0, $v0, $v1
/* 102D8 8001FAD8 80100200 */ sll $v0, $v0, 2
/* 102DC 8001FADC 23104300 */ subu $v0, $v0, $v1
/* 102E0 8001FAE0 80100200 */ sll $v0, $v0, 2
/* 102E4 8001FAE4 23104300 */ subu $v0, $v0, $v1
/* 102E8 8001FAE8 2C00B1AF */ sw $s1, 0x2C($sp)
/* 102EC 8001FAEC 80880200 */ sll $s1, $v0, 2
/* 102F0 8001FAF0 3800B4AF */ sw $s4, 0x38($sp)
/* 102F4 8001FAF4 21A0E000 */ addu $s4, $a3, $zero
/* 102F8 8001FAF8 3C00BFAF */ sw $ra, 0x3C($sp)
/* 102FC 8001FAFC 3400B3AF */ sw $s3, 0x34($sp)
/* 10300 8001FB00 2800B0AF */ sw $s0, 0x28($sp)
/* 10304 8001FB04 0000438E */ lw $v1, 0x0($s2)
/* 10308 8001FB08 2198A000 */ addu $s3, $a1, $zero
/* 1030C 8001FB0C 21802302 */ addu $s0, $s1, $v1
/* 10310 8001FB10 9800028E */ lw $v0, 0x98($s0)
/* 10314 8001FB14 42000686 */ lh $a2, 0x42($s0)
/* 10318 8001FB18 FFFF4224 */ addiu $v0, $v0, -0x1
/* 1031C 8001FB1C 2118C000 */ addu $v1, $a2, $zero
/* 10320 8001FB20 2500C018 */ blez $a2, .L8001FBB8
/* 10324 8001FB24 980002AE */ sw $v0, 0x98($s0)
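/* the divu / bnez / break 7 triple below is the compiler's divide-by-zero guard, not program logic */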
/* 10328 8001FB28 1B004600 */ divu $zero, $v0, $a2
/* 1032C 8001FB2C 0200C014 */ bnez $a2, .L8001FB38
/* 10330 8001FB30 00000000 */ nop
/* 10334 8001FB34 0D000700 */ break 7
.L8001FB38:
/* 10338 8001FB38 10100000 */ mfhi $v0
/* 1033C 8001FB3C 00000000 */ nop
/* 10340 8001FB40 80004014 */ bnez $v0, .L8001FD44
/* 10344 8001FB44 00000000 */ nop
/* 10348 8001FB48 40000296 */ lhu $v0, 0x40($s0)
/* 1034C 8001FB4C 00000000 */ nop
/* 10350 8001FB50 FFFF4224 */ addiu $v0, $v0, -0x1
/* 10354 8001FB54 400002A6 */ sh $v0, 0x40($s0)
/* 10358 8001FB58 00140200 */ sll $v0, $v0, 16
/* 1035C 8001FB5C 50004004 */ bltz $v0, .L8001FCA0
/* 10360 8001FB60 00120500 */ sll $v0, $a1, 8
/* 10364 8001FB64 2510E200 */ or $v0, $a3, $v0
/* 10368 8001FB68 00140200 */ sll $v0, $v0, 16
/* 1036C 8001FB6C 038C0200 */ sra $s1, $v0, 16
/* 10370 8001FB70 21202002 */ addu $a0, $s1, $zero
/* 10374 8001FB74 1000A527 */ addiu $a1, $sp, 0x10
/* 10378 8001FB78 7C95000C */ jal SpuVmGetSeqVol
/* 1037C 8001FB7C 1200A627 */ addiu $a2, $sp, 0x12
/* 10380 8001FB80 1000A297 */ lhu $v0, 0x10($sp)
/* 10384 8001FB84 40000386 */ lh $v1, 0x40($s0)
/* 10388 8001FB88 01004424 */ addiu $a0, $v0, 0x1
/* 1038C 8001FB8C 21104300 */ addu $v0, $v0, $v1
/* 10390 8001FB90 2A104400 */ slt $v0, $v0, $a0
/* 10394 8001FB94 51004014 */ bnez $v0, .L8001FCDC
/* 10398 8001FB98 21202002 */ addu $a0, $s1, $zero
/* 1039C 8001FB9C 21380000 */ addu $a3, $zero, $zero
/* 103A0 8001FBA0 1000A597 */ lhu $a1, 0x10($sp)
/* 103A4 8001FBA4 1200A697 */ lhu $a2, 0x12($sp)
/* 103A8 8001FBA8 0100A524 */ addiu $a1, $a1, 0x1
/* 103AC 8001FBAC 0100C624 */ addiu $a2, $a2, 0x1
/* 103B0 8001FBB0 247F0008 */ j .L8001FC90
/* 103B4 8001FBB4 FFFFA530 */ andi $a1, $a1, 0xFFFF
.L8001FBB8:
/* 103B8 8001FBB8 6300C104 */ bgez $a2, .L8001FD48
/* 103BC 8001FBBC 00221300 */ sll $a0, $s3, 8
/* 103C0 8001FBC0 40000296 */ lhu $v0, 0x40($s0)
/* 103C4 8001FBC4 00000000 */ nop
/* 103C8 8001FBC8 21104300 */ addu $v0, $v0, $v1
/* 103CC 8001FBCC 400002A6 */ sh $v0, 0x40($s0)
/* 103D0 8001FBD0 00140200 */ sll $v0, $v0, 16
/* 103D4 8001FBD4 32004004 */ bltz $v0, .L8001FCA0
/* 103D8 8001FBD8 00120500 */ sll $v0, $a1, 8
/* 103DC 8001FBDC 2510E200 */ or $v0, $a3, $v0
/* 103E0 8001FBE0 00140200 */ sll $v0, $v0, 16
/* 103E4 8001FBE4 038C0200 */ sra $s1, $v0, 16
/* 103E8 8001FBE8 21202002 */ addu $a0, $s1, $zero
/* 103EC 8001FBEC 1000A527 */ addiu $a1, $sp, 0x10
/* 103F0 8001FBF0 7C95000C */ jal SpuVmGetSeqVol
/* 103F4 8001FBF4 1200A627 */ addiu $a2, $sp, 0x12
/* 103F8 8001FBF8 1000A297 */ lhu $v0, 0x10($sp)
/* 103FC 8001FBFC 42000386 */ lh $v1, 0x42($s0)
/* 10400 8001FC00 00000000 */ nop
/* 10404 8001FC04 23104300 */ subu $v0, $v0, $v1
/* 10408 8001FC08 7F004228 */ slti $v0, $v0, 0x7F
/* 1040C 8001FC0C 0B004014 */ bnez $v0, .L8001FC3C
/* 10410 8001FC10 00000000 */ nop
/* 10414 8001FC14 1200A297 */ lhu $v0, 0x12($sp)
/* 10418 8001FC18 00000000 */ nop
/* 1041C 8001FC1C 23104300 */ subu $v0, $v0, $v1
/* 10420 8001FC20 7F004228 */ slti $v0, $v0, 0x7F
/* 10424 8001FC24 05004014 */ bnez $v0, .L8001FC3C
/* 10428 8001FC28 21202002 */ addu $a0, $s1, $zero
/* 1042C 8001FC2C 7F000534 */ ori $a1, $zero, 0x7F
/* 10430 8001FC30 7F000634 */ ori $a2, $zero, 0x7F
/* 10434 8001FC34 1F95000C */ jal SpuVmSetSeqVol
/* 10438 8001FC38 21380000 */ addu $a3, $zero, $zero
.L8001FC3C:
/* 1043C 8001FC3C 9400038E */ lw $v1, 0x94($s0)
/* 10440 8001FC40 9800028E */ lw $v0, 0x98($s0)
/* 10444 8001FC44 42000486 */ lh $a0, 0x42($s0)
/* 10448 8001FC48 23186200 */ subu $v1, $v1, $v0
/* 1044C 8001FC4C 23100400 */ negu $v0, $a0
/* 10450 8001FC50 18006200 */ mult $v1, $v0
/* 10454 8001FC54 3E000386 */ lh $v1, 0x3E($s0)
/* 10458 8001FC58 12100000 */ mflo $v0
/* 1045C 8001FC5C 2B104300 */ sltu $v0, $v0, $v1
/* 10460 8001FC60 1E004010 */ beqz $v0, .L8001FCDC
/* 10464 8001FC64 21408000 */ addu $t0, $a0, $zero
/* 10468 8001FC68 00221300 */ sll $a0, $s3, 8
/* 1046C 8001FC6C 25208402 */ or $a0, $s4, $a0
/* 10470 8001FC70 00240400 */ sll $a0, $a0, 16
/* 10474 8001FC74 03240400 */ sra $a0, $a0, 16
/* 10478 8001FC78 21380000 */ addu $a3, $zero, $zero
/* 1047C 8001FC7C 1000A597 */ lhu $a1, 0x10($sp)
/* 10480 8001FC80 1200A697 */ lhu $a2, 0x12($sp)
/* 10484 8001FC84 2328A800 */ subu $a1, $a1, $t0
/* 10488 8001FC88 FFFFA530 */ andi $a1, $a1, 0xFFFF
/* 1048C 8001FC8C 2330C800 */ subu $a2, $a2, $t0
.L8001FC90:
/* 10490 8001FC90 1F95000C */ jal SpuVmSetSeqVol
/* 10494 8001FC94 FFFFC630 */ andi $a2, $a2, 0xFFFF
/* 10498 8001FC98 377F0008 */ j .L8001FCDC
/* 1049C 8001FC9C 00000000 */ nop
.L8001FCA0:
/* 104A0 8001FCA0 00220500 */ sll $a0, $a1, 8
/* 104A4 8001FCA4 2520E400 */ or $a0, $a3, $a0
/* 104A8 8001FCA8 00240400 */ sll $a0, $a0, 16
/* 104AC 8001FCAC 03240400 */ sra $a0, $a0, 16
/* 104B0 8001FCB0 7F000534 */ ori $a1, $zero, 0x7F
/* 104B4 8001FCB4 7F000634 */ ori $a2, $zero, 0x7F
/* 104B8 8001FCB8 1F95000C */ jal SpuVmSetSeqVol
/* 104BC 8001FCBC 21380000 */ addu $a3, $zero, $zero
/* 104C0 8001FCC0 0000438E */ lw $v1, 0x0($s2)
/* 104C4 8001FCC4 00000000 */ nop
/* 104C8 8001FCC8 21182302 */ addu $v1, $s1, $v1
/* 104CC 8001FCCC 9000628C */ lw $v0, 0x90($v1)
/* 104D0 8001FCD0 EFFF0424 */ addiu $a0, $zero, -0x11
/* 104D4 8001FCD4 24104400 */ and $v0, $v0, $a0
/* 104D8 8001FCD8 900062AC */ sw $v0, 0x90($v1)
.L8001FCDC:
/* 104DC 8001FCDC 9800028E */ lw $v0, 0x98($s0)
/* 104E0 8001FCE0 00000000 */ nop
/* 104E4 8001FCE4 05004010 */ beqz $v0, .L8001FCFC
/* 104E8 8001FCE8 00241400 */ sll $a0, $s4, 16
/* 104EC 8001FCEC 40000286 */ lh $v0, 0x40($s0)
/* 104F0 8001FCF0 00000000 */ nop
/* 104F4 8001FCF4 13004014 */ bnez $v0, .L8001FD44
/* 104F8 8001FCF8 00000000 */ nop
.L8001FCFC:
/* 104FC 8001FCFC 83230400 */ sra $a0, $a0, 14
/* 10500 8001FD00 001C1300 */ sll $v1, $s3, 16
/* 10504 8001FD04 031C0300 */ sra $v1, $v1, 16
/* 10508 8001FD08 40100300 */ sll $v0, $v1, 1
/* 1050C 8001FD0C 21104300 */ addu $v0, $v0, $v1
/* 10510 8001FD10 80100200 */ sll $v0, $v0, 2
/* 10514 8001FD14 23104300 */ subu $v0, $v0, $v1
/* 10518 8001FD18 80100200 */ sll $v0, $v0, 2
/* 1051C 8001FD1C 23104300 */ subu $v0, $v0, $v1
/* 10520 8001FD20 0980013C */ lui $at, %hi(_ss_score)
/* 10524 8001FD24 21082400 */ addu $at, $at, $a0
/* 10528 8001FD28 9C7C238C */ lw $v1, %lo(_ss_score)($at)
/* 1052C 8001FD2C 80100200 */ sll $v0, $v0, 2
/* 10530 8001FD30 21104300 */ addu $v0, $v0, $v1
/* 10534 8001FD34 9000438C */ lw $v1, 0x90($v0)
/* 10538 8001FD38 EFFF0424 */ addiu $a0, $zero, -0x11
/* 1053C 8001FD3C 24186400 */ and $v1, $v1, $a0
/* 10540 8001FD40 900043AC */ sw $v1, 0x90($v0)
.L8001FD44:
/* 10544 8001FD44 00221300 */ sll $a0, $s3, 8
.L8001FD48:
/* 10548 8001FD48 25208402 */ or $a0, $s4, $a0
/* 1054C 8001FD4C 00240400 */ sll $a0, $a0, 16
/* 10550 8001FD50 03240400 */ sra $a0, $a0, 16
/* 10554 8001FD54 78000526 */ addiu $a1, $s0, 0x78
/* 10558 8001FD58 7C95000C */ jal SpuVmGetSeqVol
/* 1055C 8001FD5C 7A000626 */ addiu $a2, $s0, 0x7A
/* 10560 8001FD60 3C00BF8F */ lw $ra, 0x3C($sp)
/* 10564 8001FD64 3800B48F */ lw $s4, 0x38($sp)
/* 10568 8001FD68 3400B38F */ lw $s3, 0x34($sp)
/* 1056C 8001FD6C 3000B28F */ lw $s2, 0x30($sp)
/* 10570 8001FD70 2C00B18F */ lw $s1, 0x2C($sp)
/* 10574 8001FD74 2800B08F */ lw $s0, 0x28($sp)
/* 10578 8001FD78 4000BD27 */ addiu $sp, $sp, 0x40
/* 1057C 8001FD7C 0800E003 */ jr $ra
/* 10580 8001FD80 00000000 */ nop
.size _SsSndCrescendo, . - _SsSndCrescendo
|
sozud/psy-q-splitter | 3,068 | splitter/test_data/_SpuInit.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SpuInit
/* 00000000 27BDFFE8 */ addiu $sp, $sp, -0x18
/* 00000004 AFB00010 */ sw $s0, 0x10($sp)
/* 00000008 AFBF0014 */ sw $ra, 0x14($sp)
/* 0000000C 0C000000 */ jal ResetCallback
/* 00000010 00808021 */ addu $s0, $a0, $zero
/* 00000014 0C000000 */ jal _spu_init
/* 00000018 02002021 */ addu $a0, $s0, $zero
/* 0000001C 16000008 */ bnez $s0, .L00000040
/* 00000020 3404C000 */ ori $a0, $zero, 0xC000
/* 00000024 34030017 */ ori $v1, $zero, 0x17
/* 00000028 3C020000 */ lui $v0, %hi(_spu_voice_centerNote+46)
/* 0000002C 24420000 */ addiu $v0, $v0, %lo(_spu_voice_centerNote+46)
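/* taken only when the init argument ($s0) is zero: seed all 24 entries of _spu_voice_centerNote (loop counter 0x17..0, descending from offset +46) with the 0xC000 default */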
.L00000030:
/* 00000030 A4440000 */ sh $a0, 0x0($v0)
/* 00000034 2463FFFF */ addiu $v1, $v1, -0x1
/* 00000038 0461FFFD */ bgez $v1, .L00000030
/* 0000003C 2442FFFE */ addiu $v0, $v0, -0x2
.L00000040:
/* 00000040 0C000000 */ jal SpuStart
/* 00000044 00000000 */ nop
/* 00000048 340400D1 */ ori $a0, $zero, 0xD1
/* 0000004C 3C050000 */ lui $a1, %hi(_spu_rev_startaddr)
/* 00000050 8CA50000 */ lw $a1, %lo(_spu_rev_startaddr)($a1)
/* 00000054 3C010000 */ lui $at, %hi(_spu_rev_flag)
/* 00000058 AC200000 */ sw $zero, %lo(_spu_rev_flag)($at)
/* 0000005C 3C010000 */ lui $at, %hi(_spu_rev_reserve_wa)
/* 00000060 AC200000 */ sw $zero, %lo(_spu_rev_reserve_wa)($at)
/* 00000064 3C010000 */ lui $at, %hi(_spu_rev_attr+4)
/* 00000068 AC200000 */ sw $zero, %lo(_spu_rev_attr+4)($at)
/* 0000006C 3C010000 */ lui $at, %hi(_spu_rev_attr+8)
/* 00000070 A4200000 */ sh $zero, %lo(_spu_rev_attr+8)($at)
/* 00000074 3C010000 */ lui $at, %hi(_spu_rev_attr+10)
/* 00000078 A4200000 */ sh $zero, %lo(_spu_rev_attr+10)($at)
/* 0000007C 3C010000 */ lui $at, %hi(_spu_rev_attr+12)
/* 00000080 AC200000 */ sw $zero, %lo(_spu_rev_attr+12)($at)
/* 00000084 3C010000 */ lui $at, %hi(_spu_rev_attr+16)
/* 00000088 AC200000 */ sw $zero, %lo(_spu_rev_attr+16)($at)
/* 0000008C 3C010000 */ lui $at, %hi(_spu_rev_offsetaddr)
/* 00000090 AC250000 */ sw $a1, %lo(_spu_rev_offsetaddr)($at)
/* 00000094 0C000000 */ jal _spu_FsetRXX
/* 00000098 00003021 */ addu $a2, $zero, $zero
/* 0000009C 3C010000 */ lui $at, %hi(_spu_trans_mode)
/* 000000A0 AC200000 */ sw $zero, %lo(_spu_trans_mode)($at)
/* 000000A4 3C010000 */ lui $at, %hi(_spu_transMode)
/* 000000A8 AC200000 */ sw $zero, %lo(_spu_transMode)($at)
/* 000000AC 3C010000 */ lui $at, %hi(_spu_keystat)
/* 000000B0 AC200000 */ sw $zero, %lo(_spu_keystat)($at)
/* 000000B4 8FBF0014 */ lw $ra, 0x14($sp)
/* 000000B8 8FB00010 */ lw $s0, 0x10($sp)
/* 000000BC 27BD0018 */ addiu $sp, $sp, 0x18
/* 000000C0 03E00008 */ jr $ra
/* 000000C4 00000000 */ nop
.size _SpuInit, . - _SpuInit
|
sozud/psy-q-splitter | 3,890 | splitter/test_data/_SsSeqPlay.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsSeqPlay
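/* (annotation, inferred: one sequencer tick -- balances the accumulated delta time at +0x88 against the tick counters at +0x6E/+0x70, pulling events via _SsGetSeqData until the score catches up) */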
/* E064 8001D864 C8FFBD27 */ addiu $sp, $sp, -0x38
/* E068 8001D868 003C0400 */ sll $a3, $a0, 16
/* E06C 8001D86C 83230700 */ sra $a0, $a3, 14
/* E070 8001D870 002C0500 */ sll $a1, $a1, 16
/* E074 8001D874 031C0500 */ sra $v1, $a1, 16
/* E078 8001D878 40100300 */ sll $v0, $v1, 1
/* E07C 8001D87C 21104300 */ addu $v0, $v0, $v1
/* E080 8001D880 80100200 */ sll $v0, $v0, 2
/* E084 8001D884 23104300 */ subu $v0, $v0, $v1
/* E088 8001D888 80100200 */ sll $v0, $v0, 2
/* E08C 8001D88C 23104300 */ subu $v0, $v0, $v1
/* E090 8001D890 3000BFAF */ sw $ra, 0x30($sp)
/* E094 8001D894 2C00B3AF */ sw $s3, 0x2C($sp)
/* E098 8001D898 2800B2AF */ sw $s2, 0x28($sp)
/* E09C 8001D89C 2400B1AF */ sw $s1, 0x24($sp)
/* E0A0 8001D8A0 2000B0AF */ sw $s0, 0x20($sp)
/* E0A4 8001D8A4 0980013C */ lui $at, %hi(_ss_score)
/* E0A8 8001D8A8 21082400 */ addu $at, $at, $a0
/* E0AC 8001D8AC 9C7C238C */ lw $v1, %lo(_ss_score)($at)
/* E0B0 8001D8B0 80100200 */ sll $v0, $v0, 2
/* E0B4 8001D8B4 21884300 */ addu $s1, $v0, $v1
/* E0B8 8001D8B8 70002286 */ lh $v0, 0x70($s1)
/* E0BC 8001D8BC 8800238E */ lw $v1, 0x88($s1)
/* E0C0 8001D8C0 00000000 */ nop
/* E0C4 8001D8C4 23206200 */ subu $a0, $v1, $v0
/* E0C8 8001D8C8 10008018 */ blez $a0, .L8001D90C
/* E0CC 8001D8CC 21304000 */ addu $a2, $v0, $zero
/* E0D0 8001D8D0 6E002386 */ lh $v1, 0x6E($s1)
/* E0D4 8001D8D4 00000000 */ nop
/* E0D8 8001D8D8 04006018 */ blez $v1, .L8001D8EC
/* E0DC 8001D8DC 21106000 */ addu $v0, $v1, $zero
/* E0E0 8001D8E0 FFFF4224 */ addiu $v0, $v0, -0x1
/* E0E4 8001D8E4 55760008 */ j .L8001D954
/* E0E8 8001D8E8 6E0022A6 */ sh $v0, 0x6E($s1)
.L8001D8EC:
/* E0EC 8001D8EC 05006014 */ bnez $v1, .L8001D904
/* E0F0 8001D8F0 00000000 */ nop
/* E0F4 8001D8F4 8800228E */ lw $v0, 0x88($s1)
/* E0F8 8001D8F8 6E0026A6 */ sh $a2, 0x6E($s1)
/* E0FC 8001D8FC 54760008 */ j .L8001D950
/* E100 8001D900 FFFF4224 */ addiu $v0, $v0, -0x1
.L8001D904:
/* E104 8001D904 55760008 */ j .L8001D954
/* E108 8001D908 880024AE */ sw $a0, 0x88($s1)
.L8001D90C:
/* E10C 8001D90C 2A104300 */ slt $v0, $v0, $v1
/* E110 8001D910 10004014 */ bnez $v0, .L8001D954
/* E114 8001D914 21806000 */ addu $s0, $v1, $zero
/* E118 8001D918 2198E000 */ addu $s3, $a3, $zero
/* E11C 8001D91C 2190A000 */ addu $s2, $a1, $zero
/* E120 8001D920 03241300 */ sra $a0, $s3, 16
.L8001D924:
/* E124 8001D924 5D76000C */ jal _SsGetSeqData
/* E128 8001D928 032C1200 */ sra $a1, $s2, 16
/* E12C 8001D92C 8800228E */ lw $v0, 0x88($s1)
/* E130 8001D930 00000000 */ nop
/* E134 8001D934 FBFF4010 */ beqz $v0, .L8001D924
/* E138 8001D938 03241300 */ sra $a0, $s3, 16
/* E13C 8001D93C 70002386 */ lh $v1, 0x70($s1)
/* E140 8001D940 21800202 */ addu $s0, $s0, $v0
/* E144 8001D944 2A100302 */ slt $v0, $s0, $v1
/* E148 8001D948 F6FF4014 */ bnez $v0, .L8001D924
/* E14C 8001D94C 23100302 */ subu $v0, $s0, $v1
.L8001D950:
/* E150 8001D950 880022AE */ sw $v0, 0x88($s1)
.L8001D954:
/* E154 8001D954 3000BF8F */ lw $ra, 0x30($sp)
/* E158 8001D958 2C00B38F */ lw $s3, 0x2C($sp)
/* E15C 8001D95C 2800B28F */ lw $s2, 0x28($sp)
/* E160 8001D960 2400B18F */ lw $s1, 0x24($sp)
/* E164 8001D964 2000B08F */ lw $s0, 0x20($sp)
/* E168 8001D968 3800BD27 */ addiu $sp, $sp, 0x38
/* E16C 8001D96C 0800E003 */ jr $ra
/* E170 8001D970 00000000 */ nop
.size _SsSeqPlay, . - _SsSeqPlay
|
spotty118/Rustos | 1,033 | src/boot.s | # Multiboot header
.set ALIGN, 1<<0 # align loaded modules on page boundaries
.set MEMINFO, 1<<1 # provide memory map
.set VIDEO, 1<<2 # request video mode
.set FLAGS, ALIGN | MEMINFO | VIDEO # multiboot 'flags' field
.set MAGIC, 0x1BADB002 # magic number lets the bootloader find the header
.set CHECKSUM, -(MAGIC + FLAGS) # checksum required to prove we are multiboot
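# i.e. MAGIC + FLAGS + CHECKSUM must sum to 0 mod 2^32, which the negation above guarantees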
# Multiboot header section
.section .multiboot
.align 4
.long MAGIC
.long FLAGS
.long CHECKSUM
.long 0 # header_addr (unused for ELF)
.long 0 # load_addr
.long 0 # load_end_addr
.long 0 # bss_end_addr
.long 0 # entry_addr
.long 0 # mode_type (0 = linear framebuffer)
.long 1024 # width
.long 768 # height
.long 32 # depth (bits per pixel)
# Stack section
.section .bss
.align 16
stack_bottom:
.skip 16384 # 16 KiB
stack_top:
# Entry point
.section .text
.global _start
.type _start, @function
_start:
mov $stack_top, %esp
call rust_main
cli
1: hlt
jmp 1b
.size _start, . - _start
|
sozud/psy-q-splitter | 3,131 | splitter/test_data/SpuVmVSetUp.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel SpuVmVSetUp
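/* (annotation, inferred: checks vab id < 16 and _svm_vab_used[id] == 1, then program < kMaxPrograms; on success caches the vab header/program/tone tables into the _svm_* globals and returns 0 in $v0, else returns -1) */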
/* 12338 80021B38 21308000 */ addu $a2, $a0, $zero
/* 1233C 80021B3C FFFFC230 */ andi $v0, $a2, 0xFFFF
/* 12340 80021B40 1000422C */ sltiu $v0, $v0, 0x10
/* 12344 80021B44 10004010 */ beqz $v0, .L80021B88
/* 12348 80021B48 2138A000 */ addu $a3, $a1, $zero
/* 1234C 80021B4C 00140400 */ sll $v0, $a0, 16
/* 12350 80021B50 03240200 */ sra $a0, $v0, 16
/* 12354 80021B54 0980013C */ lui $at, %hi(_svm_vab_used)
/* 12358 80021B58 21082400 */ addu $at, $at, $a0
/* 1235C 80021B5C E8782390 */ lbu $v1, %lo(_svm_vab_used)($at)
/* 12360 80021B60 01000234 */ ori $v0, $zero, 0x1
/* 12364 80021B64 23006214 */ bne $v1, $v0, .L80021BF4
/* 12368 80021B68 FFFF0224 */ addiu $v0, $zero, -0x1
/* 1236C 80021B6C 001C0500 */ sll $v1, $a1, 16
/* 12370 80021B70 0780023C */ lui $v0, %hi(kMaxPrograms)
/* 12374 80021B74 94C34284 */ lh $v0, %lo(kMaxPrograms)($v0)
/* 12378 80021B78 032C0300 */ sra $a1, $v1, 16
/* 1237C 80021B7C 2A10A200 */ slt $v0, $a1, $v0
/* 12380 80021B80 03004014 */ bnez $v0, .L80021B90
/* 12384 80021B84 80100400 */ sll $v0, $a0, 2
.L80021B88:
/* 12388 80021B88 FD860008 */ j .L80021BF4
/* 1238C 80021B8C FFFF0224 */ addiu $v0, $zero, -0x1
.L80021B90:
/* 12390 80021B90 0480013C */ lui $at, %hi(_svm_vab_vh)
/* 12394 80021B94 21082200 */ addu $at, $at, $v0
/* 12398 80021B98 14C9238C */ lw $v1, %lo(_svm_vab_vh)($at)
/* 1239C 80021B9C 0480013C */ lui $at, %hi(_svm_vab_pg)
/* 123A0 80021BA0 21082200 */ addu $at, $at, $v0
/* 123A4 80021BA4 C8C8248C */ lw $a0, %lo(_svm_vab_pg)($at)
/* 123A8 80021BA8 0480013C */ lui $at, %hi(_svm_vab_tn)
/* 123AC 80021BAC 21082200 */ addu $at, $at, $v0
/* 123B0 80021BB0 58C9228C */ lw $v0, %lo(_svm_vab_tn)($at)
/* 123B4 80021BB4 0980013C */ lui $at, %hi(_svm_cur+1)
/* 123B8 80021BB8 C97826A0 */ sb $a2, %lo(_svm_cur+1)($at)
/* 123BC 80021BBC 0980013C */ lui $at, %hi(_svm_cur+6)
/* 123C0 80021BC0 CE7827A0 */ sb $a3, %lo(_svm_cur+6)($at)
/* 123C4 80021BC4 0780013C */ lui $at, %hi(_svm_tn)
/* 123C8 80021BC8 C8CB22AC */ sw $v0, %lo(_svm_tn)($at)
/* 123CC 80021BCC 00110500 */ sll $v0, $a1, 4
/* 123D0 80021BD0 21104400 */ addu $v0, $v0, $a0
/* 123D4 80021BD4 0780013C */ lui $at, %hi(_svm_vh)
/* 123D8 80021BD8 C0C323AC */ sw $v1, %lo(_svm_vh)($at)
/* 123DC 80021BDC 0780013C */ lui $at, %hi(_svm_pg)
/* 123E0 80021BE0 B4C324AC */ sw $a0, %lo(_svm_pg)($at)
/* 123E4 80021BE4 08004390 */ lbu $v1, 0x8($v0)
/* 123E8 80021BE8 21100000 */ addu $v0, $zero, $zero
/* 123EC 80021BEC 0980013C */ lui $at, %hi(_svm_cur+7)
/* 123F0 80021BF0 CF7823A0 */ sb $v1, %lo(_svm_cur+7)($at)
.L80021BF4:
/* 123F4 80021BF4 0800E003 */ jr $ra
/* 123F8 80021BF8 00000000 */ nop
.size SpuVmVSetUp, . - SpuVmVSetUp
|
sozud/psy-q-splitter | 5,371 | splitter/test_data/SsVabTransBodyPartly.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel SsVabTransBodyPartly
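/* (annotation, inferred: streams the next $a1-byte chunk of VAB body data at $a0 into SPU RAM for vab id $a2; requires the id to be mid-transfer (used flag == 2), returns -2 while bytes remain, and flips the flag to 1 once the remaining-size counter reaches zero) */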
/* 12080 80021880 D8FFBD27 */ addiu $sp, $sp, -0x28
/* 12084 80021884 2000B4AF */ sw $s4, 0x20($sp)
/* 12088 80021888 21A08000 */ addu $s4, $a0, $zero
/* 1208C 8002188C 1C00B3AF */ sw $s3, 0x1C($sp)
/* 12090 80021890 2198A000 */ addu $s3, $a1, $zero
/* 12094 80021894 2128C000 */ addu $a1, $a2, $zero
/* 12098 80021898 FFFFA230 */ andi $v0, $a1, 0xFFFF
/* 1209C 8002189C 1100422C */ sltiu $v0, $v0, 0x11
/* 120A0 800218A0 2400BFAF */ sw $ra, 0x24($sp)
/* 120A4 800218A4 1800B2AF */ sw $s2, 0x18($sp)
/* 120A8 800218A8 1400B1AF */ sw $s1, 0x14($sp)
/* 120AC 800218AC 21004010 */ beqz $v0, .L80021934
/* 120B0 800218B0 1000B0AF */ sw $s0, 0x10($sp)
/* 120B4 800218B4 00140600 */ sll $v0, $a2, 16
/* 120B8 800218B8 038C0200 */ sra $s1, $v0, 16
/* 120BC 800218BC 0980013C */ lui $at, %hi(_svm_vab_used)
/* 120C0 800218C0 21083100 */ addu $at, $at, $s1
/* 120C4 800218C4 E8782390 */ lbu $v1, %lo(_svm_vab_used)($at)
/* 120C8 800218C8 02000234 */ ori $v0, $zero, 0x2
/* 120CC 800218CC 19006214 */ bne $v1, $v0, .L80021934
/* 120D0 800218D0 00000000 */ nop
/* 120D4 800218D4 0380023C */ lui $v0, %hi(D_00000000)
/* 120D8 800218D8 082F428C */ lw $v0, %lo(D_00000000)($v0)
/* 120DC 800218DC 00000000 */ nop
/* 120E0 800218E0 0F004014 */ bnez $v0, .L80021920
/* 120E4 800218E4 80801100 */ sll $s0, $s1, 2
/* 120E8 800218E8 0A80013C */ lui $at, %hi(_svm_vab_total)
/* 120EC 800218EC 21083000 */ addu $at, $at, $s0
/* 120F0 800218F0 CC87228C */ lw $v0, %lo(_svm_vab_total)($at)
/* 120F4 800218F4 0380013C */ lui $at, %hi(D_00000004)
/* 120F8 800218F8 0C2F25A4 */ sh $a1, %lo(D_00000004)($at)
/* 120FC 800218FC 0380013C */ lui $at, %hi(D_00000000)
/* 12100 80021900 082F22AC */ sw $v0, %lo(D_00000000)($at)
/* 12104 80021904 FDAA000C */ jal SpuSetTransferMode
/* 12108 80021908 21200000 */ addu $a0, $zero, $zero
/* 1210C 8002190C 0A80013C */ lui $at, %hi(_svm_vab_start)
/* 12110 80021910 21083000 */ addu $at, $at, $s0
/* 12114 80021914 1088248C */ lw $a0, %lo(_svm_vab_start)($at)
/* 12118 80021918 EEAA000C */ jal SpuSetTransferStartAddr
/* 1211C 8002191C 00000000 */ nop
.L80021920:
/* 12120 80021920 0380123C */ lui $s2, %hi(D_00000004)
/* 12124 80021924 0C2F5286 */ lh $s2, %lo(D_00000004)($s2)
/* 12128 80021928 00000000 */ nop
/* 1212C 8002192C 05005112 */ beq $s2, $s1, .L80021944
/* 12130 80021930 21806002 */ addu $s0, $s3, $zero
.L80021934:
/* 12134 80021934 5BAB000C */ jal _spu_setInTransfer
/* 12138 80021938 21200000 */ addu $a0, $zero, $zero
/* 1213C 8002193C 6F860008 */ j .L800219BC
/* 12140 80021940 FFFF0224 */ addiu $v0, $zero, -0x1
.L80021944:
/* 12144 80021944 0380033C */ lui $v1, %hi(D_00000000)
/* 12148 80021948 082F638C */ lw $v1, %lo(D_00000000)($v1)
/* 1214C 8002194C 00000000 */ nop
/* 12150 80021950 2B107000 */ sltu $v0, $v1, $s0
/* 12154 80021954 02004010 */ beqz $v0, .L80021960
/* 12158 80021958 00000000 */ nop
/* 1215C 8002195C 21806000 */ addu $s0, $v1, $zero
.L80021960:
/* 12160 80021960 5BAB000C */ jal _spu_setInTransfer
/* 12164 80021964 01000434 */ ori $a0, $zero, 0x1
/* 12168 80021968 21208002 */ addu $a0, $s4, $zero
/* 1216C 8002196C 0AAB000C */ jal SpuWritePartly
/* 12170 80021970 21280002 */ addu $a1, $s0, $zero
/* 12174 80021974 0380023C */ lui $v0, %hi(D_00000000)
/* 12178 80021978 082F428C */ lw $v0, %lo(D_00000000)($v0)
/* 1217C 8002197C 00000000 */ nop
/* 12180 80021980 23105000 */ subu $v0, $v0, $s0
/* 12184 80021984 0380013C */ lui $at, %hi(D_00000000)
/* 12188 80021988 082F22AC */ sw $v0, %lo(D_00000000)($at)
/* 1218C 8002198C 0B004014 */ bnez $v0, .L800219BC
/* 12190 80021990 FEFF0224 */ addiu $v0, $zero, -0x2
/* 12194 80021994 21104002 */ addu $v0, $s2, $zero
/* 12198 80021998 FFFF0324 */ addiu $v1, $zero, -0x1
/* 1219C 8002199C 0380013C */ lui $at, %hi(D_00000004)
/* 121A0 800219A0 0C2F23A4 */ sh $v1, %lo(D_00000004)($at)
/* 121A4 800219A4 01000334 */ ori $v1, $zero, 0x1
/* 121A8 800219A8 0380013C */ lui $at, %hi(D_00000000)
/* 121AC 800219AC 082F20AC */ sw $zero, %lo(D_00000000)($at)
/* 121B0 800219B0 0980013C */ lui $at, %hi(_svm_vab_used)
/* 121B4 800219B4 21082200 */ addu $at, $at, $v0
/* 121B8 800219B8 E87823A0 */ sb $v1, %lo(_svm_vab_used)($at)
.L800219BC:
/* 121BC 800219BC 2400BF8F */ lw $ra, 0x24($sp)
/* 121C0 800219C0 2000B48F */ lw $s4, 0x20($sp)
/* 121C4 800219C4 1C00B38F */ lw $s3, 0x1C($sp)
/* 121C8 800219C8 1800B28F */ lw $s2, 0x18($sp)
/* 121CC 800219CC 1400B18F */ lw $s1, 0x14($sp)
/* 121D0 800219D0 1000B08F */ lw $s0, 0x10($sp)
/* 121D4 800219D4 2800BD27 */ addiu $sp, $sp, 0x28
/* 121D8 800219D8 0800E003 */ jr $ra
/* 121DC 800219DC 00000000 */ nop
.size SsVabTransBodyPartly, . - SsVabTransBodyPartly
|
sozud/psy-q-splitter | 13,330 | splitter/test_data/_SsInitSoundSep.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsInitSoundSep
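/* (annotation, inferred: parses a SEQ/SEP header -- accepts an 'S' (0x53) or 'p' (0x70) magic byte, reads the 16-bit resolution and 24-bit tempo, computes 60,000,000 / tempo, and derives the tick interval from VBLANK_MINUS) */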
/* 344C 80012C4C E0FFBD27 */ addiu $sp, $sp, -0x20
/* 3450 80012C50 21608000 */ addu $t4, $a0, $zero
/* 3454 80012C54 2158A000 */ addu $t3, $a1, $zero
/* 3458 80012C58 1400B1AF */ sw $s1, 0x14($sp)
/* 345C 80012C5C 21880000 */ addu $s1, $zero, $zero
/* 3460 80012C60 21400000 */ addu $t0, $zero, $zero
/* 3464 80012C64 40000A34 */ ori $t2, $zero, 0x40
/* 3468 80012C68 7F000934 */ ori $t1, $zero, 0x7F
/* 346C 80012C6C 00240400 */ sll $a0, $a0, 16
/* 3470 80012C70 83230400 */ sra $a0, $a0, 14
/* 3474 80012C74 002C0500 */ sll $a1, $a1, 16
/* 3478 80012C78 032C0500 */ sra $a1, $a1, 16
/* 347C 80012C7C 40100500 */ sll $v0, $a1, 1
/* 3480 80012C80 21104500 */ addu $v0, $v0, $a1
/* 3484 80012C84 80100200 */ sll $v0, $v0, 2
/* 3488 80012C88 23104500 */ subu $v0, $v0, $a1
/* 348C 80012C8C 80100200 */ sll $v0, $v0, 2
/* 3490 80012C90 23104500 */ subu $v0, $v0, $a1
/* 3494 80012C94 1C00BFAF */ sw $ra, 0x1C($sp)
/* 3498 80012C98 1800B2AF */ sw $s2, 0x18($sp)
/* 349C 80012C9C 1000B0AF */ sw $s0, 0x10($sp)
/* 34A0 80012CA0 0280013C */ lui $at, %hi(_ss_score)
/* 34A4 80012CA4 21082400 */ addu $at, $at, $a0
/* 34A8 80012CA8 2C29238C */ lw $v1, %lo(_ss_score)($at)
/* 34AC 80012CAC 80100200 */ sll $v0, $v0, 2
/* 34B0 80012CB0 21804300 */ addu $s0, $v0, $v1
/* 34B4 80012CB4 21180002 */ addu $v1, $s0, $zero
/* 34B8 80012CB8 01000234 */ ori $v0, $zero, 0x1
/* 34BC 80012CBC 6E0002A6 */ sh $v0, 0x6E($s0)
/* 34C0 80012CC0 7F000234 */ ori $v0, $zero, 0x7F
/* 34C4 80012CC4 100000A2 */ sb $zero, 0x10($s0)
/* 34C8 80012CC8 110000A2 */ sb $zero, 0x11($s0)
/* 34CC 80012CCC 120000A2 */ sb $zero, 0x12($s0)
/* 34D0 80012CD0 130000A2 */ sb $zero, 0x13($s0)
/* 34D4 80012CD4 140000A2 */ sb $zero, 0x14($s0)
/* 34D8 80012CD8 150000A2 */ sb $zero, 0x15($s0)
/* 34DC 80012CDC 160000A2 */ sb $zero, 0x16($s0)
/* 34E0 80012CE0 270000A2 */ sb $zero, 0x27($s0)
/* 34E4 80012CE4 280000A2 */ sb $zero, 0x28($s0)
/* 34E8 80012CE8 290000A2 */ sb $zero, 0x29($s0)
/* 34EC 80012CEC 2A0000A2 */ sb $zero, 0x2A($s0)
/* 34F0 80012CF0 2B0000A2 */ sb $zero, 0x2B($s0)
/* 34F4 80012CF4 480000A6 */ sh $zero, 0x48($s0)
/* 34F8 80012CF8 4A0000A6 */ sh $zero, 0x4A($s0)
/* 34FC 80012CFC 4C0006A6 */ sh $a2, 0x4C($s0)
/* 3500 80012D00 720000A6 */ sh $zero, 0x72($s0)
/* 3504 80012D04 7C0000AE */ sw $zero, 0x7C($s0)
/* 3508 80012D08 800000AE */ sw $zero, 0x80($s0)
/* 350C 80012D0C 840000AE */ sw $zero, 0x84($s0)
/* 3510 80012D10 880000AE */ sw $zero, 0x88($s0)
/* 3514 80012D14 A80002A6 */ sh $v0, 0xA8($s0)
/* 3518 80012D18 AA0000A6 */ sh $zero, 0xAA($s0)
.L80012D1C:
/* 351C 80012D1C 21100802 */ addu $v0, $s0, $t0
/* 3520 80012D20 17004AA0 */ sb $t2, 0x17($v0)
/* 3524 80012D24 2C0048A0 */ sb $t0, 0x2C($v0)
/* 3528 80012D28 4E0069A4 */ sh $t1, 0x4E($v1)
/* 352C 80012D2C 01000825 */ addiu $t0, $t0, 0x1
/* 3530 80012D30 10000229 */ slti $v0, $t0, 0x10
/* 3534 80012D34 F9FF4014 */ bnez $v0, .L80012D1C
/* 3538 80012D38 02006324 */ addiu $v1, $v1, 0x2
/* 353C 80012D3C 00140B00 */ sll $v0, $t3, 16
/* 3540 80012D40 16004014 */ bnez $v0, .L80012D9C
/* 3544 80012D44 040007AE */ sw $a3, 0x4($s0)
/* 3548 80012D48 0000E390 */ lbu $v1, 0x0($a3)
/* 354C 80012D4C 53000234 */ ori $v0, $zero, 0x53
/* 3550 80012D50 03006210 */ beq $v1, $v0, .L80012D60
/* 3554 80012D54 70000234 */ ori $v0, $zero, 0x70
/* 3558 80012D58 14006214 */ bne $v1, $v0, .L80012DAC
/* 355C 80012D5C 9303053C */ lui $a1, 0x393
.L80012D60:
/* 3560 80012D60 0600E224 */ addiu $v0, $a3, 0x6
/* 3564 80012D64 040002AE */ sw $v0, 0x4($s0)
/* 3568 80012D68 0500E290 */ lbu $v0, 0x5($a3)
/* 356C 80012D6C 00000000 */ nop
/* 3570 80012D70 07004010 */ beqz $v0, .L80012D90
/* 3574 80012D74 0800E224 */ addiu $v0, $a3, 0x8
/* 3578 80012D78 0180043C */ lui $a0, %hi(R_00000000)
/* 357C 80012D7C 88008424 */ addiu $a0, $a0, %lo(R_00000000)
/* 3580 80012D80 717E000C */ jal printf
/* 3584 80012D84 00000000 */ nop
/* 3588 80012D88 FC4B0008 */ j .L80012FF0
/* 358C 80012D8C FFFF0224 */ addiu $v0, $zero, -0x1
.L80012D90:
/* 3590 80012D90 040002AE */ sw $v0, 0x4($s0)
/* 3594 80012D94 6A4B0008 */ j .L80012DA8
/* 3598 80012D98 08003126 */ addiu $s1, $s1, 0x8
.L80012D9C:
/* 359C 80012D9C 0200E224 */ addiu $v0, $a3, 0x2
/* 35A0 80012DA0 040002AE */ sw $v0, 0x4($s0)
/* 35A4 80012DA4 02003126 */ addiu $s1, $s1, 0x2
.L80012DA8:
/* 35A8 80012DA8 9303053C */ lui $a1, 0x393
.L80012DAC:
/* 35AC 80012DAC 0400038E */ lw $v1, 0x4($s0)
/* 35B0 80012DB0 0087A534 */ ori $a1, $a1, 0x8700
/* 35B4 80012DB4 01006224 */ addiu $v0, $v1, 0x1
/* 35B8 80012DB8 040002AE */ sw $v0, 0x4($s0)
/* 35BC 80012DBC 00006690 */ lbu $a2, 0x0($v1)
/* 35C0 80012DC0 02006224 */ addiu $v0, $v1, 0x2
/* 35C4 80012DC4 040002AE */ sw $v0, 0x4($s0)
/* 35C8 80012DC8 01006290 */ lbu $v0, 0x1($v1)
/* 35CC 80012DCC 0400038E */ lw $v1, 0x4($s0)
/* 35D0 80012DD0 00220600 */ sll $a0, $a2, 8
/* 35D4 80012DD4 25104400 */ or $v0, $v0, $a0
/* 35D8 80012DD8 4A0002A6 */ sh $v0, 0x4A($s0)
/* 35DC 80012DDC 01006224 */ addiu $v0, $v1, 0x1
/* 35E0 80012DE0 040002AE */ sw $v0, 0x4($s0)
/* 35E4 80012DE4 00006790 */ lbu $a3, 0x0($v1)
/* 35E8 80012DE8 02006224 */ addiu $v0, $v1, 0x2
/* 35EC 80012DEC 040002AE */ sw $v0, 0x4($s0)
/* 35F0 80012DF0 01006490 */ lbu $a0, 0x1($v1)
/* 35F4 80012DF4 03006224 */ addiu $v0, $v1, 0x3
/* 35F8 80012DF8 040002AE */ sw $v0, 0x4($s0)
/* 35FC 80012DFC 02006390 */ lbu $v1, 0x2($v1)
/* 3600 80012E00 00140700 */ sll $v0, $a3, 16
/* 3604 80012E04 00220400 */ sll $a0, $a0, 8
/* 3608 80012E08 25104400 */ or $v0, $v0, $a0
/* 360C 80012E0C 25104300 */ or $v0, $v0, $v1
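/* signed division with both compiler-emitted guards: break 7 if the divisor is zero, break 6 for the 0x80000000 / -1 overflow case */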
/* 3610 80012E10 1A00A200 */ div $zero, $a1, $v0
/* 3614 80012E14 02004014 */ bnez $v0, .L80012E20
/* 3618 80012E18 00000000 */ nop
/* 361C 80012E1C 0D000700 */ break 7
.L80012E20:
/* 3620 80012E20 FFFF0124 */ addiu $at, $zero, -0x1
/* 3624 80012E24 04004114 */ bne $v0, $at, .L80012E38
/* 3628 80012E28 0080013C */ lui $at, 0x8000
/* 362C 80012E2C 0200A114 */ bne $a1, $at, .L80012E38
/* 3630 80012E30 00000000 */ nop
/* 3634 80012E34 0D000600 */ break 6
.L80012E38:
/* 3638 80012E38 12280000 */ mflo $a1
/* 363C 80012E3C 10180000 */ mfhi $v1
/* 3640 80012E40 840002AE */ sw $v0, 0x84($s0)
/* 3644 80012E44 42100200 */ srl $v0, $v0, 1
/* 3648 80012E48 2A104300 */ slt $v0, $v0, $v1
/* 364C 80012E4C 04004010 */ beqz $v0, .L80012E60
/* 3650 80012E50 05003126 */ addiu $s1, $s1, 0x5
/* 3654 80012E54 0100A224 */ addiu $v0, $a1, 0x1
/* 3658 80012E58 994B0008 */ j .L80012E64
/* 365C 80012E5C 840002AE */ sw $v0, 0x84($s0)
.L80012E60:
/* 3660 80012E60 840005AE */ sw $a1, 0x84($s0)
.L80012E64:
/* 3664 80012E64 00240C00 */ sll $a0, $t4, 16
/* 3668 80012E68 8400028E */ lw $v0, 0x84($s0)
/* 366C 80012E6C 0400058E */ lw $a1, 0x4($s0)
/* 3670 80012E70 03240400 */ sra $a0, $a0, 16
/* 3674 80012E74 8C0002AE */ sw $v0, 0x8C($s0)
/* 3678 80012E78 0300A224 */ addiu $v0, $a1, 0x3
/* 367C 80012E7C 040002AE */ sw $v0, 0x4($s0)
/* 3680 80012E80 0200A690 */ lbu $a2, 0x2($a1)
/* 3684 80012E84 0400A224 */ addiu $v0, $a1, 0x4
/* 3688 80012E88 040002AE */ sw $v0, 0x4($s0)
/* 368C 80012E8C 0300A390 */ lbu $v1, 0x3($a1)
/* 3690 80012E90 0500A224 */ addiu $v0, $a1, 0x5
/* 3694 80012E94 040002AE */ sw $v0, 0x4($s0)
/* 3698 80012E98 0400A790 */ lbu $a3, 0x4($a1)
/* 369C 80012E9C 0600A224 */ addiu $v0, $a1, 0x6
/* 36A0 80012EA0 040002AE */ sw $v0, 0x4($s0)
/* 36A4 80012EA4 0500A590 */ lbu $a1, 0x5($a1)
/* 36A8 80012EA8 00160600 */ sll $v0, $a2, 24
/* 36AC 80012EAC 001C0300 */ sll $v1, $v1, 16
/* 36B0 80012EB0 21104300 */ addu $v0, $v0, $v1
/* 36B4 80012EB4 001A0700 */ sll $v1, $a3, 8
/* 36B8 80012EB8 21104300 */ addu $v0, $v0, $v1
/* 36BC 80012EBC 21904500 */ addu $s2, $v0, $a1
/* 36C0 80012EC0 002C0B00 */ sll $a1, $t3, 16
/* 36C4 80012EC4 7454000C */ jal _SsReadDeltaValue
/* 36C8 80012EC8 032C0500 */ sra $a1, $a1, 16
/* 36CC 80012ECC 4A000486 */ lh $a0, 0x4A($s0)
/* 36D0 80012ED0 8400038E */ lw $v1, 0x84($s0)
/* 36D4 80012ED4 00000000 */ nop
/* 36D8 80012ED8 18008300 */ mult $a0, $v1
/* 36DC 80012EDC 0400048E */ lw $a0, 0x4($s0)
/* 36E0 80012EE0 0400038E */ lw $v1, 0x4($s0)
/* 36E4 80012EE4 0C0004AE */ sw $a0, 0xC($s0)
/* 36E8 80012EE8 0280043C */ lui $a0, %hi(VBLANK_MINUS)
/* 36EC 80012EEC 2429848C */ lw $a0, %lo(VBLANK_MINUS)($a0)
/* 36F0 80012EF0 7C0002AE */ sw $v0, 0x7C($s0)
/* 36F4 80012EF4 880002AE */ sw $v0, 0x88($s0)
/* 36F8 80012EF8 080003AE */ sw $v1, 0x8($s0)
/* 36FC 80012EFC 00190400 */ sll $v1, $a0, 4
/* 3700 80012F00 23386400 */ subu $a3, $v1, $a0
/* 3704 80012F04 80280700 */ sll $a1, $a3, 2
/* 3708 80012F08 12300000 */ mflo $a2
/* 370C 80012F0C 80100600 */ sll $v0, $a2, 2
/* 3710 80012F10 21104600 */ addu $v0, $v0, $a2
/* 3714 80012F14 40100200 */ sll $v0, $v0, 1
/* 3718 80012F18 2B104500 */ sltu $v0, $v0, $a1
/* 371C 80012F1C 0F004010 */ beqz $v0, .L80012F5C
/* 3720 80012F20 06003126 */ addiu $s1, $s1, 0x6
/* 3724 80012F24 80100400 */ sll $v0, $a0, 2
/* 3728 80012F28 21104400 */ addu $v0, $v0, $a0
/* 372C 80012F2C 00190200 */ sll $v1, $v0, 4
/* 3730 80012F30 23186200 */ subu $v1, $v1, $v0
/* 3734 80012F34 C0180300 */ sll $v1, $v1, 3
/* 3738 80012F38 1B006600 */ divu $zero, $v1, $a2
/* 373C 80012F3C 0200C014 */ bnez $a2, .L80012F48
/* 3740 80012F40 00000000 */ nop
/* 3744 80012F44 0D000700 */ break 7
.L80012F48:
/* 3748 80012F48 12180000 */ mflo $v1
/* 374C 80012F4C 00000000 */ nop
/* 3750 80012F50 6E0003A6 */ sh $v1, 0x6E($s0)
/* 3754 80012F54 F94B0008 */ j .L80012FE4
/* 3758 80012F58 700003A6 */ sh $v1, 0x70($s0)
.L80012F5C:
/* 375C 80012F5C 4A000386 */ lh $v1, 0x4A($s0)
/* 3760 80012F60 8400028E */ lw $v0, 0x84($s0)
/* 3764 80012F64 00000000 */ nop
/* 3768 80012F68 18006200 */ mult $v1, $v0
/* 376C 80012F6C 12180000 */ mflo $v1
/* 3770 80012F70 80100300 */ sll $v0, $v1, 2
/* 3774 80012F74 21104300 */ addu $v0, $v0, $v1
/* 3778 80012F78 40100200 */ sll $v0, $v0, 1
/* 377C 80012F7C 1B004500 */ divu $zero, $v0, $a1
/* 3780 80012F80 0200A014 */ bnez $a1, .L80012F8C
/* 3784 80012F84 00000000 */ nop
/* 3788 80012F88 0D000700 */ break 7
.L80012F8C:
/* 378C 80012F8C 12200000 */ mflo $a0
/* 3790 80012F90 4A000386 */ lh $v1, 0x4A($s0)
/* 3794 80012F94 8400028E */ lw $v0, 0x84($s0)
/* 3798 80012F98 00000000 */ nop
/* 379C 80012F9C 18006200 */ mult $v1, $v0
/* 37A0 80012FA0 12180000 */ mflo $v1
/* 37A4 80012FA4 80100300 */ sll $v0, $v1, 2
/* 37A8 80012FA8 21104300 */ addu $v0, $v0, $v1
/* 37AC 80012FAC 40100200 */ sll $v0, $v0, 1
/* 37B0 80012FB0 1B004500 */ divu $zero, $v0, $a1
/* 37B4 80012FB4 0200A014 */ bnez $a1, .L80012FC0
/* 37B8 80012FB8 00000000 */ nop
/* 37BC 80012FBC 0D000700 */ break 7
.L80012FC0:
/* 37C0 80012FC0 10180000 */ mfhi $v1
/* 37C4 80012FC4 FFFF0224 */ addiu $v0, $zero, -0x1
/* 37C8 80012FC8 6E0002A6 */ sh $v0, 0x6E($s0)
/* 37CC 80012FCC 40100700 */ sll $v0, $a3, 1
/* 37D0 80012FD0 2B104300 */ sltu $v0, $v0, $v1
/* 37D4 80012FD4 03004010 */ beqz $v0, .L80012FE4
/* 37D8 80012FD8 700004A6 */ sh $a0, 0x70($s0)
/* 37DC 80012FDC 01008224 */ addiu $v0, $a0, 0x1
/* 37E0 80012FE0 700002A6 */ sh $v0, 0x70($s0)
.L80012FE4:
/* 37E4 80012FE4 70000396 */ lhu $v1, 0x70($s0)
/* 37E8 80012FE8 21103202 */ addu $v0, $s1, $s2
/* 37EC 80012FEC 720003A6 */ sh $v1, 0x72($s0)
.L80012FF0:
/* 37F0 80012FF0 1C00BF8F */ lw $ra, 0x1C($sp)
/* 37F4 80012FF4 1800B28F */ lw $s2, 0x18($sp)
/* 37F8 80012FF8 1400B18F */ lw $s1, 0x14($sp)
/* 37FC 80012FFC 1000B08F */ lw $s0, 0x10($sp)
/* 3800 80013000 2000BD27 */ addiu $sp, $sp, 0x20
/* 3804 80013004 0800E003 */ jr $ra
/* 3808 80013008 00000000 */ nop
.size _SsInitSoundSep, . - _SsInitSoundSep
|
sozud/psy-q-splitter | 4,580 | splitter/test_data/_SsInit.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsInit
/* 00000000 27BDFFE8 */ addiu $sp, $sp, -0x18
/* 00000004 AFB00010 */ sw $s0, 0x10($sp)
/* 00000008 AFBF0014 */ sw $ra, 0x14($sp)
/* 0000000C 0C000000 */ jal func_80000000
/* 00000010 00808021 */ addu $s0, $a0, $zero
/* 00000014 16000005 */ bnez $s0, .L0000002C
/* 00000018 00000000 */ nop
/* 0000001C 0C000000 */ jal SpuInit
/* 00000020 00000000 */ nop
/* 00000024 08000000 */ j .L00000038
/* 00000028 3C061F80 */ lui $a2, 0x1F80
.L0000002C:
/* 0000002C 0C000000 */ jal SpuInitHot
/* 00000030 00000000 */ nop
/* 00000034 3C061F80 */ lui $a2, 0x1F80
.L00000038:
/* 00000038 34C61C00 */ ori $a2, $a2, 0x1C00
/* 0000003C 00002021 */ addu $a0, $zero, $zero
/* 00000040 3C070000 */ lui $a3, %hi(D_00000000)
/* 00000044 24E70000 */ addiu $a3, $a3, %lo(D_00000000)
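/* writes an 8-halfword block (apparently register defaults) to each of the 24 SPU voice register sets starting at 0x1F801C00 */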
.L00000048:
/* 00000048 00002821 */ addu $a1, $zero, $zero
/* 0000004C 00E01821 */ addu $v1, $a3, $zero
.L00000050:
/* 00000050 94620000 */ lhu $v0, 0x0($v1)
/* 00000054 24630002 */ addiu $v1, $v1, 0x2
/* 00000058 24A50001 */ addiu $a1, $a1, 0x1
/* 0000005C A4C20000 */ sh $v0, 0x0($a2)
/* 00000060 28A20008 */ slti $v0, $a1, 0x8
/* 00000064 1440FFFA */ bnez $v0, .L00000050
/* 00000068 24C60002 */ addiu $a2, $a2, 0x2
/* 0000006C 24840001 */ addiu $a0, $a0, 0x1
/* 00000070 28820018 */ slti $v0, $a0, 0x18
/* 00000074 1440FFF4 */ bnez $v0, .L00000048
/* 00000078 00000000 */ nop
/* 0000007C 3C061F80 */ lui $a2, 0x1F80
/* 00000080 34C61D80 */ ori $a2, $a2, 0x1D80
/* 00000084 00002021 */ addu $a0, $zero, $zero
/* 00000088 3C030000 */ lui $v1, %hi(D_00000010)
/* 0000008C 24630000 */ addiu $v1, $v1, %lo(D_00000010)
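/* then 16 halfwords of defaults to the SPU control registers at 0x1F801D80 */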
.L00000090:
/* 00000090 94620000 */ lhu $v0, 0x0($v1)
/* 00000094 24630002 */ addiu $v1, $v1, 0x2
/* 00000098 24840001 */ addiu $a0, $a0, 0x1
/* 0000009C A4C20000 */ sh $v0, 0x0($a2)
/* 000000A0 28820010 */ slti $v0, $a0, 0x10
/* 000000A4 1440FFFA */ bnez $v0, .L00000090
/* 000000A8 24C60002 */ addiu $a2, $a2, 0x2
/* 000000AC 0C000000 */ jal SpuVmInit
/* 000000B0 34040018 */ ori $a0, $zero, 0x18
/* 000000B4 00002821 */ addu $a1, $zero, $zero
/* 000000B8 3C030000 */ lui $v1, 0x0
/* 000000BC 24630000 */ addiu $v1, $v1, 0x0
.L000000C0:
/* 000000C0 3404000F */ ori $a0, $zero, 0xF
/* 000000C4 2462003C */ addiu $v0, $v1, 0x3C
.L000000C8:
/* 000000C8 AC400000 */ sw $zero, 0x0($v0)
/* 000000CC 2484FFFF */ addiu $a0, $a0, -0x1
/* 000000D0 0481FFFD */ bgez $a0, .L000000C8
/* 000000D4 2442FFFC */ addiu $v0, $v0, -0x4
/* 000000D8 24A50001 */ addiu $a1, $a1, 0x1
/* 000000DC 28A20020 */ slti $v0, $a1, 0x20
/* 000000E0 1440FFF7 */ bnez $v0, .L000000C0
/* 000000E4 24630040 */ addiu $v1, $v1, 0x40
/* 000000E8 3402003C */ ori $v0, $zero, 0x3C
/* 000000EC 3C010000 */ lui $at, 0x0
/* 000000F0 AC220000 */ sw $v0, 0x0($at)
/* 000000F4 2402FFFF */ addiu $v0, $zero, -0x1
/* 000000F8 3C010000 */ lui $at, 0x0
/* 000000FC AC200000 */ sw $zero, 0x0($at)
/* 00000100 3C010000 */ lui $at, %hi(_snd_use_vsync_cb)
/* 00000104 AC200000 */ sw $zero, %lo(_snd_use_vsync_cb)($at)
/* 00000108 3C010000 */ lui $at, %hi(_snd_use_interrupt_id)
/* 0000010C AC220000 */ sw $v0, %lo(_snd_use_interrupt_id)($at)
/* 00000110 3C010000 */ lui $at, %hi(_snd_use_event)
/* 00000114 AC200000 */ sw $zero, %lo(_snd_use_event)($at)
/* 00000118 3C010000 */ lui $at, %hi(_snd_1per2)
/* 0000011C AC200000 */ sw $zero, %lo(_snd_1per2)($at)
/* 00000120 3C010000 */ lui $at, %hi(_snd_vsync_cb)
/* 00000124 AC200000 */ sw $zero, %lo(_snd_vsync_cb)($at)
/* 00000128 0C000000 */ jal GetVideoMode
/* 0000012C 00000000 */ nop
/* 00000130 3C010000 */ lui $at, %hi(_snd_video_mode)
/* 00000134 AC220000 */ sw $v0, %lo(_snd_video_mode)($at)
/* 00000138 3C010000 */ lui $at, 0x0
/* 0000013C AC200000 */ sw $zero, 0x0($at)
/* 00000140 8FBF0014 */ lw $ra, 0x14($sp)
/* 00000144 8FB00010 */ lw $s0, 0x10($sp)
/* 00000148 27BD0018 */ addiu $sp, $sp, 0x18
/* 0000014C 03E00008 */ jr $ra
/* 00000150 00000000 */ nop
.size _SsInit, . - _SsInit
|
sozud/psy-q-splitter | 10,308 | splitter/test_data/_SsSndCrescendo.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsSndCrescendo
/* 102A8 8001FAA8 C0FFBD27 */ addiu $sp, $sp, -0x40
/* 102AC 8001FAAC 21388000 */ addu $a3, $a0, $zero
/* 102B0 8001FAB0 00140700 */ sll $v0, $a3, 16
/* 102B4 8001FAB4 0980033C */ lui $v1, %hi(_ss_score)
/* 102B8 8001FAB8 9C7C6324 */ addiu $v1, $v1, %lo(_ss_score)
/* 102BC 8001FABC 83130200 */ sra $v0, $v0, 14
/* 102C0 8001FAC0 3000B2AF */ sw $s2, 0x30($sp)
/* 102C4 8001FAC4 21904300 */ addu $s2, $v0, $v1
/* 102C8 8001FAC8 001C0500 */ sll $v1, $a1, 16
/* 102CC 8001FACC 031C0300 */ sra $v1, $v1, 16
/* 102D0 8001FAD0 40100300 */ sll $v0, $v1, 1
/* 102D4 8001FAD4 21104300 */ addu $v0, $v0, $v1
/* 102D8 8001FAD8 80100200 */ sll $v0, $v0, 2
/* 102DC 8001FADC 23104300 */ subu $v0, $v0, $v1
/* 102E0 8001FAE0 80100200 */ sll $v0, $v0, 2
/* 102E4 8001FAE4 23104300 */ subu $v0, $v0, $v1
/* 102E8 8001FAE8 2C00B1AF */ sw $s1, 0x2C($sp)
/* 102EC 8001FAEC 80880200 */ sll $s1, $v0, 2
/* 102F0 8001FAF0 3800B4AF */ sw $s4, 0x38($sp)
/* 102F4 8001FAF4 21A0E000 */ addu $s4, $a3, $zero
/* 102F8 8001FAF8 3C00BFAF */ sw $ra, 0x3C($sp)
/* 102FC 8001FAFC 3400B3AF */ sw $s3, 0x34($sp)
/* 10300 8001FB00 2800B0AF */ sw $s0, 0x28($sp)
/* 10304 8001FB04 0000438E */ lw $v1, 0x0($s2)
/* 10308 8001FB08 2198A000 */ addu $s3, $a1, $zero
/* 1030C 8001FB0C 21802302 */ addu $s0, $s1, $v1
/* 10310 8001FB10 9800028E */ lw $v0, 0x98($s0)
/* 10314 8001FB14 42000686 */ lh $a2, 0x42($s0)
/* 10318 8001FB18 FFFF4224 */ addiu $v0, $v0, -0x1
/* 1031C 8001FB1C 2118C000 */ addu $v1, $a2, $zero
/* 10320 8001FB20 2500C018 */ blez $a2, .L8001FBB8
/* 10324 8001FB24 980002AE */ sw $v0, 0x98($s0)
/* 10328 8001FB28 1B004600 */ divu $zero, $v0, $a2
/* 1032C 8001FB2C 0200C014 */ bnez $a2, .L8001FB38
/* 10330 8001FB30 00000000 */ nop
/* 10334 8001FB34 0D000700 */ break 7
.L8001FB38:
/* 10338 8001FB38 10100000 */ mfhi $v0
/* 1033C 8001FB3C 00000000 */ nop
/* 10340 8001FB40 80004014 */ bnez $v0, .L8001FD44
/* 10344 8001FB44 00000000 */ nop
/* 10348 8001FB48 40000296 */ lhu $v0, 0x40($s0)
/* 1034C 8001FB4C 00000000 */ nop
/* 10350 8001FB50 FFFF4224 */ addiu $v0, $v0, -0x1
/* 10354 8001FB54 400002A6 */ sh $v0, 0x40($s0)
/* 10358 8001FB58 00140200 */ sll $v0, $v0, 16
/* 1035C 8001FB5C 50004004 */ bltz $v0, .L8001FCA0
/* 10360 8001FB60 00120500 */ sll $v0, $a1, 8
/* 10364 8001FB64 2510E200 */ or $v0, $a3, $v0
/* 10368 8001FB68 00140200 */ sll $v0, $v0, 16
/* 1036C 8001FB6C 038C0200 */ sra $s1, $v0, 16
/* 10370 8001FB70 21202002 */ addu $a0, $s1, $zero
/* 10374 8001FB74 1000A527 */ addiu $a1, $sp, 0x10
/* 10378 8001FB78 7C95000C */ jal SpuVmGetSeqVol
/* 1037C 8001FB7C 1200A627 */ addiu $a2, $sp, 0x12
/* 10380 8001FB80 1000A297 */ lhu $v0, 0x10($sp)
/* 10384 8001FB84 40000386 */ lh $v1, 0x40($s0)
/* 10388 8001FB88 01004424 */ addiu $a0, $v0, 0x1
/* 1038C 8001FB8C 21104300 */ addu $v0, $v0, $v1
/* 10390 8001FB90 2A104400 */ slt $v0, $v0, $a0
/* 10394 8001FB94 51004014 */ bnez $v0, .L8001FCDC
/* 10398 8001FB98 21202002 */ addu $a0, $s1, $zero
/* 1039C 8001FB9C 21380000 */ addu $a3, $zero, $zero
/* 103A0 8001FBA0 1000A597 */ lhu $a1, 0x10($sp)
/* 103A4 8001FBA4 1200A697 */ lhu $a2, 0x12($sp)
/* 103A8 8001FBA8 0100A524 */ addiu $a1, $a1, 0x1
/* 103AC 8001FBAC 0100C624 */ addiu $a2, $a2, 0x1
/* 103B0 8001FBB0 247F0008 */ j .L8001FC90
/* 103B4 8001FBB4 FFFFA530 */ andi $a1, $a1, 0xFFFF
.L8001FBB8:
/* 103B8 8001FBB8 6300C104 */ bgez $a2, .L8001FD48
/* 103BC 8001FBBC 00221300 */ sll $a0, $s3, 8
/* 103C0 8001FBC0 40000296 */ lhu $v0, 0x40($s0)
/* 103C4 8001FBC4 00000000 */ nop
/* 103C8 8001FBC8 21104300 */ addu $v0, $v0, $v1
/* 103CC 8001FBCC 400002A6 */ sh $v0, 0x40($s0)
/* 103D0 8001FBD0 00140200 */ sll $v0, $v0, 16
/* 103D4 8001FBD4 32004004 */ bltz $v0, .L8001FCA0
/* 103D8 8001FBD8 00120500 */ sll $v0, $a1, 8
/* 103DC 8001FBDC 2510E200 */ or $v0, $a3, $v0
/* 103E0 8001FBE0 00140200 */ sll $v0, $v0, 16
/* 103E4 8001FBE4 038C0200 */ sra $s1, $v0, 16
/* 103E8 8001FBE8 21202002 */ addu $a0, $s1, $zero
/* 103EC 8001FBEC 1000A527 */ addiu $a1, $sp, 0x10
/* 103F0 8001FBF0 7C95000C */ jal SpuVmGetSeqVol
/* 103F4 8001FBF4 1200A627 */ addiu $a2, $sp, 0x12
/* 103F8 8001FBF8 1000A297 */ lhu $v0, 0x10($sp)
/* 103FC 8001FBFC 42000386 */ lh $v1, 0x42($s0)
/* 10400 8001FC00 00000000 */ nop
/* 10404 8001FC04 23104300 */ subu $v0, $v0, $v1
/* 10408 8001FC08 7F004228 */ slti $v0, $v0, 0x7F
/* 1040C 8001FC0C 0B004014 */ bnez $v0, .L8001FC3C
/* 10410 8001FC10 00000000 */ nop
/* 10414 8001FC14 1200A297 */ lhu $v0, 0x12($sp)
/* 10418 8001FC18 00000000 */ nop
/* 1041C 8001FC1C 23104300 */ subu $v0, $v0, $v1
/* 10420 8001FC20 7F004228 */ slti $v0, $v0, 0x7F
/* 10424 8001FC24 05004014 */ bnez $v0, .L8001FC3C
/* 10428 8001FC28 21202002 */ addu $a0, $s1, $zero
/* 1042C 8001FC2C 7F000534 */ ori $a1, $zero, 0x7F
/* 10430 8001FC30 7F000634 */ ori $a2, $zero, 0x7F
/* 10434 8001FC34 1F95000C */ jal SpuVmSetSeqVol
/* 10438 8001FC38 21380000 */ addu $a3, $zero, $zero
.L8001FC3C:
/* 1043C 8001FC3C 9400038E */ lw $v1, 0x94($s0)
/* 10440 8001FC40 9800028E */ lw $v0, 0x98($s0)
/* 10444 8001FC44 42000486 */ lh $a0, 0x42($s0)
/* 10448 8001FC48 23186200 */ subu $v1, $v1, $v0
/* 1044C 8001FC4C 23100400 */ negu $v0, $a0
/* 10450 8001FC50 18006200 */ mult $v1, $v0
/* 10454 8001FC54 3E000386 */ lh $v1, 0x3E($s0)
/* 10458 8001FC58 12100000 */ mflo $v0
/* 1045C 8001FC5C 2B104300 */ sltu $v0, $v0, $v1
/* 10460 8001FC60 1E004010 */ beqz $v0, .L8001FCDC
/* 10464 8001FC64 21408000 */ addu $t0, $a0, $zero
/* 10468 8001FC68 00221300 */ sll $a0, $s3, 8
/* 1046C 8001FC6C 25208402 */ or $a0, $s4, $a0
/* 10470 8001FC70 00240400 */ sll $a0, $a0, 16
/* 10474 8001FC74 03240400 */ sra $a0, $a0, 16
/* 10478 8001FC78 21380000 */ addu $a3, $zero, $zero
/* 1047C 8001FC7C 1000A597 */ lhu $a1, 0x10($sp)
/* 10480 8001FC80 1200A697 */ lhu $a2, 0x12($sp)
/* 10484 8001FC84 2328A800 */ subu $a1, $a1, $t0
/* 10488 8001FC88 FFFFA530 */ andi $a1, $a1, 0xFFFF
/* 1048C 8001FC8C 2330C800 */ subu $a2, $a2, $t0
.L8001FC90:
/* 10490 8001FC90 1F95000C */ jal SpuVmSetSeqVol
/* 10494 8001FC94 FFFFC630 */ andi $a2, $a2, 0xFFFF
/* 10498 8001FC98 377F0008 */ j .L8001FCDC
/* 1049C 8001FC9C 00000000 */ nop
.L8001FCA0:
/* 104A0 8001FCA0 00220500 */ sll $a0, $a1, 8
/* 104A4 8001FCA4 2520E400 */ or $a0, $a3, $a0
/* 104A8 8001FCA8 00240400 */ sll $a0, $a0, 16
/* 104AC 8001FCAC 03240400 */ sra $a0, $a0, 16
/* 104B0 8001FCB0 7F000534 */ ori $a1, $zero, 0x7F
/* 104B4 8001FCB4 7F000634 */ ori $a2, $zero, 0x7F
/* 104B8 8001FCB8 1F95000C */ jal SpuVmSetSeqVol
/* 104BC 8001FCBC 21380000 */ addu $a3, $zero, $zero
/* 104C0 8001FCC0 0000438E */ lw $v1, 0x0($s2)
/* 104C4 8001FCC4 00000000 */ nop
/* 104C8 8001FCC8 21182302 */ addu $v1, $s1, $v1
/* 104CC 8001FCCC 9000628C */ lw $v0, 0x90($v1)
/* 104D0 8001FCD0 EFFF0424 */ addiu $a0, $zero, -0x11
/* 104D4 8001FCD4 24104400 */ and $v0, $v0, $a0
/* 104D8 8001FCD8 900062AC */ sw $v0, 0x90($v1)
.L8001FCDC:
/* 104DC 8001FCDC 9800028E */ lw $v0, 0x98($s0)
/* 104E0 8001FCE0 00000000 */ nop
/* 104E4 8001FCE4 05004010 */ beqz $v0, .L8001FCFC
/* 104E8 8001FCE8 00241400 */ sll $a0, $s4, 16
/* 104EC 8001FCEC 40000286 */ lh $v0, 0x40($s0)
/* 104F0 8001FCF0 00000000 */ nop
/* 104F4 8001FCF4 13004014 */ bnez $v0, .L8001FD44
/* 104F8 8001FCF8 00000000 */ nop
.L8001FCFC:
/* 104FC 8001FCFC 83230400 */ sra $a0, $a0, 14
/* 10500 8001FD00 001C1300 */ sll $v1, $s3, 16
/* 10504 8001FD04 031C0300 */ sra $v1, $v1, 16
/* 10508 8001FD08 40100300 */ sll $v0, $v1, 1
/* 1050C 8001FD0C 21104300 */ addu $v0, $v0, $v1
/* 10510 8001FD10 80100200 */ sll $v0, $v0, 2
/* 10514 8001FD14 23104300 */ subu $v0, $v0, $v1
/* 10518 8001FD18 80100200 */ sll $v0, $v0, 2
/* 1051C 8001FD1C 23104300 */ subu $v0, $v0, $v1
/* 10520 8001FD20 0980013C */ lui $at, %hi(_ss_score)
/* 10524 8001FD24 21082400 */ addu $at, $at, $a0
/* 10528 8001FD28 9C7C238C */ lw $v1, %lo(_ss_score)($at)
/* 1052C 8001FD2C 80100200 */ sll $v0, $v0, 2
/* 10530 8001FD30 21104300 */ addu $v0, $v0, $v1
/* 10534 8001FD34 9000438C */ lw $v1, 0x90($v0)
/* 10538 8001FD38 EFFF0424 */ addiu $a0, $zero, -0x11
/* 1053C 8001FD3C 24186400 */ and $v1, $v1, $a0
/* 10540 8001FD40 900043AC */ sw $v1, 0x90($v0)
.L8001FD44:
/* 10544 8001FD44 00221300 */ sll $a0, $s3, 8
.L8001FD48:
/* 10548 8001FD48 25208402 */ or $a0, $s4, $a0
/* 1054C 8001FD4C 00240400 */ sll $a0, $a0, 16
/* 10550 8001FD50 03240400 */ sra $a0, $a0, 16
/* 10554 8001FD54 78000526 */ addiu $a1, $s0, 0x78
/* 10558 8001FD58 7C95000C */ jal SpuVmGetSeqVol
/* 1055C 8001FD5C 7A000626 */ addiu $a2, $s0, 0x7A
/* 10560 8001FD60 3C00BF8F */ lw $ra, 0x3C($sp)
/* 10564 8001FD64 3800B48F */ lw $s4, 0x38($sp)
/* 10568 8001FD68 3400B38F */ lw $s3, 0x34($sp)
/* 1056C 8001FD6C 3000B28F */ lw $s2, 0x30($sp)
/* 10570 8001FD70 2C00B18F */ lw $s1, 0x2C($sp)
/* 10574 8001FD74 2800B08F */ lw $s0, 0x28($sp)
/* 10578 8001FD78 4000BD27 */ addiu $sp, $sp, 0x40
/* 1057C 8001FD7C 0800E003 */ jr $ra
/* 10580 8001FD80 00000000 */ nop
.size _SsSndCrescendo, . - _SsSndCrescendo
|
sozud/psy-q-splitter | 3,068 | splitter/test_data/_SpuInit.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SpuInit
/* 00000000 27BDFFE8 */ addiu $sp, $sp, -0x18
/* 00000004 AFB00010 */ sw $s0, 0x10($sp)
/* 00000008 AFBF0014 */ sw $ra, 0x14($sp)
/* 0000000C 0C000000 */ jal ResetCallback
/* 00000010 00808021 */ addu $s0, $a0, $zero
/* 00000014 0C000000 */ jal _spu_init
/* 00000018 02002021 */ addu $a0, $s0, $zero
/* 0000001C 16000008 */ bnez $s0, .L00000040
/* 00000020 3404C000 */ ori $a0, $zero, 0xC000
/* 00000024 34030017 */ ori $v1, $zero, 0x17
/* 00000028 3C020000 */ lui $v0, %hi(_spu_voice_centerNote+46)
/* 0000002C 24420000 */ addiu $v0, $v0, %lo(_spu_voice_centerNote+46)
.L00000030:
/* 00000030 A4440000 */ sh $a0, 0x0($v0)
/* 00000034 2463FFFF */ addiu $v1, $v1, -0x1
/* 00000038 0461FFFD */ bgez $v1, .L00000030
/* 0000003C 2442FFFE */ addiu $v0, $v0, -0x2
.L00000040:
/* 00000040 0C000000 */ jal SpuStart
/* 00000044 00000000 */ nop
/* 00000048 340400D1 */ ori $a0, $zero, 0xD1
/* 0000004C 3C050000 */ lui $a1, %hi(_spu_rev_startaddr)
/* 00000050 8CA50000 */ lw $a1, %lo(_spu_rev_startaddr)($a1)
/* 00000054 3C010000 */ lui $at, %hi(_spu_rev_flag)
/* 00000058 AC200000 */ sw $zero, %lo(_spu_rev_flag)($at)
/* 0000005C 3C010000 */ lui $at, %hi(_spu_rev_reserve_wa)
/* 00000060 AC200000 */ sw $zero, %lo(_spu_rev_reserve_wa)($at)
/* 00000064 3C010000 */ lui $at, %hi(_spu_rev_attr+4)
/* 00000068 AC200000 */ sw $zero, %lo(_spu_rev_attr+4)($at)
/* 0000006C 3C010000 */ lui $at, %hi(_spu_rev_attr+8)
/* 00000070 A4200000 */ sh $zero, %lo(_spu_rev_attr+8)($at)
/* 00000074 3C010000 */ lui $at, %hi(_spu_rev_attr+10)
/* 00000078 A4200000 */ sh $zero, %lo(_spu_rev_attr+10)($at)
/* 0000007C 3C010000 */ lui $at, %hi(_spu_rev_attr+12)
/* 00000080 AC200000 */ sw $zero, %lo(_spu_rev_attr+12)($at)
/* 00000084 3C010000 */ lui $at, %hi(_spu_rev_attr+16)
/* 00000088 AC200000 */ sw $zero, %lo(_spu_rev_attr+16)($at)
/* 0000008C 3C010000 */ lui $at, %hi(_spu_rev_offsetaddr)
/* 00000090 AC250000 */ sw $a1, %lo(_spu_rev_offsetaddr)($at)
/* 00000094 0C000000 */ jal _spu_FsetRXX
/* 00000098 00003021 */ addu $a2, $zero, $zero
/* 0000009C 3C010000 */ lui $at, %hi(_spu_trans_mode)
/* 000000A0 AC200000 */ sw $zero, %lo(_spu_trans_mode)($at)
/* 000000A4 3C010000 */ lui $at, %hi(_spu_transMode)
/* 000000A8 AC200000 */ sw $zero, %lo(_spu_transMode)($at)
/* 000000AC 3C010000 */ lui $at, %hi(_spu_keystat)
/* 000000B0 AC200000 */ sw $zero, %lo(_spu_keystat)($at)
/* 000000B4 8FBF0014 */ lw $ra, 0x14($sp)
/* 000000B8 8FB00010 */ lw $s0, 0x10($sp)
/* 000000BC 27BD0018 */ addiu $sp, $sp, 0x18
/* 000000C0 03E00008 */ jr $ra
/* 000000C4 00000000 */ nop
.size _SpuInit, . - _SpuInit
|
sozud/psy-q-splitter | 3,890 | splitter/test_data/_SsSeqPlay.s | .set noat /* allow manual use of $at */
.set noreorder /* don't insert nops after branches */
glabel _SsSeqPlay
/* E064 8001D864 C8FFBD27 */ addiu $sp, $sp, -0x38
/* E068 8001D868 003C0400 */ sll $a3, $a0, 16
/* E06C 8001D86C 83230700 */ sra $a0, $a3, 14
/* E070 8001D870 002C0500 */ sll $a1, $a1, 16
/* E074 8001D874 031C0500 */ sra $v1, $a1, 16
/* E078 8001D878 40100300 */ sll $v0, $v1, 1
/* E07C 8001D87C 21104300 */ addu $v0, $v0, $v1
/* E080 8001D880 80100200 */ sll $v0, $v0, 2
/* E084 8001D884 23104300 */ subu $v0, $v0, $v1
/* E088 8001D888 80100200 */ sll $v0, $v0, 2
/* E08C 8001D88C 23104300 */ subu $v0, $v0, $v1
/* E090 8001D890 3000BFAF */ sw $ra, 0x30($sp)
/* E094 8001D894 2C00B3AF */ sw $s3, 0x2C($sp)
/* E098 8001D898 2800B2AF */ sw $s2, 0x28($sp)
/* E09C 8001D89C 2400B1AF */ sw $s1, 0x24($sp)
/* E0A0 8001D8A0 2000B0AF */ sw $s0, 0x20($sp)
/* E0A4 8001D8A4 0980013C */ lui $at, %hi(_ss_score)
/* E0A8 8001D8A8 21082400 */ addu $at, $at, $a0
/* E0AC 8001D8AC 9C7C238C */ lw $v1, %lo(_ss_score)($at)
/* E0B0 8001D8B0 80100200 */ sll $v0, $v0, 2
/* E0B4 8001D8B4 21884300 */ addu $s1, $v0, $v1
/* E0B8 8001D8B8 70002286 */ lh $v0, 0x70($s1)
/* E0BC 8001D8BC 8800238E */ lw $v1, 0x88($s1)
/* E0C0 8001D8C0 00000000 */ nop
/* E0C4 8001D8C4 23206200 */ subu $a0, $v1, $v0
/* E0C8 8001D8C8 10008018 */ blez $a0, .L8001D90C
/* E0CC 8001D8CC 21304000 */ addu $a2, $v0, $zero
/* E0D0 8001D8D0 6E002386 */ lh $v1, 0x6E($s1)
/* E0D4 8001D8D4 00000000 */ nop
/* E0D8 8001D8D8 04006018 */ blez $v1, .L8001D8EC
/* E0DC 8001D8DC 21106000 */ addu $v0, $v1, $zero
/* E0E0 8001D8E0 FFFF4224 */ addiu $v0, $v0, -0x1
/* E0E4 8001D8E4 55760008 */ j .L8001D954
/* E0E8 8001D8E8 6E0022A6 */ sh $v0, 0x6E($s1)
.L8001D8EC:
/* E0EC 8001D8EC 05006014 */ bnez $v1, .L8001D904
/* E0F0 8001D8F0 00000000 */ nop
/* E0F4 8001D8F4 8800228E */ lw $v0, 0x88($s1)
/* E0F8 8001D8F8 6E0026A6 */ sh $a2, 0x6E($s1)
/* E0FC 8001D8FC 54760008 */ j .L8001D950
/* E100 8001D900 FFFF4224 */ addiu $v0, $v0, -0x1
.L8001D904:
/* E104 8001D904 55760008 */ j .L8001D954
/* E108 8001D908 880024AE */ sw $a0, 0x88($s1)
.L8001D90C:
/* E10C 8001D90C 2A104300 */ slt $v0, $v0, $v1
/* E110 8001D910 10004014 */ bnez $v0, .L8001D954
/* E114 8001D914 21806000 */ addu $s0, $v1, $zero
/* E118 8001D918 2198E000 */ addu $s3, $a3, $zero
/* E11C 8001D91C 2190A000 */ addu $s2, $a1, $zero
/* E120 8001D920 03241300 */ sra $a0, $s3, 16
.L8001D924:
/* E124 8001D924 5D76000C */ jal _SsGetSeqData
/* E128 8001D928 032C1200 */ sra $a1, $s2, 16
/* E12C 8001D92C 8800228E */ lw $v0, 0x88($s1)
/* E130 8001D930 00000000 */ nop
/* E134 8001D934 FBFF4010 */ beqz $v0, .L8001D924
/* E138 8001D938 03241300 */ sra $a0, $s3, 16
/* E13C 8001D93C 70002386 */ lh $v1, 0x70($s1)
/* E140 8001D940 21800202 */ addu $s0, $s0, $v0
/* E144 8001D944 2A100302 */ slt $v0, $s0, $v1
/* E148 8001D948 F6FF4014 */ bnez $v0, .L8001D924
/* E14C 8001D94C 23100302 */ subu $v0, $s0, $v1
.L8001D950:
/* E150 8001D950 880022AE */ sw $v0, 0x88($s1)
.L8001D954:
/* E154 8001D954 3000BF8F */ lw $ra, 0x30($sp)
/* E158 8001D958 2C00B38F */ lw $s3, 0x2C($sp)
/* E15C 8001D95C 2800B28F */ lw $s2, 0x28($sp)
/* E160 8001D960 2400B18F */ lw $s1, 0x24($sp)
/* E164 8001D964 2000B08F */ lw $s0, 0x20($sp)
/* E168 8001D968 3800BD27 */ addiu $sp, $sp, 0x38
/* E16C 8001D96C 0800E003 */ jr $ra
/* E170 8001D970 00000000 */ nop
.size _SsSeqPlay, . - _SsSeqPlay
|
spotty118/Rustos | 1,033 | src/boot.s | # Multiboot header
.set ALIGN, 1<<0 # align loaded modules on page boundaries
.set MEMINFO, 1<<1 # provide memory map
.set VIDEO, 1<<2 # request video mode
.set FLAGS, ALIGN | MEMINFO | VIDEO # multiboot 'flag' field
.set MAGIC, 0x1BADB002 # magic number lets bootloader find the header
.set CHECKSUM, -(MAGIC + FLAGS) # checksum required to prove we are multiboot
# Multiboot header section
.section .multiboot
.align 4
.long MAGIC
.long FLAGS
.long CHECKSUM
.long 0 # header_addr (unused for ELF)
.long 0 # load_addr
.long 0 # load_end_addr
.long 0 # bss_end_addr
.long 0 # entry_addr
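# Note: the five zero fields above are only consulted when flag bit 16 (the
# a.out kludge) is set; they are kept as zero padding so the video fields
# below land at the offsets the Multiboot 1 spec expects.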
.long 0 # mode_type (0 = linear framebuffer)
.long 1024 # width
.long 768 # height
.long 32 # depth (bits per pixel)
# Stack section
.section .bss
.align 16
stack_bottom:
.skip 16384 # 16 KiB
stack_top:
# Entry point
.section .text
.global _start
.type _start, @function
_start:
mov $stack_top, %esp
call rust_main
cli
1: hlt
jmp 1b
.size _start, . - _start
|
ssamSohn/crosvm | 1,406 | kernel_loader/src/test_elf.S | # Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build instructions:
# x86_64-linux-gnu-as test_elf.S -o test_elf.o
# x86_64-linux-gnu-ld test_elf.o -o test_elf.bin -T test_elf.ld
.intel_syntax noprefix
.section .rodata
hello_world:
.string "Hello world!\n"
.set hello_size, .-hello_world
.text
.globl _start
_start:
lea rsi, [rip + hello_world] # rsi -> message string
mov rcx, hello_size # rcx = length of message
mov dx, 0x3F8 # dx = COM1 port
.print_loop:
# Wait for the transmit buffer to be empty by polling the line status.
add dx, 5 # dx = line status register
.wait_empty:
in al, dx # read line status
test al, 0x20 # check buffer empty flag
jz .wait_empty # keep waiting if flag is not set
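# (0x20 is bit 5 of the line status register, "transmitter holding register
# empty", on a 16550-compatible UART)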
.wait_done:
sub dx, 5 # dx = data register
# Load a byte of the message and send it to the serial port.
lodsb # load message byte from RSI to AL
out dx, al # send byte to serial port
dec rcx # rcx--
jnz .print_loop # repeat if rcx != 0
.done:
int3 # cause vcpu to exit
|
sspphh/Dragon-Mini | 5,730 | kernel/src/arch/riscv64/asm/head.S | #include "common/asm.h"
#include "asm/csr.h"
.section .bootstrap
// Kernel entry (DragonStub jumps here)
// Arguments:
// a0: hartid (core ID)
// a1: fdt (flattened device tree)
.global _start
.type _start, @function
ENTRY(_start)
/* Mask all interrupts */
csrw CSR_IE, zero
csrw CSR_IP, zero
// Stash the hartid
la t0, __initial_hartid_ptr
sd a0, 0(t0)
// Stash the flattened device tree address
la t0, __initial_fdt_ptr
sd a1, 0(t0)
// Stash the physical address that DragonStub loaded the _start label to
auipc t0, 0
li t1, -4095
and t0, t0, t1
la t1, __initial_start_load_paddr
sd t0, 0(t1)
// Zero out the page-table storage
la a0, __initial_pgtable
call __initial_clear_pgtable
la a0, __initial_l1_pgtable
call __initial_clear_pgtable
la a0, __initial_l1_pgtable
li a1, 4096
add a0, a0, a1
call __initial_clear_pgtable
// Set up the page tables, mapping the kernel's current physical address into
// the kernel virtual space it was linked at
la a0, __initial_start_load_paddr
ld a0, 0(a0)
// Offset 0xffffffc000000000; compute the starting L0 page-table entry
// (the kernel link address keeps another 16M of headroom, hence the extra 0x1000000)
li a1, KERNEL_VIRT_START
// Map the physical address to the virtual address
call initial_map_256M_phys_addr
// Add an identity mapping
la a0, __initial_start_load_paddr
ld a0, 0(a0)
mv a1, a0
call initial_map_1g_identical
__init_set_pgtable_loop_end:
call __initial_reloacate_enable_mmu
.option push
.option norelax
// Set the stack pointer
la a0, BSP_IDLE_STACK_SPACE
mv sp, a0
li t0, 32752 // reserve 16 bytes to guard against overrun
add sp, sp, t0
.option pop
/*
* Disable FPU & VECTOR to detect illegal usage of
* floating point or vector in kernel space
*/
li t0, SR_FS_VS
csrc CSR_STATUS, t0
/* Call the kernel */
la a0, __initial_hartid_ptr
ld a0, 0(a0)
la a1, __initial_fdt_ptr
ld a1, 0(a1)
// Jump to kernel_main
call kernel_main
nop
wfi
__initial_reloacate_enable_mmu:
// Compute the offset between the load physical address and the kernel's high virtual address
la t0, __initial_start_load_paddr
ld t0, 0(t0)
li t1, KERNEL_VIRT_START
sub t1, t1, t0
// Relocate the return address
add ra, ra, t1
/* Point stvec to virtual address of instruction after satp write */
/* Set trap vector to spin forever to help debug */
la a2, 3f
add a2, a2, t1
csrw CSR_TVEC, a2
// enable MMU
la a2, __initial_pgtable
srli a2, a2, 12
la a0, __initial_satp_mode
ld a0, 0(a0)
or a2, a2, a0
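// satp layout note: the root table's PPN goes in the low bits and the mode in
// bits 63:60; SATP_MODE_39 is assumed to hold the Sv39 mode bits pre-shifted there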
sfence.vma
csrw satp, a2
3:
la a0, __initial_Lsecondary_park
add a0, a0, t1
csrw CSR_TVEC, a0
csrw satp, a2
sfence.vma
ret
// Map physical to virtual addresses (2M pages, 256M in total)
// Arguments:
// a0: physical address
// a1: virtual address
initial_map_256M_phys_addr:
// Check that the physical address is 2M-aligned
li t0, 0x1fffff
and t0, t0, a0
bnez t0, __initial_map_1g_phys_failed
// Check that the virtual address is 2M-aligned
li t0, 0x1fffff
and t0, t0, a1
bnez t0, __initial_map_1g_phys_failed
// Keep the starting virtual address in t2
mv t2, a1
// Align down to 2M
li t1, -0x200000
and t2, t2, t1
// Compute the index of the L0 page-table entry
srli t2, t2, 30
andi t2, t2, 511
// Fill in the first L0 page-table entry
la t4, __initial_pgtable
slli t5, t2, 3 // t5 = t2 * 8
add t4, t4, t5 // t4 = t4 + t5; t4 now points at the L0 entry
// Extract the L1 page table's address
la t5, __initial_l1_pgtable
srli t5, t5, 12
slli t5, t5, 10
ori t5, t5, 0x1 // set the entry's attributes: V = 1
// Write the L0 page-table entry
sd t5, 0(t4)
// Decide whether a second L1 table entry is needed (does the range spill past the first L1 table?)
addi t3, t2, 128
li t5, 512
blt t3, t5, __initial_set_l1_pgtable
// Fill in the second L1 page table
la t3, __initial_l1_pgtable
li t5, 4096
add t3, t3, t5
srli t3, t3, 12
slli t3, t3, 10
ori t3, t3, 0x1 // set the entry's attributes: V = 1
// Write the L0 page-table entry
sd t3, 8(t4)
__initial_set_l1_pgtable: // start filling the L1 page table
// Fetch the starting physical address
mv t6, a0
// Fetch the L1 page table's address
la t0, __initial_l1_pgtable
// Compute the index of the starting L1 entry
mv t3, a1
srli t3, t3, 21
andi t3, t3, 511
slli t3, t3, 3 // t3 = t3 * 8
add t0, t0, t3 // t0 = t0 + t3
// Initialize the loop counter
li t5, 0
__initial_set_l1_pgtable_loop:
mv t3, t6
srli t3, t3, 12 // t3 = t6 >> 12 (page frame number)
li t1, 0x3FFFFFFFFFFFFF
and t3, t3, t1 // t3 = t3 & 0x3FFFFFFFFFFFFF
slli t3, t3, 10 // t3 = t3 << 10
ori t3, t3, 0xEF // set the entry's attributes: R/W/X/V/A/D/G = 1
// Write the L1 page-table entry
sd t3, 0(t0)
// Advance the entry pointer
addi t0, t0, 8
// Advance t6 by 2M
li t2, 0x200000
add t6, t6, t2
// Increment the counter
addi t5, t5, 1
// Loop until the counter reaches 128 (128 * 2M = 256M)
li t2, 128
blt t5, t2, __initial_set_l1_pgtable_loop
// Done filling
ret
__initial_map_1g_phys_failed:
// The address was not 2M-aligned
wfi
la a0, __initial_map_1g_phys_failed
// Jump back (spin forever)
jr a0
// Identity-map a physical address (1G pages)
// Arguments:
// a0: physical address
initial_map_1g_identical:
mv a1, a0
// Align _start down to 1GB
li t0, -0x40000000
// Compute the starting physical address into t0
and t0, t0, a0
// Keep the starting virtual address in t2
mv t2, a1
// Align down to 1G
li t1, -0x40000000
and t2, t2, t1
// Shift right by 30 bits to get the L0 entry index
srli t2, t2, 30
// Mask with 511 to finish the L0 entry index
andi t2, t2, 511
// Fill in the page-table entries
la t4, __initial_pgtable
slli t3, t2, 3 // t3 = t2 * 8
add t4, t4, t3 // t4 = t4 + t3
mv t3, t0
srli t3, t3, 12 // t3 = t0 >> 12 (page frame number)
slli t3, t3, 10 // t3 = t3 << 10
ori t3, t3, 0xEF // set R/W/X/V/A/D/G = 1
// Compute the PFN delta for 1G
li t2, 0x40000000
srli t2, t2, 12
// Shift the delta PFN up to bit 10, the PTE's PPN position
slli t2, t2, 10
li t1, 2 // two 1G entries are written (the loop label says 8g, but the count here is 2)
__loop_set_8g:
sd t3, 0(t4)
// Advance t4
addi t4, t4, 8
// Add 1G worth of PFN
add t3, t3, t2
addi t1, t1, -1
bnez t1, __loop_set_8g
ret
// Zero out a page table's storage
// Arguments:
// a0: page table address
__initial_clear_pgtable:
mv t0, a0
li t1, 512
li t2, 0 // the zero to store
__initial_clear_pgtable_loop:
sd t2, 0(t0) // store 0 to the current doubleword
addi t0, t0, 8 // advance t0
addi t1, t1, -1 // one fewer doubleword remaining
bnez t1, __initial_clear_pgtable_loop
ret
.align 2
__initial_Lsecondary_park:
/* We lack SMP support or have too many harts, so park this hart */
wfi
j __initial_Lsecondary_park
// Globals storing the flattened device tree address and the hartid
.global __initial_fdt_ptr
__initial_fdt_ptr:
.quad 0
.global __initial_hartid_ptr
__initial_hartid_ptr:
.quad 0
// Physical address that the _start label was loaded to at boot
.global __initial_start_load_paddr
__initial_start_load_paddr:
.quad 0
__initial_kernel_main_vaddr:
.quad 0
.global __initial_satp_mode
__initial_satp_mode:
.quad SATP_MODE_39
// Storage for the initial page tables (the L0 table in Sv39 mode)
.section .initial_pgtable_section
.global __initial_pgtable
__initial_pgtable:
.skip 4096
.global __initial_l1_pgtable
__initial_l1_pgtable:
.skip 8192
|
sspphh/Dragon-Mini | 2,029 | kernel/src/arch/x86_64/asm/apu_boot.S | #include "../common/asm.h"
.align 0x1000 // 4k alignment
.section .text
.code16
ENTRY(_apu_boot_start)
_apu_boot_base = .
cli
wbinvd // flush the processor cache back to memory
mov %cs, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %ss
mov %ax, %fs
mov %ax, %gs
// Set the stack pointer
movl $(_apu_boot_tmp_stack_end - _apu_boot_base), %esp
// Compute the base address of the AP bootstrap code
mov %cs, %ax
movzx %ax, %esi
shll $4, %esi
// set gdt and 32bit/64bit code addr
leal (_apu_code32 - _apu_boot_base)(%esi), %eax
movl %eax, (_apu_code32_vector - _apu_boot_base)
leal (_apu_code64 - _apu_boot_base)(%esi), %eax
movl %eax, (_apu_code64_vector - _apu_boot_base)
leal (_apu_tmp_gdt - _apu_boot_base)(%esi), %eax
movl %eax, (_apu_tmp_gdt + 2 - _apu_boot_base)
// Switch from real mode to protected mode
lidtl _apu_tmp_idt - _apu_boot_base
lgdtl _apu_tmp_gdt - _apu_boot_base
// Flip CR0.PE to enable protected mode
smsw %ax
bts $0, %ax
lmsw %ax
// Far-jump into protected mode
ljmpl *(_apu_code32_vector - _apu_boot_base)
.code32
.align 0x1000
_apu_code32:
# Head toward long mode
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %ss
mov %ax, %fs
mov %ax, %gs
// Set the stack pointer
leal (_apu_boot_tmp_stack_end - _apu_boot_base)(%esi), %eax
movl %eax, %esp
// 1. Enable PAE
mov %cr4, %eax
or $(1<<5), %eax
mov %eax, %cr4
movl $enter_head_from_ap_boot, %eax
jmpl *%eax
hlt
.code64
.align 0x1000
_apu_code64:
hlt
.align 0x1000
_apu_tmp_idt:
.word 0
.word 0,0
.align 0x1000
_apu_tmp_gdt:
.short _apu_tmp_gdt_end - _apu_tmp_gdt -1
.long _apu_tmp_gdt - _apu_boot_base
.short 0
.quad 0x00cf9a000000ffff
.quad 0x00cf92000000ffff
.quad 0x0020980000000000
.quad 0x0000920000000000
_apu_tmp_gdt_end:
.align 0x1000
_apu_code32_vector:
.long _apu_code32 - _apu_boot_base
.word 0x08,0
.align 0x1000
_apu_code64_vector:
.long _apu_code64 - _apu_boot_base
.word 0x18,0
.align 0x1000
_apu_boot_tmp_stack_start:
// .org 0x400
_apu_boot_tmp_stack_end:
ENTRY(_apu_boot_end)
|
sspphh/Dragon-Mini | 7,708 | kernel/src/arch/x86_64/asm/entry.S | #include <common/asm.h>
.code64
//.section .text
R15 = 0x00
R14 = 0x08
R13 = 0x10
R12 = 0x18
R11 = 0x20
R10 = 0x28
R9 = 0x30
R8 = 0x38
RBX = 0x40
RCX = 0x48
RDX = 0x50
RSI = 0x58
RDI = 0x60
RBP = 0x68
DS = 0x70
ES = 0x78
RAX = 0x80
FUNC = 0x88
ERRCODE = 0x90
// The following fields are pushed onto the stack by the processor when the interrupt fires
RIP = 0x98
CS = 0xa0
RFLAGS = 0xa8
OLD_RSP = 0xb0
OLDSS = 0xb8
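// The constants above are byte offsets into the register save area built by
// Err_Code below: r15 sits at offset 0 because it is pushed last, so it ends
// up at the lowest stack address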
Restore_all:
// === Restore the saved context ===
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rbx
popq %rcx
popq %rdx
popq %rsi
popq %rdi
popq %rbp
popq %rax // popping directly into ds is not allowed
movq %rax, %ds
popq %rax
movq %rax, %es
popq %rax
addq $0x10, %rsp // drop the FUNC and errcode slots
sti
iretq
ret_from_exception:
// === Return from an interrupt ===
ENTRY(ret_from_intr)
// Enter the signal-handling path
cli
// Pass the stack pointer of the frame we were about to return to as do_signal's first argument
movq %rsp, %rdi
callq do_signal
cli
__entry_ret_from_intr_before_gs_check_2:
push %rcx
addq $8, %rsp
movq CS(%rsp), %rcx
subq $8, %rsp
andq $0x3, %rcx
cmpq $0x3, %rcx
jne __entry_ret_from_intr_after_gs_check_2
swapgs
__entry_ret_from_intr_after_gs_check_2:
popq %rcx
// Restore the registers
jmp Restore_all
Err_Code:
// ===== With an error code present, save the registers and jump to the service routine
pushq %rax
movq %es, %rax
pushq %rax
movq %ds, %rax
pushq %rax
xorq %rax, %rax
pushq %rbp
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbx
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
cld
movq ERRCODE(%rsp), %rsi // load the error code into rsi, the handler's second argument
movq FUNC(%rsp), %rdx
movq $0x10, %rdi // load the kernel data segment selector
movq %rdi, %ds
movq %rdi, %es
movq %rsp, %rdi // pass the stack pointer in rdi, the handler's first argument
__entry_err_code_before_gs_check_1:
pushq %rcx
movq CS(%rdi), %rcx
and $0x3, %rcx
cmp $0x3, %rcx
jne __entry_err_code_after_gs_check_1
swapgs
__entry_err_code_after_gs_check_1:
popq %rcx
callq *%rdx // call the service routine; the * makes this an indirect (absolute) call
__entry_err_code_to_ret_from_exception:
jmp ret_from_exception
// 0 #DE divide error
ENTRY(trap_divide_error)
pushq $0 // #DE pushes no error code; push a 0 to keep the pop layout consistent
pushq %rax // save rax first
leaq do_divide_error(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 1 #DB debug exception
ENTRY(trap_debug)
pushq $0
pushq %rax
leaq do_debug(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 2 non-maskable interrupt
ENTRY(trap_nmi)
// The NMI is an external interrupt rather than an exception and carries no error code,
// so it should follow the interrupt handling path
pushq $0 // placeholder err_code
pushq %rax
leaq do_nmi(%rip), %rax
xchgq %rax, (%rsp)
jmp Err_Code
// 3 #BP breakpoint exception
ENTRY(trap_int3)
pushq $0
pushq %rax
leaq do_int3(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 4 #OF overflow exception
ENTRY(trap_overflow)
pushq $0
pushq %rax
leaq do_overflow(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 5 #BR bound-range exceeded
ENTRY(trap_bounds)
pushq $0
pushq %rax
leaq do_bounds(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 6 #UD invalid/undefined opcode
ENTRY(trap_undefined_opcode)
pushq $0
pushq %rax
leaq do_undefined_opcode(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 7 #NM device not available (no FPU present)
ENTRY(trap_dev_not_avaliable)
pushq $0
pushq %rax
leaq do_dev_not_avaliable(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 8 #DF double fault
ENTRY(trap_double_fault)
pushq %rax
leaq do_double_fault(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 9 coprocessor segment overrun (reserved)
ENTRY(trap_coprocessor_segment_overrun)
pushq $0
pushq %rax
leaq do_coprocessor_segment_overrun(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 10 #TS invalid TSS
ENTRY(trap_invalid_TSS)
// === Invalid task state segment #TS ==
// Carries an error code; the processor has already pushed it onto the handler's stack
pushq %rax
leaq do_invalid_TSS(%rip), %rax
xchgq %rax, (%rsp)
jmp Err_Code
// 11 #NP segment not present
ENTRY(trap_segment_not_exists)
pushq %rax
leaq do_segment_not_exists(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 12 #SS stack segment fault
ENTRY(trap_stack_segment_fault)
pushq %rax
leaq do_stack_segment_fault(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 13 #GP general protection fault
ENTRY(trap_general_protection)
pushq %rax
leaq do_general_protection(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 14 #PF page fault
ENTRY(trap_page_fault)
// === Page fault #PF ==
// Carries an error code
pushq %rax
leaq do_page_fault(%rip), %rax
xchgq %rax, (%rsp)
jmp Err_Code
// 15 reserved by Intel; do not use
// 16 #MF x87 FPU error (math fault)
ENTRY(trap_x87_FPU_error)
pushq $0
pushq %rax
leaq do_x87_FPU_error(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 17 #AC alignment check
ENTRY(trap_alignment_check)
pushq %rax
leaq do_alignment_check(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 18 #MC machine check
ENTRY(trap_machine_check)
pushq $0
pushq %rax
leaq do_machine_check(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 19 #XM SIMD floating-point exception
ENTRY(trap_SIMD_exception)
pushq $0
pushq %rax
leaq do_SIMD_exception(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// 20 #VE virtualization exception
ENTRY(trap_virtualization_exception)
pushq $0
pushq %rax
leaq do_virtualization_exception(%rip), %rax // fetch the handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// System call entry
// 0x80 system call gate
ENTRY(syscall_int)
pushq $0
pushq %rax
leaq syscall_handler(%rip), %rax // fetch the syscall service routine's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
// ignore_int entry point used after the irq module is initialized
ENTRY(ignore_int)
pushq $0
pushq %rax
leaq ignore_int_handler(%rip), %rax // fetch the ignore handler's address
xchgq %rax, (%rsp) // swap the FUNC address onto the stack
jmp Err_Code
ENTRY(syscall_64)
// Switch between the user and kernel stacks
cli
swapgs
movq %rsp, %gs:0x8
movq %gs:0x0, %rsp
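// (assumption: gs:0x0 holds this CPU's kernel stack top and gs:0x8 is a per-CPU
// scratch slot for the saved user rsp, both set up elsewhere in the kernel)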
pushq $43 // USER_DS
pushq %gs:0x8 // rsp
pushq %r11 // RFLAGS
pushq $51 // USER_CS
pushq %rcx // RIP
pushq $0 // error code placeholder
pushq %rax
leaq syscall_handler(%rip), %rax // FUNC
xchgq %rax, (%rsp)
pushq %rax // rax
movq %es, %rax
pushq %rax // es
movq %ds, %rax
pushq %rax // ds
xorq %rax, %rax
pushq %rbp
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rbx
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
cld
xorq %rsi, %rsi
movq FUNC(%rsp), %rdx
movq %rsp, %rdi // pass the stack pointer in rdi, the handler's first argument
sti
callq *%rdx // call the service routine
// Pass the stack pointer of the frame we were about to return to as do_signal's first argument
movq %rsp, %rdi
callq do_signal
cli
// === Restore the saved context ===
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rbx
popq %rcx
popq %rdx
popq %rsi
popq %rdi
popq %rbp
popq %rax // popping directly into ds is not allowed
movq %rax, %ds
popq %rax
movq %rax, %es
popq %rax
addq $0x10, %rsp // drop the FUNC and errcode slots
popq %rcx // pop rip into rcx
addq $0x8, %rsp // drop cs
popq %r11 // pop rflags into r11
popq %rsp // Restore rsp
swapgs
sysretq
|
sspphh/Dragon-Mini | 12,460 | kernel/src/arch/x86_64/asm/head.S | // 这是内核执行头程序
// Created by longjin.
// 2022/01/20
#include "common/asm.h"
// 以下是来自 multiboot2 规范的定义
// How many bytes from the start of the file we search for the header.
#define MULTIBOOT_SEARCH 32768
#define MULTIBOOT_HEADER_ALIGN 8
// The magic field should contain this.
#define MULTIBOOT2_HEADER_MAGIC 0xe85250d6
// This should be in %eax.
#define MULTIBOOT2_BOOTLOADER_MAGIC 0x36d76289
// Alignment of multiboot modules.
#define MULTIBOOT_MOD_ALIGN 0x00001000
// Alignment of the multiboot info structure.
#define MULTIBOOT_INFO_ALIGN 0x00000008
// Flags set in the 'flags' member of the multiboot header.
#define MULTIBOOT_TAG_ALIGN 8
#define MULTIBOOT_TAG_TYPE_END 0
#define MULTIBOOT_TAG_TYPE_CMDLINE 1
#define MULTIBOOT_TAG_TYPE_BOOT_LOADER_NAME 2
#define MULTIBOOT_TAG_TYPE_MODULE 3
#define MULTIBOOT_TAG_TYPE_BASIC_MEMINFO 4
#define MULTIBOOT_TAG_TYPE_BOOTDEV 5
#define MULTIBOOT_TAG_TYPE_MMAP 6
#define MULTIBOOT_TAG_TYPE_VBE 7
#define MULTIBOOT_TAG_TYPE_FRAMEBUFFER 8
#define MULTIBOOT_TAG_TYPE_ELF_SECTIONS 9
#define MULTIBOOT_TAG_TYPE_APM 10
#define MULTIBOOT_TAG_TYPE_EFI32 11
#define MULTIBOOT_TAG_TYPE_EFI64 12
#define MULTIBOOT_TAG_TYPE_SMBIOS 13
#define MULTIBOOT_TAG_TYPE_ACPI_OLD 14
#define MULTIBOOT_TAG_TYPE_ACPI_NEW 15
#define MULTIBOOT_TAG_TYPE_NETWORK 16
#define MULTIBOOT_TAG_TYPE_EFI_MMAP 17
#define MULTIBOOT_TAG_TYPE_EFI_BS 18
#define MULTIBOOT_TAG_TYPE_EFI32_IH 19
#define MULTIBOOT_TAG_TYPE_EFI64_IH 20
#define MULTIBOOT_TAG_TYPE_LOAD_BASE_ADDR 21
#define MULTIBOOT_HEADER_TAG_END 0
#define MULTIBOOT_HEADER_TAG_INFORMATION_REQUEST 1
#define MULTIBOOT_HEADER_TAG_ADDRESS 2
#define MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS 3
#define MULTIBOOT_HEADER_TAG_CONSOLE_FLAGS 4
#define MULTIBOOT_HEADER_TAG_FRAMEBUFFER 5
#define MULTIBOOT_HEADER_TAG_MODULE_ALIGN 6
#define MULTIBOOT_HEADER_TAG_EFI_BS 7
#define MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS_EFI32 8
#define MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS_EFI64 9
#define MULTIBOOT_HEADER_TAG_RELOCATABLE 10
#define MULTIBOOT_ARCHITECTURE_I386 0
#define MULTIBOOT_ARCHITECTURE_MIPS32 4
#define MULTIBOOT_HEADER_TAG_OPTIONAL 1
#define MULTIBOOT_LOAD_PREFERENCE_NONE 0
#define MULTIBOOT_LOAD_PREFERENCE_LOW 1
#define MULTIBOOT_LOAD_PREFERENCE_HIGH 2
#define MULTIBOOT_CONSOLE_FLAGS_CONSOLE_REQUIRED 1
#define MULTIBOOT_CONSOLE_FLAGS_EGA_TEXT_SUPPORTED 2
// Code compiled directly with -m64 is 64-bit,
// but the machine is still in 32-bit mode right after boot, which would be like
// running a 64-bit program on a 32-bit machine.
// So an extra layer of -m32 code is needed: it enables long mode, then jumps into
// the code compiled with -m64.
// On x86_64, entering long mode (IA32E) during boot requires a temporary page table.
// See https://wiki.osdev.org/Creating_a_64-bit_kernel:
// With a 32-bit bootstrap in your kernel
// This part boots from protected mode into long mode
// and runs in 32-bit mode
// Declare that this block of code is assembled in 32-bit mode
.code32
// multiboot2 file header
// Compute the header length
.SET HEADER_LENGTH, multiboot_header_end - multiboot_header
// Compute the checksum (magic + architecture + header length + checksum must sum to 0 mod 2^32)
.SET CHECKSUM, -(MULTIBOOT2_HEADER_MAGIC + MULTIBOOT_ARCHITECTURE_I386 + HEADER_LENGTH)
// 8-byte alignment
.section .multiboot_header
.align MULTIBOOT_HEADER_ALIGN
// Declare the owning section
multiboot_header:
// Magic number
.long MULTIBOOT2_HEADER_MAGIC
// Architecture
.long MULTIBOOT_ARCHITECTURE_I386
// Header length
.long HEADER_LENGTH
// Checksum
.long CHECKSUM
// Add other tags here; see Multiboot2 Specification version 2.0.pdf for details
// Framebuffer tag (this also sets qemu's resolution; the default is 1440*900, and 640*480 etc. are also supported)
.align 8
framebuffer_tag_start:
.short MULTIBOOT_HEADER_TAG_FRAMEBUFFER
.short MULTIBOOT_HEADER_TAG_OPTIONAL
.long framebuffer_tag_end - framebuffer_tag_start
.long 1440 // width
.long 900 // height
.long 32
framebuffer_tag_end:
.align 8
.short MULTIBOOT_HEADER_TAG_END
// End tag
.short 0
.long 8
multiboot_header_end:
.section .bootstrap
.global _start
.type _start, @function
# Defined in multiboot2.cpp
.extern _start64
.extern boot_info_addr
.extern multiboot2_magic
ENTRY(_start)
// Disable interrupts
cli
// pointer to the multiboot2_info structure
mov %ebx, mb2_info
//mov %ebx, %e8
// magic number
mov %eax, mb2_magic
//mov %eax, %e9
// Jump from protected mode to long mode
// 1. Enable PAE
mov %cr4, %eax
or $(1<<5), %eax
mov %eax, %cr4
// 2. Set up the temporary page tables
// top level
mov $pml4, %eax
mov $pdpt, %ebx
or $0x3, %ebx
mov %ebx, 0(%eax)
// second level
mov $pdpt, %eax
mov $pd, %ebx
or $0x3, %ebx
mov %ebx, 0(%eax)
// third level
mov $pd, %eax
mov $pt, %ebx
or $0x3, %ebx
mov %ebx, 0(%eax)
// lowest level
// loop 512 times to fill one page
mov $512, %ecx
mov $pt, %eax
mov $0x3, %ebx
.fill_pt:
mov %ebx, 0(%eax)
add $0x1000, %ebx
add $8, %eax
loop .fill_pt
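// (512 PTEs x 4KiB identity-map the first 2MiB of physical memory)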
.global enter_head_from_ap_boot
enter_head_from_ap_boot:
// Load CR3
mov $pml4, %eax
mov %eax, %cr3
// 3. Switch to long mode
mov $0xC0000080, %ecx
rdmsr
or $(1<<8), %eax
wrmsr
// 4. Enable paging
mov %cr0, %eax
or $(1<<31), %eax
mov %eax, %cr0
// 5. Reload the GDT
mov $gdt64_pointer, %eax
lgdt 0(%eax)
jmp $0x8, $ready_to_start_64
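// (the far jump reloads CS with selector 0x08, the 64-bit code descriptor in
// gdt64, which is what actually completes the switch into long mode)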
hlt
ret
.code64
.global ready_to_start_64
ready_to_start_64:
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %ss
mov $0x7e00, %esp
// 6. Jump to start64
movq switch_to_start64(%rip), %rax
pushq $0x08 // segment selector
pushq %rax
lretq
switch_to_start64:
.quad _start64
.code64
is_from_ap:
hlt
.global _start64
.type _start64, @function
.extern Start_Kernel
ENTRY(_start64)
// Initialize the registers
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %ss
mov $0x7e00, %esp
// === Load the GDTR ====
lgdt GDT_POINTER(%rip) // I never quite figured out rip-relative addressing; per the docs it mainly exists for PIC (position independent code)
//lgdt $GDT_POINTER
// === Load the IDTR ====
lidt IDT_POINTER(%rip)
//lidt $IDT_POINTER
movq GDT_POINTER(%rip), %r12
movq head_stack_start(%rip), %rsp
// Branch on whether this processor is an AP
movq $0x1b, %rcx // use the IA32_APIC_BASE.BSP[8] flag to decide whether this core is an AP
rdmsr
bt $8, %rax
jnc load_apu_cr3
// 2. Set up the temporary page tables
// top level
mov $__PML4E, %eax
mov $__PDPTE, %ebx
or $0x3, %ebx
mov %ebx, 0(%eax)
mov $__PML4E, %eax
// advance by 256 entries (256 * 8 = 2048 bytes) to map the higher half as well
add $2048, %eax
mov %ebx, 0(%eax)
// second level
mov $__PDPTE, %eax
mov $__PDE, %ebx
or $0x3, %ebx
mov %ebx, 0(%eax)
// third level
mov $__PDE, %eax
mov $50, %ecx
mov $__PT_S, %ebx
or $0x3, %ebx
.fill_pde_64:
mov %ebx, 0(%eax)
add $0x1000, %ebx
add $8, %eax
loop .fill_pde_64
// lowest level
// loop 512*25 = 12800 times to fill 25 pages, 50M in total
mov $12800, %ecx
mov $__PT_S, %eax
mov $0x3, %ebx
.fill_pt_64:
mov %ebx, 0(%eax)
add $0x1000, %ebx
add $8, %eax
loop .fill_pt_64
// zero-fill 50-100M: another 25 page tables
mov $12800, %ecx
.fill_pt_64_2:
movq $0, 0(%eax)
add $8, %eax
loop .fill_pt_64_2
// ==== Load the CR3 register
load_cr3:
movq $__PML4E, %rax // set the page directory base address
movq %rax, %cr3
jmp to_switch_seg
load_apu_cr3:
// The memory-management module has rebuilt the page tables, so an AP core must use
// the new kernel page table while it initializes.
// The smp module stores that table's value in the __APU_START_CR3 variable
// Load the value held in __APU_START_CR3
movq $__APU_START_CR3, %rax
movq 0(%rax), %rax
movq %rax, %cr3
jmp to_switch_seg
to_switch_seg:
movq switch_seg(%rip), %rax
// ljmp and lcall are unsupported in GAS here, so fake a call frame and jump via
// lretq; that is what actually reloads the cs register
// Such a neat trick. Amazing!
pushq $0x08 //段选择子
pushq %rax
lretq
// Code running in 64-bit mode
switch_seg:
.quad entry64
entry64:
movq $0x10, %rax
movq %rax, %ds
movq %rax, %es
movq %rax, %gs
movq %rax, %ss
movq head_stack_start(%rip), %rsp // address for rsp
// Reload the GDT and IDT at their high addresses
leaq GDT_Table(%rip), %r8
leaq GDT_END(%rip), %r9
subq %r8, %r9
movq %r9, %r13 // GDT size
leaq IDT_Table(%rip), %r8
leaq IDT_END(%rip), %r9
subq %r8, %r9
movq %r9, %r12 // IDT size
lgdt GDT_POINTER64(%rip)
lidt IDT_POINTER64(%rip)
// Branch on whether this processor is an AP
movq $0x1b, %rcx // use the IA32_APIC_BASE.BSP[8] flag to decide whether this core is an AP
rdmsr
bt $8, %rax
jnc start_smp
setup_IDT:
// This code is only used early during boot; the IDT is set up again later in the C code,
leaq m_ignore_int(%rip), %rdx // stash ignore_int's address for the interrupt descriptor's high 8B
movq $(0x08 << 16), %rax // set the segment selector: kernel code segment 0x08, TI=0, RPL=0
movw %dx, %ax
movq $ (0x8e00 << 32), %rcx // set Type=1110, P=1, DPL=00, 0=0
addq %rcx, %rax
// Write ignore_int's address into place: rax holds the low 8B, rdx the high 8B
movl %edx, %ecx
shrl $16, %ecx // drop the low 16 bits
shlq $48, %rcx
addq %rcx, %rax // fill in offset bits 31:16
shrq $32, %rdx // (32 bits already placed, so shift right by 32)
leaq IDT_Table(%rip), %rdi // load the IDT's base address into rdi
mov $256, %rcx // initialize every interrupt descriptor
repeat_set_idt:
// ====== Loop: initialize all 256 interrupt descriptors ===
movq %rax, (%rdi) // store the low 8B
movq %rdx, 8(%rdi) // store the high 8B
addq $0x10, %rdi // move on to the next IDT entry
dec %rcx
jne repeat_set_idt
//now enable SSE and the like
movq %cr0, %rax
and $0xFFFB, %ax //clear coprocessor emulation CR0.EM
or $0x2, %ax //set coprocessor monitoring CR0.MP
movq %rax, %cr0
movq %cr4, %rax
or $(3 << 9), %ax //set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
movq %rax, %cr4
movq go_to_kernel(%rip), %rax /* movq address */
pushq $0x08
pushq %rax
// Pass the arguments
movq mb2_info, %rdi
movq mb2_magic, %rsi
movq %r13, %rdx // GDT size
movq %r12, %r10 // IDT size
lretq
go_to_kernel:
.quad kernel_main
start_smp:
//now enable SSE and the like
movq %cr0, %rax
and $0xFFFB, %ax //clear coprocessor emulation CR0.EM
or $0x2, %ax //set coprocessor monitoring CR0.MP
movq %rax, %cr0
movq %cr4, %rax
or $(3 << 9), %ax //set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
movq %rax, %cr4
movq go_to_smp_kernel(%rip), %rax /* movq address */
pushq $0x08
pushq %rax
/*
// Reload the GDT and IDT at their high addresses
leaq GDT_Table(%rip), %r8
leaq GDT_END(%rip), %r9
subq %r8, %r9
movq %r9, %r13 // GDT size
leaq IDT_Table(%rip), %r8
leaq IDT_END(%rip), %r9
subq %r8, %r9
movq %r9, %r12 // IDT size
lgdt GDT_POINTER64(%rip)
lidt IDT_POINTER64(%rip)
*/
lretq
go_to_smp_kernel:
.quad smp_ap_start
// ==== Exception/interrupt handling: ignore_int, ignore the interrupt
// (only used early during boot; the C code later rebuilds the IDT and with it ignore_int's entry point)
m_ignore_int:
// Hand off to the C-language ignore_int
movq go_to_ignore_int(%rip), %rax
pushq $0x08
pushq %rax
lretq
go_to_ignore_int:
.quad ignore_int_handler
ENTRY(head_stack_start)
.quad BSP_IDLE_STACK_SPACE + 32768
// Initial page tables
.align 0x1000 // 4k alignment
__PML4E:
.skip 0x1000
__PDPTE:
.skip 0x1000
// third-level page table
__PDE:
.skip 0x1000
// Reserve 50 fourth-level page tables covering 100M of memory; these 50 tables occupy 200KB
__PT_S:
.skip 0x32000
.global __APU_START_CR3
__APU_START_CR3:
.quad 0
// GDT
.align 16
.global GDT_Table // let external code reference the GDT
GDT_Table:
.quad 0x0000000000000000 // 0 null descriptor 0x00
.quad 0x0020980000000000 // 1 kernel 64-bit code segment 0x08
.quad 0x0000920000000000 // 2 kernel 64-bit data segment 0x10
.quad 0x0000000000000000 // 3 user 32-bit code segment 0x18
.quad 0x0000000000000000 // 4 user 32-bit data segment 0x20
.quad 0x00cff3000000ffff // 5 user 64-bit data segment 0x28
.quad 0x00affb000000ffff // 6 user 64-bit code segment 0x30
.quad 0x00cf9a000000ffff // 7 kernel 32-bit code segment 0x38
.quad 0x00cf92000000ffff // 8 kernel 32-bit data segment 0x40
.fill 100, 8, 0 // 10-11 TSS (segment 9 skipped): fill 100 8-byte slots with 0; in long mode each TSS descriptor is 128 bits
GDT_END:
.global GDT_POINTER
GDT_POINTER:
GDT_LIMIT: .word GDT_END - GDT_Table - 1 // size of the GDT
GDT_BASE: .quad GDT_Table
.global GDT_POINTER64
GDT_POINTER64:
GDT_LIMIT64: .word GDT_END - GDT_Table - 1 // size of the GDT
GDT_BASE64: .quad GDT_Table + 0xffff800000000000
// IDT
.global IDT_Table
IDT_Table:
.fill 512, 8, 0 // reserve 512*8 bytes of space for the IDT
IDT_END:
.global IDT_POINTER
IDT_POINTER:
IDT_LIMIT: .word IDT_END - IDT_Table - 1
IDT_BASE: .quad IDT_Table
.global IDT_POINTER64
IDT_POINTER64:
IDT_LIMIT64: .word IDT_END - IDT_Table - 1
IDT_BASE64: .quad IDT_Table + 0xffff800000000000
.section .bootstrap.data
mb2_magic: .quad 0
mb2_info: .quad 0
.code32
// Temporary page tables, 4KB per page
.align 0x1000
.global pml4
pml4:
.skip 0x1000
pdpt:
.skip 0x1000
pd:
.skip 0x1000
pt:
.skip 0x1000
// Temporary GDT
.align 16
gdt64:
null_desc:
.short 0xFFFF
.short 0
.byte 0
.byte 0
.byte 0
.byte 0
code_desc:
.short 0
.short 0
.byte 0
.byte 0x9A
.byte 0x20
.byte 0
data_desc:
.short 0
.short 0
.byte 0
.byte 0x92
.byte 0
.byte 0
user_code_desc:
.short 0
.short 0
.byte 0
.byte 0xFA
.byte 0x20
.byte 0
user_data_desc:
.short 0
.short 0
.byte 0
.byte 0xF2
.byte 0
.byte 0
gdt64_pointer:
.short gdt64_pointer-gdt64-1
.quad gdt64
gdt64_pointer64:
.short gdt64_pointer-gdt64-1
.quad gdt64
|
sssxks/cpurs | 2,342 | program/test.S | .global _start
_start:
# Initial setup
addi x1, x0, 10 # x1 = 10
addi x2, x0, 20 # x2 = 20
add x3, x1, x2 # x3 = x1 + x2 = 30
sw x3, 0(x0) # Store x3 (30) to memory address 0
addi x4, x0, 0 # Clear x4
lw x4, 0(x0) # Load value from memory address 0 into x4 (x4=30)
addi x5, x4, 5 # x5 = x4 + 5 = 35
# Conditional Branch Tests
addi x6, x0, 5
addi x7, x0, 5
beq x6, x7, target_taken
addi x20, x0, 999 # SKIPPED
addi x21, x0, 888 # SKIPPED
target_taken:
addi x9, x0, 50
addi x6, x0, 10
bne x6, x7, target_bne_taken
addi x22, x0, 777 # SKIPPED
target_bne_taken:
addi x8, x0, 100
# JAL Test
jal x10, after_jal
addi x23, x0, 1 # SKIPPED
after_jal:
addi x11, x0, 200
# LUI and AUIPC Tests
lui x12, 0xABCDE
auipc x13, 0x12345
# More ALU Immediate Tests
addi x14, x0, -1
xori x16, x14, 0x0F0
ori x17, x14, 0x01F
andi x18, x14, 0x01F
slti x19, x14, 0
sltiu x24, x14, 0
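# (x14 = -1 is 0xFFFFFFFF when treated as unsigned, so slti compares a negative
# value while sltiu compares the largest unsigned value: x19 = 1, x24 = 0)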
# x2 is 20 (original) before shifts
addi x2, x0, 20 # Restore x2 for sltiu tests against 20
sltiu x25, x2, 20
sltiu x26, x2, 21
# More ALU Register-Register Tests
addi x1, x0, 10 # Restore x1 = 10
addi x2, x0, 20 # Restore x2 = 20
xor x27, x1, x2
or x28, x1, x2
and x29, x1, x2
# Shift tests
addi x7, x0, 2 # x7 = 2 (shift amount)
# x1 = 10, x2 = 20, x14 = -1
sll x1, x1, x7 # x1 = 10 << 2 = 40
srl x2, x2, x7 # x2 = 20 >> 2 = 5
sra x14, x14, x7 # x14 = -1 >> 2 = -1
# SLT / SLTU with register-register
# After shifts: x1 = 40, x2 = 5
slt x30, x2, x1 # x30 = (5 < 40) = 1
sltu x31, x2, x1 # x31 = (5 < 40) = 1
# MUL Test
# x9=50, x8=100
mul x20, x9, x8 # x20 = 50 * 100 = 5000
# JALR to loop
# PC of `addi x13_jalr_base` is 0xAC (idx 43)
# PC of `jalr` = 0xAC + 4 = 0xB0
# `loop:` label will be at 0xB0 + 4 = 0xB4
# Target for JALR is 0xB4.
# If x13_jalr_base = 0, then imm = 0xB4 = 180.
addi x13, x0, 0 # x13_jalr_base = 0. (PC = 0xAC)
jalr x15, x13, 180 # Jump to (0 + 180) & ~1 = 180 (0xB4). x15_link = PC_jalr + 4 = 0xB0 + 4 = 0xB4. (PC = 0xB0)
# Simple infinite loop to halt
loop: # Expected PC = 0xB4
beq x0, x0, loop # Branch to self (offset 0)
|
SubSir/rCore | 1,640 | os/src/trap/trap.S | .altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
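# note: a plain `jr` works across the satp switch above because the trampoline
# page is mapped at the same virtual address in both the user and kernel
# address spaces, so the currently executing code stays valid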
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
Sudderen/educom-rust-1754291198 | 4,337 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/dfsqrt.s | .text
.global __hexagon_sqrtdf2
.type __hexagon_sqrtdf2,@function
.global __hexagon_sqrt
.type __hexagon_sqrt,@function
.global __qdsp_sqrtdf2 ; .set __qdsp_sqrtdf2, __hexagon_sqrtdf2; .type __qdsp_sqrtdf2,@function
.global __qdsp_sqrt ; .set __qdsp_sqrt, __hexagon_sqrt; .type __qdsp_sqrt,@function
.global __hexagon_fast_sqrtdf2 ; .set __hexagon_fast_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast_sqrtdf2,@function
.global __hexagon_fast_sqrt ; .set __hexagon_fast_sqrt, __hexagon_sqrt; .type __hexagon_fast_sqrt,@function
.global __hexagon_fast2_sqrtdf2 ; .set __hexagon_fast2_sqrtdf2, __hexagon_sqrtdf2; .type __hexagon_fast2_sqrtdf2,@function
.global __hexagon_fast2_sqrt ; .set __hexagon_fast2_sqrt, __hexagon_sqrt; .type __hexagon_fast2_sqrt,@function
.type sqrt,@function
.p2align 5
__hexagon_sqrtdf2:
__hexagon_sqrt:
{
r15:14 = extractu(r1:0,#23 +1,#52 -23)
r28 = extractu(r1,#11,#52 -32)
r5:4 = combine(##0x3f000004,#1)
}
{
p2 = dfclass(r1:0,#0x02)
p2 = cmp.gt(r1,#-1)
if (!p2.new) jump:nt .Lsqrt_abnormal
r9 = or(r5,r14)
}
.Ldenormal_restart:
{
r11:10 = r1:0
r7,p0 = sfinvsqrta(r9)
r5 = and(r5,#-16)
r3:2 = #0
}
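// (sfinvsqrta produces a low-precision reciprocal-square-root estimate; the
// sfmpy packets below appear to be Newton-Raphson refinement steps feeding the
// fixed-point square-root iteration that follows)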
{
r3 += sfmpy(r7,r9):lib
r2 += sfmpy(r7,r5):lib
r6 = r5
r9 = and(r28,#1)
}
{
r6 -= sfmpy(r3,r2):lib
r11 = insert(r4,#11 +1,#52 -32)
p1 = cmp.gtu(r9,#0)
}
{
r3 += sfmpy(r3,r6):lib
r2 += sfmpy(r2,r6):lib
r6 = r5
r9 = mux(p1,#8,#9)
}
{
r6 -= sfmpy(r3,r2):lib
r11:10 = asl(r11:10,r9)
r9 = mux(p1,#3,#2)
}
{
r2 += sfmpy(r2,r6):lib
r15:14 = asl(r11:10,r9)
}
{
r2 = and(r2,##0x007fffff)
}
{
r2 = add(r2,##0x00800000 - 3)
r9 = mux(p1,#7,#8)
}
{
r8 = asl(r2,r9)
r9 = mux(p1,#15-(1+1),#15-(1+0))
}
{
r13:12 = mpyu(r8,r15)
}
{
r1:0 = asl(r11:10,#15)
r15:14 = mpyu(r13,r13)
p1 = cmp.eq(r0,r0)
}
{
r1:0 -= asl(r15:14,#15)
r15:14 = mpyu(r13,r12)
p2 = cmp.eq(r0,r0)
}
{
r1:0 -= lsr(r15:14,#16)
p3 = cmp.eq(r0,r0)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
r9 = add(r9,#16)
r1:0 = asl(r11:10,#31)
}
{
r15:14 = mpyu(r13,r13)
r1:0 -= mpyu(r13,r12)
}
{
r1:0 -= asl(r15:14,#31)
r15:14 = mpyu(r12,r12)
}
{
r1:0 -= lsr(r15:14,#33)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
r9 = add(r9,#16)
r1:0 = asl(r11:10,#47)
}
{
r15:14 = mpyu(r13,r13)
}
{
r1:0 -= asl(r15:14,#47)
r15:14 = mpyu(r13,r12)
}
{
r1:0 -= asl(r15:14,#16)
r15:14 = mpyu(r12,r12)
}
{
r1:0 -= lsr(r15:14,#17)
}
{
r1:0 = mpyu(r1,r8)
}
{
r13:12 += lsr(r1:0,r9)
}
{
r3:2 = mpyu(r13,r12)
r5:4 = mpyu(r12,r12)
r15:14 = #0
r1:0 = #0
}
{
r3:2 += lsr(r5:4,#33)
r5:4 += asl(r3:2,#33)
p1 = cmp.eq(r0,r0)
}
{
r7:6 = mpyu(r13,r13)
r1:0 = sub(r1:0,r5:4,p1):carry
r9:8 = #1
}
{
r7:6 += lsr(r3:2,#31)
r9:8 += asl(r13:12,#1)
}
{
r15:14 = sub(r11:10,r7:6,p1):carry
r5:4 = sub(r1:0,r9:8,p2):carry
r7:6 = #1
r11:10 = #0
}
{
r3:2 = sub(r15:14,r11:10,p2):carry
r7:6 = add(r13:12,r7:6)
r28 = add(r28,#-0x3ff)
}
{
if (p2) r13:12 = r7:6
if (p2) r1:0 = r5:4
if (p2) r15:14 = r3:2
}
{
r5:4 = sub(r1:0,r9:8,p3):carry
r7:6 = #1
r28 = asr(r28,#1)
}
{
r3:2 = sub(r15:14,r11:10,p3):carry
r7:6 = add(r13:12,r7:6)
}
{
if (p3) r13:12 = r7:6
if (p3) r1:0 = r5:4
r2 = #1
}
{
p0 = cmp.eq(r1:0,r11:10)
if (!p0.new) r12 = or(r12,r2)
r3 = cl0(r13:12)
r28 = add(r28,#-63)
}
{
r1:0 = convert_ud2df(r13:12)
r28 = add(r28,r3)
}
{
r1 += asl(r28,#52 -32)
jumpr r31
}
.Lsqrt_abnormal:
{
p0 = dfclass(r1:0,#0x01)
if (p0.new) jumpr:t r31
}
{
p0 = dfclass(r1:0,#0x10)
if (p0.new) jump:nt .Lsqrt_nan
}
{
p0 = cmp.gt(r1,#-1)
if (!p0.new) jump:nt .Lsqrt_invalid_neg
if (!p0.new) r28 = ##0x7F800001
}
{
p0 = dfclass(r1:0,#0x08)
if (p0.new) jumpr:nt r31
}
{
r1:0 = extractu(r1:0,#52,#0)
}
{
r28 = add(clb(r1:0),#-11)
}
{
r1:0 = asl(r1:0,r28)
r28 = sub(#1,r28)
}
{
r1 = insert(r28,#1,#52 -32)
}
{
r3:2 = extractu(r1:0,#23 +1,#52 -23)
r5 = ##0x3f000004
}
{
r9 = or(r5,r2)
r5 = and(r5,#-16)
jump .Ldenormal_restart
}
.Lsqrt_nan:
{
r28 = convert_df2sf(r1:0)
r1:0 = #-1
jumpr r31
}
.Lsqrt_invalid_neg:
{
r1:0 = convert_sf2df(r28)
jumpr r31
}
.size __hexagon_sqrt,.-__hexagon_sqrt
.size __hexagon_sqrtdf2,.-__hexagon_sqrtdf2
|
Sudderen/educom-rust-1754291198 | 3,885 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/fastmath2_ldlib_asm.s | .text
.global __hexagon_fast2ldadd_asm
.type __hexagon_fast2ldadd_asm, @function
__hexagon_fast2ldadd_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = MIN(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = ASL(R1:0, R4)
if(p0) jump .Ldenorma1
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma1:
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
.global __hexagon_fast2ldsub_asm
.type __hexagon_fast2ldsub_asm, @function
__hexagon_fast2ldsub_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = min(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = asl(R1:0, R4)
if(p0) jump .Ldenorma_s
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma_s:
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
.global __hexagon_fast2ldmpy_asm
.type __hexagon_fast2ldmpy_asm, @function
__hexagon_fast2ldmpy_asm:
.falign
{
R15:14 = memd(r29+#0)
R3:2 = memd(r29+#16)
R13:12 = #0
}
{
R8= extractu(R2, #31, #1)
R9= extractu(R14, #31, #1)
R13.H = #0x8000
}
{
R11:10 = mpy(R15, R3)
R7:6 = mpy(R15, R8)
R4 = memw(r29+#8)
R5 = memw(r29+#24)
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R9)
}
{
R7:6 = asr(R7:6, #30)
R8.L = #0x0001
p1 = cmp.eq(R15:14, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R4= add(R4, R5)
p2 = cmp.eq(R3:2, R13:12)
}
{
R9 = clb(R7:6)
R8.H = #0x8000
p1 = and(p1, p2)
}
{
R4-= add(R9, #-1)
R9 = add(R9, #-1)
if(p1) jump .Lsat1
}
{
R7:6 = asl(R7:6, R9)
memw(R0+#8) = R4
p0 = cmp.gt(R9, #58)
if(p0.new) jump:NT .Ldenorm1
}
{
memd(R0+#0) = R7:6
jumpr r31
}
.Lsat1:
{
R13:12 = #0
R4+= add(R9, #1)
}
{
R13.H = #0x4000
memw(R0+#8) = R4
}
{
memd(R0+#0) = R13:12
jumpr r31
}
.Ldenorm1:
{
memw(R0+#8) = R8
R15:14 = #0
}
{
memd(R0+#0) = R15:14
jumpr r31
}
|
Sudderen/educom-rust-1754291198 | 4,378 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/dfmul.s | .text
.global __hexagon_muldf3
.type __hexagon_muldf3,@function
.global __qdsp_muldf3 ; .set __qdsp_muldf3, __hexagon_muldf3
.global __hexagon_fast_muldf3 ; .set __hexagon_fast_muldf3, __hexagon_muldf3
.global __hexagon_fast2_muldf3 ; .set __hexagon_fast2_muldf3, __hexagon_muldf3
.p2align 5
__hexagon_muldf3:
{
p0 = dfclass(r1:0,#2)
p0 = dfclass(r3:2,#2)
r13:12 = combine(##0x40000000,#0)
}
{
r13:12 = insert(r1:0,#52,#11 -1)
r5:4 = asl(r3:2,#11 -1)
r28 = #-1024
r9:8 = #1
}
{
r7:6 = mpyu(r4,r13)
r5:4 = insert(r9:8,#2,#62)
}
{
r15:14 = mpyu(r12,r4)
r7:6 += mpyu(r12,r5)
}
{
r7:6 += lsr(r15:14,#32)
r11:10 = mpyu(r13,r5)
r5:4 = combine(##1024 +1024 -4,#0)
}
{
r11:10 += lsr(r7:6,#32)
if (!p0) jump .Lmul_abnormal
p1 = cmp.eq(r14,#0)
p1 = cmp.eq(r6,#0)
}
{
if (!p1) r10 = or(r10,r8)
r6 = extractu(r1,#11,#20)
r7 = extractu(r3,#11,#20)
}
{
r15:14 = neg(r11:10)
r6 += add(r28,r7)
r28 = xor(r1,r3)
}
{
if (!p2.new) r11:10 = r15:14
p2 = cmp.gt(r28,#-1)
p0 = !cmp.gt(r6,r5)
p0 = cmp.gt(r6,r4)
if (!p0.new) jump:nt .Lmul_ovf_unf
}
{
r1:0 = convert_d2df(r11:10)
r6 = add(r6,#-1024 -58)
}
{
r1 += asl(r6,#20)
jumpr r31
}
.falign
.Lpossible_unf1:
{
p0 = cmp.eq(r0,#0)
p0 = bitsclr(r1,r4)
if (!p0.new) jumpr:t r31
r5 = #0x7fff
}
{
p0 = bitsset(r13,r5)
r4 = USR
r5 = #0x030
}
{
if (p0) r4 = or(r4,r5)
}
{
USR = r4
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.falign
.Lmul_ovf_unf:
{
r1:0 = convert_d2df(r11:10)
r13:12 = abs(r11:10)
r7 = add(r6,#-1024 -58)
}
{
r1 += asl(r7,#20)
r7 = extractu(r1,#11,#20)
r4 = ##0x7FEFFFFF
}
{
r7 += add(r6,##-1024 -58)
r5 = #0
}
{
p0 = cmp.gt(r7,##1024 +1024 -2)
if (p0.new) jump:nt .Lmul_ovf
}
{
p0 = cmp.gt(r7,#0)
if (p0.new) jump:nt .Lpossible_unf1
r5 = sub(r6,r5)
r28 = #63
}
{
r4 = #0
r5 = sub(#5,r5)
}
{
p3 = cmp.gt(r11,#-1)
r5 = min(r5,r28)
r11:10 = r13:12
}
{
r28 = USR
r15:14 = extractu(r11:10,r5:4)
}
{
r11:10 = asr(r11:10,r5)
r4 = #0x0030
r1 = insert(r9,#11,#20)
}
{
p0 = cmp.gtu(r9:8,r15:14)
if (!p0.new) r10 = or(r10,r8)
r11 = setbit(r11,#20 +3)
}
{
r15:14 = neg(r11:10)
p1 = bitsclr(r10,#0x7)
if (!p1.new) r28 = or(r4,r28)
}
{
if (!p3) r11:10 = r15:14
USR = r28
}
{
r1:0 = convert_d2df(r11:10)
p0 = dfcmp.eq(r1:0,r1:0)
}
{
r1 = insert(r9,#11 -1,#20 +1)
jumpr r31
}
.falign
.Lmul_ovf:
{
r28 = USR
r13:12 = combine(##0x7fefffff,#-1)
r1:0 = r11:10
}
{
r14 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
r5:4 = combine(##0x7ff00000,#0)
}
{
USR = r28
r14 ^= lsr(r1,#31)
r28 = r14
}
{
p0 = !cmp.eq(r28,#1)
p0 = !cmp.eq(r14,#2)
if (p0.new) r13:12 = r5:4
p0 = dfcmp.eq(r1:0,r1:0)
}
{
r1:0 = insert(r13:12,#63,#0)
jumpr r31
}
.Lmul_abnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r5:4 = extractu(r3:2,#63,#0)
}
{
p3 = cmp.gtu(r13:12,r5:4)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Linvalid_nan
if (!p3) r13:12 = r5:4
if (!p3) r5:4 = r13:12
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x0e)
}
{
p0 = dfclass(r1:0,#0x08)
p0 = dfclass(r3:2,#0x01)
}
{
if (p1) jump .Ltrue_inf
p2 = dfclass(r3:2,#0x01)
}
{
if (p0) jump .Linvalid_zeroinf
if (p2) jump .Ltrue_zero
r28 = ##0x7c000000
}
{
p0 = bitsclr(r1,r28)
if (p0.new) jump:nt .Lmul_tiny
}
{
r28 = cl0(r5:4)
}
{
r28 = add(r28,#-11)
}
{
r5:4 = asl(r5:4,r28)
}
{
r3:2 = insert(r5:4,#63,#0)
r1 -= asl(r28,#20)
}
jump __hexagon_muldf3
.Lmul_tiny:
{
r28 = USR
r1:0 = xor(r1:0,r3:2)
}
{
r28 = or(r28,#0x30)
r1:0 = insert(r9:8,#63,#0)
r5 = extractu(r28,#2,#22)
}
{
USR = r28
p0 = cmp.gt(r5,#1)
if (!p0.new) r0 = #0
r5 ^= lsr(r1,#31)
}
{
p0 = cmp.eq(r5,#3)
if (!p0.new) r0 = #0
jumpr r31
}
.Linvalid_zeroinf:
{
r28 = USR
}
{
r1:0 = #-1
r28 = or(r28,#2)
}
{
USR = r28
}
{
p0 = dfcmp.uo(r1:0,r1:0)
jumpr r31
}
.Linvalid_nan:
{
p0 = dfclass(r3:2,#0x0f)
r28 = convert_df2sf(r1:0)
if (p0.new) r3:2 = r1:0
}
{
r2 = convert_df2sf(r3:2)
r1:0 = #-1
jumpr r31
}
.falign
.Ltrue_zero:
{
r1:0 = r3:2
r3:2 = r1:0
}
.Ltrue_inf:
{
r3 = extract(r3,#1,#31)
}
{
r1 ^= asl(r3,#31)
jumpr r31
}
.size __hexagon_muldf3,.-__hexagon_muldf3
|
Sudderen/educom-rust-1754291198 | 7,236 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/dffma.s | .text
.global __hexagon_fmadf4
.type __hexagon_fmadf4,@function
.global __hexagon_fmadf5
.type __hexagon_fmadf5,@function
.global __qdsp_fmadf5 ; .set __qdsp_fmadf5, __hexagon_fmadf5
.p2align 5
__hexagon_fmadf4:
__hexagon_fmadf5:
fma:
{
p0 = dfclass(r1:0,#2)
p0 = dfclass(r3:2,#2)
r13:12 = #0
r15:14 = #0
}
{
r13:12 = insert(r1:0,#52,#11 -3)
r15:14 = insert(r3:2,#52,#11 -3)
r7 = ##0x10000000
allocframe(#32)
}
{
r9:8 = mpyu(r12,r14)
if (!p0) jump .Lfma_abnormal_ab
r13 = or(r13,r7)
r15 = or(r15,r7)
}
{
p0 = dfclass(r5:4,#2)
if (!p0.new) jump:nt .Lfma_abnormal_c
r11:10 = combine(r7,#0)
r7:6 = combine(#0,r9)
}
.Lfma_abnormal_c_restart:
{
r7:6 += mpyu(r14,r13)
r11:10 = insert(r5:4,#52,#11 -3)
memd(r29+#0) = r17:16
memd(r29+#8) = r19:18
}
{
r7:6 += mpyu(r12,r15)
r19:18 = neg(r11:10)
p0 = cmp.gt(r5,#-1)
r28 = xor(r1,r3)
}
{
r18 = extractu(r1,#11,#20)
r19 = extractu(r3,#11,#20)
r17:16 = combine(#0,r7)
if (!p0) r11:10 = r19:18
}
{
r17:16 += mpyu(r13,r15)
r9:8 = combine(r6,r8)
r18 = add(r18,r19)
r19 = extractu(r5,#11,#20)
}
{
r18 = add(r18,#-1023 +(4))
p3 = !cmp.gt(r28,#-1)
r7:6 = #0
r15:14 = #0
}
{
r7:6 = sub(r7:6,r9:8,p3):carry
p0 = !cmp.gt(r28,#-1)
p1 = cmp.gt(r19,r18)
if (p1.new) r19:18 = combine(r18,r19)
}
{
r15:14 = sub(r15:14,r17:16,p3):carry
if (p0) r9:8 = r7:6
r7:6 = #0
r19 = sub(r18,r19)
}
{
if (p0) r17:16 = r15:14
p0 = cmp.gt(r19,#63)
if (p1) r9:8 = r7:6
if (p1) r7:6 = r9:8
}
{
if (p1) r17:16 = r11:10
if (p1) r11:10 = r17:16
if (p0) r19 = add(r19,#-64)
r28 = #63
}
{
if (p0) r7:6 = r11:10
r28 = asr(r11,#31)
r13 = min(r19,r28)
r12 = #0
}
{
if (p0) r11:10 = combine(r28,r28)
r5:4 = extract(r7:6,r13:12)
r7:6 = lsr(r7:6,r13)
r12 = sub(#64,r13)
}
{
r15:14 = #0
r28 = #-2
r7:6 |= lsl(r11:10,r12)
r11:10 = asr(r11:10,r13)
}
{
p3 = cmp.gtu(r5:4,r15:14)
if (p3.new) r6 = and(r6,r28)
r15:14 = #1
r5:4 = #0
}
{
r9:8 = add(r7:6,r9:8,p3):carry
}
{
r17:16 = add(r11:10,r17:16,p3):carry
r28 = #62
}
{
r12 = add(clb(r17:16),#-2)
if (!cmp.eq(r12.new,r28)) jump:t 1f
}
{
r11:10 = extractu(r9:8,#62,#2)
r9:8 = asl(r9:8,#62)
r18 = add(r18,#-62)
}
{
r17:16 = insert(r11:10,#62,#0)
}
{
r12 = add(clb(r17:16),#-2)
}
.falign
1:
{
r11:10 = asl(r17:16,r12)
r5:4 |= asl(r9:8,r12)
r13 = sub(#64,r12)
r18 = sub(r18,r12)
}
{
r11:10 |= lsr(r9:8,r13)
p2 = cmp.gtu(r15:14,r5:4)
r28 = #1023 +1023 -2
}
{
if (!p2) r10 = or(r10,r14)
p0 = !cmp.gt(r18,r28)
p0 = cmp.gt(r18,#1)
if (!p0.new) jump:nt .Lfma_ovf_unf
}
{
p0 = cmp.gtu(r15:14,r11:10)
r1:0 = convert_d2df(r11:10)
r18 = add(r18,#-1023 -60)
r17:16 = memd(r29+#0)
}
{
r1 += asl(r18,#20)
r19:18 = memd(r29+#8)
if (!p0) dealloc_return
}
.Ladd_yields_zero:
{
r28 = USR
r1:0 = #0
}
{
r28 = extractu(r28,#2,#22)
r17:16 = memd(r29+#0)
r19:18 = memd(r29+#8)
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
dealloc_return
}
.Lfma_ovf_unf:
{
p0 = cmp.gtu(r15:14,r11:10)
if (p0.new) jump:nt .Ladd_yields_zero
}
{
r1:0 = convert_d2df(r11:10)
r18 = add(r18,#-1023 -60)
r28 = r18
}
{
r1 += asl(r18,#20)
r7 = extractu(r1,#11,#20)
}
{
r6 = add(r18,r7)
r17:16 = memd(r29+#0)
r19:18 = memd(r29+#8)
r9:8 = abs(r11:10)
}
{
p0 = cmp.gt(r6,##1023 +1023)
if (p0.new) jump:nt .Lfma_ovf
}
{
p0 = cmp.gt(r6,#0)
if (p0.new) jump:nt .Lpossible_unf0
}
{
r7 = add(clb(r9:8),#-2)
r6 = sub(#1+5,r28)
p3 = cmp.gt(r11,#-1)
}
{
r6 = add(r6,r7)
r9:8 = asl(r9:8,r7)
r1 = USR
r28 = #63
}
{
r7 = min(r6,r28)
r6 = #0
r0 = #0x0030
}
{
r3:2 = extractu(r9:8,r7:6)
r9:8 = asr(r9:8,r7)
}
{
p0 = cmp.gtu(r15:14,r3:2)
if (!p0.new) r8 = or(r8,r14)
r9 = setbit(r9,#20 +3)
}
{
r11:10 = neg(r9:8)
p1 = bitsclr(r8,#(1<<3)-1)
if (!p1.new) r1 = or(r1,r0)
r3:2 = #0
}
{
if (p3) r11:10 = r9:8
USR = r1
r28 = #-1023 -(52 +3)
}
{
r1:0 = convert_d2df(r11:10)
}
{
r1 += asl(r28,#20)
dealloc_return
}
.Lpossible_unf0:
{
r28 = ##0x7fefffff
r9:8 = abs(r11:10)
}
{
p0 = cmp.eq(r0,#0)
p0 = bitsclr(r1,r28)
if (!p0.new) dealloc_return:t
r28 = #0x7fff
}
{
p0 = bitsset(r9,r28)
r3 = USR
r2 = #0x0030
}
{
if (p0) r3 = or(r3,r2)
}
{
USR = r3
}
{
p0 = dfcmp.eq(r1:0,r1:0)
dealloc_return
}
.Lfma_ovf:
{
r28 = USR
r11:10 = combine(##0x7fefffff,#-1)
r1:0 = r11:10
}
{
r9:8 = combine(##0x7ff00000,#0)
r3 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
}
{
USR = r28
r3 ^= lsr(r1,#31)
r2 = r3
}
{
p0 = !cmp.eq(r2,#1)
p0 = !cmp.eq(r3,#2)
}
{
p0 = dfcmp.eq(r9:8,r9:8)
if (p0.new) r11:10 = r9:8
}
{
r1:0 = insert(r11:10,#63,#0)
dealloc_return
}
.Lfma_abnormal_ab:
{
r9:8 = extractu(r1:0,#63,#0)
r11:10 = extractu(r3:2,#63,#0)
deallocframe
}
{
p3 = cmp.gtu(r9:8,r11:10)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Lnan
if (!p3) r9:8 = r11:10
if (!p3) r11:10 = r9:8
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x0e)
}
{
p0 = dfclass(r1:0,#0x08)
p0 = dfclass(r3:2,#0x01)
}
{
if (p1) jump .Lab_inf
p2 = dfclass(r3:2,#0x01)
}
{
if (p0) jump .Linvalid
if (p2) jump .Lab_true_zero
r28 = ##0x7c000000
}
{
p0 = bitsclr(r1,r28)
if (p0.new) jump:nt .Lfma_ab_tiny
}
{
r28 = add(clb(r11:10),#-11)
}
{
r11:10 = asl(r11:10,r28)
}
{
r3:2 = insert(r11:10,#63,#0)
r1 -= asl(r28,#20)
}
jump fma
.Lfma_ab_tiny:
r9:8 = combine(##0x00100000,#0)
{
r1:0 = insert(r9:8,#63,#0)
r3:2 = insert(r9:8,#63,#0)
}
jump fma
.Lab_inf:
{
r3:2 = lsr(r3:2,#63)
p0 = dfclass(r5:4,#0x10)
}
{
r1:0 ^= asl(r3:2,#63)
if (p0) jump .Lnan
}
{
p1 = dfclass(r5:4,#0x08)
if (p1.new) jump:nt .Lfma_inf_plus_inf
}
{
jumpr r31
}
.falign
.Lfma_inf_plus_inf:
{
p0 = dfcmp.eq(r1:0,r5:4)
if (!p0.new) jump:nt .Linvalid
}
{
jumpr r31
}
.Lnan:
{
p0 = dfclass(r3:2,#0x10)
p1 = dfclass(r5:4,#0x10)
if (!p0.new) r3:2 = r1:0
if (!p1.new) r5:4 = r1:0
}
{
r3 = convert_df2sf(r3:2)
r2 = convert_df2sf(r5:4)
}
{
r3 = convert_df2sf(r1:0)
r1:0 = #-1
jumpr r31
}
.Linvalid:
{
r28 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r28)
jumpr r31
}
.Lab_true_zero:
{
p0 = dfclass(r5:4,#0x10)
if (p0.new) jump:nt .Lnan
if (p0.new) r1:0 = r5:4
}
{
p0 = dfcmp.eq(r3:2,r5:4)
r1 = lsr(r1,#31)
}
{
r3 ^= asl(r1,#31)
if (!p0) r1:0 = r5:4
if (!p0) jumpr r31
}
{
p0 = cmp.eq(r3:2,r5:4)
if (p0.new) jumpr:t r31
r1:0 = r3:2
}
{
r28 = USR
}
{
r28 = extractu(r28,#2,#22)
r1:0 = #0
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
jumpr r31
}
.falign
.Lfma_abnormal_c:
{
p0 = dfclass(r5:4,#0x10)
if (p0.new) jump:nt .Lnan
if (p0.new) r1:0 = r5:4
deallocframe
}
{
p0 = dfclass(r5:4,#0x08)
if (p0.new) r1:0 = r5:4
if (p0.new) jumpr:nt r31
}
{
p0 = dfclass(r5:4,#0x01)
if (p0.new) jump:nt __hexagon_muldf3
r28 = #1
}
{
allocframe(#32)
r11:10 = #0
r5 = insert(r28,#11,#20)
jump .Lfma_abnormal_c_restart
}
.size fma,.-fma
|
Sudderen/educom-rust-1754291198 | 4,801 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/dfaddsub.s | .text
.global __hexagon_adddf3
.global __hexagon_subdf3
.type __hexagon_adddf3, @function
.type __hexagon_subdf3, @function
.global __qdsp_adddf3 ; .set __qdsp_adddf3, __hexagon_adddf3
.global __hexagon_fast_adddf3 ; .set __hexagon_fast_adddf3, __hexagon_adddf3
.global __hexagon_fast2_adddf3 ; .set __hexagon_fast2_adddf3, __hexagon_adddf3
.global __qdsp_subdf3 ; .set __qdsp_subdf3, __hexagon_subdf3
.global __hexagon_fast_subdf3 ; .set __hexagon_fast_subdf3, __hexagon_subdf3
.global __hexagon_fast2_subdf3 ; .set __hexagon_fast2_subdf3, __hexagon_subdf3
.p2align 5
__hexagon_adddf3:
{
r4 = extractu(r1,#11,#20)
r5 = extractu(r3,#11,#20)
r13:12 = combine(##0x20000000,#0)
}
{
p3 = dfclass(r1:0,#2)
p3 = dfclass(r3:2,#2)
r9:8 = r13:12
p2 = cmp.gtu(r5,r4)
}
{
if (!p3) jump .Ladd_abnormal
if (p2) r1:0 = r3:2
if (p2) r3:2 = r1:0
if (p2) r5:4 = combine(r4,r5)
}
{
r13:12 = insert(r1:0,#52,#11 -2)
r9:8 = insert(r3:2,#52,#11 -2)
r15 = sub(r4,r5)
r7:6 = combine(#62,#1)
}
.Ladd_continue:
{
r15 = min(r15,r7)
r11:10 = neg(r13:12)
p2 = cmp.gt(r1,#-1)
r14 = #0
}
{
if (!p2) r13:12 = r11:10
r11:10 = extractu(r9:8,r15:14)
r9:8 = ASR(r9:8,r15)
r15:14 = #0
}
{
p1 = cmp.eq(r11:10,r15:14)
if (!p1.new) r8 = or(r8,r6)
r5 = add(r4,#-1024 -60)
p3 = cmp.gt(r3,#-1)
}
{
r13:12 = add(r13:12,r9:8)
r11:10 = sub(r13:12,r9:8)
r7:6 = combine(#54,##2045)
}
{
p0 = cmp.gtu(r4,r7)
p0 = !cmp.gtu(r4,r6)
if (!p0.new) jump:nt .Ladd_ovf_unf
if (!p3) r13:12 = r11:10
}
{
r1:0 = convert_d2df(r13:12)
p0 = cmp.eq(r13,#0)
p0 = cmp.eq(r12,#0)
if (p0.new) jump:nt .Ladd_zero
}
{
r1 += asl(r5,#20)
jumpr r31
}
.falign
__hexagon_subdf3:
{
r3 = togglebit(r3,#31)
jump __qdsp_adddf3
}
.falign
.Ladd_zero:
{
r28 = USR
r1:0 = #0
r3 = #1
}
{
r28 = extractu(r28,#2,#22)
r3 = asl(r3,#31)
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = xor(r1,r3)
jumpr r31
}
.falign
.Ladd_ovf_unf:
{
r1:0 = convert_d2df(r13:12)
p0 = cmp.eq(r13,#0)
p0 = cmp.eq(r12,#0)
if (p0.new) jump:nt .Ladd_zero
}
{
r28 = extractu(r1,#11,#20)
r1 += asl(r5,#20)
}
{
r5 = add(r5,r28)
r3:2 = combine(##0x00100000,#0)
}
{
p0 = cmp.gt(r5,##1024 +1024 -2)
if (p0.new) jump:nt .Ladd_ovf
}
{
p0 = cmp.gt(r5,#0)
if (p0.new) jumpr:t r31
r28 = sub(#1,r5)
}
{
r3:2 = insert(r1:0,#52,#0)
r1:0 = r13:12
}
{
r3:2 = lsr(r3:2,r28)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.falign
.Ladd_ovf:
{
r1:0 = r13:12
r28 = USR
r13:12 = combine(##0x7fefffff,#-1)
}
{
r5 = extractu(r28,#2,#22)
r28 = or(r28,#0x28)
r9:8 = combine(##0x7ff00000,#0)
}
{
USR = r28
r5 ^= lsr(r1,#31)
r28 = r5
}
{
p0 = !cmp.eq(r28,#1)
p0 = !cmp.eq(r5,#2)
if (p0.new) r13:12 = r9:8
}
{
r1:0 = insert(r13:12,#63,#0)
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.Ladd_abnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r9:8 = extractu(r3:2,#63,#0)
}
{
p3 = cmp.gtu(r13:12,r9:8)
if (!p3.new) r1:0 = r3:2
if (!p3.new) r3:2 = r1:0
}
{
p0 = dfclass(r1:0,#0x0f)
if (!p0.new) jump:nt .Linvalid_nan_add
if (!p3) r13:12 = r9:8
if (!p3) r9:8 = r13:12
}
{
p1 = dfclass(r1:0,#0x08)
if (p1.new) jump:nt .Linf_add
}
{
p2 = dfclass(r3:2,#0x01)
if (p2.new) jump:nt .LB_zero
r13:12 = #0
}
{
p0 = dfclass(r1:0,#4)
if (p0.new) jump:nt .Ladd_two_subnormal
r13:12 = combine(##0x20000000,#0)
}
{
r4 = extractu(r1,#11,#20)
r5 = #1
r9:8 = asl(r9:8,#11 -2)
}
{
r13:12 = insert(r1:0,#52,#11 -2)
r15 = sub(r4,r5)
r7:6 = combine(#62,#1)
jump .Ladd_continue
}
.Ladd_two_subnormal:
{
r13:12 = extractu(r1:0,#63,#0)
r9:8 = extractu(r3:2,#63,#0)
}
{
r13:12 = neg(r13:12)
r9:8 = neg(r9:8)
p0 = cmp.gt(r1,#-1)
p1 = cmp.gt(r3,#-1)
}
{
if (p0) r13:12 = r1:0
if (p1) r9:8 = r3:2
}
{
r13:12 = add(r13:12,r9:8)
}
{
r9:8 = neg(r13:12)
p0 = cmp.gt(r13,#-1)
r3:2 = #0
}
{
if (!p0) r1:0 = r9:8
if (p0) r1:0 = r13:12
r3 = ##0x80000000
}
{
if (!p0) r1 = or(r1,r3)
p0 = dfcmp.eq(r1:0,r3:2)
if (p0.new) jump:nt .Lzero_plus_zero
}
{
jumpr r31
}
.Linvalid_nan_add:
{
r28 = convert_df2sf(r1:0)
p0 = dfclass(r3:2,#0x0f)
if (p0.new) r3:2 = r1:0
}
{
r2 = convert_df2sf(r3:2)
r1:0 = #-1
jumpr r31
}
.falign
.LB_zero:
{
p0 = dfcmp.eq(r13:12,r1:0)
if (!p0.new) jumpr:t r31
}
.Lzero_plus_zero:
{
p0 = cmp.eq(r1:0,r3:2)
if (p0.new) jumpr:t r31
}
{
r28 = USR
}
{
r28 = extractu(r28,#2,#22)
r1:0 = #0
}
{
p0 = cmp.eq(r28,#2)
if (p0.new) r1 = ##0x80000000
jumpr r31
}
.Linf_add:
{
p0 = !cmp.eq(r1,r3)
p0 = dfclass(r3:2,#8)
if (!p0.new) jumpr:t r31
}
{
r2 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r2)
jumpr r31
}
.size __hexagon_adddf3,.-__hexagon_adddf3
|
Sudderen/educom-rust-1754291198 | 1,295 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/memcpy_forward_vp4cp4n2.s | .text
.globl hexagon_memcpy_forward_vp4cp4n2
.balign 32
.type hexagon_memcpy_forward_vp4cp4n2,@function
hexagon_memcpy_forward_vp4cp4n2:
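/*
 * Apparent calling convention: r0 = destination, r1 = source, r2 = length
 * in 4-byte words. The copy runs in three phases, each preceded by an
 * l2fetch prefetch of the source: a word-at-a-time prolog up to the next
 * 4 KB source page, a main loop moving whole 4 KB pages eight bytes at a
 * time, and a word-at-a-time epilog for the remainder.
 */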
{
r3 = sub(##4096, r1)
r5 = lsr(r2, #3)
}
{
r3 = extractu(r3, #10, #2)
r4 = extractu(r3, #7, #5)
}
{
r3 = minu(r2, r3)
r4 = minu(r5, r4)
}
{
r4 = or(r4, ##2105344)
p0 = cmp.eq(r3, #0)
if (p0.new) jump:nt .Lskipprolog
}
l2fetch(r1, r4)
{
loop0(.Lprolog, r3)
r2 = sub(r2, r3)
}
.falign
.Lprolog:
{
r4 = memw(r1++#4)
memw(r0++#4) = r4.new
} :endloop0
.Lskipprolog:
{
r3 = lsr(r2, #10)
if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain
}
{
loop1(.Lout, r3)
r2 = extractu(r2, #10, #0)
r3 = ##2105472
}
.falign
.Lout:
l2fetch(r1, r3)
loop0(.Lpage, #512)
.falign
.Lpage:
r5:4 = memd(r1++#8)
{
memw(r0++#8) = r4
memw(r0+#4) = r5
} :endloop0:endloop1
.Lskipmain:
{
r3 = ##2105344
r4 = lsr(r2, #3)
p0 = cmp.eq(r2, #0)
if (p0.new) jumpr:nt r31
}
{
r3 = or(r3, r4)
loop0(.Lepilog, r2)
}
l2fetch(r1, r3)
.falign
.Lepilog:
{
r4 = memw(r1++#4)
memw(r0++#4) = r4.new
} :endloop0
jumpr r31
.size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2
|
Sudderen/educom-rust-1754291198 | 5,659 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/dfdiv.s | .text
.global __hexagon_divdf3
.type __hexagon_divdf3,@function
.global __qdsp_divdf3 ; .set __qdsp_divdf3, __hexagon_divdf3
.global __hexagon_fast_divdf3 ; .set __hexagon_fast_divdf3, __hexagon_divdf3
.global __hexagon_fast2_divdf3 ; .set __hexagon_fast2_divdf3, __hexagon_divdf3
.p2align 5
__hexagon_divdf3:
{
p2 = dfclass(r1:0,#0x02)
p2 = dfclass(r3:2,#0x02)
r13:12 = combine(r3,r1)
r28 = xor(r1,r3)
}
{
if (!p2) jump .Ldiv_abnormal
r7:6 = extractu(r3:2,#23,#52 -23)
r8 = ##0x3f800001
}
{
r9 = or(r8,r6)
r13 = extractu(r13,#11,#52 -32)
r12 = extractu(r12,#11,#52 -32)
p3 = cmp.gt(r28,#-1)
}
.Ldenorm_continue:
{
r11,p0 = sfrecipa(r8,r9)
r10 = and(r8,#-2)
r28 = #1
r12 = sub(r12,r13)
}
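/*
 * r11 now holds a single-precision reciprocal seed for the divisor (from
 * sfrecipa); the packets below refine it with Newton-Raphson steps before
 * it is widened into the fixed-point reciprocal kept in r13.
 */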
{
r10 -= sfmpy(r11,r9):lib
r1 = insert(r28,#11 +1,#52 -32)
r13 = ##0x00800000 << 3
}
{
r11 += sfmpy(r11,r10):lib
r3 = insert(r28,#11 +1,#52 -32)
r10 = and(r8,#-2)
}
{
r10 -= sfmpy(r11,r9):lib
r5 = #-0x3ff +1
r4 = #0x3ff -1
}
{
r11 += sfmpy(r11,r10):lib
p1 = cmp.gt(r12,r5)
p1 = !cmp.gt(r12,r4)
}
{
r13 = insert(r11,#23,#3)
r5:4 = #0
r12 = add(r12,#-61)
}
{
r13 = add(r13,#((-3) << 3))
}
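/*
 * Four macro-expanded quotient steps: each multiplies the remainder's high
 * word by the reciprocal (r13) to produce the next quotient digit (r7:6),
 * subtracts digit * divisor (r3:2) from the remainder in r1:0, and
 * accumulates the shifted digit into the quotient in r5:4.
 */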
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }
{ r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }
{ r5:4 += ASL(r7:6, # ( 14 )); r1:0 -= asl(r15:14, # 32); }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }
{ r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }
{ r5:4 += ASR(r7:6, # ( 1 )); r1:0 -= asl(r15:14, # 32); }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }
{ r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }
{ r5:4 += ASR(r7:6, # ( 16 )); r1:0 -= asl(r15:14, # 32); }
{ r7:6 = mpyu(r13,r1); r1:0 = asl(r1:0,# ( 15 )); }
{ r6 = # 0; r1:0 -= mpyu(r7,r2); r15:14 = mpyu(r7,r3); }
{ r5:4 += ASR(r7:6, # ( 31 )); r1:0 -= asl(r15:14, # 32); r7:6=# ( 0 ); }
{
r15:14 = sub(r1:0,r3:2)
p0 = cmp.gtu(r3:2,r1:0)
if (!p0.new) r6 = #2
}
{
r5:4 = add(r5:4,r7:6)
if (!p0) r1:0 = r15:14
r15:14 = #0
}
{
p0 = cmp.eq(r1:0,r15:14)
if (!p0.new) r4 = or(r4,r28)
}
{
r7:6 = neg(r5:4)
}
{
if (!p3) r5:4 = r7:6
}
{
r1:0 = convert_d2df(r5:4)
if (!p1) jump .Ldiv_ovf_unf
}
{
r1 += asl(r12,#52 -32)
jumpr r31
}
.Ldiv_ovf_unf:
{
r1 += asl(r12,#52 -32)
r13 = extractu(r1,#11,#52 -32)
}
{
r7:6 = abs(r5:4)
r12 = add(r12,r13)
}
{
p0 = cmp.gt(r12,##0x3ff +0x3ff)
if (p0.new) jump:nt .Ldiv_ovf
}
{
p0 = cmp.gt(r12,#0)
if (p0.new) jump:nt .Lpossible_unf2
}
{
r13 = add(clb(r7:6),#-1)
r12 = sub(#7,r12)
r10 = USR
r11 = #63
}
{
r13 = min(r12,r11)
r11 = or(r10,#0x030)
r7:6 = asl(r7:6,r13)
r12 = #0
}
{
r15:14 = extractu(r7:6,r13:12)
r7:6 = lsr(r7:6,r13)
r3:2 = #1
}
{
p0 = cmp.gtu(r3:2,r15:14)
if (!p0.new) r6 = or(r2,r6)
r7 = setbit(r7,#52 -32+4)
}
{
r5:4 = neg(r7:6)
p0 = bitsclr(r6,#(1<<4)-1)
if (!p0.new) r10 = r11
}
{
USR = r10
if (p3) r5:4 = r7:6
r10 = #-0x3ff -(52 +4)
}
{
r1:0 = convert_d2df(r5:4)
}
{
r1 += asl(r10,#52 -32)
jumpr r31
}
.Lpossible_unf2:
{
r3:2 = extractu(r1:0,#63,#0)
r15:14 = combine(##0x00100000,#0)
r10 = #0x7FFF
}
{
p0 = dfcmp.eq(r15:14,r3:2)
p0 = bitsset(r7,r10)
}
{
if (!p0) jumpr r31
r10 = USR
}
{
r10 = or(r10,#0x30)
}
{
USR = r10
}
{
p0 = dfcmp.eq(r1:0,r1:0)
jumpr r31
}
.Ldiv_ovf:
{
r10 = USR
r3:2 = combine(##0x7fefffff,#-1)
r1 = mux(p3,#0,#-1)
}
{
r7:6 = combine(##0x7ff00000,#0)
r5 = extractu(r10,#2,#22)
r10 = or(r10,#0x28)
}
{
USR = r10
r5 ^= lsr(r1,#31)
r4 = r5
}
{
p0 = !cmp.eq(r4,#1)
p0 = !cmp.eq(r5,#2)
if (p0.new) r3:2 = r7:6
p0 = dfcmp.eq(r3:2,r3:2)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_abnormal:
{
p0 = dfclass(r1:0,#0x0F)
p0 = dfclass(r3:2,#0x0F)
p3 = cmp.gt(r28,#-1)
}
{
p1 = dfclass(r1:0,#0x08)
p1 = dfclass(r3:2,#0x08)
}
{
p2 = dfclass(r1:0,#0x01)
p2 = dfclass(r3:2,#0x01)
}
{
if (!p0) jump .Ldiv_nan
if (p1) jump .Ldiv_invalid
}
{
if (p2) jump .Ldiv_invalid
}
{
p2 = dfclass(r1:0,#(0x0F ^ 0x01))
p2 = dfclass(r3:2,#(0x0F ^ 0x08))
}
{
p1 = dfclass(r1:0,#(0x0F ^ 0x08))
p1 = dfclass(r3:2,#(0x0F ^ 0x01))
}
{
if (!p2) jump .Ldiv_zero_result
if (!p1) jump .Ldiv_inf_result
}
{
p0 = dfclass(r1:0,#0x02)
p1 = dfclass(r3:2,#0x02)
r10 = ##0x00100000
}
{
r13:12 = combine(r3,r1)
r1 = insert(r10,#11 +1,#52 -32)
r3 = insert(r10,#11 +1,#52 -32)
}
{
if (p0) r1 = or(r1,r10)
if (p1) r3 = or(r3,r10)
}
{
r5 = add(clb(r1:0),#-11)
r4 = add(clb(r3:2),#-11)
r10 = #1
}
{
r12 = extractu(r12,#11,#52 -32)
r13 = extractu(r13,#11,#52 -32)
}
{
r1:0 = asl(r1:0,r5)
r3:2 = asl(r3:2,r4)
if (!p0) r12 = sub(r10,r5)
if (!p1) r13 = sub(r10,r4)
}
{
r7:6 = extractu(r3:2,#23,#52 -23)
}
{
r9 = or(r8,r6)
jump .Ldenorm_continue
}
.Ldiv_zero_result:
{
r1 = xor(r1,r3)
r3:2 = #0
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_inf_result:
{
p2 = dfclass(r3:2,#0x01)
p2 = dfclass(r1:0,#(0x0F ^ 0x08))
}
{
r10 = USR
if (!p2) jump 1f
r1 = xor(r1,r3)
}
{
r10 = or(r10,#0x04)
}
{
USR = r10
}
1:
{
r3:2 = combine(##0x7ff00000,#0)
p0 = dfcmp.uo(r3:2,r3:2)
}
{
r1:0 = insert(r3:2,#63,#0)
jumpr r31
}
.Ldiv_nan:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfclass(r3:2,#0x10)
if (!p0.new) r1:0 = r3:2
if (!p1.new) r3:2 = r1:0
}
{
r5 = convert_df2sf(r1:0)
r4 = convert_df2sf(r3:2)
}
{
r1:0 = #-1
jumpr r31
}
.Ldiv_invalid:
{
r10 = ##0x7f800001
}
{
r1:0 = convert_sf2df(r10)
jumpr r31
}
.size __hexagon_divdf3,.-__hexagon_divdf3
|
Sudderen/educom-rust-1754291198 | 5,120 | opdrachten/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/fastmath2_dlib_asm.s | .text
.global __hexagon_fast2_dadd_asm
.type __hexagon_fast2_dadd_asm, @function
__hexagon_fast2_dadd_asm:
.falign
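/*
 * fast2 format (as used throughout this file, inferred from the code): a
 * 64-bit register pair whose low halfword holds a signed exponent and
 * whose remaining bits hold the signed significand; the exponent field is
 * masked off (R0.L = #0) before the significands are aligned and added.
 */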
{
R7:6 = VABSDIFFH(R1:0, R3:2)
R9 = #62
R4 = SXTH(R0)
R5 = SXTH(R2)
} {
R6 = SXTH(R6)
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
if ( P0) R4 = #1
if (!P0) R5 = #1
R0.L = #0
R6 = MIN(R6, R9)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
R2.L = #0
R11:10 = #0
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R10.L = #0x8001
} {
R4 = clb(R1:0)
R9 = #58
} {
R4 = add(R4, #-1)
p0 = cmp.gt(R4, R9)
} {
R1:0 = ASL(R1:0, R4)
R8 = SUB(R8, R4)
if(p0) jump .Ldenorma
} {
R0 = insert(R8, #16, #0)
jumpr r31
}
.Ldenorma:
{
R1:0 = R11:10
jumpr r31
}
.text
.global __hexagon_fast2_dsub_asm
.type __hexagon_fast2_dsub_asm, @function
__hexagon_fast2_dsub_asm:
.falign
{
R7:6 = VABSDIFFH(R1:0, R3:2)
R9 = #62
R4 = SXTH(R0)
R5 = SXTH(R2)
} {
R6 = SXTH(R6)
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
if ( P0) R4 = #1
if (!P0) R5 = #1
R0.L = #0
R6 = MIN(R6, R9)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
R2.L = #0
R11:10 = #0
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R10.L = #0x8001
} {
R4 = clb(R1:0)
R9 = #58
} {
R4 = add(R4, #-1)
p0 = cmp.gt(R4, R9)
} {
R1:0 = ASL(R1:0, R4)
R8 = SUB(R8, R4)
if(p0) jump .Ldenorm
} {
R0 = insert(R8, #16, #0)
jumpr r31
}
.Ldenorm:
{
R1:0 = R11:10
jumpr r31
}
.text
.global __hexagon_fast2_dmpy_asm
.type __hexagon_fast2_dmpy_asm, @function
__hexagon_fast2_dmpy_asm:
.falign
{
R13= lsr(R2, #16)
R5 = sxth(R2)
R4 = sxth(R0)
R12= lsr(R0, #16)
}
{
R11:10 = mpy(R1, R3)
R7:6 = mpy(R1, R13)
R0.L = #0x0
R15:14 = #0
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R12)
R2.L = #0x0
R15.H = #0x8000
}
{
R7:6 = asr(R7:6, #15)
R12.L = #0x8001
p1 = cmp.eq(R1:0, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R8 = add(R4, R5)
p2 = cmp.eq(R1:0, R15:14)
}
{
R9 = clb(R7:6)
R3:2 = abs(R7:6)
R11 = #58
}
{
p1 = and(p1, p2)
R8 = sub(R8, R9)
R9 = add(R9, #-1)
p0 = cmp.gt(R9, R11)
}
{
R8 = add(R8, #1)
R1:0 = asl(R7:6, R9)
if(p1) jump .Lsat
}
{
R0 = insert(R8,#16, #0)
if(!p0) jumpr r31
}
{
R0 = insert(R12,#16, #0)
jumpr r31
}
.Lsat:
{
R1:0 = #-1
}
{
R1:0 = lsr(R1:0, #1)
}
{
R0 = insert(R8,#16, #0)
jumpr r31
}
.text
.global __hexagon_fast2_qd2f_asm
.type __hexagon_fast2_qd2f_asm, @function
__hexagon_fast2_qd2f_asm:
.falign
{
R3 = abs(R1):sat
R4 = sxth(R0)
R5 = #0x40
R6.L = #0xffc0
}
{
R0 = extractu(R3, #8, #0)
p2 = cmp.gt(R4, #126)
p3 = cmp.ge(R4, #-126)
R6.H = #0x7fff
}
{
p1 = cmp.eq(R0,#0x40)
if(p1.new) R5 = #0
R4 = add(R4, #126)
if(!p3) jump .Lmin
}
{
p0 = bitsset(R3, R6)
R0.L = #0x0000
R2 = add(R3, R5)
R7 = lsr(R6, #8)
}
{
if(p0) R4 = add(R4, #1)
if(p0) R3 = #0
R2 = lsr(R2, #7)
R0.H = #0x8000
}
{
R0 = and(R0, R1)
R6 &= asl(R4, #23)
if(!p0) R3 = and(R2, R7)
if(p2) jump .Lmax
}
{
R0 += add(R6, R3)
jumpr r31
}
.Lmax:
{
R0.L = #0xffff;
}
{
R0.H = #0x7f7f;
jumpr r31
}
.Lmin:
{
R0 = #0x0
jumpr r31
}
.text
.global __hexagon_fast2_f2qd_asm
.type __hexagon_fast2_f2qd_asm, @function
__hexagon_fast2_f2qd_asm:
.falign
{
R1 = asl(R0, #7)
p0 = tstbit(R0, #31)
R5:4 = #0
R3 = add(R0,R0)
}
{
R1 = setbit(R1, #30)
R0= extractu(R0,#8,#23)
R4.L = #0x8001
p1 = cmp.eq(R3, #0)
}
{
R1= extractu(R1, #31, #0)
R0= add(R0, #-126)
R2 = #0
if(p1) jump .Lminqd
}
{
R0 = zxth(R0)
if(p0) R1= sub(R2, R1)
jumpr r31
}
.Lminqd:
{
R1:0 = R5:4
jumpr r31
}
|
SuperiorOS/android_packages_modules_Virtualization | 4,679 | libs/libvmbase/exceptions.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Saves the volatile registers onto the stack. This currently takes 14
* instructions, so it can be used in exception handlers with 18 instructions
* left.
*
 * On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
 * which can be used as the first and second arguments of a subsequent call.
*/
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
 * Save elr_el1 & spsr_el1. This is so that we can take a nested exception
 * and still be able to unwind.
*/
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
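/*
 * Resulting frame layout, in 8-byte slots from sp: x0/x1 at [sp, #8 * 0],
 * x2-x17 pairwise up to [sp, #8 * 16], x18 at [sp, #8 * 18], x29/x30 at
 * [sp, #8 * 20], and elr_el1/spsr_el1 at [sp, #8 * 22]; 24 slots
 * (192 bytes) in total.
 */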
/**
* Restores the volatile registers from the stack. This currently takes 14
* instructions, so it can be used in exception handlers while still leaving 18
 * instructions; if paired with save_volatile_to_stack, there are 4
* instructions to spare.
*/
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
* This is a generic handler for exceptions taken at the current EL while using
* SP0. It behaves similarly to the SPx case by first switching to SPx, doing
* the work, then switching back to SP0 before returning.
*
* Switching to SPx and calling the Rust handler takes 16 instructions. To
* restore and return we need an additional 16 instructions, so we can implement
* the whole handler within the allotted 32 instructions.
*/
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
* This is a generic handler for exceptions taken at the current EL while using
* SPx. It saves volatile registers, calls the Rust handler, restores volatile
* registers, then returns.
*
* This also works for exceptions taken from EL0, if we don't care about
* non-volatile registers.
*
* Saving state and jumping to the Rust handler takes 15 instructions, and
* restoring and returning also takes 15 instructions, so we can fit the whole
* handler in 30 instructions, under the limit of 32.
*/
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
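/*
 * The instruction budgets quoted above stem from the architecture: each
 * entry in an AArch64 exception vector table is 0x80 bytes long, i.e. at
 * most 32 A64 instructions.
 */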
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
SuperiorOS/android_packages_modules_Virtualization | 1,788 | libs/libvmbase/exceptions_panic.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/**
 * The following table is intended to trap any fault resulting from the very
 * first memory accesses. Its handlers assume that PSCI v0.2 is available and
 * provides the PSCI_SYSTEM_RESET call, which is used in an attempt to exit
 * gracefully; otherwise the core busy-loops.
*/
.section .text.vector_table_panic, "ax"
.global vector_table_panic
.balign 0x800
vector_table_panic:
sync_cur_sp0_panic:
reset_or_hang
.balign 0x80
irq_cur_sp0_panic:
reset_or_hang
.balign 0x80
fiq_cur_sp0_panic:
reset_or_hang
.balign 0x80
serr_cur_sp0_panic:
reset_or_hang
.balign 0x80
sync_cur_spx_panic:
reset_or_hang
.balign 0x80
irq_cur_spx_panic:
reset_or_hang
.balign 0x80
fiq_cur_spx_panic:
reset_or_hang
.balign 0x80
serr_cur_spx_panic:
reset_or_hang
.balign 0x80
sync_lower_64_panic:
reset_or_hang
.balign 0x80
irq_lower_64_panic:
reset_or_hang
.balign 0x80
fiq_lower_64_panic:
reset_or_hang
.balign 0x80
serr_lower_64_panic:
reset_or_hang
.balign 0x80
sync_lower_32_panic:
reset_or_hang
.balign 0x80
irq_lower_32_panic:
reset_or_hang
.balign 0x80
fiq_lower_32_panic:
reset_or_hang
.balign 0x80
serr_lower_32_panic:
reset_or_hang
|
SuperiorOS/android_packages_modules_Virtualization | 5,154 | libs/libvmbase/entry.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
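/* With the values above, .Lmairval = 0xff04: attribute index 0 is Device-nGnRE (0x04) and index 1 is Normal write-back write-allocate (0xff). */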
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner shareable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
* Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
* cacheable.
*/
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
* Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
* cacheable.
*/
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
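/* T0SZ = 64 - 39 = 25, so TTBR0_EL1 covers a 2**39-byte (512 GiB) VA range: with a 4 KiB granule that is exactly the 512 level-1 entries used by the accompanying idmap tables. */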
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/* Privileged Access Never is unchanged on taking an exception to EL1. */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* All writable memory regions are treated as XN. */
.set .L_SCTLR_EL1_WXN, 0x1 << 19
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
/**
* This is a generic entry point for an image. It carries out the operations required to prepare the
* loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
* prepares the stack, enables floating point, and sets up the exception vector. It preserves x0-x3
* for the Rust entry point, as these may contain boot parameters.
*/
.section .init.entry, "ax"
.global entry
entry:
/* Load and apply the memory management configuration, ready to enable MMU and caches. */
adr x30, vector_table_panic
msr vbar_el1, x30
/*
* Our load address is set by the host so validate it before proceeding.
*/
adr x30, entry
mov_i x29, entry
cmp x29, x30
b.eq 1f
reset_or_hang
1:
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
* Ensure everything before this point has completed, then invalidate any potentially stale
* local TLB entries before they start being used.
*/
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
* Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
*/
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1. */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20)
msr cpacr_el1, x30
isb
/* Zero out the bss section. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Copy the data section. */
adr_l x28, data_begin
adr_l x29, data_end
adr_l x30, data_lma
2: cmp x28, x29
b.ge 3f
ldp q0, q1, [x30], #32
stp q0, q1, [x28], #32
b 2b
3: /* Prepare the exception handler stack (SP_EL1). */
adr_l x30, init_eh_stack_pointer
msr spsel, #1
mov sp, x30
/* Prepare the main thread stack (SP_EL0). */
adr_l x30, init_stack_pointer
msr spsel, #0
mov sp, x30
/* Set up exception vector. */
adr x30, vector_table_el1
msr vbar_el1, x30
/*
* Set up Bionic-compatible thread-local storage.
*
* Note that TPIDR_EL0 can't be configured from rust_entry because the
* compiler will dereference it during function entry to access
* __stack_chk_guard and Rust doesn't support LLVM's
* __attribute__((no_stack_protector)).
*/
adr_l x30, __bionic_tls
msr tpidr_el0, x30
/* Call into Rust code. */
bl rust_entry
/* Loop forever waiting for interrupts. */
4: wfi
b 4b
|
SuperiorOS/android_packages_modules_Virtualization | 2,102 | guest/vmbase_example/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
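/* Worked example: .L_BLOCK_DEV evaluates to 0x0060000000000401, i.e. a block descriptor (bits 1:0 = 0b01) with AttrIndx 0 (device) in bits 4:2, AF at bit 10, and UXN|PXN at bits 54:53. */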
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.fill 509, 8, 0x0 // 509 GiB of remaining VA space
0: /* level 2 */
#if defined(VMBASE_EXAMPLE_IS_BIOS)
.quad 0 // 2 MiB not mapped (DT)
.quad .L_BLOCK_MEM_XIP | 0x80200000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80400000 // 2 MiB of writable DRAM
.fill 509, 8, 0x0
#elif defined(VMBASE_EXAMPLE_IS_KERNEL)
.quad .L_BLOCK_MEM_XIP | 0x80000000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80200000 // 2 MiB of writable DRAM
.fill 510, 8, 0x0
#else
#error "Unexpected vmbase_example mode: failed to generate idmap"
#endif
|
SuperiorOS/android_packages_modules_Virtualization | 2,161 | guest/rialto/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Initial TTBR0 idmap activated before first memory write.
// Remains active until a new page table is created by early Rust.
//
.set .SZ_1K, 1024
.set .SZ_4K, 4 * .SZ_1K
.set .SZ_1M, 1024 * .SZ_1K
.set .SZ_2M, 2 * .SZ_1M
.set .SZ_1G, 1024 * .SZ_1M
.set .PAGE_SIZE, .SZ_4K
.set .ORIGIN_ADDR, 2 * .SZ_1G
.set .TEXT_ADDR, .ORIGIN_ADDR + (0 * .SZ_2M)
.set .DATA_ADDR, .ORIGIN_ADDR + (1 * .SZ_2M)
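// With .ORIGIN_ADDR = 2 GiB these resolve to .TEXT_ADDR = 0x80000000 and
// .DATA_ADDR = 0x80200000, matching the two block entries at level 2 below.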
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
.set .L_TT_AF, 0x1 << 10 // Access flag
.set .L_TT_NG, 0x1 << 11 // Not global
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.balign .PAGE_SIZE
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.balign .PAGE_SIZE, 0 // unmapped
/* level 2 */
0:
.quad .L_BLOCK_MEM_XIP | .TEXT_ADDR // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | .DATA_ADDR // 2 MiB of writable DRAM
.balign .PAGE_SIZE, 0 // unmapped
|
SuperiorOS/android_packages_modules_Virtualization | 1,745 | guest/pvmfw/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad .L_TT_TYPE_TABLE + 0f // Unmapped device memory, and pVM firmware
.fill 510, 8, 0x0 // 510 GiB of remaining VA space
/* level 2 */
0: .fill 510, 8, 0x0
.quad .L_BLOCK_MEM_XIP | 0x7fc00000 // pVM firmware image
.quad .L_BLOCK_MEM | 0x7fe00000 // Writable memory for stack, heap &c.
|
SWE-bench-repos/sharkdp__bat | 7,962 | tests/syntax-tests/highlighted/ARM Assembly/test.S | .data
.balign 4
red: .word 0
green: .word 0
blue: .word 0
.text
.global grayscale
.func grayscale
grayscale:
assign:
 /* some comment */
 ldr ip, addr_red
 str r3, [ip]
 ldr ip, addr_green
 ldmfd r13!, {r3}
 str r3, [ip]
 ldr ip, addr_blue
 ldmfd r13!, {r3}
 str r3, [ip]
 stmfd r13!, {r4-r8}
 ldr ip, addr_red
 ldr r3, [ip]
 ldr ip, addr_green
 ldr r4, [ip]
 ldr ip, addr_blue
 ldr r5, [ip] /* another comment */
grayscale_loop:
 ldrb r6, [r1]
 mul r6, r3, r6
 add r1, r1, #1
 ldrb r7, [r1]
 mul r7, r4, r7
 add r1, r1, #1
 ldrb r8, [r1]
 mul r8, r5, r8
 add r1, r1, #1
 add r6, r6, r7
 add r6, r6, r8
 asr r6, r6, #8
 str r6, [r2]
 add r2, r2, #1
 sub r0, r0, #1
 cmp r0, #0
 bne grayscale_loop
 ldmfd r13!, {r4-r8}
 stmfd r13!, {r0-r1}
 bx lr
addr_red: .word red
addr_green: .word green
addr_blue: .word blue
|
t3hw00t/ARW | 40,185 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.section __TEXT,__const
.align 5
Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
.align 5
_ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
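// Each pass through Loop below is one ChaCha double round: four
// quarter-rounds down the columns of the 4x4 state (held in w5-w21),
// then four across the diagonals. With x4 = 10 iterations that gives
// the 20 rounds of ChaCha20.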
Loop:
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 4
Ltail:
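// Entered with x2 negative from the last subs; the add below restores
// the count of bytes still to produce (now < 64). The packed keystream
// words are spilled to the stack, the tail is XORed one byte at a time
// in Loop_tail, and the spill area is wiped with xzr stores afterwards.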
add x2,x2,#64
Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _ChaCha20_ctr32_neon
.private_extern _ChaCha20_ctr32_neon
.align 5
_ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
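// From here the NEON path produces four 64-byte blocks per outer
// iteration (subs x2,x2,#256): one in the scalar registers, as in the
// nohw path, and three in vector registers (v0-v3, v4-v7, v16-v19).
// v27/v28/v29 carry the counters at +1/+2/+3, and v31, just widened
// from 1 to 4, is the per-iteration counter step.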
Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b Last_neon
Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b Last_neon
Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b Last_neon
.align 4
Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
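// Large-input path, as read from the stores below: each outer iteration
// yields eight 64-byte blocks (512 bytes). The scalar pipeline produces
// two of them, one finished after Loop_upper_neon and one after
// Loop_lower_neon, while six run in vector registers (v0-v3 ... v20-v23)
// at half the scalar round rate. d8-d15 were saved above because the
// AAPCS64 marks them callee-saved.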
Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b Loop_outer
Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
t3hw00t/ARW | 18,316 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-gcm-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.p2align 5
_aesni_ctr32_ghash_6x:
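// Stitched AES-CTR/GHASH inner loop, as read from the code below: six
// counter blocks are encrypted in %xmm9-%xmm14 while the six previous
// ciphertext blocks (byte-swapped onto the stack by the caller) are
// folded into the GHASH accumulator with vpclmulqdq against the
// precomputed powers of H at (%r9). %xmm1 holds the next counter and
// %r11 points at the bswap/poly constant table.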
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
vmovdqu 0-128(%rcx),%xmm15
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpaddb %xmm2,%xmm11,%xmm12
vpaddb %xmm2,%xmm12,%xmm13
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm15,%xmm1,%xmm9
vmovdqu %xmm4,16+8(%rsp)
jmp L$oop6x
.p2align 5
L$oop6x:
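// %ebx holds the counter word still byte-swapped (big-endian), so adding
// 6<<24 = 100663296 carries out of the register exactly when the low
// byte of the real counter would wrap while stepping by 6; the slow path
// at L$handle_ctr32 then redoes the increments on byte-swapped values.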
addl $100663296,%ebx
jc L$handle_ctr32
vmovdqu 0-32(%r9),%xmm3
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm15,%xmm10,%xmm10
vpxor %xmm15,%xmm11,%xmm11
L$resume_ctr32:
vmovdqu %xmm1,(%r8)
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
vpxor %xmm15,%xmm12,%xmm12
vmovups 16-128(%rcx),%xmm2
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
xorq %r12,%r12
cmpq %r14,%r15
vaesenc %xmm2,%xmm9,%xmm9
vmovdqu 48+8(%rsp),%xmm0
vpxor %xmm15,%xmm13,%xmm13
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
vaesenc %xmm2,%xmm10,%xmm10
vpxor %xmm15,%xmm14,%xmm14
setnc %r12b
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vmovdqu 16-32(%r9),%xmm3
negq %r12
vaesenc %xmm2,%xmm12,%xmm12
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
vpxor %xmm4,%xmm8,%xmm8
vaesenc %xmm2,%xmm13,%xmm13
vpxor %xmm5,%xmm1,%xmm4
andq $0x60,%r12
vmovups 32-128(%rcx),%xmm15
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
vaesenc %xmm2,%xmm14,%xmm14
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
leaq (%r14,%r12,1),%r14
vaesenc %xmm15,%xmm9,%xmm9
vpxor 16+8(%rsp),%xmm8,%xmm8
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
vmovdqu 64+8(%rsp),%xmm0
vaesenc %xmm15,%xmm10,%xmm10
movbeq 88(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 80(%r14),%r12
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,32+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,40+8(%rsp)
vmovdqu 48-32(%r9),%xmm5
vaesenc %xmm15,%xmm14,%xmm14
vmovups 48-128(%rcx),%xmm15
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm3,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
vaesenc %xmm15,%xmm11,%xmm11
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
vmovdqu 80+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vpxor %xmm1,%xmm4,%xmm4
vmovdqu 64-32(%r9),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vmovups 64-128(%rcx),%xmm15
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
vaesenc %xmm15,%xmm10,%xmm10
movbeq 72(%r14),%r13
vpxor %xmm5,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
movbeq 64(%r14),%r12
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
vmovdqu 96+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,48+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,56+8(%rsp)
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 96-32(%r9),%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vmovups 80-128(%rcx),%xmm15
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
vaesenc %xmm15,%xmm10,%xmm10
movbeq 56(%r14),%r13
vpxor %xmm1,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
vpxor 112+8(%rsp),%xmm8,%xmm8
vaesenc %xmm15,%xmm11,%xmm11
movbeq 48(%r14),%r12
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,64+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,72+8(%rsp)
vpxor %xmm3,%xmm4,%xmm4
vmovdqu 112-32(%r9),%xmm3
vaesenc %xmm15,%xmm14,%xmm14
vmovups 96-128(%rcx),%xmm15
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
vaesenc %xmm15,%xmm10,%xmm10
movbeq 40(%r14),%r13
vpxor %xmm2,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
movbeq 32(%r14),%r12
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,80+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,88+8(%rsp)
vpxor %xmm5,%xmm6,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor %xmm1,%xmm6,%xmm6
vmovups 112-128(%rcx),%xmm15
vpslldq $8,%xmm6,%xmm5
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 16(%r11),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm8,%xmm7,%xmm7
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm5,%xmm4,%xmm4
movbeq 24(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 16(%r14),%r12
vpalignr $8,%xmm4,%xmm4,%xmm0
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
movq %r13,96+8(%rsp)
vaesenc %xmm15,%xmm12,%xmm12
movq %r12,104+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
vmovups 128-128(%rcx),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vmovups 144-128(%rcx),%xmm15
vaesenc %xmm1,%xmm10,%xmm10
vpsrldq $8,%xmm6,%xmm6
vaesenc %xmm1,%xmm11,%xmm11
vpxor %xmm6,%xmm7,%xmm7
vaesenc %xmm1,%xmm12,%xmm12
vpxor %xmm0,%xmm4,%xmm4
movbeq 8(%r14),%r13
vaesenc %xmm1,%xmm13,%xmm13
movbeq 0(%r14),%r12
vaesenc %xmm1,%xmm14,%xmm14
vmovups 160-128(%rcx),%xmm1
cmpl $11,%r10d
jb L$enc_tail
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 176-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 192-128(%rcx),%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 208-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 224-128(%rcx),%xmm1
jmp L$enc_tail
.p2align 5
L$handle_ctr32:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vmovdqu 0-32(%r9),%xmm3
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm15,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm15,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpshufb %xmm0,%xmm14,%xmm14
vpshufb %xmm0,%xmm1,%xmm1
jmp L$resume_ctr32
.p2align 5
L$enc_tail:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
vpalignr $8,%xmm4,%xmm4,%xmm8
vaesenc %xmm15,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
vpxor 0(%rdi),%xmm1,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
vpxor 16(%rdi),%xmm1,%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vpxor 32(%rdi),%xmm1,%xmm5
vaesenc %xmm15,%xmm13,%xmm13
vpxor 48(%rdi),%xmm1,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor 64(%rdi),%xmm1,%xmm7
vpxor 80(%rdi),%xmm1,%xmm3
vmovdqu (%r8),%xmm1
vaesenclast %xmm2,%xmm9,%xmm9
vmovdqu 32(%r11),%xmm2
vaesenclast %xmm0,%xmm10,%xmm10
vpaddb %xmm2,%xmm1,%xmm0
movq %r13,112+8(%rsp)
leaq 96(%rdi),%rdi
prefetcht0 512(%rdi)
prefetcht0 576(%rdi)
vaesenclast %xmm5,%xmm11,%xmm11
vpaddb %xmm2,%xmm0,%xmm5
movq %r12,120+8(%rsp)
leaq 96(%rsi),%rsi
vmovdqu 0-128(%rcx),%xmm15
vaesenclast %xmm6,%xmm12,%xmm12
vpaddb %xmm2,%xmm5,%xmm6
vaesenclast %xmm7,%xmm13,%xmm13
vpaddb %xmm2,%xmm6,%xmm7
vaesenclast %xmm3,%xmm14,%xmm14
vpaddb %xmm2,%xmm7,%xmm3
addq $0x60,%rax
subq $0x6,%rdx
jc L$6x_done
vmovups %xmm9,-96(%rsi)
vpxor %xmm15,%xmm1,%xmm9
vmovups %xmm10,-80(%rsi)
vmovdqa %xmm0,%xmm10
vmovups %xmm11,-64(%rsi)
vmovdqa %xmm5,%xmm11
vmovups %xmm12,-48(%rsi)
vmovdqa %xmm6,%xmm12
vmovups %xmm13,-32(%rsi)
vmovdqa %xmm7,%xmm13
vmovups %xmm14,-16(%rsi)
vmovdqa %xmm3,%xmm14
vmovdqu 32+8(%rsp),%xmm7
jmp L$oop6x
L$6x_done:
vpxor 16+8(%rsp),%xmm8,%xmm8
vpxor %xmm4,%xmm8,%xmm8
ret
.globl _aesni_gcm_decrypt
.private_extern _aesni_gcm_decrypt
.p2align 5
_aesni_gcm_decrypt:
_CET_ENDBR
xorq %rax,%rax
cmpq $0x60,%rdx
jb L$gcm_dec_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
movq 16(%rbp),%r12
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
vmovdqu (%r12),%xmm8
andq $-128,%rsp
vmovdqu (%r11),%xmm0
leaq 128(%rcx),%rcx
leaq 32(%r9),%r9
movl 240-128(%rcx),%r10d
vpshufb %xmm0,%xmm8,%xmm8
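// The masking dance below appears to be cache-aliasing avoidance: %r14
// and %r15 take the key schedule's and stack's offsets within a page
// (mask 0xf80), and %rsp is lowered when the two would fall too close.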
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$dec_no_key_aliasing
cmpq $768,%r15
jnc L$dec_no_key_aliasing
subq %r15,%rsp
L$dec_no_key_aliasing:
vmovdqu 80(%rdi),%xmm7
movq %rdi,%r14
vmovdqu 64(%rdi),%xmm4
leaq -192(%rdi,%rdx,1),%r15
vmovdqu 48(%rdi),%xmm5
shrq $4,%rdx
xorq %rax,%rax
vmovdqu 32(%rdi),%xmm6
vpshufb %xmm0,%xmm7,%xmm7
vmovdqu 16(%rdi),%xmm2
vpshufb %xmm0,%xmm4,%xmm4
vmovdqu (%rdi),%xmm3
vpshufb %xmm0,%xmm5,%xmm5
vmovdqu %xmm4,48(%rsp)
vpshufb %xmm0,%xmm6,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm2,%xmm2
vmovdqu %xmm6,80(%rsp)
vpshufb %xmm0,%xmm3,%xmm3
vmovdqu %xmm2,96(%rsp)
vmovdqu %xmm3,112(%rsp)
call _aesni_ctr32_ghash_6x
movq 16(%rbp),%r12
vmovups %xmm9,-96(%rsi)
vmovups %xmm10,-80(%rsi)
vmovups %xmm11,-64(%rsi)
vmovups %xmm12,-48(%rsi)
vmovups %xmm13,-32(%rsi)
vmovups %xmm14,-16(%rsi)
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_dec_abort:
ret
.p2align 5
_aesni_ctr32_6x:
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
leaq -1(%r10),%r13
vmovups 16-128(%rcx),%xmm15
leaq 32-128(%rcx),%r12
vpxor %xmm4,%xmm1,%xmm9
addl $100663296,%ebx
jc L$handle_ctr32_2
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddb %xmm2,%xmm11,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddb %xmm2,%xmm12,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
.p2align 4
L$oop_ctr32:
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vmovups (%r12),%xmm15
leaq 16(%r12),%r12
decl %r13d
jnz L$oop_ctr32
vmovdqu (%r12),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor 0(%rdi),%xmm3,%xmm4
vaesenc %xmm15,%xmm10,%xmm10
vpxor 16(%rdi),%xmm3,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
vpxor 32(%rdi),%xmm3,%xmm6
vaesenc %xmm15,%xmm12,%xmm12
vpxor 48(%rdi),%xmm3,%xmm8
vaesenc %xmm15,%xmm13,%xmm13
vpxor 64(%rdi),%xmm3,%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vpxor 80(%rdi),%xmm3,%xmm3
leaq 96(%rdi),%rdi
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm5,%xmm10,%xmm10
vaesenclast %xmm6,%xmm11,%xmm11
vaesenclast %xmm8,%xmm12,%xmm12
vaesenclast %xmm2,%xmm13,%xmm13
vaesenclast %xmm3,%xmm14,%xmm14
vmovups %xmm9,0(%rsi)
vmovups %xmm10,16(%rsi)
vmovups %xmm11,32(%rsi)
vmovups %xmm12,48(%rsi)
vmovups %xmm13,64(%rsi)
vmovups %xmm14,80(%rsi)
leaq 96(%rsi),%rsi
ret
.p2align 5
L$handle_ctr32_2:
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpshufb %xmm0,%xmm14,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpshufb %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
.globl _aesni_gcm_encrypt
.private_extern _aesni_gcm_encrypt
.p2align 5
_aesni_gcm_encrypt:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+2(%rip)
#endif
xorq %rax,%rax
cmpq $288,%rdx
jb L$gcm_enc_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
leaq 128(%rcx),%rcx
vmovdqu (%r11),%xmm0
andq $-128,%rsp
movl 240-128(%rcx),%r10d
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$enc_no_key_aliasing
cmpq $768,%r15
jnc L$enc_no_key_aliasing
subq %r15,%rsp
L$enc_no_key_aliasing:
movq %rsi,%r14
leaq -192(%rsi,%rdx,1),%r15
shrq $4,%rdx
call _aesni_ctr32_6x
vpshufb %xmm0,%xmm9,%xmm8
vpshufb %xmm0,%xmm10,%xmm2
vmovdqu %xmm8,112(%rsp)
vpshufb %xmm0,%xmm11,%xmm4
vmovdqu %xmm2,96(%rsp)
vpshufb %xmm0,%xmm12,%xmm5
vmovdqu %xmm4,80(%rsp)
vpshufb %xmm0,%xmm13,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm14,%xmm7
vmovdqu %xmm6,48(%rsp)
call _aesni_ctr32_6x
movq 16(%rbp),%r12
leaq 32(%r9),%r9
vmovdqu (%r12),%xmm8
subq $12,%rdx
movq $192,%rax
vpshufb %xmm0,%xmm8,%xmm8
call _aesni_ctr32_ghash_6x
vmovdqu 32(%rsp),%xmm7
vmovdqu (%r11),%xmm0
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm7,%xmm7,%xmm1
vmovdqu 32-32(%r9),%xmm15
vmovups %xmm9,-96(%rsi)
vpshufb %xmm0,%xmm9,%xmm9
vpxor %xmm7,%xmm1,%xmm1
vmovups %xmm10,-80(%rsi)
vpshufb %xmm0,%xmm10,%xmm10
vmovups %xmm11,-64(%rsi)
vpshufb %xmm0,%xmm11,%xmm11
vmovups %xmm12,-48(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vmovups %xmm13,-32(%rsi)
vpshufb %xmm0,%xmm13,%xmm13
vmovups %xmm14,-16(%rsi)
vpshufb %xmm0,%xmm14,%xmm14
vmovdqu %xmm9,16(%rsp)
vmovdqu 48(%rsp),%xmm6
vmovdqu 16-32(%r9),%xmm0
vpunpckhqdq %xmm6,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
vpxor %xmm6,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vmovdqu 64(%rsp),%xmm9
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm9,%xmm9,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
vpxor %xmm9,%xmm5,%xmm5
vpxor %xmm7,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vmovdqu 80(%rsp),%xmm1
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm4,%xmm7,%xmm7
vpunpckhqdq %xmm1,%xmm1,%xmm4
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpxor %xmm6,%xmm9,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 96(%rsp),%xmm2
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm7,%xmm6,%xmm6
vpunpckhqdq %xmm2,%xmm2,%xmm7
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpxor %xmm9,%xmm1,%xmm1
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm5,%xmm4,%xmm4
vpxor 112(%rsp),%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
vmovdqu 112-32(%r9),%xmm0
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
vpxor %xmm4,%xmm7,%xmm4
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm1
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
vpxor %xmm14,%xmm1,%xmm1
vpxor %xmm5,%xmm6,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
vmovdqu 32-32(%r9),%xmm15
vpxor %xmm2,%xmm8,%xmm7
vpxor %xmm4,%xmm9,%xmm6
vmovdqu 16-32(%r9),%xmm0
vpxor %xmm5,%xmm7,%xmm9
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
vpxor %xmm9,%xmm6,%xmm6
vpunpckhqdq %xmm13,%xmm13,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
vpxor %xmm13,%xmm2,%xmm2
vpslldq $8,%xmm6,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vpxor %xmm9,%xmm5,%xmm8
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm6,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm12,%xmm12,%xmm9
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
vpxor %xmm12,%xmm9,%xmm9
vpxor %xmm14,%xmm13,%xmm13
vpalignr $8,%xmm8,%xmm8,%xmm14
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm11,%xmm11,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
vpxor %xmm11,%xmm1,%xmm1
vpxor %xmm13,%xmm12,%xmm12
vxorps 16(%rsp),%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm9,%xmm9
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm10,%xmm10,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
vpxor %xmm10,%xmm2,%xmm2
vpalignr $8,%xmm8,%xmm8,%xmm14
vpxor %xmm12,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm9,%xmm1,%xmm1
vxorps %xmm7,%xmm14,%xmm14
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
vmovdqu 112-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm11,%xmm10,%xmm10
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
vpxor %xmm4,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
vpxor %xmm10,%xmm7,%xmm7
vpxor %xmm2,%xmm6,%xmm6
vpxor %xmm5,%xmm7,%xmm4
vpxor %xmm4,%xmm6,%xmm6
vpslldq $8,%xmm6,%xmm1
vmovdqu 16(%r11),%xmm3
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm1,%xmm5,%xmm8
vpxor %xmm6,%xmm7,%xmm7
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm2,%xmm8,%xmm8
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm7,%xmm2,%xmm2
vpxor %xmm2,%xmm8,%xmm8
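// The two vpalignr/vpclmulqdq $0x10,16(%r11) folds above perform the
// carry-less reduction of the 256-bit GHASH product by the polynomial
// constant at 16(%r11) (L$poly below, 0xc2 in the top byte); %r11 is
// assumed to point at L$bswap_mask, as the final vpshufb suggests.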
movq 16(%rbp),%r12
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_enc_abort:
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$poly:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
L$one_msb:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
L$two_lsb:
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
L$one_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
|
t3hw00t/ARW | 10,863 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-neon-armv8-win64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
.globl gcm_init_neon
.def gcm_init_neon
.type 32
.endef
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
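// A sketch of the twisting above, following the gcm_init_v8 semantics
// this is adapted from:
//
//   carry     = H >> 127
//   Htable[0] = (H << 1) ^ (carry ? ((0xc2 << 120) | 1) : 0)
//
// i.e. H is doubled in GF(2^128), with the reduction constant folded in
// when the top bit carries out; the ext instructions also swap the two
// halves into the order the pmull code below expects.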
.globl gcm_gmult_neon
.def gcm_gmult_neon
.type 32
.endef
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl gcm_ghash_neon
.def gcm_ghash_neon
.type 32
.endef
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
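	// Concretely (a sketch, using the names from the comments above):
	// with A and B as 8-byte polynomials, pmull only provides 8x8-bit
	// carry-less multiplies, so the 64x64 product is assembled from
	// byte-rotated copies, roughly
	//
	//   D = A*B ^ (L' << 8) ^ (M' << 16) ^ (N' << 24) ^ (K' << 32)
	//
	// where L = A1*B + A*B1, M = A2*B + A*B2, N = A3*B + A*B3,
	// K = A*B4, and X' denotes X with its high half folded under the
	// matching kN mask. The zip/eor/and sequence below evaluates two
	// such terms per vector register.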
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
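	// GHASH works modulo x^128 + x^7 + x^2 + x + 1 in a bit-reflected
	// representation, so the fold is done with shifts: the 57/62/63
	// shifts in the 1st phase and the 1/2/7 shifts in the 2nd phase
	// multiply the overflowing half by the reflected x^7 + x^2 + x
	// tail, collapsing the 256-bit product back to 128 bits.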
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section .rodata
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
t3hw00t/ARW | 190,544 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/chacha20_poly1305_x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.section __DATA,__const
.p2align 6
chacha20_poly1305_constants:
L$chacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
L$rol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
L$rol16:
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
L$avx2_init:
.long 0,0,0,0
L$sse_inc:
.long 1,0,0,0
L$avx2_inc:
.long 2,0,0,0,2,0,0,0
L$clamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
.p2align 4
L$and_masks:
.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
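// Row i of L$and_masks (counting from 0) keeps the first i+1 bytes of a
// 16-byte block; presumably used to mask a partial trailing block.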
.text
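// poly_hash_ad_internal absorbs the additional data into the running
// Poly1305 state. The register convention, as inferred from the callers:
// %rcx = AD pointer, %r8 = AD length, the clamped key r at 0(%rbp) and
// 8(%rbp), and the accumulator carried in %r10:%r11:%r12. The 13-byte
// case is a fast path for the TLS AAD length.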
.p2align 6
poly_hash_ad_internal:
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
cmpq $13,%r8
jne L$hash_ad_loop
L$poly_fast_tls_ad:
movq (%rcx),%r10
movq 5(%rcx),%r11
shrq $24,%r11
movq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
ret
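// The mulq/imulq block above is one Poly1305 step: with the padding bit
// added via "adcq $1,%r12", it computes acc = acc * r partially reduced
// modulo 2^130 - 5, in the shape
//
//   (d0,d1,d2,d3) = acc * (r0,r1)              // schoolbook 64-bit mulq
//   acc = (d0, d1, d2 & 3) + 5*((d2,d3) >> 2)  // since 2^130 = 5 here
//
// where 5*top is formed as top + (top << 2) by the andq $-4 / shrdq $2
// dance, avoiding another multiply.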
L$hash_ad_loop:
cmpq $16,%r8
jb L$hash_ad_tail
addq 0+0(%rcx),%r10
adcq 8+0(%rcx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rcx),%rcx
subq $16,%r8
jmp L$hash_ad_loop
L$hash_ad_tail:
cmpq $0,%r8
je L$hash_ad_done
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
addq %r8,%rcx
L$hash_ad_tail_loop:
shldq $8,%r13,%r14
shlq $8,%r13
movzbq -1(%rcx),%r15
xorq %r15,%r13
decq %rcx
decq %r8
jne L$hash_ad_tail_loop
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$hash_ad_done:
ret
.globl _chacha20_poly1305_open_sse41
.private_extern _chacha20_poly1305_open_sse41
.p2align 6
_chacha20_poly1305_open_sse41:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
cmpq $128,%rbx
jbe L$open_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm7
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movq $10,%r10
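// Each trip through this loop is one ChaCha20 double round on the block
// state in %xmm0/%xmm4/%xmm8/%xmm12: pshufb with L$rol16/L$rol8 rotates
// lanes by 16 and 8, the pslld/psrld pairs rotate by 12 and 7, and the
// encoded palignr instructions (.byte 102,15,58,15,...) rotate the rows
// between the column and diagonal halves. Ten trips give 20 rounds.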
L$open_sse_init_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jne L$open_sse_init_rounds
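// The first keystream block doubles as the one-time Poly1305 key: the
// low 16 bytes are clamped with L$clamp (the RFC 8439 clamp) to form r,
// the next 16 bytes form s, stored at 0(%rbp) and 16(%rbp) for the
// Poly1305 code.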
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
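// Main open (decrypt) loop: 256 bytes per iteration as four interleaved
// ChaCha20 blocks, with the Poly1305 update woven between the vector
// instructions and reading the ciphertext through %r8 (= %rsi), since
// opening authenticates the ciphertext before decrypting it.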
L$open_sse_main_loop:
cmpq $256,%rbx
jb L$open_sse_tail
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $4,%rcx
movq %rsi,%r8
L$open_sse_main_loop_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
leaq 16(%r8),%r8
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %rcx
jge L$open_sse_main_loop_rounds
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
cmpq $-6,%rcx
jg L$open_sse_main_loop_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor 0+80(%rbp),%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_sse_main_loop
L$open_sse_tail:
testq %rbx,%rbx
jz L$open_sse_finalize
cmpq $192,%rbx
ja L$open_sse_tail_256
cmpq $128,%rbx
ja L$open_sse_tail_192
cmpq $64,%rbx
ja L$open_sse_tail_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
cmpq $16,%rcx
jb L$open_sse_tail_64_rounds
L$open_sse_tail_64_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
L$open_sse_tail_64_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
cmpq $16,%rcx
jae L$open_sse_tail_64_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_64_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movq %rbx,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_128_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
cmpq %rcx,%r8
jb L$open_sse_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_128_rounds
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
subq $64,%rbx
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_192:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movq %rbx,%rcx
movq $160,%r8
cmpq $160,%rcx
cmovgq %r8,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_192_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
cmpq %rcx,%r8
jb L$open_sse_tail_192_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_192_rounds
cmpq $176,%rbx
jb L$open_sse_tail_192_finish
addq 0+160(%rsi),%r10
adcq 8+160(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
cmpq $192,%rbx
jb L$open_sse_tail_192_finish
addq 0+176(%rsi),%r10
adcq 8+176(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_finish:
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
subq $128,%rbx
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_256:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
xorq %r8,%r8
L$open_sse_tail_256_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
movdqa 0+80(%rbp),%xmm11
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
movdqa 0+80(%rbp),%xmm9
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
movdqa 0+80(%rbp),%xmm11
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
movdqa 0+80(%rbp),%xmm9
addq $16,%r8
cmpq $160,%r8
jb L$open_sse_tail_256_rounds_and_x1hash
movq %rbx,%rcx
andq $-16,%rcx
L$open_sse_tail_256_hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%r8
cmpq %rcx,%r8
jb L$open_sse_tail_256_hash
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqa 0+80(%rbp),%xmm12
subq $192,%rbx
leaq 192(%rsi),%rsi
leaq 192(%rdi),%rdi
L$open_sse_tail_64_dec_loop:
cmpq $16,%rbx
jb L$open_sse_tail_16_init
subq $16,%rbx
movdqu (%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_16_init:
movdqa %xmm0,%xmm1
L$open_sse_tail_16:
testq %rbx,%rbx
jz L$open_sse_finalize
pxor %xmm3,%xmm3
leaq -1(%rsi,%rbx,1),%rsi
movq %rbx,%r8
L$open_sse_tail_16_compose:
pslldq $1,%xmm3
pinsrb $0,(%rsi),%xmm3
subq $1,%rsi
subq $1,%r8
jnz L$open_sse_tail_16_compose
.byte 102,73,15,126,221
pextrq $1,%xmm3,%r14
pxor %xmm1,%xmm3
L$open_sse_tail_16_extract:
pextrb $0,%xmm3,(%rdi)
psrldq $1,%xmm3
addq $1,%rdi
subq $1,%rbx
jne L$open_sse_tail_16_extract
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_finalize:
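// Tag finalization: absorb the AD/ciphertext lengths saved at 32(%rbp),
// run a last Poly1305 step, then reduce fully: subq $-5 / sbbq $-1 /
// sbbq $3 computes acc - (2^130 - 5) limb-wise, and the cmovcq
// instructions restore the original acc on borrow. Adding s from
// 16(%rbp) yields the tag, written through the pointer popped back
// into %r9.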
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$open_sse_128:
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm13,%xmm15
movq $10,%r10
L$open_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$open_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm9
paddd %xmm11,%xmm10
paddd %xmm15,%xmm13
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm14
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
L$open_sse_128_xor_hash:
cmpq $16,%rbx
jb L$open_sse_tail_16
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm1
movdqu %xmm1,0(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
movdqa %xmm2,%xmm13
movdqa %xmm6,%xmm2
movdqa %xmm10,%xmm6
movdqa %xmm14,%xmm10
jmp L$open_sse_128_xor_hash
.globl _chacha20_poly1305_seal_sse41
.private_extern _chacha20_poly1305_seal_sse41
.p2align 6
_chacha20_poly1305_seal_sse41:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
cmpq $128,%rbx
jbe L$seal_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm14
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $10,%r10
L$seal_sse_init_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jnz L$seal_sse_init_rounds
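// The four blocks above use consecutive counters: the column finalized
// with 144(%rbp) (the lowest counter) becomes the Poly1305 key below,
// and the other three encrypt the first 192 bytes of plaintext.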
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
pand L$clamp(%rip),%xmm3
movdqa %xmm3,0+0(%rbp)
movdqa %xmm7,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
cmpq $192,%rbx
ja L$seal_sse_main_init
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_init:
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 128(%rdi)
movdqu %xmm4,16 + 128(%rdi)
movdqu %xmm8,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
movq $2,%rcx
movq $8,%r8
cmpq $64,%rbx
jbe L$seal_sse_tail_64
cmpq $128,%rbx
jbe L$seal_sse_tail_128
cmpq $192,%rbx
jbe L$seal_sse_tail_192
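# Main seal loop: each iteration produces four 64-byte ChaCha20 blocks
# (256 bytes of keystream) while hashing already-written ciphertext, then
# XORs and stores the next 256 bytes of output.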
L$seal_sse_main_loop:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
.p2align 5
L$seal_sse_main_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
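# Start of the Poly1305 multiply, interleaved with the vector rounds:
# the accumulator (r10:r11:r12) is multiplied by r (limbs at 0(%rbp) and
# 8(%rbp)) into r13:r14:r15:r9; the remaining partial products and the
# mod 2^130-5 reduction are spread through the following instructions.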
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
leaq 16(%rdi),%rdi
decq %r8
jge L$seal_sse_main_rounds
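# One more Poly1305 block over 16 bytes at (%rdi): h += block (plus the 2^128
# pad bit via adcq $1), h *= r, then reduce mod 2^130-5 by folding everything
# above bit 130 back in as 5*high (the andq $-4 / shrdq $2 pair computes
# 4*high + high).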
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_main_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm14,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm14
pxor %xmm3,%xmm14
movdqu %xmm14,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm14
pxor %xmm7,%xmm14
movdqu %xmm14,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm14
pxor %xmm11,%xmm14
movdqu %xmm14,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm14
pxor %xmm15,%xmm14
movdqu %xmm14,48 + 0(%rdi)
movdqa 0+80(%rbp),%xmm14
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
cmpq $256,%rbx
ja L$seal_sse_main_loop_xor
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_loop_xor:
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
subq $256,%rbx
movq $6,%rcx
movq $4,%r8
cmpq $192,%rbx
jg L$seal_sse_main_loop
movq %rbx,%rcx
testq %rbx,%rbx
je L$seal_sse_128_tail_hash
movq $6,%rcx
cmpq $128,%rbx
ja L$seal_sse_tail_192
cmpq $64,%rbx
ja L$seal_sse_tail_128
L$seal_sse_tail_64:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
L$seal_sse_tail_64_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_64_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_64_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_64_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
L$seal_sse_tail_128_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_128_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_128_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_128_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movq $64,%rcx
subq $64,%rbx
leaq 64(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_tail_192:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
L$seal_sse_tail_192_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_192_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_192_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_192_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
L$seal_sse_128_tail_hash:
cmpq $16,%rcx
jb L$seal_sse_128_tail_xor
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
leaq 16(%rdi),%rdi
jmp L$seal_sse_128_tail_hash
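# XOR any remaining whole 16-byte blocks with keystream from %xmm0 and hash
# the resulting ciphertext; the register rotation at the bottom (xmm4->xmm0,
# xmm8->xmm4, ...) brings the next keystream word into %xmm0 each pass.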
L$seal_sse_128_tail_xor:
cmpq $16,%rbx
jb L$seal_sse_tail_16
subq $16,%rbx
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,0(%rdi)
addq 0(%rdi),%r10
adcq 8(%rdi),%r11
adcq $1,%r12
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
movdqa %xmm1,%xmm12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
jmp L$seal_sse_128_tail_xor
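# Final partial block (< 16 bytes): gather the input back to front into
# %xmm15 with pslldq/pinsrb, XOR with the keystream in %xmm0, then emit the
# result one byte at a time with pextrb.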
L$seal_sse_tail_16:
testq %rbx,%rbx
jz L$process_blocks_of_extra_in
movq %rbx,%r8
movq %rbx,%rcx
leaq -1(%rsi,%rbx,1),%rsi
pxor %xmm15,%xmm15
L$seal_sse_tail_16_compose:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
decq %rcx
jne L$seal_sse_tail_16_compose
pxor %xmm0,%xmm15
movq %rbx,%rcx
movdqu %xmm15,%xmm0
L$seal_sse_tail_16_extract:
pextrb $0,%xmm0,(%rdi)
psrldq $1,%xmm0
addq $1,%rdi
subq $1,%rcx
jnz L$seal_sse_tail_16_extract
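# The seal data block saved at 288+0+32(%rsp) appears to carry an extra-input
# pointer and length at offsets 48/56; any such bytes are merged into the
# final partial block so they get hashed together.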
movq 288 + 0 + 32(%rsp),%r9
movq 56(%r9),%r14
movq 48(%r9),%r13
testq %r14,%r14
jz L$process_partial_block
movq $16,%r15
subq %rbx,%r15
cmpq %r15,%r14
jge L$load_extra_in
movq %r14,%r15
L$load_extra_in:
leaq -1(%r13,%r15,1),%rsi
addq %r15,%r13
subq %r15,%r14
movq %r13,48(%r9)
movq %r14,56(%r9)
addq %r15,%r8
pxor %xmm11,%xmm11
L$load_extra_load_loop:
pslldq $1,%xmm11
pinsrb $0,(%rsi),%xmm11
leaq -1(%rsi),%rsi
subq $1,%r15
jnz L$load_extra_load_loop
movq %rbx,%r15
L$load_extra_shift_loop:
pslldq $1,%xmm11
subq $1,%r15
jnz L$load_extra_shift_loop
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
por %xmm11,%xmm15
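# 102,77,15,126,253 encodes movq %xmm15,%r13; with the pextrq below it moves
# the masked final block into r13:r14 for hashing.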
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$process_blocks_of_extra_in:
movq 288 + 0 + 32(%rsp),%r9
movq 48(%r9),%rsi
movq 56(%r9),%r8
movq %r8,%rcx
shrq $4,%r8
L$process_extra_hash_loop:
jz L$process_extra_in_trailer
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rsi),%rsi
subq $1,%r8
jmp L$process_extra_hash_loop
L$process_extra_in_trailer:
andq $15,%rcx
movq %rcx,%rbx
jz L$do_length_block
leaq -1(%rsi,%rcx,1),%rsi
L$process_extra_in_trailer_load:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
subq $1,%rcx
jnz L$process_extra_in_trailer_load
L$process_partial_block:
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
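# Hash the 16-byte length block (ad length and ciphertext length, saved at
# 32(%rbp)) as the last Poly1305 input.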
L$do_length_block:
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
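# Final reduction: subq $-5 / sbbq $-1 / sbbq $3 computes acc - (2^130 - 5);
# on borrow the cmovc instructions keep the unreduced value. Adding s from
# 16(%rbp) yields the tag, written through the pointer popped off the stack.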
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
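# Short-input seal path: three ChaCha20 blocks are computed entirely in xmm
# registers; block 2 (xmm2/xmm6) is clamped to form the Poly1305 key.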
L$seal_sse_128:
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm14
movdqa %xmm14,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
movq $10,%r10
L$seal_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$seal_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm8
paddd %xmm11,%xmm9
paddd %xmm15,%xmm12
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm13
pand L$clamp(%rip),%xmm2
movdqa %xmm2,0+0(%rbp)
movdqa %xmm6,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
jmp L$seal_sse_128_tail_xor
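# AVX2 open entry point: rsi/rdi are the input/output buffers, rdx the length
# (saved in %rbx), r8 the ad length (stored for the length block), and r9
# points at the key/nonce state (three 16-byte rows are broadcast from it);
# r9 is pushed here and popped in the shared epilogue, which writes the
# computed tag through it.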
.globl _chacha20_poly1305_open_avx2
.private_extern _chacha20_poly1305_open_avx2
.p2align 6
_chacha20_poly1305_open_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$open_avx2_192
cmpq $320,%rbx
jbe L$open_avx2_320
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%r10
L$open_avx2_init_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
decq %r10
jne L$open_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
L$open_avx2_init_hash:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%rcx
cmpq $64,%rcx
jne L$open_avx2_init_hash
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vmovdqu %ymm0,0(%rdi)
vmovdqu %ymm4,32(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
subq $64,%rbx
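# Main AVX2 open loop: 512 bytes per iteration. Four ymm groups hold eight
# interleaved ChaCha20 blocks; the counters were staged at 160..256(%rbp).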
L$open_avx2_main_loop:
cmpq $512,%rbx
jb L$open_avx2_main_loop_done
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
L$open_avx2_main_loop_rounds:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
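# Poly1305 multiply using BMI2 mulx (which leaves the flags untouched, so it
# interleaves cleanly with the vector code): the same acc * r mod 2^130-5
# computation as the mulq version above.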
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rsi,%rcx,1),%r10
adcq 8+16(%rsi,%rcx,1),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rsi,%rcx,1),%r10
adcq 8+32(%rsi,%rcx,1),%r11
adcq $1,%r12
leaq 48(%rcx),%rcx
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
cmpq $60*8,%rcx
jne L$open_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+60*8(%rsi),%r10
adcq 8+60*8(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
addq 0+60*8+16(%rsi),%r10
adcq 8+60*8+16(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
leaq 512(%rdi),%rdi
subq $512,%rbx
jmp L$open_avx2_main_loop
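# Fewer than 512 bytes remain: dispatch on the residual length to a tail
# routine sized for up to 128, 256, 384 or 512 bytes.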
L$open_avx2_main_loop_done:
testq %rbx,%rbx
vzeroupper
je L$open_sse_finalize
cmpq $384,%rbx
ja L$open_avx2_tail_512
cmpq $256,%rbx
ja L$open_avx2_tail_384
cmpq $128,%rbx
ja L$open_avx2_tail_256
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
andq $-16,%rcx
testq %rcx,%rcx
je L$open_avx2_tail_128_rounds
L$open_avx2_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_avx2_tail_128_rounds:
addq $16,%r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_avx2_tail_128_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_256:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $128,%rcx
shrq $4,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_256_rounds_and_x1hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_256_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
incq %r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
cmpq %rcx,%r8
jb L$open_avx2_tail_256_rounds_and_x1hash
cmpq $10,%r8
jne L$open_avx2_tail_256_rounds
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_tail_256_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_tail_256_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_tail_256_hash
L$open_avx2_tail_256_done:
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
subq $128,%rbx
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_384:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $256,%rcx
shrq $4,%rcx
addq $6,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_384_rounds_and_x2hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_384_rounds_and_x1hash:
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
incq %r8
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_384_rounds_and_x2hash
cmpq $10,%r8
jne L$open_avx2_tail_384_rounds_and_x1hash
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_384_tail_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_384_tail_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_384_tail_hash
L$open_avx2_384_tail_done:
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_avx2_tail_128_xor
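# Tail of 385..512 bytes: rebuild four parallel ChaCha20 states from the saved
# key rows at 0+64/0+96(%rbp) and four incremented counters, spilling the
# counter rows to 0+160..0+256(%rbp) for the feed-forward later.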
L$open_avx2_tail_512:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
movq %rsi,%r8
L$open_avx2_tail_512_rounds_and_x2hash:
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
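# Main round body for the 512-byte tail: one full double round over all four
# states with two 16-byte Poly1305 absorptions interleaved between the vector
# instructions, so hashing keeps pace with keystream generation. %rcx counts
# up to ten double rounds; iterations with %rcx below 4 also take the
# ..._x2hash prologue above for one extra hash block each.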
L$open_avx2_tail_512_rounds_and_x1hash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+16(%r8),%r10
adcq 8+16(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%r8),%r8
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
incq %rcx
cmpq $4,%rcx
jl L$open_avx2_tail_512_rounds_and_x2hash
cmpq $10,%rcx
jne L$open_avx2_tail_512_rounds_and_x1hash
movq %rbx,%rcx
subq $384,%rcx
andq $-16,%rcx
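# Hash the leftover full 16-byte blocks of ciphertext past the 384 bytes that
# the first three states will decrypt (%rcx = (remaining - 384) rounded down
# to a multiple of 16).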
L$open_avx2_tail_512_hash:
testq %rcx,%rcx
je L$open_avx2_tail_512_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
subq $16,%rcx
jmp L$open_avx2_tail_512_hash
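# Done hashing: add the saved initial state words back into all four states
# (the ChaCha20 feed-forward), XOR 384 bytes of keystream with the input, and
# leave state 0's keystream in ymm0/ymm4/ymm8/ymm12 for the XOR loop below.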
L$open_avx2_tail_512_done:
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 384(%rsi),%rsi
leaq 384(%rdi),%rdi
subq $384,%rbx
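# Drain the leftover keystream 32 bytes at a time, rotating ymm4/ymm8/ymm12
# down into ymm0 as each register is consumed.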
L$open_avx2_tail_128_xor:
cmpq $32,%rbx
jb L$open_avx2_tail_32_xor
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
jmp L$open_avx2_tail_128_xor
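# Under 32 bytes left: XOR one 16-byte lane if possible, shift the upper lane
# down, and hand the final partial block (keystream in xmm1) to the SSE
# byte-granular tail.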
L$open_avx2_tail_32_xor:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb L$open_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm1
vmovdqu %xmm1,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vperm2i128 $0x11,%ymm0,%ymm0,%ymm0
vmovdqa %xmm0,%xmm1
L$open_avx2_exit:
vzeroupper
jmp L$open_sse_tail_16
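# Messages of at most 192 bytes: run just two ChaCha20 blocks. ymm1/ymm13 is a
# copy of the state with the counter bumped by one; ymm2/ymm6/ymm10 and
# ymm11/ymm15 keep the initial rows for the feed-forward; %r10 counts the ten
# double rounds.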
L$open_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
L$open_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$open_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
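# The first 32 bytes of block 0's keystream become the Poly1305 key: L$clamp
# masks the r half and leaves the s half intact, and r||s is stored at 0(%rbp)
# for the hash code. The remaining keystream is then rearranged into
# ymm0/ymm4/... for decryption in the short loop.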
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
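# Common short path: %r8 holds the AD length for poly_hash_ad_internal (the
# movq %r8,%r8 below appears to be a no-op left in by the code generator);
# after the AD, each iteration absorbs two 16-byte Poly1305 blocks from the
# ciphertext at %rsi before XORing those same 32 bytes with keystream.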
L$open_avx2_short:
movq %r8,%r8
call poly_hash_ad_internal
L$open_avx2_short_hash_and_xor_loop:
cmpq $32,%rbx
jb L$open_avx2_short_tail_32
subq $32,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rsi),%r10
adcq 8+16(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$open_avx2_short_hash_and_xor_loop
L$open_avx2_short_tail_32:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb L$open_avx2_short_tail_32_exit
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm1
L$open_avx2_short_tail_32_exit:
vzeroupper
jmp L$open_sse_tail_16
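# Messages of at most 320 bytes: three parallel ChaCha20 blocks, with the
# three counter rows saved at 0+160..0+224(%rbp) for the feed-forward.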
L$open_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$open_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$open_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$open_avx2_short
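# AEAD seal (encrypt-then-MAC) entry point, AVX2 version. The prologue saves
# the callee-saved registers plus %r9 and carves out a 32-byte-aligned scratch
# frame at %rbp; the length word at 56(%r9) is added to the plaintext length
# in %rdx to form the total to seal (stored at 8+32(%rbp), with %r8, the AD
# length, at 0+32(%rbp)). The key/nonce rows are broadcast from 0/16/32(%r9),
# and short inputs dispatch to the 192- and 320-byte paths.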
.globl _chacha20_poly1305_seal_avx2
.private_extern _chacha20_poly1305_seal_avx2
.p2align 6
_chacha20_poly1305_seal_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$seal_avx2_192
cmpq $320,%rbx
jbe L$seal_avx2_320
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm4,%ymm7
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vmovdqa %ymm8,%ymm11
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,%ymm15
vpaddd L$avx2_inc(%rip),%ymm15,%ymm14
vpaddd L$avx2_inc(%rip),%ymm14,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm15,0+256(%rbp)
movq $10,%r10
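# First chunk: ten double rounds over four states with no Poly1305 work
# interleaved, since sealing can only hash ciphertext once it has been
# produced.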
L$seal_avx2_init_rounds:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %r10
jnz L$seal_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
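# As in the open path, state 3's first 32 bytes of keystream are clamped into
# the Poly1305 key at 0(%rbp); the movq %r8,%r8 below is again a generator
# no-op (%r8 already holds the AD length). After hashing the AD, 320 bytes are
# encrypted from the leftover keystream of states 3 down to 1.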
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vperm2i128 $0x02,%ymm3,%ymm7,%ymm15
vperm2i128 $0x13,%ymm3,%ymm7,%ymm3
vpand L$clamp(%rip),%ymm15,%ymm15
vmovdqa %ymm15,0+0(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
vpxor 0(%rsi),%ymm3,%ymm3
vpxor 32(%rsi),%ymm11,%ymm11
vmovdqu %ymm3,0(%rdi)
vmovdqu %ymm11,32(%rdi)
vperm2i128 $0x02,%ymm2,%ymm6,%ymm15
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+64(%rsi),%ymm15,%ymm15
vpxor 32+64(%rsi),%ymm2,%ymm2
vpxor 64+64(%rsi),%ymm6,%ymm6
vpxor 96+64(%rsi),%ymm10,%ymm10
vmovdqu %ymm15,0+64(%rdi)
vmovdqu %ymm2,32+64(%rdi)
vmovdqu %ymm6,64+64(%rdi)
vmovdqu %ymm10,96+64(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm15
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+192(%rsi),%ymm15,%ymm15
vpxor 32+192(%rsi),%ymm1,%ymm1
vpxor 64+192(%rsi),%ymm5,%ymm5
vpxor 96+192(%rsi),%ymm9,%ymm9
vmovdqu %ymm15,0+192(%rdi)
vmovdqu %ymm1,32+192(%rdi)
vmovdqu %ymm5,64+192(%rdi)
vmovdqu %ymm9,96+192(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm15
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm15,%ymm8
leaq 320(%rsi),%rsi
subq $320,%rbx
movq $320,%rcx
cmpq $128,%rbx
jbe L$seal_avx2_short_hash_remainder
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vpxor 64(%rsi),%ymm8,%ymm8
vpxor 96(%rsi),%ymm12,%ymm12
vmovdqu %ymm0,320(%rdi)
vmovdqu %ymm4,352(%rdi)
vmovdqu %ymm8,384(%rdi)
vmovdqu %ymm12,416(%rdi)
leaq 128(%rsi),%rsi
subq $128,%rbx
movq $8,%rcx
movq $2,%r8
cmpq $128,%rbx
jbe L$seal_avx2_tail_128
cmpq $256,%rbx
jbe L$seal_avx2_tail_256
cmpq $384,%rbx
jbe L$seal_avx2_tail_384
cmpq $512,%rbx
jbe L$seal_avx2_tail_512
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
subq $16,%rdi
movq $9,%rcx
jmp L$seal_avx2_main_loop_rounds_entry
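# Steady-state seal loop: each pass generates 512 bytes of keystream while
# Poly1305 absorbs ciphertext written on the previous pass (three 16-byte
# blocks per round iteration, read via %rdi). The first pass enters mid-body
# at ..._rounds_entry with %rcx = 9 and %rdi backed up 16 bytes so the
# absorptions line up with the ciphertext that already exists.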
.p2align 5
L$seal_avx2_main_loop:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%rcx
.p2align 5
L$seal_avx2_main_loop_rounds:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$seal_avx2_main_loop_rounds_entry:
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rdi),%r10
adcq 8+32(%rdi),%r11
adcq $1,%r12
leaq 48(%rdi),%rdi
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %rcx
jne L$seal_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
subq $512,%rbx
cmpq $512,%rbx
jg L$seal_avx2_main_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
movq $10,%rcx
xorq %r8,%r8
cmpq $384,%rbx
ja L$seal_avx2_tail_512
cmpq $256,%rbx
ja L$seal_avx2_tail_384
cmpq $128,%rbx
ja L$seal_avx2_tail_256
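# Seal tails: %rcx counts the remaining ChaCha20 double rounds and %r8 extra
# hash-and-round passes, so Poly1305 catches up with ciphertext already
# written. tail_128 regenerates a single state for the final block.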
L$seal_avx2_tail_128:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_128_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_128_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_128_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_128_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$seal_avx2_short_loop
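# Tail of 129..256 bytes: two parallel states, counters at 0+160/0+192(%rbp),
# with the same round/hash interleaving as above.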
L$seal_avx2_tail_256:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
L$seal_avx2_tail_256_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_256_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_256_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_256_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $128,%rcx
leaq 128(%rsi),%rsi
subq $128,%rbx
jmp L$seal_avx2_short_hash_remainder
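# Tail of 257..384 bytes: three parallel states, counters at
# 0+160..0+224(%rbp).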
L$seal_avx2_tail_384:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
L$seal_avx2_tail_384_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_384_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_384_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_384_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $256,%rcx
leaq 256(%rsi),%rsi
subq $256,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_tail_512:
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_512_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_512_rounds_and_2xhash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
addq %rax,%r15
adcq %rdx,%r9
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_512_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_512_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $384,%rcx
leaq 384(%rsi),%rsi
subq $384,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_320:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$seal_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$seal_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$seal_avx2_short
L$seal_avx2_192:
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
L$seal_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$seal_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
L$seal_avx2_short:
movq %r8,%r8 # register-to-register no-op, apparently left by the code generator
call poly_hash_ad_internal
xorq %rcx,%rcx
L$seal_avx2_short_hash_remainder:
cmpq $16,%rcx
jb L$seal_avx2_short_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
addq $16,%rdi
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_short_loop:
cmpq $32,%rbx
jb L$seal_avx2_short_tail
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$seal_avx2_short_loop
L$seal_avx2_short_tail:
cmpq $16,%rbx
jb L$seal_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm0
L$seal_avx2_exit:
vzeroupper
jmp L$seal_sse_tail_16
#endif
|
t3hw00t/ARW | 10,875 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghash-neon-armv8-ios64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
.text
.globl _gcm_init_neon
.private_extern _gcm_init_neon
.align 4
_gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
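// Roughly, the block above forms the "twisted H" stored in Htable[0]:
// the 128-bit H is doubled (the H<<<=1 step), with the carried-out top
// bit folded back through the 0xc2 byte of the reduction constant. A
// hedged one-line sketch, assuming a 128-bit integer type (names
// illustrative, not part of the generated code):
//   H2 = (H << 1) ^ (msb_set(H) ? ((u128)0xc2 << 120) : 0);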
.globl _gcm_gmult_neon
.private_extern _gcm_gmult_neon
.align 4
_gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl _gcm_ghash_neon
.private_extern _gcm_ghash_neon
.align 4
_gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
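// As a hedged C-style sketch of one mask-and-fold triple above (kN is
// one of the Lmasks constants; variable names are illustrative):
//   lo ^= hi;    // tentatively fold the whole high half down
//   hi &= kN;    // keep only the bits that belong in the high half
//   lo ^= hi;    // un-fold the bits that stayed in hi
// Net effect: bits of hi outside the kN mask migrate into lo, while
// bits inside the mask stay in hi.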
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
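// The zip1/zip2 pairs above gather matching 64-bit halves of two
// products into single registers, so each eor/and/eor sequence runs
// the mask-and-fold for two terms at once (v24 holds {k48, k32},
// v25 holds {k16, k0}); the second zip group splits the results back
// into per-term registers.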
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
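// (Karatsuba identity in GF(2): Xm = (A0^A1)*(B0^B1) ^ Xl ^ Xh
// = A0*B1 ^ A1*B0, a middle term that straddles the Xl/Xh boundary,
// hence the ext/ins shuffling above and below.)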
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
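// A hedged summary of the two phases above: they reduce the 256-bit
// product modulo the bit-reflected GHASH polynomial
// x^128 + x^7 + x^2 + x + 1; the shift amounts (63/62/57 left,
// 1/2/7 right in total) account for the x, x^2 and x^7 terms.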
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section __TEXT,__const
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
|
t3hw00t/ARW | 17,785 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/bsaes-armv7-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ of Linaro.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@
@ February 2012.
@
@ This implementation is a direct adaptation of the bsaes-x86_64 module
@ for ARM NEON, except that this module is endian-neutral [in the sense
@ that it can be compiled for either endianness], courtesy of vld1.8's
@ neutrality. The initial version doesn't implement an interface to
@ OpenSSL; it provides only low-level primitives and unsupported entry
@ points, just enough to collect performance results, which for the
@ Cortex-A8 core are:
@
@ encrypt 19.5 cycles per byte processed with 128-bit key
@ decrypt 22.1 cycles per byte processed with 128-bit key
@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
@
@ Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
@ which is [much] worse than anticipated (for further details see
@ http://www.openssl.org/~appro/Snapdragon-S4.html).
@
@ Cortex-A15 manages 14.2/16.1 cycles [where integer-only code
@ manages 20.0 cycles].
@
@ When comparing to x86_64 results, keep in mind that the NEON unit is
@ [mostly] single-issue and thus can't [fully] benefit from
@ instruction-level parallelism. And when comparing to aes-armv4
@ results, keep in mind the key schedule conversion overhead (see
@ bsaes-x86_64.pl for further details)...
@
@ <appro@openssl.org>
@ April-August 2013
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
# define VFP_ABI_FRAME 0x40
#else
# define VFP_ABI_PUSH
# define VFP_ABI_POP
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
.type _bsaes_const,%object
.align 6
_bsaes_const:
.LM0ISR:@ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR:@ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
.size _bsaes_const,.-_bsaes_const
.type _bsaes_encrypt8,%function
.align 4
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-.LM0SR
#endif
vldmia r6!, {q8} @ .LM0SR
_bsaes_encrypt8_alt:
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
_bsaes_encrypt8_bitslice:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b .Lenc_sbox
.align 4
.Lenc_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
.Lenc_sbox:
veor q2, q2, q1
veor q5, q5, q6
veor q3, q3, q0
veor q6, q6, q2
veor q5, q5, q0
veor q6, q6, q3
veor q3, q3, q7
veor q7, q7, q5
veor q3, q3, q4
veor q4, q4, q5
veor q2, q2, q7
veor q3, q3, q1
veor q1, q1, q5
veor q11, q7, q4
veor q10, q1, q2
veor q9, q5, q3
veor q13, q2, q4
vmov q8, q10
veor q12, q6, q0
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q3, q0
vand q15, q15, q12
vand q13, q13, q9
veor q9, q7, q1
veor q12, q5, q6
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q2, q3
veor q9, q9, q14
vand q13, q4, q0
vand q14, q1, q5
vorr q15, q7, q6
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q6, q0
veor q8, q5, q3
veor q10, q15, q14
vand q10, q10, q6
veor q6, q6, q5
vand q11, q5, q15
vand q6, q6, q14
veor q5, q11, q10
veor q6, q6, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q3
vand q8, q8, q15
vand q3, q3, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q3
veor q12, q12, q11
veor q3, q3, q10
veor q6, q6, q12
veor q0, q0, q12
veor q5, q5, q8
veor q3, q3, q8
veor q12, q7, q4
veor q8, q1, q2
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q4
veor q12, q12, q8
veor q4, q4, q2
vand q8, q8, q15
vand q2, q2, q13
vand q12, q12, q14
vand q4, q4, q9
veor q8, q8, q12
veor q4, q4, q2
veor q12, q12, q11
veor q2, q2, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q7
veor q7, q7, q1
vand q11, q1, q15
vand q7, q7, q14
veor q1, q11, q10
veor q7, q7, q11
veor q7, q7, q12
veor q4, q4, q12
veor q1, q1, q8
veor q2, q2, q8
veor q7, q7, q0
veor q1, q1, q6
veor q6, q6, q0
veor q4, q4, q7
veor q0, q0, q1
veor q1, q1, q5
veor q5, q5, q2
veor q2, q2, q3
veor q3, q3, q5
veor q4, q4, q5
veor q6, q6, q3
subs r5,r5,#1
bcc .Lenc_done
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q4, q4, #12
veor q1, q1, q9
vext.8 q11, q6, q6, #12
veor q4, q4, q10
vext.8 q12, q3, q3, #12
veor q6, q6, q11
vext.8 q13, q7, q7, #12
veor q3, q3, q12
vext.8 q14, q2, q2, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q2, q2, q14
veor q9, q9, q0
veor q5, q5, q15
vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q3
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q3, q3, #8
veor q12, q12, q6
vext.8 q9, q7, q7, #8
veor q15, q15, q2
vext.8 q3, q6, q6, #8
veor q11, q11, q4
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q6, q2, q2, #8
veor q11, q11, q5
vext.8 q2, q4, q4, #8
veor q5, q9, q13
veor q4, q8, q12
veor q3, q3, q11
veor q7, q7, q15
veor q6, q6, q14
@ vmov q4, q8
veor q2, q2, q10
@ vmov q5, q9
vldmia r6, {q12} @ .LSR
ite eq @ Thumb2 thing; sanity check in ARM
addeq r6,r6,#0x10
bne .Lenc_loop
vldmia r6, {q12} @ .LSRM0
b .Lenc_loop
.align 4
.Lenc_done:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q2, #1
vshr.u64 q11, q3, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q3, q3, q11
vshr.u64 q10, q4, #1
vshr.u64 q11, q0, #1
veor q10, q10, q6
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q6, q6, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q4, q4, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q3, #2
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q3, q3, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q6
veor q11, q11, q4
vand q10, q10, q9
vand q11, q11, q9
veor q6, q6, q10
vshl.u64 q10, q10, #2
veor q4, q4, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q6, #4
vshr.u64 q11, q4, #4
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q4, q4, q8
veor q6, q6, q8
veor q3, q3, q8
veor q7, q7, q8
veor q2, q2, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.size _bsaes_encrypt8,.-_bsaes_encrypt8
.type _bsaes_key_convert,%function
.align 4
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0
#else
sub r6,r6,#_bsaes_key_convert-.LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
vldmia r6, {q14} @ .LM0
#ifdef __ARMEL__
vrev32.8 q7, q7
vrev32.8 q15, q15
#endif
sub r5,r5,#1
vstmia r12!, {q7} @ save round 0 key
b .Lkey_loop
.align 4
.Lkey_loop:
vtbl.8 d14,{q15},d28
vtbl.8 d15,{q15},d29
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.8 {q15}, [r4]! @ load next round key
vmvn q0, q0 @ "pnot"
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
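@ The vtst.8 tests above spread bit n of every key byte across a whole
@ byte of slice register qn; inverting slices 0, 1, 5 and 6 here folds
@ in the AES S-box affine constant 0x63, whose set bits are exactly
@ 0, 1, 5 and 6.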
#ifdef __ARMEL__
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne .Lkey_loop
vmov.i8 q7,#0x63 @ compose .L63
@ don't save last round key
bx lr
.size _bsaes_key_convert,.-_bsaes_key_convert
.globl bsaes_ctr32_encrypt_blocks
.hidden bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,%function
.align 5
bsaes_ctr32_encrypt_blocks:
@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
@ out to retain a constant-time implementation.
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp now points at the key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(.LREVM0SR-.LM0)
add r8, r6, r8
#else
add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
.align 2
add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, .LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
sub sp, #0x10 @ place for adjusted round0 key
#endif
vmov.i32 q8,#1 @ compose 1<<96
veor q9,q9,q9
vrev32.8 q0,q0
vext.8 q8,q9,q8,#4
vrev32.8 q4,q4
vadd.u32 q9,q8,q8 @ compose 2<<96
vstmia sp, {q4} @ save adjusted round0 key
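@ Note: the counter block was byte-reversed by vrev32.8 above, so adding
@ the 1<<96 and 2<<96 constants with vadd.u32 increments what is, in
@ memory order, the big-endian 32-bit counter in the last word of the
@ block.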
b .Lctr_enc_loop
.align 4
.Lctr_enc_loop:
vadd.u32 q10, q8, q9 @ compose 3<<96
vadd.u32 q1, q0, q8 @ +1
vadd.u32 q2, q0, q9 @ +2
vadd.u32 q3, q0, q10 @ +3
vadd.u32 q4, q1, q10
vadd.u32 q5, q2, q10
vadd.u32 q6, q3, q10
vadd.u32 q7, q4, q10
vadd.u32 q10, q5, q10 @ next counter
@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
@ to flip byte order in 32-bit counter
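@ (Calling _bsaes_encrypt8_alt with .LREVM0SR instead of .LM0SR makes
@ the round-0 table lookup byte-swap each 32-bit word as it permutes,
@ undoing the vrev32.8 applied to the counters.)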
vldmia sp, {q9} @ load round0 key
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x10 @ pass next round key
#else
add r4, r3, #264
#endif
vldmia r8, {q8} @ .LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(.LREVM0SR-.LSR)
sub r6, r8, r6
#else
sub r6, r8, #.LREVM0SR-.LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo .Lctr_enc_loop_done
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
veor q5, q15
vst1.8 {q6}, [r1]!
vmov.i32 q8, #1 @ compose 1<<96
vst1.8 {q3}, [r1]!
veor q9, q9, q9
vst1.8 {q7}, [r1]!
vext.8 q8, q9, q8, #4
vst1.8 {q2}, [r1]!
vadd.u32 q9,q8,q8 @ compose 2<<96
vst1.8 {q5}, [r1]!
vldmia r9, {q0} @ load counter
bne .Lctr_enc_loop
b .Lctr_enc_done
.align 4
.Lctr_enc_loop_done:
add r2, r2, #8
vld1.8 {q8}, [r0]! @ load input
veor q0, q8
vst1.8 {q0}, [r1]! @ write output
cmp r2, #2
blo .Lctr_enc_done
vld1.8 {q9}, [r0]!
veor q1, q9
vst1.8 {q1}, [r1]!
beq .Lctr_enc_done
vld1.8 {q10}, [r0]!
veor q4, q10
vst1.8 {q4}, [r1]!
cmp r2, #4
blo .Lctr_enc_done
vld1.8 {q11}, [r0]!
veor q6, q11
vst1.8 {q6}, [r1]!
beq .Lctr_enc_done
vld1.8 {q12}, [r0]!
veor q3, q12
vst1.8 {q3}, [r1]!
cmp r2, #6
blo .Lctr_enc_done
vld1.8 {q13}, [r0]!
veor q7, q13
vst1.8 {q7}, [r1]!
beq .Lctr_enc_done
vld1.8 {q14}, [r0]
veor q2, q14
vst1.8 {q2}, [r1]!
.Lctr_enc_done:
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
.Lctr_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lctr_enc_bzero
#else
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
@ OpenSSL contains aes_nohw_* fallback code here. We patch this
@ out to retain a constant-time implementation.
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
t3hw00t/ARW | 70,675 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha256-x86_64-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,@function
.align 16
sha256_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop
.align 16
.Lloop:
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp .Lrounds_16_xx
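# Rounds 16..63: each pass of .Lrounds_16_xx performs 16 rounds while
# expanding the message schedule in place on the stack:
#   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
# sigma0(x) = (x ror 7) ^ (x ror 18) ^ (x >> 3) is built from the
# rorl $11 / rorl $7 / shrl $3 sequence below, and sigma1(x) =
# (x ror 17) ^ (x ror 19) ^ (x >> 10) from rorl $2 / rorl $17 / shrl $10.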
.align 16
.Lrounds_16_xx:
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
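# End-of-table probe: every K256 round constant has a nonzero top byte,
# while the shuffle-mask data stored after the table begins with
# 0x00010203 (top byte zero), so this cmpb reads 0 exactly once all 64
# constants have been consumed.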
cmpb $0,3(%rbp)
jnz .Lrounds_16_xx
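# All 64 rounds done: fold the working variables back into the hash
# state at (%rdi), advance the input pointer one 64-byte block, and
# loop while %rsi is still below the end pointer saved at 64+16(%rsp).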
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
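# K256: the SHA-256 round constants. Each row of four constants appears
# twice, presumably so that wider 256-bit loads elsewhere see the row
# duplicated across both 128-bit lanes; the scalar code above steps
# past the duplicate copy via the "leaq 20(%rbp)" every fourth round.
# The constants are followed by byte-swap and shuffle masks used by the
# SIMD paths below.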
.section .rodata
.align 64
.type K256,@object
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
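# ASCII identification string: "SHA256 block transform for x86_64,
# CRYPTOGAMS by <appro@openssl.org>".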
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl sha256_block_data_order_hw
.hidden sha256_block_data_order_hw
.type sha256_block_data_order_hw,@function
.align 64
sha256_block_data_order_hw:
.cfi_startproc
_CET_ENDBR
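# SHA-NI path. The raw .byte sequences below are hand-encoded SHA and
# SSSE3 instructions, presumably kept in byte form for assemblers that
# predate these extensions:
#   15,56,203,/r    = sha256rnds2 (two rounds per instruction)
#   15,56,204,/r    = sha256msg1
#   15,56,205,/r    = sha256msg2
#   102,15,56,0,/r  = pshufb
#   102,15,58,15,/r = palignr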
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp .Loop_shaext
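# Each .Loop_shaext iteration consumes one 64-byte block: load four
# 16-byte chunks, byte-swap them with the mask held in %xmm7, then run
# the 64 rounds as 32 sha256rnds2 steps, adding the matching K256 row
# to the message words in %xmm0 before each pair of rounds.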
.align 16
.Loop_shaext:
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz .Loop_shaext
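# All blocks processed: undo the ABEF/CDGH lane arrangement expected by
# sha256rnds2 and store the eight state words back to (%rdi).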
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.cfi_endproc
.size sha256_block_data_order_hw,.-sha256_block_data_order_hw
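# SSSE3 path: the scalar round logic of the nohw version, interleaved
# instruction-by-instruction with SSSE3 computation of the next 16
# message-schedule words to hide latency.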
.globl sha256_block_data_order_ssse3
.hidden sha256_block_data_order_ssse3
.type sha256_block_data_order_ssse3,@function
.align 64
sha256_block_data_order_ssse3:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_ssse3:
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop_ssse3
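# Per block: byte-swap the 64 input bytes with the mask at K256+512,
# precompute W[0..15]+K[0..15] into 0..48(%rsp), and seed %edi with
# b^c for the alternating Maj(a,b,c) computation used in the rounds.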
.align 16
.Lloop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lssse3_00_47
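# .Lssse3_00_47 runs three times (rounds 0..47): 16 scalar rounds per
# pass, while the vector code schedules the next 16 words and writes
# their W+K values back to the stack. subq $-128 advances %rbp one
# constant group per pass; cmpb $0,131(%rbp) peeks at the top byte of
# the next constant, which reads zero once K256 is exhausted.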
.align 16
.Lssse3_00_47:
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lssse3_00_47
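# Rounds 48..63: pure scalar rounds using the W+K values already on
# the stack; no further message scheduling is needed.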
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_ssse3
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_ssse3:
ret
.cfi_endproc
.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3
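# AVX path: same structure as the SSSE3 version, but using
# non-destructive three-operand VEX instructions for the schedule and
# shrdl (a double shift of a register with itself, i.e. a rotate) in
# place of rorl in the scalar rounds.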
.globl sha256_block_data_order_avx
.hidden sha256_block_data_order_avx
.type sha256_block_data_order_avx,@function
.align 64
sha256_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
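# %xmm8/%xmm9 below are shuffle masks stored after K256; judging from
# their use with vpshufb in .Lavx_00_47, they pack the 64-bit sigma1
# results into the proper 32-bit lanes, with the 0xff bytes zeroing
# the remainder (an assumption read from the code, not documented).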
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lavx_00_47
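# .Lavx_00_47 mirrors .Lssse3_00_47: three passes of 16 rounds with
# in-flight message scheduling, terminated by the same
# cmpb $0,131(%rbp) end-of-constants probe.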
.align 16
.Lavx_00_47:
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lavx_00_47
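# Rounds 48..63: scalar-only tail, as in the SSSE3 version.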
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
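// Editorial note (hedged): the frame slots below presumably follow the usual
// sha256-x86_64.pl layout, with the context pointer saved at 64+0(%rsp) and
// the input end pointer at 64+16(%rsp); the epilogue folds the eight working
// registers back into the state, advances %rsi by one 64-byte block, and
// loops while input remains.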
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_avx
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha256_block_data_order_avx,.-sha256_block_data_order_avx
#endif
|
t3hw00t/ARW | 4,266 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/ghashv8-armx-linux64.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,%function
.align 4
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
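//Editorial note (hedged): the sequence above appears to be the usual GHASH
//preprocessing: H is shifted left by one bit across the full 128 bits (the
//ushr/shl/orr steps propagate the carry between halves) and, when the top
//bit was set, reduced with the 0xc2....01 constant, producing the
//"twisted H" that the Karatsuba multiplies below operate on.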
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
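//Editorial note (hedged): this looks like the standard Karatsuba fix-up for
//a carry-less 128x128-bit multiply: with lo = Hl*Xl, hi = Hh*Xh and
//mid = (Hl^Hh)*(Xl^Xh), the true middle term is mid ^ lo ^ hi, and the
//surrounding ext/ins shuffles re-align the halves into a 256-bit product
//before the two-phase reduction below.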
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,%function
.align 4
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
|
t3hw00t/ARW | 4,229 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/x86-mont-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
.L_bn_mul_mont_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
movl 40(%esp),%edi
leal 20(%esp),%esi
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp
negl %edi
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
jmp .L001page_walk_done
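// Editorial note (hedged): the loop below looks like the usual "page walk":
// having dropped %esp by a potentially large, 64-byte-aligned amount, it
// touches each 4096-byte page of the new frame in order, so stack-probing /
// guard-page schemes observe sequential growth instead of one large jump.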
.align 16
.L000page_walk:
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
.L001page_walk_done:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %ebp,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %edx,24(%esp)
movl $-1,%eax
movd %eax,%mm7
movl 8(%esp),%esi
movl 12(%esp),%edi
movl 16(%esp),%ebp
xorl %edx,%edx
xorl %ecx,%ecx
movd (%edi),%mm4
movd (%esi),%mm5
movd (%ebp),%mm3
pmuludq %mm4,%mm5
movq %mm5,%mm2
movq %mm5,%mm0
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
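// Editorial note (hedged): this reads as word-serial Montgomery
// multiplication using SSE2: %mm4 holds bp[i], %mm7 is a low-32-bit mask,
// 20(%esp) holds the Montgomery constant n0, and each pass accumulates
// tp[] += ap[]*bp[i] + m*np[] with m = tp[0]*n0 mod 2^32, carrying the
// running sums in %mm2/%mm3.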
.align 16
.L0021st:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl .L0021st
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)
incl %edx
.L003outer:
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4
movd (%esi),%mm5
movd 32(%esp),%mm6
movd (%ebp),%mm3
pmuludq %mm4,%mm5
paddq %mm6,%mm5
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
.L004inner:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz .L004inner
movl %ecx,%ebx
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle .L003outer
emms
jmp .L005common_tail
.align 16
.L005common_tail:
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 16
.L006sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge .L006sub
sbbl $0,%eax
movl $-1,%edx
xorl %eax,%edx
jmp .L007copy
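// Editorial note (hedged): the copy loop below looks like a constant-time
// conditional move: %eax and %edx are complementary masks derived from the
// outcome of the subtraction above, so each result word is selected from
// either tp[] or rp[] with and/or masking rather than a data-dependent
// branch.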
.align 16
.L007copy:
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge .L007copy
movl 24(%esp),%esp
movl $1,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_mont,.-.L_bn_mul_mont_begin
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
t3hw00t/ARW | 42,856 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/sha512-armv4-linux32.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the Apache License, Version 2.0 (the "License");
@ you may not use this file except in compliance with the License.
@ You may obtain a copy of the License at
@
@ https://www.apache.org/licenses/LICENSE-2.0
@
@ Unless required by applicable law or agreed to in writing, software
@ distributed under the License is distributed on an "AS IS" BASIS,
@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ See the License for the specific language governing permissions and
@ limitations under the License.
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@ This code is ~4.5 (four and a half) times faster than code generated
@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
@ Xscale PXA250 core].
@
@ July 2010.
@
@ Rescheduling for the dual-issue pipeline resulted in a 6% improvement on
@ the Cortex A8 core and ~40 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in a 7%
@ improvement on the Cortex A8 core and ~38 cycles per byte.
@ March 2011.
@
@ Add NEON implementation. On Cortex A8 it was measured to process
@ one byte in 23.3 cycles or ~60% faster than integer-only code.
@ August 2012.
@
@ Improve NEON performance by 12% on Snapdragon S4. In absolute
@ terms it's 22.6 cycles per byte, which is a disappointing result.
@ Technical writers asserted that the 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
@ not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On a side note, Cortex-A15 processes one byte
@ in 16 cycles.
@ Byte order [in]dependence. =========================================
@
@ Originally the caller was expected to maintain a specific *dword* order in
@ h[0-7], namely with the most significant dword at the *lower* address,
@ which was reflected in the two parameters below as 0 and 4. Now the caller
@ is expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
#endif
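@ Editorial note (hedged): for example, on a little-endian build (__ARMEL__)
@ the first entry below, WORD64(0x428a2f98,0xd728ae22, ...), expands to
@ .word 0xd728ae22,0x428a2f98, ... so each 64-bit K[i] is stored low word
@ first, matching the LO=0/HI=4 offsets used by the integer code.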
.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
.type K512,%object
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,%function
sha512_block_data_order_nohw:
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
adr r14,K512
sub sp,sp,#9*8
ldr r7,[r0,#32+LO]
ldr r8,[r0,#32+HI]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
.Loop:
str r9, [sp,#48+0]
str r10, [sp,#48+4]
str r11, [sp,#56+0]
str r12, [sp,#56+4]
ldr r5,[r0,#0+LO]
ldr r6,[r0,#0+HI]
ldr r3,[r0,#8+LO]
ldr r4,[r0,#8+HI]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
str r3,[sp,#8+0]
str r4,[sp,#8+4]
str r9, [sp,#16+0]
str r10, [sp,#16+4]
str r11, [sp,#24+0]
str r12, [sp,#24+4]
ldr r3,[r0,#40+LO]
ldr r4,[r0,#40+HI]
str r3,[sp,#40+0]
str r4,[sp,#40+4]
.L00_15:
#if __ARM_ARCH<7
ldrb r3,[r1,#7]
ldrb r9, [r1,#6]
ldrb r10, [r1,#5]
ldrb r11, [r1,#4]
ldrb r4,[r1,#3]
ldrb r12, [r1,#2]
orr r3,r3,r9,lsl#8
ldrb r9, [r1,#1]
orr r3,r3,r10,lsl#16
ldrb r10, [r1],#8
orr r3,r3,r11,lsl#24
orr r4,r4,r12,lsl#8
orr r4,r4,r9,lsl#16
orr r4,r4,r10,lsl#24
#else
ldr r3,[r1,#4]
ldr r4,[r1],#8
#ifdef __ARMEL__
rev r3,r3
rev r4,r4
#endif
#endif
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
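@ Editorial note (hedged): these two-register forms follow from splitting a
@ 64-bit rotate across 32-bit halves: for n<32 the low word of ROTR(hi:lo,n)
@ is (lo>>n)|(hi<<(32-n)), and for n>=32 the halves swap roles with n-32,
@ e.g. ROTR 41 contributes (hi>>9)|(lo<<23) to the low word.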
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
tst r14,#1
beq .L00_15
ldr r9,[sp,#184+0]
ldr r10,[sp,#184+4]
bic r14,r14,#1
.L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
mov r3,r9,lsr#1
ldr r11,[sp,#80+0]
mov r4,r10,lsr#1
ldr r12,[sp,#80+4]
eor r3,r3,r10,lsl#31
eor r4,r4,r9,lsl#31
eor r3,r3,r9,lsr#8
eor r4,r4,r10,lsr#8
eor r3,r3,r10,lsl#24
eor r4,r4,r9,lsl#24
eor r3,r3,r9,lsr#7
eor r4,r4,r10,lsr#7
eor r3,r3,r10,lsl#25
@ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
@ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
@ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
mov r9,r11,lsr#19
mov r10,r12,lsr#19
eor r9,r9,r12,lsl#13
eor r10,r10,r11,lsl#13
eor r9,r9,r12,lsr#29
eor r10,r10,r11,lsr#29
eor r9,r9,r11,lsl#3
eor r10,r10,r12,lsl#3
eor r9,r9,r11,lsr#6
eor r10,r10,r12,lsr#6
ldr r11,[sp,#120+0]
eor r9,r9,r12,lsl#26
ldr r12,[sp,#120+4]
adds r3,r3,r9
ldr r9,[sp,#192+0]
adc r4,r4,r10
ldr r10,[sp,#192+4]
adds r3,r3,r11
adc r4,r4,r12
adds r3,r3,r9
adc r4,r4,r10
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#if __ARM_ARCH>=7
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
bic r14,r14,#1
ldr r3,[sp,#8+0]
ldr r4,[sp,#8+4]
ldr r9, [r0,#0+LO]
ldr r10, [r0,#0+HI]
ldr r11, [r0,#8+LO]
ldr r12, [r0,#8+HI]
adds r9,r5,r9
str r9, [r0,#0+LO]
adc r10,r6,r10
str r10, [r0,#0+HI]
adds r11,r3,r11
str r11, [r0,#8+LO]
adc r12,r4,r12
str r12, [r0,#8+HI]
ldr r5,[sp,#16+0]
ldr r6,[sp,#16+4]
ldr r3,[sp,#24+0]
ldr r4,[sp,#24+4]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
adds r9,r5,r9
str r9, [r0,#16+LO]
adc r10,r6,r10
str r10, [r0,#16+HI]
adds r11,r3,r11
str r11, [r0,#24+LO]
adc r12,r4,r12
str r12, [r0,#24+HI]
ldr r3,[sp,#40+0]
ldr r4,[sp,#40+4]
ldr r9, [r0,#32+LO]
ldr r10, [r0,#32+HI]
ldr r11, [r0,#40+LO]
ldr r12, [r0,#40+HI]
adds r7,r7,r9
str r7,[r0,#32+LO]
adc r8,r8,r10
str r8,[r0,#32+HI]
adds r11,r3,r11
str r11, [r0,#40+LO]
adc r12,r4,r12
str r12, [r0,#40+HI]
ldr r5,[sp,#48+0]
ldr r6,[sp,#48+4]
ldr r3,[sp,#56+0]
ldr r4,[sp,#56+4]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
adds r9,r5,r9
str r9, [r0,#48+LO]
adc r10,r6,r10
str r10, [r0,#48+HI]
adds r11,r3,r11
str r11, [r0,#56+LO]
adc r12,r4,r12
str r12, [r0,#56+HI]
add sp,sp,#640
sub r14,r14,#640
teq r1,r2
bne .Loop
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl sha512_block_data_order_neon
.hidden sha512_block_data_order_neon
.type sha512_block_data_order_neon,%function
.align 4
sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
VFP_ABI_PUSH
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
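@ Editorial note (hedged): d16-d23 hold the eight hash words a..h. Each
@ vshr.u64/vsli.64 pair below builds one 64-bit rotate; vbsl d29,f,g with
@ d29 preloaded from e computes Ch(e,f,g) = (e&f)|(~e&g) as one bit-select,
@ and vbsl d30,c,b with d30 = a^b does the same for Maj(a,b,c).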
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
mov r12,#4
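@ Editorial note (hedged): r12 counts four passes of the body below, i.e.
@ 4 x 16 = 64 rounds covering i = 16..79; q0-q7 hold the 16-word message
@ schedule, updated two words per step (vext picks X[i+1] and X[i+9], the
@ vshr/vsli pairs form sigma0 and sigma1).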
.L16_79_neon:
subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
vsli.64 q12,q7,#45
vext.8 q14,q0,q1,#8 @ X[i+1]
vsli.64 q13,q7,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q4,q5,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
vsli.64 q12,q0,#45
vext.8 q14,q1,q2,#8 @ X[i+1]
vsli.64 q13,q0,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q5,q6,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
vsli.64 q12,q1,#45
vext.8 q14,q2,q3,#8 @ X[i+1]
vsli.64 q13,q1,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q6,q7,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
vsli.64 q12,q2,#45
vext.8 q14,q3,q4,#8 @ X[i+1]
vsli.64 q13,q2,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q7,q0,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
vsli.64 q12,q3,#45
vext.8 q14,q4,q5,#8 @ X[i+1]
vsli.64 q13,q3,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q0,q1,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
vsli.64 q12,q4,#45
vext.8 q14,q5,q6,#8 @ X[i+1]
vsli.64 q13,q4,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q1,q2,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
vsli.64 q12,q5,#45
vext.8 q14,q6,q7,#8 @ X[i+1]
vsli.64 q13,q5,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q2,q3,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
vsli.64 q12,q6,#45
vext.8 q14,q7,q0,#8 @ X[i+1]
vsli.64 q13,q6,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q3,q4,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
bne .L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne .Loop_neon
VFP_ABI_POP
bx lr @ .word 0xe12fff1e
.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
t3hw00t/ARW | 24,471 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aes-gcm-avx2-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.section __DATA,__const
.p2align 4
L$bswap_mask:
.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
L$gfpoly:
.quad 1, 0xc200000000000000
L$gfpoly_and_internal_carrybit:
.quad 1, 0xc200000000000001
.p2align 5
L$ctr_pattern:
.quad 0, 0
.quad 1, 0
L$inc_2blocks:
.quad 2, 0
.quad 2, 0
.text
.globl _gcm_init_vpclmulqdq_avx2
.private_extern _gcm_init_vpclmulqdq_avx2
.p2align 5
_gcm_init_vpclmulqdq_avx2:
_CET_ENDBR
vpshufd $0x4e,(%rsi),%xmm3
vpshufd $0xd3,%xmm3,%xmm0
vpsrad $31,%xmm0,%xmm0
vpaddq %xmm3,%xmm3,%xmm3
vpand L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
vpxor %xmm0,%xmm3,%xmm3
vbroadcasti128 L$gfpoly(%rip),%ymm6
vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1
vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2
vpshufd $0x4e,%xmm0,%xmm0
vpxor %xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm1,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm5,%xmm5
vpxor %xmm0,%xmm5,%xmm5
vinserti128 $1,%xmm3,%ymm5,%ymm3
vinserti128 $1,%xmm5,%ymm5,%ymm5
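// Editorial note (hedged): the .byte runs below appear to be hand-encoded
// VEX instructions emitted as raw bytes, presumably for assemblers without
// the 256-bit forms; the prefixes decode as vpclmulqdq (0f3a 44) and
// vaesenc/vaesenclast (0f38 dc/dd) on ymm operands.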
.byte 0xc4,0xe3,0x65,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x65,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x65,0x44,0xe5,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
vmovdqu %ymm3,96(%rdi)
vmovdqu %ymm4,64(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128+32(%rdi)
.byte 0xc4,0xe3,0x5d,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x5d,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x5d,0x44,0xdd,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm3,%ymm3
vpxor %ymm0,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xc5,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcd,0x01
.byte 0xc4,0xe3,0x65,0x44,0xd5,0x10
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x4d,0x44,0xd0,0x01
vpshufd $0x4e,%ymm0,%ymm0
vpxor %ymm0,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
.byte 0xc4,0xe3,0x65,0x44,0xe5,0x11
.byte 0xc4,0xe3,0x4d,0x44,0xc1,0x01
vpshufd $0x4e,%ymm1,%ymm1
vpxor %ymm1,%ymm4,%ymm4
vpxor %ymm0,%ymm4,%ymm4
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,0(%rdi)
vpunpcklqdq %ymm3,%ymm4,%ymm0
vpunpckhqdq %ymm3,%ymm4,%ymm1
vpxor %ymm1,%ymm0,%ymm0
vmovdqu %ymm0,128(%rdi)
vzeroupper
ret
.globl _gcm_ghash_vpclmulqdq_avx2_1
.private_extern _gcm_ghash_vpclmulqdq_avx2_1
.p2align 5
_gcm_ghash_vpclmulqdq_avx2_1:
_CET_ENDBR
vmovdqu L$bswap_mask(%rip),%xmm6
vmovdqu L$gfpoly(%rip),%xmm7
vmovdqu (%rdi),%xmm5
vpshufb %xmm6,%xmm5,%xmm5
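// Editorial note (hedged): GHASH works on bit-reflected field elements, so
// the accumulator is byte-swapped via L$bswap_mask on load (above) and
// swapped back before the store at L$ghash_done; %xmm7 holds the L$gfpoly
// reduction constant used by the vpclmulqdq $0x01 reduction steps below.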
L$ghash_lastblock:
vmovdqu (%rdx),%xmm0
vpshufb %xmm6,%xmm0,%xmm0
vpxor %xmm0,%xmm5,%xmm5
vmovdqu 128-16(%rsi),%xmm0
vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
vpshufd $0x4e,%xmm1,%xmm1
vpxor %xmm1,%xmm2,%xmm2
vpxor %xmm3,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
vpshufd $0x4e,%xmm2,%xmm2
vpxor %xmm2,%xmm5,%xmm5
vpxor %xmm1,%xmm5,%xmm5
L$ghash_done:
vpshufb %xmm6,%xmm5,%xmm5
vmovdqu %xmm5,(%rdi)
vzeroupper
ret
.globl _aes_gcm_enc_update_vaes_avx2
.private_extern _aes_gcm_enc_update_vaes_avx2
.p2align 5
_aes_gcm_enc_update_vaes_avx2:
_CET_ENDBR
pushq %r12
movq 16(%rsp),%r12
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+8(%rip)
#endif
vbroadcasti128 L$bswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe L$crypt_loop_4x_done__func1
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
leaq 16(%rcx),%rax
L$vaesenc_loop_first_4_vecs__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_first_4_vecs__func1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
jbe L$ghash_last_ciphertext_4x__func1
.p2align 4
L$crypt_loop_4x__func1:
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
cmpl $24,%r10d
jl L$aes128__func1
je L$aes192__func1
vbroadcasti128 -208(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -192(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes192__func1:
vbroadcasti128 -176(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -160(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes128__func1:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vbroadcasti128 -144(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -128(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
addq $-128,%rdx
cmpq $127,%rdx
ja L$crypt_loop_4x__func1
L$ghash_last_ciphertext_4x__func1:
vmovdqu 0(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vmovdqu 32(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 64(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rsi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
subq $-128,%rsi
L$crypt_loop_4x_done__func1:
testq %rdx,%rdx
jz L$done__func1
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb L$lessthan64bytes__func1
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_1__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_1__func1
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %ymm0,%ymm13,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
.byte 0xc4,0xe3,0x1d,0x44,0xea,0x00
.byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x11
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz L$reduce__func1
vpxor %xmm1,%xmm1,%xmm1
L$lessthan64bytes__func1:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_2__func1:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_2__func1
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
cmpq $32,%rdx
jb L$xor_one_block__func1
je L$xor_two_blocks__func1
L$xor_three_blocks__func1:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpshufb %xmm0,%xmm13,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp L$ghash_mul_one_vec_unreduced__func1
L$xor_two_blocks__func1:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm12,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp L$ghash_mul_one_vec_unreduced__func1
L$xor_one_block__func1:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
L$ghash_mul_one_vec_unreduced__func1:
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11
vpxor %ymm4,%ymm7,%ymm7
L$reduce__func1:
vbroadcasti128 L$gfpoly(%rip),%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
.byte 0xc4,0xe3,0x6d,0x44,0xde,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
L$done__func1:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
ret
.globl _aes_gcm_dec_update_vaes_avx2
.private_extern _aes_gcm_dec_update_vaes_avx2
.p2align 5
_aes_gcm_dec_update_vaes_avx2:
_CET_ENDBR
pushq %r12
movq 16(%rsp),%r12
vbroadcasti128 L$bswap_mask(%rip),%ymm0
vmovdqu (%r12),%xmm1
vpshufb %xmm0,%xmm1,%xmm1
vbroadcasti128 (%r8),%ymm11
vpshufb %ymm0,%ymm11,%ymm11
movl 240(%rcx),%r10d
leal -20(,%r10,4),%r10d
leaq 96(%rcx,%r10,4),%r11
vbroadcasti128 (%rcx),%ymm9
vbroadcasti128 (%r11),%ymm10
vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11
cmpq $127,%rdx
jbe L$crypt_loop_4x_done__func2
vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
.p2align 4
L$crypt_loop_4x__func2:
vmovdqu L$inc_2blocks(%rip),%ymm2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm14
vpaddd %ymm2,%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm15
vpaddd %ymm2,%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
vpxor %ymm9,%ymm14,%ymm14
vpxor %ymm9,%ymm15,%ymm15
cmpl $24,%r10d
jl L$aes128__func2
je L$aes192__func2
vbroadcasti128 -208(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -192(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes192__func2:
vbroadcasti128 -176(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -160(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
L$aes128__func2:
prefetcht0 512(%rdi)
prefetcht0 512+64(%rdi)
vmovdqu 0(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 0(%r9),%ymm4
vpxor %ymm1,%ymm3,%ymm3
.byte 0xc4,0xe3,0x65,0x44,0xec,0x00
.byte 0xc4,0xe3,0x65,0x44,0xcc,0x11
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xf7,0x00
vbroadcasti128 -144(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vbroadcasti128 -128(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 32(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 32(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xd7,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -112(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 64(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vmovdqu 64(%r9),%ymm4
vbroadcasti128 -96(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -80(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x00
vpxor %ymm2,%ymm6,%ymm6
vmovdqu 96(%rdi),%ymm3
vpshufb %ymm0,%ymm3,%ymm3
vbroadcasti128 -64(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vmovdqu 96(%r9),%ymm4
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x00
vpxor %ymm2,%ymm5,%ymm5
.byte 0xc4,0xe3,0x65,0x44,0xd4,0x11
vpxor %ymm2,%ymm1,%ymm1
vpunpckhqdq %ymm3,%ymm3,%ymm2
vpxor %ymm3,%ymm2,%ymm2
.byte 0xc4,0xc3,0x6d,0x44,0xd0,0x10
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -48(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm1,%ymm6,%ymm6
vbroadcasti128 L$gfpoly(%rip),%ymm4
.byte 0xc4,0xe3,0x5d,0x44,0xd5,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm2,%ymm6,%ymm6
vbroadcasti128 -32(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
.byte 0xc4,0xe3,0x5d,0x44,0xd6,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm1,%ymm1
vpxor %ymm2,%ymm1,%ymm1
vbroadcasti128 -16(%r11),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
.byte 0xc4,0x62,0x0d,0xdc,0xf2
.byte 0xc4,0x62,0x05,0xdc,0xfa
vextracti128 $1,%ymm1,%xmm2
vpxor %xmm2,%xmm1,%xmm1
vpxor 0(%rdi),%ymm10,%ymm2
vpxor 32(%rdi),%ymm10,%ymm3
vpxor 64(%rdi),%ymm10,%ymm5
vpxor 96(%rdi),%ymm10,%ymm6
.byte 0xc4,0x62,0x1d,0xdd,0xe2
.byte 0xc4,0x62,0x15,0xdd,0xeb
.byte 0xc4,0x62,0x0d,0xdd,0xf5
.byte 0xc4,0x62,0x05,0xdd,0xfe
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vmovdqu %ymm14,64(%rsi)
vmovdqu %ymm15,96(%rsi)
subq $-128,%rdi
subq $-128,%rsi
addq $-128,%rdx
cmpq $127,%rdx
ja L$crypt_loop_4x__func2
L$crypt_loop_4x_done__func2:
testq %rdx,%rdx
jz L$done__func2
leaq 128(%r9),%r8
subq %rdx,%r8
vpxor %xmm5,%xmm5,%xmm5
vpxor %xmm6,%xmm6,%xmm6
vpxor %xmm7,%xmm7,%xmm7
cmpq $64,%rdx
jb L$lessthan64bytes__func2
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_1__func2:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_1__func2
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%ymm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %ymm3,%ymm13,%ymm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %ymm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %ymm0,%ymm3,%ymm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%ymm3
.byte 0xc4,0xe3,0x1d,0x44,0xea,0x00
.byte 0xc4,0xe3,0x1d,0x44,0xf2,0x01
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xfa,0x11
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x15,0x44,0xe3,0x11
vpxor %ymm4,%ymm7,%ymm7
addq $64,%r8
addq $64,%rdi
addq $64,%rsi
subq $64,%rdx
jz L$reduce__func2
vpxor %xmm1,%xmm1,%xmm1
L$lessthan64bytes__func2:
vpshufb %ymm0,%ymm11,%ymm12
vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11
vpshufb %ymm0,%ymm11,%ymm13
vpxor %ymm9,%ymm12,%ymm12
vpxor %ymm9,%ymm13,%ymm13
leaq 16(%rcx),%rax
L$vaesenc_loop_tail_2__func2:
vbroadcasti128 (%rax),%ymm2
.byte 0xc4,0x62,0x1d,0xdc,0xe2
.byte 0xc4,0x62,0x15,0xdc,0xea
addq $16,%rax
cmpq %rax,%r11
jne L$vaesenc_loop_tail_2__func2
.byte 0xc4,0x42,0x1d,0xdd,0xe2
.byte 0xc4,0x42,0x15,0xdd,0xea
cmpq $32,%rdx
jb L$xor_one_block__func2
je L$xor_two_blocks__func2
L$xor_three_blocks__func2:
vmovdqu 0(%rdi),%ymm2
vmovdqu 32(%rdi),%xmm3
vpxor %ymm2,%ymm12,%ymm12
vpxor %xmm3,%xmm13,%xmm13
vmovdqu %ymm12,0(%rsi)
vmovdqu %xmm13,32(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpshufb %xmm0,%xmm3,%xmm13
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
vmovdqu 32(%r8),%xmm3
vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm5,%ymm5
vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm6,%ymm6
vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
vpxor %ymm4,%ymm7,%ymm7
jmp L$ghash_mul_one_vec_unreduced__func2
L$xor_two_blocks__func2:
vmovdqu (%rdi),%ymm2
vpxor %ymm2,%ymm12,%ymm12
vmovdqu %ymm12,(%rsi)
vpshufb %ymm0,%ymm2,%ymm12
vpxor %ymm1,%ymm12,%ymm12
vmovdqu (%r8),%ymm2
jmp L$ghash_mul_one_vec_unreduced__func2
L$xor_one_block__func2:
vmovdqu (%rdi),%xmm2
vpxor %xmm2,%xmm12,%xmm12
vmovdqu %xmm12,(%rsi)
vpshufb %xmm0,%xmm2,%xmm12
vpxor %xmm1,%xmm12,%xmm12
vmovdqu (%r8),%xmm2
L$ghash_mul_one_vec_unreduced__func2:
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x00
vpxor %ymm4,%ymm5,%ymm5
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x01
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x10
vpxor %ymm4,%ymm6,%ymm6
.byte 0xc4,0xe3,0x1d,0x44,0xe2,0x11
vpxor %ymm4,%ymm7,%ymm7
L$reduce__func2:
vbroadcasti128 L$gfpoly(%rip),%ymm2
.byte 0xc4,0xe3,0x6d,0x44,0xdd,0x01
vpshufd $0x4e,%ymm5,%ymm5
vpxor %ymm5,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
.byte 0xc4,0xe3,0x6d,0x44,0xde,0x01
vpshufd $0x4e,%ymm6,%ymm6
vpxor %ymm6,%ymm7,%ymm7
vpxor %ymm3,%ymm7,%ymm7
vextracti128 $1,%ymm7,%xmm1
vpxor %xmm7,%xmm1,%xmm1
L$done__func2:
vpshufb %xmm0,%xmm1,%xmm1
vmovdqu %xmm1,(%r12)
vzeroupper
popq %r12
ret
#endif
|
t3hw00t/ARW | 20,965 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/aesni-x86_64-elf.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop2:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop2
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.cfi_endproc
.size _aesni_encrypt2,.-_aesni_encrypt2
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop3
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.cfi_endproc
.size _aesni_encrypt3,.-_aesni_encrypt3
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 0x0f,0x1f,0x00
addq $16,%rax
.Lenc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop4
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.cfi_endproc
.size _aesni_encrypt4,.-_aesni_encrypt4
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop6_enter
.align 16
.Lenc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.Lenc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.cfi_endproc
.size _aesni_encrypt6,.-_aesni_encrypt6
.type _aesni_encrypt8,@function
.align 16
_aesni_encrypt8:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,209
pxor %xmm0,%xmm7
pxor %xmm0,%xmm8
.byte 102,15,56,220,217
pxor %xmm0,%xmm9
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop8_inner
.align 16
.Lenc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.Lenc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.Lenc_loop8_enter:
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop8
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
.byte 102,68,15,56,221,192
.byte 102,68,15,56,221,200
ret
.cfi_endproc
.size _aesni_encrypt8,.-_aesni_encrypt8
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit(%rip)
#endif
cmpq $1,%rdx
jne .Lctr32_bulk
movups (%r8),%xmm2
movups (%rdi),%xmm3
movl 240(%rcx),%edx
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
.Loop_enc1_1:
.byte 102,15,56,220,209
decl %edx
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
jnz .Loop_enc1_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorps %xmm3,%xmm2
pxor %xmm3,%xmm3
movups %xmm2,(%rsi)
xorps %xmm2,%xmm2
jmp .Lctr32_epilogue
.align 16
.Lctr32_bulk:
leaq (%rsp),%r11
.cfi_def_cfa_register %r11
pushq %rbp
.cfi_offset %rbp,-16
subq $128,%rsp
andq $-16,%rsp
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
movl 12(%r8),%r8d
pxor %xmm0,%xmm2
movl 12(%rcx),%ebp
movdqa %xmm2,0(%rsp)
bswapl %r8d
movdqa %xmm2,%xmm3
movdqa %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
movq %rdx,%r10
movdqa %xmm2,112(%rsp)
leaq 1(%r8),%rax
leaq 2(%r8),%rdx
bswapl %eax
bswapl %edx
xorl %ebp,%eax
xorl %ebp,%edx
.byte 102,15,58,34,216,3
leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
.byte 102,15,58,34,226,3
bswapl %eax
movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
xorl %ebp,%eax
bswapl %r10d
.byte 102,15,58,34,232,3
xorl %ebp,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
movl 240(%rcx),%eax
xorl %ebp,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
xorl %ebp,%r10d
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
xorl %ebp,%r9d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
movdqa 64(%rsp),%xmm6
movdqa 80(%rsp),%xmm7
cmpq $8,%rdx
jb .Lctr32_tail
leaq 128(%rcx),%rcx
subq $8,%rdx
jmp .Lctr32_loop8
.align 32
.Lctr32_loop8:
addl $8,%r8d
movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
movl %r8d,%r9d
movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
bswapl %r9d
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %ebp,%r9d
nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
xorl %ebp,%r9d
movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
jb .Lctr32_enc_done
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 176-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 192-128(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 208-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
jmp .Lctr32_enc_done
.align 16
.Lctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
movdqu 32(%rdi),%xmm12
pxor %xmm0,%xmm11
movdqu 48(%rdi),%xmm13
pxor %xmm0,%xmm12
movdqu 64(%rdi),%xmm14
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
prefetcht0 448(%rdi)
prefetcht0 512(%rdi)
pxor %xmm0,%xmm15
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
movups 16-128(%rcx),%xmm1
.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
movdqa %xmm0,%xmm7
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
subq $8,%rdx
jnc .Lctr32_loop8
addq $8,%rdx
jz .Lctr32_done
leaq -128(%rcx),%rcx
.Lctr32_tail:
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb .Lctr32_loop3
je .Lctr32_loop4
shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
leaq 32-16(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,225
addq $16,%rax
movups (%rdi),%xmm10
.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
movups 32(%rdi),%xmm12
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
call .Lenc_loop8_enter
movdqu 48(%rdi),%xmm13
pxor %xmm10,%xmm2
movdqu 64(%rdi),%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
pxor %xmm10,%xmm6
movdqu %xmm5,48(%rsi)
movdqu %xmm6,64(%rsi)
cmpq $6,%rdx
jb .Lctr32_done
movups 80(%rdi),%xmm11
xorps %xmm11,%xmm7
movups %xmm7,80(%rsi)
je .Lctr32_done
movups 96(%rdi),%xmm12
xorps %xmm12,%xmm8
movups %xmm8,96(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
jnz .Lctr32_loop4
.byte 102,15,56,221,209
.byte 102,15,56,221,217
movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
.byte 102,15,56,221,233
movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm4,32(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm5,48(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
jnz .Lctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
.byte 102,15,56,221,225
movups (%rdi),%xmm10
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
cmpq $2,%rdx
jb .Lctr32_done
movups 16(%rdi),%xmm11
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
je .Lctr32_done
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
.Lctr32_done:
xorps %xmm0,%xmm0
xorl %ebp,%ebp
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movaps %xmm0,0(%rsp)
pxor %xmm8,%xmm8
movaps %xmm0,16(%rsp)
pxor %xmm9,%xmm9
movaps %xmm0,32(%rsp)
pxor %xmm10,%xmm10
movaps %xmm0,48(%rsp)
pxor %xmm11,%xmm11
movaps %xmm0,64(%rsp)
pxor %xmm12,%xmm12
movaps %xmm0,80(%rsp)
pxor %xmm13,%xmm13
movaps %xmm0,96(%rsp)
pxor %xmm14,%xmm14
movaps %xmm0,112(%rsp)
pxor %xmm15,%xmm15
movq -8(%r11),%rbp
.cfi_restore %rbp
leaq (%r11),%rsp
.cfi_def_cfa_register %rsp
.Lctr32_epilogue:
ret
.cfi_endproc
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds
cmpl $128,%esi
jne .Lbad_keybits
.L10rounds:
movl $9,%esi
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call .Lkey_expansion_128_cold
.byte 102,15,58,223,200,2
call .Lkey_expansion_128
.byte 102,15,58,223,200,4
call .Lkey_expansion_128
.byte 102,15,58,223,200,8
call .Lkey_expansion_128
.byte 102,15,58,223,200,16
call .Lkey_expansion_128
.byte 102,15,58,223,200,32
call .Lkey_expansion_128
.byte 102,15,58,223,200,64
call .Lkey_expansion_128
.byte 102,15,58,223,200,128
call .Lkey_expansion_128
.byte 102,15,58,223,200,27
call .Lkey_expansion_128
.byte 102,15,58,223,200,54
call .Lkey_expansion_128
movups %xmm0,(%rax)
movl %esi,80(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret
.align 16
.L14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
call .Lkey_expansion_256a_cold
.byte 102,15,58,223,200,1
call .Lkey_expansion_256b
.byte 102,15,58,223,202,2
call .Lkey_expansion_256a
.byte 102,15,58,223,200,2
call .Lkey_expansion_256b
.byte 102,15,58,223,202,4
call .Lkey_expansion_256a
.byte 102,15,58,223,200,4
call .Lkey_expansion_256b
.byte 102,15,58,223,202,8
call .Lkey_expansion_256a
.byte 102,15,58,223,200,8
call .Lkey_expansion_256b
.byte 102,15,58,223,202,16
call .Lkey_expansion_256a
.byte 102,15,58,223,200,16
call .Lkey_expansion_256b
.byte 102,15,58,223,202,32
call .Lkey_expansion_256a
.byte 102,15,58,223,200,32
call .Lkey_expansion_256b
.byte 102,15,58,223,202,64
call .Lkey_expansion_256a
movups %xmm0,(%rax)
movl %esi,16(%rax)
xorq %rax,%rax
jmp .Lenc_key_ret
.align 16
.Lbad_keybits:
movq $-2,%rax
.Lenc_key_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.align 16
.Lkey_expansion_128:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256a:
.cfi_startproc
movups %xmm2,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256b:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_base,.-aes_hw_set_encrypt_key_base
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds_alt
cmpl $128,%esi
jne .Lbad_keybits_alt
movl $9,%esi
movdqa .Lkey_rotate(%rip),%xmm5
movl $8,%r10d
movdqa .Lkey_rcon1(%rip),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,(%rdx)
jmp .Loop_key128
.align 16
.Loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leaq 16(%rax),%rax
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%rax)
movdqa %xmm0,%xmm2
decl %r10d
jnz .Loop_key128
movdqa .Lkey_rcon1b(%rip),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%rax)
movl %esi,96(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.L14rounds_alt:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movdqa .Lkey_rotate(%rip),%xmm5
movdqa .Lkey_rcon1(%rip),%xmm4
movl $7,%r10d
movdqu %xmm0,0(%rdx)
movdqa %xmm2,%xmm1
movdqu %xmm2,16(%rdx)
jmp .Loop_key256
.align 16
.Loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
decl %r10d
jz .Ldone_key256
pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%rax)
leaq 32(%rax),%rax
movdqa %xmm2,%xmm1
jmp .Loop_key256
.Ldone_key256:
movl %esi,16(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.Lbad_keybits_alt:
movq $-2,%rax
.Lenc_key_ret_alt:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_alt,.-aes_hw_set_encrypt_key_alt
.section .rodata
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lincrement32:
.long 6,6,6,0
.Lincrement64:
.long 1,0,0,0
.Lincrement1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Lkey_rotate:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
.Lkey_rotate192:
.long 0x04070605,0x04070605,0x04070605,0x04070605
.Lkey_rcon1:
.long 1,1,1,1
.Lkey_rcon1b:
.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.text
#endif
|
t3hw00t/ARW | 11,047 | .cargo-codex/registry/src/index.crates.io-1949cf8c6b5b557f/ring-0.17.14/pregenerated/vpaes-x86_64-macosx.S | // This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.p2align 4
_vpaes_encrypt_core:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa L$k_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa L$k_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq L$k_mc_backward(%rip),%r10
jmp L$enc_entry
.p2align 4
L$enc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
L$enc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz L$enc_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.p2align 4
_vpaes_encrypt_core_2x:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa L$k_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq L$k_mc_backward(%rip),%r10
jmp L$enc2x_entry
.p2align 4
L$enc2x_loop:
movdqa L$k_sb1(%rip),%xmm4
movdqa L$k_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa L$k_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa L$k_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
L$enc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz L$enc2x_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
ret
.p2align 4
_vpaes_schedule_core:
call _vpaes_preheat
movdqa L$k_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
movdqa %xmm0,%xmm3
leaq L$k_ipt(%rip),%r11
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq L$k_sr(%rip),%r10
movdqu %xmm0,(%rdx)
L$schedule_go:
cmpl $192,%esi
ja L$schedule_256
L$schedule_128:
movl $10,%esi
L$oop_schedule_128:
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
jmp L$oop_schedule_128
.p2align 4
L$schedule_256:
movdqu 16(%rdi),%xmm0
call _vpaes_schedule_transform
movl $7,%esi
L$oop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp L$oop_schedule_256
.p2align 4
L$schedule_mangle_last:
leaq L$k_deskew(%rip),%r11
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq L$k_opt(%rip),%r11
addq $32,%rdx
L$schedule_mangle_last_dec:
addq $-16,%rdx
pxor L$k_s63(%rip),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.p2align 4
_vpaes_schedule_round:
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor L$k_s63(%rip),%xmm7
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.p2align 4
_vpaes_schedule_transform:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.p2align 4
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa L$k_mc_forward(%rip),%xmm5
addq $16,%rdx
pxor L$k_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
L$schedule_mangle_both:
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
ret
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.p2align 4
_vpaes_set_encrypt_key:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+5(%rip)
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
movl %eax,240(%rdx)
movl $0,%ecx
movl $0x30,%r8d
call _vpaes_schedule_core
xorl %eax,%eax
ret
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.p2align 4
_vpaes_ctr32_encrypt_blocks:
_CET_ENDBR
xchgq %rcx,%rdx
testq %rcx,%rcx
jz L$ctr32_abort
movdqu (%r8),%xmm0
movdqa L$ctr_add_one(%rip),%xmm8
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb L$rev_ctr(%rip),%xmm6
testq $1,%rcx
jz L$ctr32_prep_loop
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz L$ctr32_done
L$ctr32_prep_loop:
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
L$ctr32_loop:
movdqa L$rev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa L$ctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz L$ctr32_loop
L$ctr32_done:
L$ctr32_abort:
ret
.p2align 4
_vpaes_preheat:
leaq L$k_s0F(%rip),%r10
movdqa -32(%r10),%xmm10
movdqa -16(%r10),%xmm11
movdqa 0(%r10),%xmm9
movdqa 48(%r10),%xmm13
movdqa 64(%r10),%xmm12
movdqa 80(%r10),%xmm15
movdqa 96(%r10),%xmm14
ret
.section __DATA,__const
.p2align 6
_vpaes_consts:
L$k_inv:
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
L$k_s0F:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
L$k_ipt:
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
L$k_sb1:
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
L$k_sb2:
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
L$k_sbo:
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
L$k_mc_forward:
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
L$k_mc_backward:
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
L$k_sr:
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
L$k_rcon:
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
L$k_s63:
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
L$k_opt:
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
L$k_deskew:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
L$rev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
L$ctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
L$ctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.p2align 6
.text
#endif
|