repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
XboxDev/nxdk | 3,740 | lib/xboxrt/c_runtime/_aullrem.s | // SPDX-License-Identifier: MIT OR NCSA
// SPDX-FileCopyrightText: 2008 Stephen Canon
// SPDX-FileCopyrightText: 2018-2021 Stefan Schmidt
// du_int __umoddi3(du_int a, du_int b);
// result = remainder of a / b.
// both inputs and the output are 64-bit unsigned integers.
// This will do whatever the underlying hardware is set to do on division by zero.
// No other exceptions are generated, as the divide cannot overflow.
//
// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
// currently possible via simulation of integer divides on the x87 unit.
// Modified for stdcall calling convention for use in nxdk
.include "prelude.s.inc"
safeseh_prelude
.text
.balign 4
.globl __aullrem
// unsigned 64-bit remainder, stdcall: arguments on the stack, callee pops
// them ("ret $0x10" removes the 16 bytes of arguments). After the initial
// "pushl %ebx" the layout is: 8(%esp)=a_lo, 12(%esp)=a_hi,
// 16(%esp)=b_lo, 20(%esp)=b_hi. The 64-bit result is returned in EDX:EAX.
__aullrem:
pushl %ebx
movl 20(%esp), %ebx // Find the index i of the leading bit in b.
bsrl %ebx, %ecx // If the high word of b is zero, jump to
jz 9f // the code to handle that special case [9].
/* High word of b is known to be non-zero on this branch */
movl 16(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
shrl %cl, %eax // Practically, this means that bhi is given by:
shrl %eax //
notl %ecx // bhi = (high word of b) << (31 - i) |
shll %cl, %ebx // (low word of b) >> (1 + i)
orl %eax, %ebx //
movl 12(%esp), %edx // Load the high and low words of a, and jump
movl 8(%esp), %eax // to [2] if the high word is at least bhi
cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
jae 2f
/* High word of a is less than bhi = (b >> (1 + i)) on this branch */
divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bs*qs + r
pushl %edi // (argument offsets below grow by 4 from here on)
notl %ecx
shrl %eax
shrl %cl, %eax // q = qs >> (1 + i)
movl %eax, %edi
mull 20(%esp) // q*blo
movl 12(%esp), %ebx
movl 16(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 24(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
jnc 1f // if positive, this is the result.
addl 20(%esp), %ebx // otherwise
adcl 24(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
1: movl %ebx, %eax
movl %ecx, %edx
popl %edi
popl %ebx
ret $0x10
2: /* High word of a is greater than or equal to (b >> (1 + i)) on this branch */
subl %ebx, %edx // subtract bhi from ahi so that divide will not
divl %ebx // overflow, and find q and r such that
//
// ahi:alo = (1:q)*bhi + r
//
// Note that q is a number in (31-i).(1+i)
// fix point.
pushl %edi
notl %ecx
shrl %eax
orl $0x80000000, %eax
shrl %cl, %eax // q = (1:qs) >> (1 + i)
movl %eax, %edi
mull 20(%esp) // q*blo
movl 12(%esp), %ebx
movl 16(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 24(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
jnc 3f // if positive, this is the result.
addl 20(%esp), %ebx // otherwise
adcl 24(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
3: movl %ebx, %eax
movl %ecx, %edx
popl %edi
popl %ebx
ret $0x10
9: /* High word of b is zero on this branch */
movl 12(%esp), %eax // Find qhi and rhi such that
movl 16(%esp), %ecx //
xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
divl %ecx //
movl %eax, %ebx //
movl 8(%esp), %eax // Find rlo such that
divl %ecx //
movl %edx, %eax // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
popl %ebx //
xorl %edx, %edx // and return 0:rlo
ret $0x10 //
|
XboxDev/nxdk | 3,517 | lib/xboxrt/c_runtime/_aulldiv.s | // SPDX-License-Identifier: MIT OR NCSA
// SPDX-FileCopyrightText: 2008 Stephen Canon
// SPDX-FileCopyrightText: 2018-2021 Stefan Schmidt
// du_int __udivdi3(du_int a, du_int b);
// result = a / b.
// both inputs and the output are 64-bit unsigned integers.
// This will do whatever the underlying hardware is set to do on division by zero.
// No other exceptions are generated, as the divide cannot overflow.
//
// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
// currently possible via simulation of integer divides on the x87 unit.
// Modified for stdcall calling convention for use in nxdk
.include "prelude.s.inc"
safeseh_prelude
.text
.balign 4
.globl __aulldiv
// unsigned 64-bit divide, stdcall: arguments on the stack, callee pops
// them ("ret $0x10" removes the 16 bytes of arguments). After the initial
// "pushl %ebx" the layout is: 8(%esp)=a_lo, 12(%esp)=a_hi,
// 16(%esp)=b_lo, 20(%esp)=b_hi. The 64-bit quotient is returned in EDX:EAX.
__aulldiv:
pushl %ebx
movl 20(%esp), %ebx // Find the index i of the leading bit in b.
bsrl %ebx, %ecx // If the high word of b is zero, jump to
jz 9f // the code to handle that special case [9].
/* High word of b is known to be non-zero on this branch */
movl 16(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
shrl %cl, %eax // Practically, this means that bhi is given by:
shrl %eax //
notl %ecx // bhi = (high word of b) << (31 - i) |
shll %cl, %ebx // (low word of b) >> (1 + i)
orl %eax, %ebx //
movl 12(%esp), %edx // Load the high and low words of a, and jump
movl 8(%esp), %eax // to [1] if the high word is at least bhi
cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
jae 1f
/* High word of a is less than bhi = (b >> (1 + i)) on this branch */
divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bs*qs + r
pushl %edi // (argument offsets below grow by 4 from here on)
notl %ecx
shrl %eax
shrl %cl, %eax // q = qs >> (1 + i)
movl %eax, %edi
mull 20(%esp) // q*blo
movl 12(%esp), %ebx
movl 16(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 24(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
sbbl $0, %edi // decrement q if remainder is negative
xorl %edx, %edx
movl %edi, %eax
popl %edi
popl %ebx
ret $0x10
1: /* High word of a is greater than or equal to (b >> (1 + i)) on this branch */
subl %ebx, %edx // subtract bhi from ahi so that divide will not
divl %ebx // overflow, and find q and r such that
//
// ahi:alo = (1:q)*bhi + r
//
// Note that q is a number in (31-i).(1+i)
// fix point.
pushl %edi
notl %ecx
shrl %eax
orl $0x80000000, %eax
shrl %cl, %eax // q = (1:qs) >> (1 + i)
movl %eax, %edi
mull 20(%esp) // q*blo
movl 12(%esp), %ebx
movl 16(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 24(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
sbbl $0, %edi // decrement q if remainder is negative
xorl %edx, %edx
movl %edi, %eax
popl %edi
popl %ebx
ret $0x10
9: /* High word of b is zero on this branch */
movl 12(%esp), %eax // Find qhi and rhi such that
movl 16(%esp), %ecx //
xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
divl %ecx //
movl %eax, %ebx //
movl 8(%esp), %eax // Find qlo such that
divl %ecx //
movl %ebx, %edx // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
popl %ebx //
ret $0x10 // and return qhi:qlo
|
XboxDev/nxdk | 5,048 | lib/xboxrt/c_runtime/_alldiv.s | // SPDX-License-Identifier: MIT OR NCSA
// SPDX-FileCopyrightText: 2008 Stephen Canon
// SPDX-FileCopyrightText: 2018-2021 Stefan Schmidt
// di_int __divdi3(di_int a, di_int b);
// result = a / b.
// both inputs and the output are 64-bit signed integers.
// This will do whatever the underlying hardware is set to do on division by zero.
// No other exceptions are generated, as the divide cannot overflow.
//
// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
// currently possible via simulation of integer divides on the x87 unit.
// Modified for stdcall calling convention for use in nxdk
.include "prelude.s.inc"
safeseh_prelude
.text
.balign 4
.globl __alldiv
// signed 64-bit divide, stdcall (callee pops the 16 bytes of arguments).
// After the initial "pushl %esi" the layout is: 8(%esp)=a_lo,
// 12(%esp)=a_hi, 16(%esp)=b_lo, 20(%esp)=b_hi (shifted by 4 again after
// each later push). The 64-bit quotient is returned in EDX:EAX.
__alldiv:
/* This is currently implemented by wrapping the unsigned divide up in an absolute
value, then restoring the correct sign at the end of the computation. This could
certainly be improved upon. */
pushl %esi
movl 20(%esp), %edx // high word of b
movl 16(%esp), %eax // low word of b
movl %edx, %ecx
sarl $31, %ecx // (b < 0) ? -1 : 0
xorl %ecx, %eax
xorl %ecx, %edx // EDX:EAX = (b < 0) ? not(b) : b
subl %ecx, %eax
sbbl %ecx, %edx // EDX:EAX = abs(b)
movl %edx, 20(%esp)
movl %eax, 16(%esp) // store abs(b) back to stack
movl %ecx, %esi // set aside sign of b
movl 12(%esp), %edx // high word of a
movl 8(%esp), %eax // low word of a
movl %edx, %ecx
sarl $31, %ecx // (a < 0) ? -1 : 0
xorl %ecx, %eax
xorl %ecx, %edx // EDX:EAX = (a < 0) ? not(a) : a
subl %ecx, %eax
sbbl %ecx, %edx // EDX:EAX = abs(a)
movl %edx, 12(%esp)
movl %eax, 8(%esp) // store abs(a) back to stack
xorl %ecx, %esi // sign of result = (sign of a) ^ (sign of b)
pushl %ebx
movl 24(%esp), %ebx // Find the index i of the leading bit in b.
bsrl %ebx, %ecx // If the high word of b is zero, jump to
jz 9f // the code to handle that special case [9].
/* High word of b is known to be non-zero on this branch */
movl 20(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
shrl %cl, %eax // Practically, this means that bhi is given by:
shrl %eax //
notl %ecx // bhi = (high word of b) << (31 - i) |
shll %cl, %ebx // (low word of b) >> (1 + i)
orl %eax, %ebx //
movl 16(%esp), %edx // Load the high and low words of a, and jump
movl 12(%esp), %eax // to [1] if the high word is at least bhi
cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
jae 1f
/* High word of a is less than bhi = (b >> (1 + i)) on this branch */
divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bs*qs + r
pushl %edi
notl %ecx
shrl %eax
shrl %cl, %eax // q = qs >> (1 + i)
movl %eax, %edi
mull 24(%esp) // q*blo
movl 16(%esp), %ebx
movl 20(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 28(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
sbbl $0, %edi // decrement q if remainder is negative
xorl %edx, %edx
movl %edi, %eax
addl %esi, %eax // Restore correct sign to result
adcl %esi, %edx // ((x + s) ^ s with s = 0 or -1 negates conditionally)
xorl %esi, %eax
xorl %esi, %edx
popl %edi // Restore callee-save registers
popl %ebx
popl %esi
ret $0x10 // Return
1: /* High word of a is greater than or equal to (b >> (1 + i)) on this branch */
subl %ebx, %edx // subtract bhi from ahi so that divide will not
divl %ebx // overflow, and find q and r such that
//
// ahi:alo = (1:q)*bhi + r
//
// Note that q is a number in (31-i).(1+i)
// fix point.
pushl %edi
notl %ecx
shrl %eax
orl $0x80000000, %eax
shrl %cl, %eax // q = (1:qs) >> (1 + i)
movl %eax, %edi
mull 24(%esp) // q*blo
movl 16(%esp), %ebx
movl 20(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 28(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
sbbl $0, %edi // decrement q if remainder is negative
xorl %edx, %edx
movl %edi, %eax
addl %esi, %eax // Restore correct sign to result
adcl %esi, %edx
xorl %esi, %eax
xorl %esi, %edx
popl %edi // Restore callee-save registers
popl %ebx
popl %esi
ret $0x10 // Return
9: /* High word of b is zero on this branch */
movl 16(%esp), %eax // Find qhi and rhi such that
movl 20(%esp), %ecx //
xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
divl %ecx //
movl %eax, %ebx //
movl 12(%esp), %eax // Find qlo such that
divl %ecx //
movl %ebx, %edx // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
addl %esi, %eax // Restore correct sign to result
adcl %esi, %edx
xorl %esi, %eax
xorl %esi, %edx
popl %ebx // Restore callee-save registers
popl %esi
ret $0x10 // Return
|
XboxDev/nxdk | 1,056 | lib/xboxrt/c_runtime/chkstk.s | // SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2018-2021 Stefan Schmidt
.include "prelude.s.inc"
safeseh_prelude
/*
__chkstk does not comply to any standardized calling convention.
When called, eax contains the size of the request, and esp is modified
accordingly before returning.
Calling __chkstk has the same effect as "subl %eax, %esp".
*/
.text
.globl __chkstk
__chkstk:
pushl %ecx
leal 8(%esp), %ecx // Load original stack address into ecx
pushl %ecx // Save caller-saved registers before calling a C-function
pushl %eax
pushl %edx
pushl %ecx // Arguments for __xlibc_check_stack: stack address, size
pushl %eax
call __xlibc_check_stack
addl $8, %esp // Pop the two arguments
popl %edx // Restore the caller-saved registers saved above
popl %eax
popl %ecx
subl %eax, %ecx // ecx is the new stack pointer
leal 4(%esp), %eax // eax is a pointer to the return address variable
movl %ecx, %esp // load the new stack address
movl -4(%eax), %ecx // restore ecx
pushl (%eax) // put return address back on the stack
subl %esp, %eax // restore eax (ret-addr ptr minus new esp = original size)
ret
|
XboxDev/nxdk | 5,124 | lib/xboxrt/c_runtime/_allrem.s | // SPDX-License-Identifier: MIT OR NCSA
// SPDX-FileCopyrightText: 2008 Stephen Canon
// SPDX-FileCopyrightText: 2018-2021 Stefan Schmidt
// di_int __moddi3(di_int a, di_int b);
// result = remainder of a / b.
// both inputs and the output are 64-bit signed integers.
// This will do whatever the underlying hardware is set to do on division by zero.
// No other exceptions are generated, as the divide cannot overflow.
//
// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
// currently possible via simulation of integer divides on the x87 unit.
// Modified for stdcall calling convention for use in nxdk
.include "prelude.s.inc"
safeseh_prelude
.text
.balign 4
.globl __allrem
// signed 64-bit remainder, stdcall (callee pops the 16 bytes of
// arguments). After the initial "pushl %esi" the layout is:
// 8(%esp)=a_lo, 12(%esp)=a_hi, 16(%esp)=b_lo, 20(%esp)=b_hi.
// The result (sign follows the dividend a) is returned in EDX:EAX.
__allrem:
/* This is currently implemented by wrapping the unsigned modulus up in an absolute
value. This could certainly be improved upon. */
pushl %esi
movl 20(%esp), %edx // high word of b
movl 16(%esp), %eax // low word of b
movl %edx, %ecx
sarl $31, %ecx // (b < 0) ? -1 : 0
xorl %ecx, %eax
xorl %ecx, %edx // EDX:EAX = (b < 0) ? not(b) : b
subl %ecx, %eax
sbbl %ecx, %edx // EDX:EAX = abs(b)
movl %edx, 20(%esp)
movl %eax, 16(%esp) // store abs(b) back to stack
movl 12(%esp), %edx // high word of a
movl 8(%esp), %eax // low word of a
movl %edx, %ecx
sarl $31, %ecx // (a < 0) ? -1 : 0
xorl %ecx, %eax
xorl %ecx, %edx // EDX:EAX = (a < 0) ? not(a) : a
subl %ecx, %eax
sbbl %ecx, %edx // EDX:EAX = abs(a)
movl %edx, 12(%esp)
movl %eax, 8(%esp) // store abs(a) back to stack
movl %ecx, %esi // set aside sign of a
pushl %ebx
movl 24(%esp), %ebx // Find the index i of the leading bit in b.
bsrl %ebx, %ecx // If the high word of b is zero, jump to
jz 9f // the code to handle that special case [9].
/* High word of b is known to be non-zero on this branch */
movl 20(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
shrl %cl, %eax // Practically, this means that bhi is given by:
shrl %eax //
notl %ecx // bhi = (high word of b) << (31 - i) |
shll %cl, %ebx // (low word of b) >> (1 + i)
orl %eax, %ebx //
movl 16(%esp), %edx // Load the high and low words of a, and jump
movl 12(%esp), %eax // to [2] if the high word is at least bhi
cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
jae 2f
/* High word of a is less than bhi = (b >> (1 + i)) on this branch */
divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bs*qs + r
pushl %edi
notl %ecx
shrl %eax
shrl %cl, %eax // q = qs >> (1 + i)
movl %eax, %edi
mull 24(%esp) // q*blo
movl 16(%esp), %ebx
movl 20(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 28(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
jnc 1f // if positive, this is the result.
addl 24(%esp), %ebx // otherwise
adcl 28(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
1: movl %ebx, %eax
movl %ecx, %edx
addl %esi, %eax // Restore correct sign to result
adcl %esi, %edx // ((x + s) ^ s with s = 0 or -1 negates conditionally)
xorl %esi, %eax
xorl %esi, %edx
popl %edi // Restore callee-save registers
popl %ebx
popl %esi
ret $0x10 // Return
2: /* High word of a is greater than or equal to (b >> (1 + i)) on this branch */
subl %ebx, %edx // subtract bhi from ahi so that divide will not
divl %ebx // overflow, and find q and r such that
//
// ahi:alo = (1:q)*bhi + r
//
// Note that q is a number in (31-i).(1+i)
// fix point.
pushl %edi
notl %ecx
shrl %eax
orl $0x80000000, %eax
shrl %cl, %eax // q = (1:qs) >> (1 + i)
movl %eax, %edi
mull 24(%esp) // q*blo
movl 16(%esp), %ebx
movl 20(%esp), %ecx // ECX:EBX = a
subl %eax, %ebx
sbbl %edx, %ecx // ECX:EBX = a - q*blo
movl 28(%esp), %eax
imull %edi, %eax // q*bhi
subl %eax, %ecx // ECX:EBX = a - q*b
jnc 3f // if positive, this is the result.
addl 24(%esp), %ebx // otherwise
adcl 28(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
3: movl %ebx, %eax
movl %ecx, %edx
addl %esi, %eax // Restore correct sign to result
adcl %esi, %edx
xorl %esi, %eax
xorl %esi, %edx
popl %edi // Restore callee-save registers
popl %ebx
popl %esi
ret $0x10 // Return
9: /* High word of b is zero on this branch */
movl 16(%esp), %eax // Find qhi and rhi such that
movl 20(%esp), %ecx //
xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
divl %ecx //
movl %eax, %ebx //
movl 12(%esp), %eax // Find rlo such that
divl %ecx //
movl %edx, %eax // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
popl %ebx //
xorl %edx, %edx // and return 0:rlo
addl %esi, %eax // Restore correct sign to result
adcl %esi, %edx
xorl %esi, %eax
xorl %esi, %edx
popl %esi
ret $0x10 // Return
|
xbret/xenoblade | 9,068 | libs/PowerPC_EABI_Support/src/MetroTRK/__exception.s | .include "macros.inc"
.section .init, "ax"
.global gTRKInterruptVectorTable
gTRKInterruptVectorTable:
.string "Metrowerks Target Resident Kernel for PowerPC"
.balign 4, 0
.fill 0xD0
#############################################
# Interrupt vector slot 0x0000 is reserved. #
#############################################
# Common stanza used by most slots below: save r2-r4 into SPRG1-3, capture
# SRR0/SRR1, OR 0x30 (MSR[IR]|MSR[DR]) into the saved MSR so translation is
# enabled for the handler, then rfi into TRK_InterruptHandler with an
# exception code in r3. The .fill after each stanza pads the slot to its
# fixed vector offset, so no instruction may be added or removed here.
# NOTE(review): the r3 values are TRK-internal IDs and do not always match
# the hardware vector offset (e.g. 0xe00 for slot 0x0F00) — confirm
# against TRK_InterruptHandler before relying on them.
# Slot 0x0100: System Reset Exception
b __TRKreset
.fill 0xFC
# Slot 0x0200: Machine Check Exception
mtsprg 1, r2
mfsrr0 r2
icbi 0, r2 # invalidate the icache block at the faulting PC (SRR0)
mfdar r2
dcbi 0, r2 # invalidate the dcache block at the faulting data address (DAR)
mfsprg r2, 1
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x200
rfi
.fill 0xB4
# Slot 0x0300: DSI Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x300
rfi
.fill 0xCC
# Slot 0x0400: ISI Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x400
rfi
.fill 0xCC
# Slot 0x0500: External Interrupt Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x500
rfi
.fill 0xCC
# Slot 0x0600: Alignment Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x600
rfi
.fill 0xCC
# Slot 0x0700: Program Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x700
rfi
.fill 0xCC
# Slot 0x0800: Floating Point Unavailable Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x800
rfi
.fill 0xCC
# Slot 0x0900: Decrementer Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x900
rfi
.fill 0xCC
######################################################
# Interrupt vector slots 0x0A00 & 0x0B00 are reserved.
.fill 0x100
.fill 0x100
######################################################
# Slot 0x0C00: System Call Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0xc00
rfi
.fill 0xCC
# Slot 0x0D00: Trace Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0xd00
rfi
.fill 0xCC
############################################################################
# Slot 0x0E00 is usually for the Floating Point Assist Exception Handler, #
# however that exception is not implemented in the PPC 750CL architecture. #
############################################################################
# Slot 0x0F00: Performance Monitor Exception
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0xe00
rfi
.fill 0xCC
##################################################################################
# Interrupt vector slots 0x1000 through 0x1200 are not implemented in the 750CL. #
##################################################################################
# Slot 0x1300: Instruction Address Breakpoint Exception
# (The slot branches over an embedded second stanza that reports code 0xf20.)
b .L_800052D4
.fill 0x1C
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0xf20
rfi
.L_800052D4:
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0xf00
rfi
.fill 0x78
# Slot 0x1400: System Management Interrupt Exception
# This stanza additionally tests MSR bit 0x00020000 (andis./xoris mask 2)
# and clears it, with sync barriers around the mtmsr, before the common
# entry; CR is preserved in SPRG2 across the check.
mtsprg 1, r2
mfcr r2
mtsprg 2, r2
mfmsr r2
andis. r2, r2, 2
beq .L_800053B0
mfmsr r2
xoris r2, r2, 2
sync 0
mtmsr r2
sync 0
mtsprg 1, r2
.L_800053B0:
mfsprg r2, 2
mtcrf 0xff, r2
mfsprg r2, 1
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1000
rfi
.fill 0x90
##############################################################################
# Interrupt vector slots 0x1500 and 0x1600 are not implemented in the 750CL. #
##############################################################################
# Slot 0x1700: Thermal-Management Interrupt Exception
mtsprg 1, r2
mfcr r2
mtsprg 2, r2
mfmsr r2
andis. r2, r2, 2
beq .L_800054B0
mfmsr r2
xoris r2, r2, 2
sync 0
mtmsr r2
sync 0
mtsprg 1, r2
.L_800054B0:
mfsprg r2, 2
mtcrf 0xff, r2
mfsprg r2, 1
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1100
rfi
.fill 0x90
# Slot 0x1800(?)
mtsprg 1, r2
mfcr r2
mtsprg 2, r2
mfmsr r2
andis. r2, r2, 2
beq .L_800055B0
mfmsr r2
xoris r2, r2, 2
sync 0
mtmsr r2
sync 0
mtsprg 1, r2
.L_800055B0:
mfsprg r2, 2
mtcrf 0xff, r2
mfsprg r2, 1
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1200
rfi
.fill 0x90
# Slot 0x1900(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1300
rfi
.fill 0xCC
# Slot 0x1A00(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1400
rfi
.fill 0x1CC
# Slot 0x1B00(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1600
rfi
.fill 0xCC
# Slot 0x1C00(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1700
rfi
.fill 0x4CC
# Slot 0x1D00(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1c00
rfi
.fill 0xCC
# Slot 0x1E00(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1d00
rfi
.fill 0xCC
# Slot 0x1F00(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1e00
rfi
.fill 0xCC
# Slot 0x2000(?)
mtsprg 1, r2
mtsprg 2, r3
mtsprg 3, r4
mfsrr0 r2
mfsrr1 r4
mfmsr r3
ori r3, r3, 0x30
mtsrr1 r3
lis r3, TRK_InterruptHandler@h
ori r3, r3, TRK_InterruptHandler@l
mtsrr0 r3
li r3, 0x1f00
rfi
.fill 0xC
gTRKInterruptVectorTableEnd:
|
xbyl1234/jni_trace | 1,077 | module/src/main/cpp/third/dobby/source/TrampolineBridge/ClosureTrampolineBridge/arm/dummy/closure-trampoline-template-arm.S | // .section __TEXT,__text,regular,pure_instructions
#if defined(ENABLE_CLOSURE_BRIDGE_TEMPLATE)
#if defined(__WIN32__) || defined(__APPLE__)
#define cdecl(s) _##s
#else
#define cdecl(s) s
#endif
.align 4
#if !defined(ENABLE_CLOSURE_TRAMPOLINE_CARRY_OBJECT_PTR)
// closure trampoline carries the object pointer, and fetches required members in the runtime assembly code.
// #include "TrampolineBridge/ClosureTrampolineBridge/ClosureTrampoline.h"
// #define OFFSETOF(TYPE, ELEMENT) ((size_t)&(((TYPE *)0)->ELEMENT))
#define OFFSETOF_ClourseTrampolineEntry_carry_data 4
#define OFFSETOF_ClourseTrampolineEntry_carry_handler 0
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
// r12 <- ClourseTrampolineEntry* (the literal slot below is patched at
// runtime with the address of the entry object)
ldr r12, ClourseTrampolineEntryPtr
// Tail-jump to entry->carry_handler (offset 0); r12 still holds the entry
// pointer so the handler can read carry_data (offset 4) itself.
ldr pc, [r12, #0]
ClourseTrampolineEntryPtr:
.long 0
#else
; closure trampoline just carries the required members from the object.
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr r12, =carry_data
ldr pc, =carry_handler
carry_data:
.long 0
carry_handler:
.long 0
#endif
#endif |
xbyl1234/jni_trace | 1,230 | module/src/main/cpp/third/dobby/source/TrampolineBridge/ClosureTrampolineBridge/arm64/dummy/closure-trampoline-template-arm64.S | // .section __TEXT,__text,regular,pure_instructions
#if defined(ENABLE_CLOSURE_BRIDGE_TEMPLATE)
#if defined(__WIN32__) || defined(__APPLE__)
#define cdecl(s) _##s
#else
#define cdecl(s) s
#endif
.align 4
#if !defined(ENABLE_CLOSURE_TRAMPOLINE_CARRY_OBJECT_PTR)
// closure trampoline carries the object pointer, and fetches required members in the runtime assembly code.
// #include "TrampolineBridge/ClosureTrampolineBridge/ClosureTrampoline.h"
// #define OFFSETOF(TYPE, ELEMENT) ((size_t)&(((TYPE *)0)->ELEMENT))
#define OFFSETOF_ClourseTrampolineEntry_carry_data 8
#define OFFSETOF_ClourseTrampolineEntry_carry_handler 0
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
// x17 <- ClourseTrampolineEntry* (the literal slot below is patched at
// runtime with the address of the entry object).
ldr x17, ClourseTrampolineEntryPtr
// Fix: the struct members must be read with register-offset addressing.
// The previous "ldr x16, OFFSETOF_..." used the bare offset macro as an
// LDR-literal operand, which assembles as a PC-relative load from absolute
// address 8 / 0 rather than a load from the entry object.
ldr x16, [x17, #OFFSETOF_ClourseTrampolineEntry_carry_data]
ldr x17, [x17, #OFFSETOF_ClourseTrampolineEntry_carry_handler]
br x17
ClourseTrampolineEntryPtr:
.long 0
.long 0
#else
// closure trampoline just carries the required members from the object.
// (';' is a statement separator, not a comment, in GNU AArch64 assembly,
// so these comments must use '//'.)
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr x16, =carry_data
ldr x17, =carry_handler
br x17
carry_data:
.long 0
.long 0
carry_handler:
.long 0
.long 0
#endif
#endif |
xbyl1234/android_analysis | 1,077 | app/src/main/cpp/third/dobby/source/TrampolineBridge/ClosureTrampolineBridge/arm/dummy/closure-trampoline-template-arm.S | // .section __TEXT,__text,regular,pure_instructions
#if defined(ENABLE_CLOSURE_BRIDGE_TEMPLATE)
#if defined(__WIN32__) || defined(__APPLE__)
#define cdecl(s) _##s
#else
#define cdecl(s) s
#endif
.align 4
#if !defined(ENABLE_CLOSURE_TRAMPOLINE_CARRY_OBJECT_PTR)
// closure trampoline carries the object pointer, and fetches required members in the runtime assembly code.
// #include "TrampolineBridge/ClosureTrampolineBridge/ClosureTrampoline.h"
// #define OFFSETOF(TYPE, ELEMENT) ((size_t)&(((TYPE *)0)->ELEMENT))
#define OFFSETOF_ClourseTrampolineEntry_carry_data 4
#define OFFSETOF_ClourseTrampolineEntry_carry_handler 0
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
// r12 <- ClourseTrampolineEntry* (the literal slot below is patched at
// runtime with the address of the entry object)
ldr r12, ClourseTrampolineEntryPtr
// Tail-jump to entry->carry_handler (offset 0); r12 still holds the entry
// pointer so the handler can read carry_data (offset 4) itself.
ldr pc, [r12, #0]
ClourseTrampolineEntryPtr:
.long 0
#else
; closure trampoline just carries the required members from the object.
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr r12, =carry_data
ldr pc, =carry_handler
carry_data:
.long 0
carry_handler:
.long 0
#endif
#endif |
xbyl1234/android_analysis | 1,230 | app/src/main/cpp/third/dobby/source/TrampolineBridge/ClosureTrampolineBridge/arm64/dummy/closure-trampoline-template-arm64.S | // .section __TEXT,__text,regular,pure_instructions
#if defined(ENABLE_CLOSURE_BRIDGE_TEMPLATE)
#if defined(__WIN32__) || defined(__APPLE__)
#define cdecl(s) _##s
#else
#define cdecl(s) s
#endif
.align 4
#if !defined(ENABLE_CLOSURE_TRAMPOLINE_CARRY_OBJECT_PTR)
// closure trampoline carries the object pointer, and fetches required members in the runtime assembly code.
// #include "TrampolineBridge/ClosureTrampolineBridge/ClosureTrampoline.h"
// #define OFFSETOF(TYPE, ELEMENT) ((size_t)&(((TYPE *)0)->ELEMENT))
#define OFFSETOF_ClourseTrampolineEntry_carry_data 8
#define OFFSETOF_ClourseTrampolineEntry_carry_handler 0
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
// x17 <- ClourseTrampolineEntry* (the literal slot below is patched at
// runtime with the address of the entry object).
ldr x17, ClourseTrampolineEntryPtr
// Fix: the struct members must be read with register-offset addressing.
// The previous "ldr x16, OFFSETOF_..." used the bare offset macro as an
// LDR-literal operand, which assembles as a PC-relative load from absolute
// address 8 / 0 rather than a load from the entry object.
ldr x16, [x17, #OFFSETOF_ClourseTrampolineEntry_carry_data]
ldr x17, [x17, #OFFSETOF_ClourseTrampolineEntry_carry_handler]
br x17
ClourseTrampolineEntryPtr:
.long 0
.long 0
#else
// closure trampoline just carries the required members from the object.
// (';' is a statement separator, not a comment, in GNU AArch64 assembly,
// so these comments must use '//'.)
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr x16, =carry_data
ldr x17, =carry_handler
br x17
carry_data:
.long 0
.long 0
carry_handler:
.long 0
.long 0
#endif
#endif |
xbyl1234/instagram_lite_bot | 2,426 | analyse/app/src/main/cpp/third/libunwind/AsmGetRegsMips.S | /*
* Copyright (C) 2017 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// void AsmGetRegs(uint32_t regs[33])  -- MIPS32 (o32)
// In:  $a0 = buffer receiving r0..r31 at regs[0..31] plus a PC slot at
//      regs[32].  The value stored for the $a0 slot is the buffer pointer
//      itself, since that is $a0's live value at the call site.
// Out: buffer filled; returns via jalr with the final store executing in
//      the branch delay slot (.set noreorder is in effect).
// NOTE(review): .cpload rewrites $gp before it is stored below, so
// regs[28] holds this function's computed $gp rather than the caller's raw
// $gp -- confirm that matches the intended snapshot.
.text
.type AsmGetRegs, %function
.globl AsmGetRegs
.ent AsmGetRegs
.balign 16
AsmGetRegs:
.cfi_startproc
.cfi_def_cfa $sp, 0
.set push
.set noreorder
.cpload $t9
sw $zero, 0($a0)
.set noat
sw $at, 4($a0)   // .set noat lets us name $at explicitly
.set at
sw $v0, 8($a0)
sw $v1, 12($a0)
sw $a0, 16($a0)
sw $a1, 20($a0)
sw $a2, 24($a0)
sw $a3, 28($a0)
sw $t0, 32($a0)
sw $t1, 36($a0)
sw $t2, 40($a0)
sw $t3, 44($a0)
sw $t4, 48($a0)
sw $t5, 52($a0)
sw $t6, 56($a0)
sw $t7, 60($a0)
sw $s0, 64($a0)
sw $s1, 68($a0)
sw $s2, 72($a0)
sw $s3, 76($a0)
sw $s4, 80($a0)
sw $s5, 84($a0)
sw $s6, 88($a0)
sw $s7, 92($a0)
sw $t8, 96($a0)
sw $t9, 100($a0)
sw $k0, 104($a0)
sw $k1, 108($a0)
sw $gp, 112($a0)
sw $sp, 116($a0)
sw $s8, 120($a0)
sw $ra, 124($a0)
jalr $zero, $ra
sw $ra, 128($a0) // set PC to the calling function (executes in the delay slot)
.set pop
.cfi_endproc
.size AsmGetRegs, .-AsmGetRegs
.end AsmGetRegs
|
xbyl1234/instagram_lite_bot | 1,995 | analyse/app/src/main/cpp/third/libunwind/AsmGetRegsX86_64.S | /*
* Copyright (C) 2016 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// void AsmGetRegs(uint64_t regs[17])  -- x86-64, SysV AMD64 (rdi = regs)
// Captures the caller's general-purpose register state into the buffer:
// slots 0..15 = rax,rdx,rcx,rbx,rsi,rdi,rbp,rsp,r8..r15, slot 16 = RIP.
// RSP/RIP are reported as they will be in the caller after this call
// returns (return address popped).  Leaf function; no registers are
// clobbered before they are stored.
.text
.global AsmGetRegs
.balign 16
.type AsmGetRegs, @function
AsmGetRegs:
.cfi_startproc
movq %rax, (%rdi)
movq %rdx, 8(%rdi)
movq %rcx, 16(%rdi)
movq %rbx, 24(%rdi)
movq %rsi, 32(%rdi)
movq %rdi, 40(%rdi)
movq %rbp, 48(%rdi)
/* RSP */
lea 8(%rsp), %rax       /* caller's rsp = our rsp + return-address slot */
movq %rax, 56(%rdi)
movq %r8, 64(%rdi)
movq %r9, 72(%rdi)
movq %r10, 80(%rdi)
movq %r11, 88(%rdi)
movq %r12, 96(%rdi)
movq %r13, 104(%rdi)
movq %r14, 112(%rdi)
movq %r15, 120(%rdi)
/* RIP */
movq (%rsp), %rax       /* return address = caller's resume PC */
movq %rax, 128(%rdi)
ret
.cfi_endproc
.size AsmGetRegs, .-AsmGetRegs
|
xbyl1234/instagram_lite_bot | 2,436 | analyse/app/src/main/cpp/third/libunwind/AsmGetRegsMips64.S | /*
* Copyright (C) 2017 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// void AsmGetRegs(uint64_t regs[33])  -- MIPS64 (n64)
// In:  $a0 = buffer receiving r0..r31 at regs[0..31] plus a PC slot at
//      regs[32].  The value stored for the $a0 slot is the buffer pointer
//      itself, since that is $a0's live value at the call site.
// Out: buffer filled; returns via jalr with the final store executing in
//      the branch delay slot (.set noreorder is in effect).
// NOTE(review): .cpload rewrites $gp before it is stored below, so
// regs[28] holds this function's computed $gp rather than the caller's raw
// $gp -- confirm that matches the intended snapshot.
.text
.type AsmGetRegs, %function
.globl AsmGetRegs
.ent AsmGetRegs
.balign 16
AsmGetRegs:
.cfi_startproc
.cfi_def_cfa $sp, 0
.set push
.set noreorder
.cpload $t9
sd $zero, 0($a0)
.set noat
sd $at, 8($a0)   // .set noat lets us name $at explicitly
.set at
sd $v0, 16($a0)
sd $v1, 24($a0)
sd $a0, 32($a0)
sd $a1, 40($a0)
sd $a2, 48($a0)
sd $a3, 56($a0)
sd $a4, 64($a0)
sd $a5, 72($a0)
sd $a6, 80($a0)
sd $a7, 88($a0)
sd $t0, 96($a0)
sd $t1, 104($a0)
sd $t2, 112($a0)
sd $t3, 120($a0)
sd $s0, 128($a0)
sd $s1, 136($a0)
sd $s2, 144($a0)
sd $s3, 152($a0)
sd $s4, 160($a0)
sd $s5, 168($a0)
sd $s6, 176($a0)
sd $s7, 184($a0)
sd $t8, 192($a0)
sd $t9, 200($a0)
sd $k0, 208($a0)
sd $k1, 216($a0)
sd $gp, 224($a0)
sd $sp, 232($a0)
sd $s8, 240($a0)
sd $ra, 248($a0)
jalr $zero, $ra
sd $ra, 256($a0) // set PC to the calling function (executes in the delay slot)
.set pop
.cfi_endproc
.size AsmGetRegs, .-AsmGetRegs
.end AsmGetRegs
|
xbyl1234/instagram_lite_bot | 1,962 | analyse/app/src/main/cpp/third/libunwind/AsmGetRegsX86.S | /*
* Copyright (C) 2016 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// void AsmGetRegs(uint32_t regs[15])  -- x86, cdecl (arg on the stack)
// Captures the caller's register state: slots 0..8 = eax,ecx,edx,ebx,esp,
// ebp,esi,edi,eip; slots 9..14 = cs,ss,ds,es,fs,gs.  The EAX slot is
// stored as 0 because eax is clobbered loading the argument pointer before
// it could be saved.  ESP/EIP are reported as they will be in the caller
// after this call returns.
.text
.global AsmGetRegs
.balign 16
.type AsmGetRegs, @function
AsmGetRegs:
.cfi_startproc
mov 4(%esp), %eax       /* eax = regs argument (clobbers caller's eax) */
movl $0, (%eax)
movl %ecx, 4(%eax)      /* stored before ecx is clobbered below */
movl %edx, 8(%eax)
movl %ebx, 12(%eax)
/* ESP */
leal 4(%esp), %ecx      /* caller's esp = our esp + return-address slot */
movl %ecx, 16(%eax)
movl %ebp, 20(%eax)
movl %esi, 24(%eax)
movl %edi, 28(%eax)
/* EIP */
movl (%esp), %ecx       /* return address = caller's resume PC */
movl %ecx, 32(%eax)
mov %cs, 36(%eax)
mov %ss, 40(%eax)
mov %ds, 44(%eax)
mov %es, 48(%eax)
mov %fs, 52(%eax)
mov %gs, 56(%eax)
ret
.cfi_endproc
.size AsmGetRegs, .-AsmGetRegs
|
xbyl1234/instagram_lite_bot | 1,077 | analyse/app/src/main/cpp/third/dobby/source/TrampolineBridge/ClosureTrampolineBridge/arm/dummy/closure-trampoline-template-arm.S | // .section __TEXT,__text,regular,pure_instructions
// ARM (A32) closure-trampoline template (dummy/reference copy; only built
// when ENABLE_CLOSURE_BRIDGE_TEMPLATE is defined).  The runtime copies this
// code and patches the trailing data words.  r12 (ip) is the AAPCS
// intra-procedure-call scratch register, so clobbering it is ABI-safe.
#if defined(ENABLE_CLOSURE_BRIDGE_TEMPLATE)
#if defined(__WIN32__) || defined(__APPLE__)
#define cdecl(s) _##s
#else
#define cdecl(s) s
#endif
.align 4
#if !defined(ENABLE_CLOSURE_TRAMPOLINE_CARRY_OBJECT_PTR)
// closure trampoline carries the object pointer, and fetches the required
// members in the runtime assembly code.
// #include "TrampolineBridge/ClosureTrampolineBridge/ClosureTrampoline.h"
// #define OFFSETOF(TYPE, ELEMENT) ((size_t)&(((TYPE *)0)->ELEMENT))
#define OFFSETOF_ClourseTrampolineEntry_carry_data 4
#define OFFSETOF_ClourseTrampolineEntry_carry_handler 0
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr r12, ClourseTrampolineEntryPtr                            @ r12 = entry object pointer (literal slot, patched at runtime)
ldr pc, [r12, #OFFSETOF_ClourseTrampolineEntry_carry_handler] @ jump to entry->carry_handler (fix: named offset instead of bare #0)
ClourseTrampolineEntryPtr:
.long 0
#else
@ closure trampoline just carries the required members from the object.
@ (fix: ';' is not a comment character for the ARM GNU assembler; '@' is)
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr r12, carry_data   @ literal load of the patched carry_data value (fix: '=label' loaded the label address, not the value)
ldr pc, carry_handler @ literal load of the patched handler address
carry_data:
.long 0
carry_handler:
.long 0
#endif
#endif
xbyl1234/instagram_lite_bot | 1,230 | analyse/app/src/main/cpp/third/dobby/source/TrampolineBridge/ClosureTrampolineBridge/arm64/dummy/closure-trampoline-template-arm64.S | // .section __TEXT,__text,regular,pure_instructions
// AArch64 closure-trampoline template (dummy/reference copy; only built when
// ENABLE_CLOSURE_BRIDGE_TEMPLATE is defined).  The runtime copies this code
// and patches the trailing data words.  x16/x17 are the AAPCS64
// intra-procedure-call scratch registers, so clobbering them is ABI-safe.
#if defined(ENABLE_CLOSURE_BRIDGE_TEMPLATE)
#if defined(__WIN32__) || defined(__APPLE__)
#define cdecl(s) _##s
#else
#define cdecl(s) s
#endif
.align 4
#if !defined(ENABLE_CLOSURE_TRAMPOLINE_CARRY_OBJECT_PTR)
// closure trampoline carries the ClourseTrampolineEntry object pointer and
// fetches the required members at runtime.
// #include "TrampolineBridge/ClosureTrampolineBridge/ClosureTrampoline.h"
// #define OFFSETOF(TYPE, ELEMENT) ((size_t)&(((TYPE *)0)->ELEMENT))
#define OFFSETOF_ClourseTrampolineEntry_carry_data 8
#define OFFSETOF_ClourseTrampolineEntry_carry_handler 0
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr x17, ClourseTrampolineEntryPtr                             // x17 = entry object pointer (literal slot, patched at runtime)
ldr x16, [x17, #OFFSETOF_ClourseTrampolineEntry_carry_data]    // x16 = entry->carry_data (fix: bare macro assembled as a literal load from an absolute address)
ldr x17, [x17, #OFFSETOF_ClourseTrampolineEntry_carry_handler] // x17 = entry->carry_handler
br x17
ClourseTrampolineEntryPtr:
.long 0
.long 0
#else
// closure trampoline just carries the required members from the object.
// (fix: ';' is not a comment character for the AArch64 GNU assembler)
.globl cdecl(closure_trampoline_template)
cdecl(closure_trampoline_template):
ldr x16, carry_data       // literal load of the patched carry_data value (fix: '=label' loaded the label address, not the value)
ldr x17, carry_handler    // literal load of the patched handler address
br x17
carry_data:
.long 0
.long 0
carry_handler:
.long 0
.long 0
#endif
#endif
xbyl1234/instagram_lite_bot | 1,492 | lite/three/sonic/internal/rt/asm_amd64.s | // +build !noasm !appengine
// Code generated by asm2asm, DO NOT EDIT·
#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"
// func MoreStack(size uintptr)  -- Go Plan9 asm, amd64
// Forces a stack-split check so at least `size` bytes below SP are usable
// by subsequent code without further checks.
TEXT ·MoreStack(SB), NOSPLIT, $0 - 8
NO_LOCAL_POINTERS
_entry:
MOVQ (TLS), R14          // R14 = g (current goroutine)
MOVQ size+0(FP), R12
NOTQ R12                 // ~size == -size - 1 (two's complement)
LEAQ (SP)(R12*1), R12    // R12 = SP - size - 1: lowest byte we may touch
CMPQ R12, 16(R14)        // compare against the stack guard at g+16
JBE _stack_grow          // at/below the guard -> grow the stack
RET
_stack_grow:
CALL runtime·morestack_noctxt<>(SB)
JMP _entry               // re-check on the freshly grown stack
// func StopProf()  -- Go Plan9 asm, amd64
// When the StopProfiling flag is set: atomically increments yieldCount and,
// if the runtime CPU profiler is active (runtime.prof word at offset 4 is
// non-zero -- presumably prof.hz; confirm against the runtime version),
// saves its rate into oldHz and zeroes it to suspend profiling.
TEXT ·StopProf(SB), NOSPLIT, $0-0
NO_LOCAL_POINTERS
CMPB github·com∕bytedance∕sonic∕internal∕rt·StopProfiling(SB), $0
JEQ _ret_1               // feature disabled -> nothing to do
MOVL $1, AX
LEAQ github·com∕bytedance∕sonic∕internal∕rt·yieldCount(SB), CX
LOCK
XADDL AX, (CX)           // atomic yieldCount++
MOVL runtime·prof+4(SB), AX
TESTL AX, AX
JEQ _ret_1               // profiler not running
MOVL AX, github·com∕bytedance∕sonic∕internal∕rt·oldHz(SB)
MOVL $0, runtime·prof+4(SB)   // suspend the profiler
_ret_1:
RET
// func StartProf()  -- Go Plan9 asm, amd64
// Counterpart of StopProf: atomically decrements yieldCount and, when this
// was the last outstanding suspension and the profiler is still stopped,
// restores the rate saved in oldHz (defaulting to 100 if none was
// recorded) back into the runtime.prof word at offset 4.
TEXT ·StartProf(SB), NOSPLIT, $0-0
NO_LOCAL_POINTERS
CMPB github·com∕bytedance∕sonic∕internal∕rt·StopProfiling(SB), $0
JEQ _ret_2               // feature disabled -> nothing to do
MOVL $-1, AX
LEAQ github·com∕bytedance∕sonic∕internal∕rt·yieldCount(SB), CX
LOCK
XADDL AX, (CX)           // atomic yieldCount--
CMPL github·com∕bytedance∕sonic∕internal∕rt·yieldCount(SB), $0
JNE _ret_2               // other suspensions still outstanding
CMPL runtime·prof+4(SB), $0
JNE _ret_2               // profiler already running again
CMPL github·com∕bytedance∕sonic∕internal∕rt·oldHz(SB), $0
JNE _branch_1
MOVL $100, github·com∕bytedance∕sonic∕internal∕rt·oldHz(SB)   // default rate when none was recorded
_branch_1:
MOVL github·com∕bytedance∕sonic∕internal∕rt·oldHz(SB), AX
MOVL AX, runtime·prof+4(SB)   // resume the profiler
_ret_2:
RET
|
xbyl1234/instagram_lite_bot | 1,053 | lite/three/sonic/internal/decoder/generic_amd64_go117_test.s | // +build go1.17,!go1.21
//
// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"
// decodeValueStub adapts the Go calling convention to the register
// convention expected by the native _subr_decode_value routine
// (go1.17..go1.20 build; register assignments differ from the pre-1.17
// stub).  Args are taken from the frame slots named below; the 16-byte
// result (rp, ex) is zeroed up front and filled from R11/BX afterwards.
TEXT ·decodeValueStub(SB), NOSPLIT, $0 - 72
NO_LOCAL_POINTERS
PXOR X0, X0
MOVOU X0, rv+48(FP)      // zero the 16-byte result slot before the call
MOVQ st+0(FP) , R13      // R13 = st
MOVQ sp+8(FP) , R10      // R10 = sp
MOVQ sn+16(FP), R12      // R12 = sn
MOVQ ic+24(FP), R11      // R11 = ic
MOVQ vp+32(FP), R15      // R15 = vp
MOVQ df+40(FP), AX       // AX  = df
MOVQ ·_subr_decode_value(SB), BX
CALL BX
MOVQ R11, rp+48(FP)      // first result out of R11
MOVQ BX, ex+56(FP)       // second result out of BX
RET
|
xbyl1234/instagram_lite_bot | 1,052 | lite/three/sonic/internal/decoder/generic_amd64_test.s | // +build go1.15,!go1.17
//
// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"
// decodeValueStub adapts the Go calling convention to the register
// convention expected by the native _subr_decode_value routine
// (go1.15/go1.16 build; register assignments differ from the go1.17+
// stub).  Args are taken from the frame slots named below; the 16-byte
// result (rp, ex) is zeroed up front and filled from R14/R11 afterwards.
TEXT ·decodeValueStub(SB), NOSPLIT, $0 - 72
NO_LOCAL_POINTERS
PXOR X0, X0
MOVOU X0, rv+48(FP)      // zero the 16-byte result slot before the call
MOVQ st+0(FP), BX        // BX  = st
MOVQ sp+8(FP), R12       // R12 = sp
MOVQ sn+16(FP), R13      // R13 = sn
MOVQ ic+24(FP), R14      // R14 = ic
MOVQ vp+32(FP), R15      // R15 = vp
MOVQ df+40(FP), R10      // R10 = df
MOVQ ·_subr_decode_value(SB), AX
CALL AX
MOVQ R14, rp+48(FP)      // first result out of R14
MOVQ R11, ex+56(FP)      // second result out of R11
RET
|
xbyl1234/instagram_lite_bot | 15,839 | lite/three/zlib/contrib/gcc_gvmat64/gvmat64.S | /*
;uInt longest_match_x64(
; deflate_state *s,
; IPos cur_match); // current match
; gvmat64.S -- Asm portion of the optimized longest_match for 32 bits x86_64
; (AMD64 on Athlon 64, Opteron, Phenom
; and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7)
; this file is translation from gvmat64.asm to GCC 4.x (for Linux, Mac XCode)
; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.
;
; File written by Gilles Vollant, by converting to assembly the longest_match
; from Jean-loup Gailly in deflate.c of zLib and infoZip zip.
; and by taking inspiration on asm686 with masm, optimised assembly code
; from Brian Raiter, written 1998
;
; This software is provided 'as-is', without any express or implied
; warranty. In no event will the authors be held liable for any damages
; arising from the use of this software.
;
; Permission is granted to anyone to use this software for any purpose,
; including commercial applications, and to alter it and redistribute it
; freely, subject to the following restrictions:
;
; 1. The origin of this software must not be misrepresented; you must not
; claim that you wrote the original software. If you use this software
; in a product, an acknowledgment in the product documentation would be
; appreciated but is not required.
; 2. Altered source versions must be plainly marked as such, and must not be
; misrepresented as being the original software
; 3. This notice may not be removed or altered from any source distribution.
;
; http://www.zlib.net
; http://www.winimage.com/zLibDll
; http://www.muppetlabs.com/~breadbox/software/assembly.html
;
; to compile this file for zLib, I use option:
; gcc -c -arch x86_64 gvmat64.S
;uInt longest_match(s, cur_match)
; deflate_state *s;
; IPos cur_match; // current match /
;
; with XCode for Mac, I had strange error with some jump on intel syntax
; this is why BEFORE_JMP and AFTER_JMP are used
*/
#define BEFORE_JMP .att_syntax
#define AFTER_JMP .intel_syntax noprefix
#ifndef NO_UNDERLINE
# define match_init _match_init
# define longest_match _longest_match
#endif
.intel_syntax noprefix
.globl match_init, longest_match
.text
//-----------------------------------------------------------------------
// uInt longest_match(deflate_state *s, IPos cur_match)
// SysV AMD64 entry (gcc build): rdi = s, esi = cur_match.
// Returns in eax the length of the longest match found, capped by
// s->lookahead; s->match_start is updated whenever a longer match is
// accepted.
//-----------------------------------------------------------------------
longest_match:
#define LocalVarsSize 96
/*
; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12
; free register : r14,r15
; register can be saved : rsp
*/
#define chainlenwmask (rsp + 8 - LocalVarsSize)
#define nicematch (rsp + 16 - LocalVarsSize)
#define save_rdi (rsp + 24 - LocalVarsSize)
#define save_rsi (rsp + 32 - LocalVarsSize)
#define save_rbx (rsp + 40 - LocalVarsSize)
#define save_rbp (rsp + 48 - LocalVarsSize)
#define save_r12 (rsp + 56 - LocalVarsSize)
#define save_r13 (rsp + 64 - LocalVarsSize)
#define save_r14 (rsp + 72 - LocalVarsSize)
#define save_r15 (rsp + 80 - LocalVarsSize)
/*
; all the +4 offsets are due to the addition of pending_buf_size (in zlib
; in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, remove the +4).
; Note : these value are good with a 8 bytes boundary pack structure
*/
#define MAX_MATCH 258
#define MIN_MATCH 3
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/*
;;; Offsets for fields in the deflate_state structure. These numbers
;;; are calculated from the definition of deflate_state, with the
;;; assumption that the compiler will dword-align the fields. (Thus,
;;; changing the definition of deflate_state could easily cause this
;;; program to crash horribly, without so much as a warning at
;;; compile time. Sigh.)
; all the +zlib1222add offsets are due to the addition of fields
; in zlib in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8").
*/
/* you can check the structure offset by running
#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"
void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));
printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/
#define dsWSize 68
#define dsWMask 76
#define dsWindow 80
#define dsPrev 96
#define dsMatchLen 144
#define dsPrevMatch 148
#define dsStrStart 156
#define dsMatchStart 160
#define dsLookahead 164
#define dsPrevLen 168
#define dsMaxChainLen 172
#define dsGoodMatch 188
#define dsNiceMatch 192
#define window_size [ rcx + dsWSize]
#define WMask [ rcx + dsWMask]
#define window_ad [ rcx + dsWindow]
#define prev_ad [ rcx + dsPrev]
#define strstart [ rcx + dsStrStart]
#define match_start [ rcx + dsMatchStart]
#define Lookahead [ rcx + dsLookahead] //; 0ffffffffh on infozip
#define prev_length [ rcx + dsPrevLen]
#define max_chain_length [ rcx + dsMaxChainLen]
#define good_match [ rcx + dsGoodMatch]
#define nice_match [ rcx + dsNiceMatch]
/*
; windows:
; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match)
; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and
; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp
;
; All registers must be preserved across the call, except for
; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.
;
; gcc on macosx-linux:
; see http://www.x86-64.org/documentation/abi-0.99.pdf
; param 1 in rdi, param 2 in rsi
; rbx, rsp, rbp, r12 to r15 must be preserved
;;; Save registers that the compiler may be using, and adjust esp to
;;; make room for our stack frame.
;;; Retrieve the function arguments. r8d will hold cur_match
;;; throughout the entire function. edx will hold the pointer to the
;;; deflate_state structure during the function's setup (before
;;; entering the main loop.
; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match)
; mac: param 1 in rdi, param 2 rsi
; this clear high 32 bits of r8, which can be garbage in both r8 and rdx
*/
//;;; Leaf function: all save_*/local slots above lie within the 128-byte
//;;; SysV red zone below rsp, so rsp is never adjusted.  rdi/rsi are
//;;; caller-saved in this ABI, so save_rdi/save_rsi go unused (their
//;;; restores near the end are commented out).
mov [save_rbx],rbx
mov [save_rbp],rbp
mov rcx,rdi
mov r8d,esi
mov [save_r12],r12
mov [save_r13],r13
mov [save_r14],r14
mov [save_r15],r15
//;;; uInt wmask = s->w_mask;
//;;; unsigned chain_length = s->max_chain_length;
//;;; if (s->prev_length >= s->good_match) {
//;;; chain_length >>= 2;
//;;; }
mov edi, prev_length
mov esi, good_match
mov eax, WMask
mov ebx, max_chain_length
cmp edi, esi
jl LastMatchGood
shr ebx, 2
LastMatchGood:
//;;; chainlen is decremented once beforehand so that the function can
//;;; use the sign flag instead of the zero flag for the exit test.
//;;; It is then shifted into the high word, to make room for the wmask
//;;; value, which it will always accompany.
dec ebx
shl ebx, 16
or ebx, eax
//;;; on zlib only
//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
mov eax, nice_match
mov [chainlenwmask], ebx
mov r10d, Lookahead
cmp r10d, eax
cmovnl r10d, eax
mov [nicematch],r10d
//;;; register Bytef *scan = s->window + s->strstart;
mov r10, window_ad
mov ebp, strstart
lea r13, [r10 + rbp]
//;;; Determine how many bytes the scan ptr is off from being
//;;; dword-aligned.
mov r9,r13
neg r13
and r13,3
//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
//;;; s->strstart - (IPos)MAX_DIST(s) : NIL;
mov eax, window_size
sub eax, MIN_LOOKAHEAD
xor edi,edi
sub ebp, eax
mov r11d, prev_length
cmovng ebp,edi
//;;; int best_len = s->prev_length;
//;;; Store the sum of s->window + best_len in esi locally, and in esi.
lea rsi,[r10+r11]
//;;; register ush scan_start = *(ushf*)scan;
//;;; register ush scan_end = *(ushf*)(scan+best_len-1);
//;;; Posf *prev = s->prev;
movzx r12d,word ptr [r9]
movzx ebx, word ptr [r9 + r11 - 1]
mov rdi, prev_ad
//;;; Jump into the main loop.
mov edx, [chainlenwmask]
cmp bx,word ptr [rsi + r8 - 1]
jz LookupLoopIsZero
LookupLoop1:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
jbe LeaveNow
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry1:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jz LookupLoopIsZero
AFTER_JMP
LookupLoop2:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry2:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jz LookupLoopIsZero
AFTER_JMP
LookupLoop4:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry4:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jnz LookupLoop1
jmp LookupLoopIsZero
AFTER_JMP
/*
;;; do {
;;; match = s->window + cur_match;
;;; if (*(ushf*)(match+best_len-1) != scan_end ||
;;; *(ushf*)match != scan_start) continue;
;;; [...]
;;; } while ((cur_match = prev[cur_match & wmask]) > limit
;;; && --chain_length != 0);
;;;
;;; Here is the inner loop of the function. The function will spend the
;;; majority of its time in this loop, and majority of that time will
;;; be spent in the first ten instructions.
;;;
;;; Within this loop:
;;; ebx = scanend
;;; r8d = curmatch
;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
;;; esi = windowbestlen - i.e., (window + bestlen)
;;; edi = prev
;;; ebp = limit
*/
.balign 16
LookupLoop:
and r8d, edx
movzx r8d, word ptr [rdi + r8*2]
cmp r8d, ebp
BEFORE_JMP
jbe LeaveNow
AFTER_JMP
sub edx, 0x00010000
BEFORE_JMP
js LeaveNow
AFTER_JMP
LoopEntry:
cmp bx,word ptr [rsi + r8 - 1]
BEFORE_JMP
jnz LookupLoop1
AFTER_JMP
LookupLoopIsZero:
cmp r12w, word ptr [r10 + r8]
BEFORE_JMP
jnz LookupLoop1
AFTER_JMP
//;;; Store the current value of chainlen.
mov [chainlenwmask], edx
/*
;;; Point edi to the string under scrutiny, and esi to the string we
;;; are hoping to match it up with. In actuality, esi and edi are
;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is
;;; initialized to -(MAX_MATCH_8 - scanalign).
*/
lea rsi,[r8+r10]
mov rdx, 0xfffffffffffffef8 //; -(MAX_MATCH_8)
lea rsi, [rsi + r13 + 0x0108] //;MAX_MATCH_8]
lea rdi, [r9 + r13 + 0x0108] //;MAX_MATCH_8]
prefetcht1 [rsi+rdx]
prefetcht1 [rdi+rdx]
/*
;;; Test the strings for equality, 8 bytes at a time. At the end,
;;; adjust rdx so that it is offset to the exact byte that mismatched.
;;;
;;; We already know at this point that the first three bytes of the
;;; strings match each other, and they can be safely passed over before
;;; starting the compare loop. So what this code does is skip over 0-3
;;; bytes, as much as necessary in order to dword-align the edi
;;; pointer. (rsi will still be misaligned three times out of four.)
;;;
;;; It should be confessed that this loop usually does not represent
;;; much of the total running time. Replacing it with a more
;;; straightforward "rep cmpsb" would not drastically degrade
;;; performance.
*/
LoopCmps:
mov rax, [rsi + rdx]
xor rax, [rdi + rdx]
jnz LeaveLoopCmps
mov rax, [rsi + rdx + 8]
xor rax, [rdi + rdx + 8]
jnz LeaveLoopCmps8
mov rax, [rsi + rdx + 8+8]
xor rax, [rdi + rdx + 8+8]
jnz LeaveLoopCmps16
add rdx,8+8+8
BEFORE_JMP
jnz LoopCmps
jmp LenMaximum
AFTER_JMP
LeaveLoopCmps16: add rdx,8
LeaveLoopCmps8: add rdx,8
LeaveLoopCmps:
test eax, 0x0000FFFF
jnz LenLower
test eax,0xffffffff
jnz LenLower32
add rdx,4
shr rax,32
or ax,ax
BEFORE_JMP
jnz LenLower
AFTER_JMP
LenLower32:
shr eax,16
add rdx,2
LenLower:
sub al, 1
adc rdx, 0
//;;; Calculate the length of the match. If it is longer than MAX_MATCH,
//;;; then automatically accept it as the best possible match and leave.
lea rax, [rdi + rdx]
sub rax, r9
cmp eax, MAX_MATCH
BEFORE_JMP
jge LenMaximum
AFTER_JMP
/*
;;; If the length of the match is not longer than the best match we
;;; have so far, then forget it and return to the lookup loop.
;///////////////////////////////////
*/
cmp eax, r11d
jg LongerMatch
lea rsi,[r10+r11]
mov rdi, prev_ad
mov edx, [chainlenwmask]
BEFORE_JMP
jmp LookupLoop
AFTER_JMP
/*
;;; s->match_start = cur_match;
;;; best_len = len;
;;; if (len >= nice_match) break;
;;; scan_end = *(ushf*)(scan+best_len-1);
*/
LongerMatch:
mov r11d, eax
mov match_start, r8d
cmp eax, [nicematch]
BEFORE_JMP
jge LeaveNow
AFTER_JMP
lea rsi,[r10+rax]
movzx ebx, word ptr [r9 + rax - 1]
mov rdi, prev_ad
mov edx, [chainlenwmask]
BEFORE_JMP
jmp LookupLoop
AFTER_JMP
//;;; Accept the current string, with the maximum possible length.
LenMaximum:
mov r11d,MAX_MATCH
mov match_start, r8d
//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
//;;; return s->lookahead;
LeaveNow:
mov eax, Lookahead
cmp r11d, eax
cmovng eax, r11d
//;;; Restore the stack and return from whence we came.
// mov rsi,[save_rsi]
// mov rdi,[save_rdi]
mov rbx,[save_rbx]
mov rbp,[save_rbp]
mov r12,[save_r12]
mov r13,[save_r13]
mov r14,[save_r14]
mov r15,[save_r15]
ret 0
//; please don't remove this string !
//; Your can freely use gvmat64 in any free or commercial app
//; but it is far better don't remove the string in the binary!
// db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0
//;;; match_init: no per-CPU initialization is needed by this build.
match_init:
ret 0
|
XDagger/xdag | 1,991 | client/x86_64cpuid.s |
.hidden xOPENSSL_ia32cap_P
.comm xOPENSSL_ia32cap_P,16,4
.text
.globl xOPENSSL_ia32_cpuid
.type xOPENSSL_ia32_cpuid,@function
.align 16
// uint64_t xOPENSSL_ia32_cpuid(unsigned int *cap)   (SysV AMD64: rdi = cap)
// OpenSSL-style capability probe: returns (adjusted CPUID.1:ECX << 32) |
// adjusted CPUID.1:EDX in rax, and stores the CPUID.(7,0):EBX
// extended-feature word to 8(%rdi) (possibly masked in .Lclear_avx).
xOPENSSL_ia32_cpuid:
movq %rbx,%r8            // rbx is callee-saved in SysV; cpuid clobbers ebx
xorl %eax,%eax
movl %eax,8(%rdi)        // pre-clear the extended-feature slot
cpuid                    // leaf 0: eax = max std leaf, ebx:edx:ecx = vendor
movl %eax,%r11d          // r11d = max standard leaf
xorl %eax,%eax
cmpl $0x756e6547,%ebx    // "Genu"
setne %al
movl %eax,%r9d
cmpl $0x49656e69,%edx    // "ineI"
setne %al
orl %eax,%r9d
cmpl $0x6c65746e,%ecx    // "ntel"
setne %al
orl %eax,%r9d            // r9d == 0 iff vendor is "GenuineIntel"
jz .Lintel
cmpl $0x68747541,%ebx    // "Auth"
setne %al
movl %eax,%r10d
cmpl $0x69746E65,%edx    // "enti"
setne %al
orl %eax,%r10d
cmpl $0x444D4163,%ecx    // "cAMD"
setne %al
orl %eax,%r10d           // r10d == 0 iff vendor is "AuthenticAMD"
jnz .Lintel
// AMD path: probe the extended leaves for features and core count.
movl $0x80000000,%eax
cpuid
cmpl $0x80000001,%eax
jb .Lintel
movl %eax,%r10d          // r10d = max extended leaf
movl $0x80000001,%eax
cpuid
orl %ecx,%r9d
andl $0x00000801,%r9d    // keep only bits 0 and 11 of extended ECX
cmpl $0x80000008,%r10d
jb .Lintel
movl $0x80000008,%eax
cpuid
movzbq %cl,%r10          // ECX[7:0] = physical core count - 1
incq %r10
movl $1,%eax
cpuid                    // leaf 1: feature flags
btl $28,%edx             // HTT bit
jnc .Lgeneric
shrl $16,%ebx            // EBX[23:16] = logical processors per package
cmpb %r10b,%bl
ja .Lgeneric
andl $0xefffffff,%edx    // one logical CPU per core: clear HTT
jmp .Lgeneric
.Lintel:
cmpl $4,%r11d
movl $-1,%r10d
jb .Lnocacheinfo
movl $4,%eax
movl $0,%ecx
cpuid                    // leaf 4: deterministic cache parameters
movl %eax,%r10d
shrl $14,%r10d
andl $0xfff,%r10d        // EAX[25:14]: max logical CPUs sharing this cache - 1
.Lnocacheinfo:
movl $1,%eax
cpuid                    // leaf 1: feature flags
andl $0xbfefffff,%edx
cmpl $0,%r9d
jne .Lnotintel
orl $0x40000000,%edx     // reserved bit OpenSSL uses to mark "Intel CPU"
andb $15,%ah
cmpb $15,%ah             // family 0xF (P4)
jne .Lnotintel
orl $0x00100000,%edx     // reserved bit OpenSSL uses to mark "P4 core"
.Lnotintel:
btl $28,%edx             // HTT
jnc .Lgeneric
andl $0xefffffff,%edx
cmpl $0,%r10d
je .Lgeneric
orl $0x10000000,%edx
shrl $16,%ebx
cmpb $1,%bl
ja .Lgeneric
andl $0xefffffff,%edx    // single logical CPU: clear HTT again
.Lgeneric:
andl $0x00000800,%r9d
andl $0xfffff7ff,%ecx
orl %ecx,%r9d            // r9d = merged ECX feature bits (result high half)
movl %edx,%r10d          // r10d = EDX feature bits (result low half)
cmpl $7,%r11d
jb .Lno_extended_info
movl $7,%eax
xorl %ecx,%ecx
cpuid                    // leaf 7 subleaf 0: structured extended features
movl %ebx,8(%rdi)
.Lno_extended_info:
btl $27,%r9d             // OSXSAVE: may we execute xgetbv?
jnc .Lclear_avx
xorl %ecx,%ecx
.byte 0x0f,0x01,0xd0     // xgetbv, byte-encoded for old assemblers
andl $6,%eax             // XCR0 bits 1|2: XMM and YMM state OS-enabled?
cmpl $6,%eax
je .Ldone
.Lclear_avx:
movl $0xefffe7ff,%eax    // OS does not manage YMM: mask AVX-related ECX bits
andl %eax,%r9d
andl $0xffffffdf,8(%rdi) // and clear bit 5 (AVX2) in the extended-feature word
.Ldone:
shlq $32,%r9
movl %r10d,%eax
movq %r8,%rbx            // restore callee-saved rbx
orq %r9,%rax             // rax = (ecx-bits << 32) | edx-bits
.byte 0xf3,0xc3          // rep ret
.size xOPENSSL_ia32_cpuid,.-xOPENSSL_ia32_cpuid
|
XDagger/xdag | 2,157 | client/x86_64cpuid-mac.s | .private_extern _xOPENSSL_ia32cap_P
.comm _xOPENSSL_ia32cap_P,16,2
.text
.globl _xOPENSSL_ia32_cpuid
.p2align 4
// uint64_t xOPENSSL_ia32_cpuid(unsigned int *cap)   (SysV AMD64: rdi = cap)
// Mach-O build of the OpenSSL-style capability probe.  Returns (adjusted
// CPUID.1:ECX << 32) | adjusted CPUID.1:EDX in rax, and stores the
// CPUID.(7,0):EBX extended-feature word to 8(%rdi).  This version
// additionally special-cases Knights Landing/Knights Mill cores.
_xOPENSSL_ia32_cpuid:
movq %rbx,%r8            // rbx is callee-saved in SysV; cpuid clobbers ebx
xorl %eax,%eax
movl %eax,8(%rdi)        // pre-clear the extended-feature slot
cpuid                    // leaf 0: eax = max std leaf, ebx:edx:ecx = vendor
movl %eax,%r11d          // r11d = max standard leaf
xorl %eax,%eax
cmpl $0x756e6547,%ebx    // "Genu"
setne %al
movl %eax,%r9d
cmpl $0x49656e69,%edx    // "ineI"
setne %al
orl %eax,%r9d
cmpl $0x6c65746e,%ecx    // "ntel"
setne %al
orl %eax,%r9d            // r9d == 0 iff vendor is "GenuineIntel"
jz L$intel
cmpl $0x68747541,%ebx    // "Auth"
setne %al
movl %eax,%r10d
cmpl $0x69746E65,%edx    // "enti"
setne %al
orl %eax,%r10d
cmpl $0x444D4163,%ecx    // "cAMD"
setne %al
orl %eax,%r10d           // r10d == 0 iff vendor is "AuthenticAMD"
jnz L$intel
// AMD path: probe the extended leaves for features and core count.
movl $0x80000000,%eax
cpuid
cmpl $0x80000001,%eax
jb L$intel
movl %eax,%r10d          // r10d = max extended leaf
movl $0x80000001,%eax
cpuid
orl %ecx,%r9d
andl $0x00000801,%r9d    // keep only bits 0 and 11 of extended ECX
cmpl $0x80000008,%r10d
jb L$intel
movl $0x80000008,%eax
cpuid
movzbq %cl,%r10          // ECX[7:0] = physical core count - 1
incq %r10
movl $1,%eax
cpuid                    // leaf 1: feature flags
btl $28,%edx             // HTT bit
jnc L$generic
shrl $16,%ebx            // EBX[23:16] = logical processors per package
cmpb %r10b,%bl
ja L$generic
andl $0xefffffff,%edx    // one logical CPU per core: clear HTT
jmp L$generic
L$intel:
cmpl $4,%r11d
movl $-1,%r10d
jb L$nocacheinfo
movl $4,%eax
movl $0,%ecx
cpuid                    // leaf 4: deterministic cache parameters
movl %eax,%r10d
shrl $14,%r10d
andl $0xfff,%r10d        // EAX[25:14]: max logical CPUs sharing this cache - 1
L$nocacheinfo:
movl $1,%eax
cpuid                    // leaf 1: feature flags
andl $0xbfefffff,%edx
cmpl $0,%r9d
jne L$notintel
orl $0x40000000,%edx     // reserved bit OpenSSL uses to mark "Intel CPU"
andb $15,%ah
cmpb $15,%ah             // family 0xF (P4)
jne L$notP4
orl $0x00100000,%edx     // reserved bit OpenSSL uses to mark "P4 core"
L$notP4:
cmpb $6,%ah              // family 6: check for Knights Landing/Mill
jne L$notintel
andl $0x0fff0ff0,%eax    // isolate family/model signature fields
cmpl $0x00050670,%eax    // Knights Landing
je L$knights
cmpl $0x00080650,%eax    // Knights Mill
jne L$notintel
L$knights:
andl $0xfbffffff,%ecx    // clear ECX bit 26 (XSAVE) on Knights cores
L$notintel:
btl $28,%edx             // HTT
jnc L$generic
andl $0xefffffff,%edx
cmpl $0,%r10d
je L$generic
orl $0x10000000,%edx
shrl $16,%ebx
cmpb $1,%bl
ja L$generic
andl $0xefffffff,%edx    // single logical CPU: clear HTT again
L$generic:
andl $0x00000800,%r9d
andl $0xfffff7ff,%ecx
orl %ecx,%r9d            // r9d = merged ECX feature bits (result high half)
movl %edx,%r10d          // r10d = EDX feature bits (result low half)
cmpl $7,%r11d
jb L$no_extended_info
movl $7,%eax
xorl %ecx,%ecx
cpuid                    // leaf 7 subleaf 0: structured extended features
btl $26,%r9d             // XSAVE still advertised?
jc L$notknights
andl $0xfff7ffff,%ebx    // no XSAVE (Knights): also clear EBX bit 19 (ADX)
L$notknights:
movl %ebx,8(%rdi)
L$no_extended_info:
btl $27,%r9d             // OSXSAVE: may we execute xgetbv?
jnc L$clear_avx
xorl %ecx,%ecx
.byte 0x0f,0x01,0xd0     // xgetbv, byte-encoded for old assemblers
andl $6,%eax             // XCR0 bits 1|2: XMM and YMM state OS-enabled?
cmpl $6,%eax
je L$done
L$clear_avx:
movl $0xefffe7ff,%eax    // OS does not manage YMM: mask AVX-related ECX bits
andl %eax,%r9d
andl $0xffffffdf,8(%rdi) // and clear bit 5 (AVX2) in the extended-feature word
L$done:
shlq $32,%r9
movl %r10d,%eax
movq %r8,%rbx            // restore callee-saved rbx
orq %r9,%rax             // rax = (ecx-bits << 32) | edx-bits
.byte 0xf3,0xc3          // rep ret
|
XDagger/xdag | 28,453 | secp256k1/src/asm/field_10x26_arm.s | @ vim: set tabstop=8 softtabstop=8 shiftwidth=8 noexpandtab syntax=armasm:
/**********************************************************************
* Copyright (c) 2014 Wladimir J. van der Laan *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
/*
ARM implementation of field_10x26 inner loops.
Note:
- To avoid unnecessary loads and make use of available registers, two
'passes' have every time been interleaved, with the odd passes accumulating c' and d'
which will be added to c and d respectively in the even passes
*/
.syntax unified
.arch armv7-a
@ eabi attributes - see readelf -A
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use = yes
.eabi_attribute 9, 0 @ Tag_Thumb_ISA_use = no
.eabi_attribute 10, 0 @ Tag_FP_arch = none
.eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP
.eabi_attribute 30, 2 @ Tag_ABI_optimization_goals = Aggressive Speed
.eabi_attribute 34, 1 @ Tag_CPU_unaligned_access = v6
.text
@ Field constants
.set field_R0, 0x3d10
.set field_R1, 0x400
.set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff
.align 2
.global secp256k1_fe_mul_inner
.type secp256k1_fe_mul_inner, %function
@ Arguments:
@ r0 r Restrict: can overlap with a, not with b
@ r1 a
@ r2 b
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
@ -----------------------------------------------------------------------
@ void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *b)
@ ABI:   AAPCS (ARM EABI). Saves callee-saved r4-r11 and lr via stmfd.
@ In:    r0 = r  output field element, 10 limbs of 26 bits (may alias a,
@                must not alias b — see header comment above)
@        r1 = a  input field element (10x26 limbs)
@        r2 = b  input field element (10x26 limbs)
@ Out:   r[0..9] = a*b reduced into the 10x26 representation; r[9] is
@        masked to 22 bits, and r[2] may carry a few extra bits from the
@        final fold (it is stored without masking — see end of function).
@ Stack: 48 bytes (44-byte frame rounded up to keep 8-byte alignment).
@
@ Structure: passes A..J each accumulate two "columns" of the 20-column
@ schoolbook product. Odd passes build c'/d' while even passes fold them
@ into c/d, extract one 26-bit limb t[k] (spilled to the stack) and fold
@ the matching high column u[k] back down through the reduction
@ constants R0/R1 (movw field_R0 / mov field_R1 below).
@ -----------------------------------------------------------------------
secp256k1_fe_mul_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
/******************************************
* Main computation code.
******************************************
Allocation:
r0,r14,r7,r8 scratch
r1 a (pointer)
r2 b (pointer)
r3:r4 c
r5:r6 d
r11:r12 c'
r9:r10 d'
Note: do not write to r[] here, it may overlap with a[]
*/
/* A - interleaved with B */
@ Pass A: d = column 9 of a*b (sum of a[i]*b[9-i]), d' = column 10.
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #9*4] @ b[9]
ldr r0, [r1, #1*4] @ a[1]
umull r5, r6, r7, r8 @ d = a[0] * b[9]
ldr r14, [r2, #8*4] @ b[8]
umull r9, r10, r0, r8 @ d' = a[1] * b[9]
ldr r7, [r1, #2*4] @ a[2]
umlal r5, r6, r0, r14 @ d += a[1] * b[8]
ldr r8, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r14 @ d' += a[2] * b[8]
ldr r0, [r1, #3*4] @ a[3]
umlal r5, r6, r7, r8 @ d += a[2] * b[7]
ldr r14, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r8 @ d' += a[3] * b[7]
ldr r7, [r1, #4*4] @ a[4]
umlal r5, r6, r0, r14 @ d += a[3] * b[6]
ldr r8, [r2, #5*4] @ b[5]
umlal r9, r10, r7, r14 @ d' += a[4] * b[6]
ldr r0, [r1, #5*4] @ a[5]
umlal r5, r6, r7, r8 @ d += a[4] * b[5]
ldr r14, [r2, #4*4] @ b[4]
umlal r9, r10, r0, r8 @ d' += a[5] * b[5]
ldr r7, [r1, #6*4] @ a[6]
umlal r5, r6, r0, r14 @ d += a[5] * b[4]
ldr r8, [r2, #3*4] @ b[3]
umlal r9, r10, r7, r14 @ d' += a[6] * b[4]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r8 @ d += a[6] * b[3]
ldr r14, [r2, #2*4] @ b[2]
umlal r9, r10, r0, r8 @ d' += a[7] * b[3]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r14 @ d += a[7] * b[2]
ldr r8, [r2, #1*4] @ b[1]
umlal r9, r10, r7, r14 @ d' += a[8] * b[2]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r8 @ d += a[8] * b[1]
ldr r14, [r2, #0*4] @ b[0]
umlal r9, r10, r0, r8 @ d' += a[9] * b[1]
ldr r7, [r1, #0*4] @ a[0]
umlal r5, r6, r0, r14 @ d += a[9] * b[0]
@ r7,r14 used in B
bic r0, r5, field_not_M @ t9 = d & M
str r0, [sp, #4 + 4*9]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
/* B */
umull r3, r4, r7, r14 @ c = a[0] * b[0]
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u0 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u0 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t0 = c & M
str r14, [sp, #4 + 0*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u0 * R1
umlal r3, r4, r0, r14
/* C - interleaved with D */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #2*4] @ b[2]
ldr r14, [r2, #1*4] @ b[1]
umull r11, r12, r7, r8 @ c' = a[0] * b[2]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[1] * b[1]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[2] * b[0]
ldr r0, [r1, #3*4] @ a[3]
umlal r5, r6, r7, r14 @ d += a[2] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[3] * b[9]
ldr r7, [r1, #4*4] @ a[4]
umlal r5, r6, r0, r8 @ d += a[3] * b[8]
ldr r14, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r8 @ d' += a[4] * b[8]
ldr r0, [r1, #5*4] @ a[5]
umlal r5, r6, r7, r14 @ d += a[4] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r14 @ d' += a[5] * b[7]
ldr r7, [r1, #6*4] @ a[6]
umlal r5, r6, r0, r8 @ d += a[5] * b[6]
ldr r14, [r2, #5*4] @ b[5]
umlal r9, r10, r7, r8 @ d' += a[6] * b[6]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r14 @ d += a[6] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r9, r10, r0, r14 @ d' += a[7] * b[5]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r8 @ d += a[7] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r9, r10, r7, r8 @ d' += a[8] * b[4]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r9, r10, r0, r14 @ d' += a[9] * b[3]
umlal r5, r6, r0, r8 @ d += a[9] * b[2]
bic r0, r5, field_not_M @ u1 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u1 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t1 = c & M
str r14, [sp, #4 + 1*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u1 * R1
umlal r3, r4, r0, r14
/* D */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u2 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u2 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t2 = c & M
str r14, [sp, #4 + 2*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u2 * R1
umlal r3, r4, r0, r14
/* E - interleaved with F */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #4*4] @ b[4]
umull r11, r12, r7, r8 @ c' = a[0] * b[4]
ldr r8, [r2, #3*4] @ b[3]
umlal r3, r4, r7, r8 @ c += a[0] * b[3]
ldr r7, [r1, #1*4] @ a[1]
umlal r11, r12, r7, r8 @ c' += a[1] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r3, r4, r7, r8 @ c += a[1] * b[2]
ldr r7, [r1, #2*4] @ a[2]
umlal r11, r12, r7, r8 @ c' += a[2] * b[2]
ldr r8, [r2, #1*4] @ b[1]
umlal r3, r4, r7, r8 @ c += a[2] * b[1]
ldr r7, [r1, #3*4] @ a[3]
umlal r11, r12, r7, r8 @ c' += a[3] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r3, r4, r7, r8 @ c += a[3] * b[0]
ldr r7, [r1, #4*4] @ a[4]
umlal r11, r12, r7, r8 @ c' += a[4] * b[0]
ldr r8, [r2, #9*4] @ b[9]
umlal r5, r6, r7, r8 @ d += a[4] * b[9]
ldr r7, [r1, #5*4] @ a[5]
umull r9, r10, r7, r8 @ d' = a[5] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umlal r5, r6, r7, r8 @ d += a[5] * b[8]
ldr r7, [r1, #6*4] @ a[6]
umlal r9, r10, r7, r8 @ d' += a[6] * b[8]
ldr r8, [r2, #7*4] @ b[7]
umlal r5, r6, r7, r8 @ d += a[6] * b[7]
ldr r7, [r1, #7*4] @ a[7]
umlal r9, r10, r7, r8 @ d' += a[7] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r5, r6, r7, r8 @ d += a[7] * b[6]
ldr r7, [r1, #8*4] @ a[8]
umlal r9, r10, r7, r8 @ d' += a[8] * b[6]
ldr r8, [r2, #5*4] @ b[5]
umlal r5, r6, r7, r8 @ d += a[8] * b[5]
ldr r7, [r1, #9*4] @ a[9]
umlal r9, r10, r7, r8 @ d' += a[9] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r5, r6, r7, r8 @ d += a[9] * b[4]
bic r0, r5, field_not_M @ u3 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u3 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t3 = c & M
str r14, [sp, #4 + 3*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u3 * R1
umlal r3, r4, r0, r14
/* F */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u4 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u4 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t4 = c & M
str r14, [sp, #4 + 4*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u4 * R1
umlal r3, r4, r0, r14
/* G - interleaved with H */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #6*4] @ b[6]
ldr r14, [r2, #5*4] @ b[5]
umull r11, r12, r7, r8 @ c' = a[0] * b[6]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r11, r12, r0, r14 @ c' += a[1] * b[5]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r11, r12, r7, r8 @ c' += a[2] * b[4]
ldr r0, [r1, #3*4] @ a[3]
umlal r3, r4, r7, r14 @ c += a[2] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r11, r12, r0, r14 @ c' += a[3] * b[3]
ldr r7, [r1, #4*4] @ a[4]
umlal r3, r4, r0, r8 @ c += a[3] * b[2]
ldr r14, [r2, #1*4] @ b[1]
umlal r11, r12, r7, r8 @ c' += a[4] * b[2]
ldr r0, [r1, #5*4] @ a[5]
umlal r3, r4, r7, r14 @ c += a[4] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[5] * b[1]
ldr r7, [r1, #6*4] @ a[6]
umlal r3, r4, r0, r8 @ c += a[5] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[6] * b[0]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r14 @ d += a[6] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[7] * b[9]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r8 @ d += a[7] * b[8]
ldr r14, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r8 @ d' += a[8] * b[8]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r14 @ d' += a[9] * b[7]
umlal r5, r6, r0, r8 @ d += a[9] * b[6]
bic r0, r5, field_not_M @ u5 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u5 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t5 = c & M
str r14, [sp, #4 + 5*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u5 * R1
umlal r3, r4, r0, r14
/* H */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u6 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u6 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t6 = c & M
str r14, [sp, #4 + 6*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u6 * R1
umlal r3, r4, r0, r14
/* I - interleaved with J */
ldr r8, [r2, #8*4] @ b[8]
ldr r7, [r1, #0*4] @ a[0]
ldr r14, [r2, #7*4] @ b[7]
umull r11, r12, r7, r8 @ c' = a[0] * b[8]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r11, r12, r0, r14 @ c' += a[1] * b[7]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[6]
ldr r14, [r2, #5*4] @ b[5]
umlal r11, r12, r7, r8 @ c' += a[2] * b[6]
ldr r0, [r1, #3*4] @ a[3]
umlal r3, r4, r7, r14 @ c += a[2] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r11, r12, r0, r14 @ c' += a[3] * b[5]
ldr r7, [r1, #4*4] @ a[4]
umlal r3, r4, r0, r8 @ c += a[3] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r11, r12, r7, r8 @ c' += a[4] * b[4]
ldr r0, [r1, #5*4] @ a[5]
umlal r3, r4, r7, r14 @ c += a[4] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r11, r12, r0, r14 @ c' += a[5] * b[3]
ldr r7, [r1, #6*4] @ a[6]
umlal r3, r4, r0, r8 @ c += a[5] * b[2]
ldr r14, [r2, #1*4] @ b[1]
umlal r11, r12, r7, r8 @ c' += a[6] * b[2]
ldr r0, [r1, #7*4] @ a[7]
umlal r3, r4, r7, r14 @ c += a[6] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[7] * b[1]
ldr r7, [r1, #8*4] @ a[8]
umlal r3, r4, r0, r8 @ c += a[7] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[8] * b[0]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[9] * b[9]
umlal r5, r6, r0, r8 @ d += a[9] * b[8]
bic r0, r5, field_not_M @ u7 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u7 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t7 = c & M
str r14, [sp, #4 + 7*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u7 * R1
umlal r3, r4, r0, r14
/* J */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u8 = d & M
str r0, [sp, #4 + 8*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u8 * R0
umlal r3, r4, r0, r14
/******************************************
* compute and write back result
******************************************
Allocation:
r0 r
r3:r4 c
r5:r6 d
r7 t0
r8 t1
r9 t2
r11 u8
r12 t9
r1,r2,r10,r14 scratch
Note: do not read from a[] after here, it may overlap with r[]
*/
ldr r0, [sp, #0]
add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9
ldmia r1, {r2,r7,r8,r9,r10,r11,r12}
add r1, r0, #3*4
stmia r1, {r2,r7,r8,r9,r10}
bic r2, r3, field_not_M @ r[8] = c & M
str r2, [r0, #8*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u8 * R1
umlal r3, r4, r11, r14
movw r14, field_R0 @ c += d * R0
umlal r3, r4, r5, r14
adds r3, r3, r12 @ c += t9
adc r4, r4, #0
add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2
ldmia r1, {r7,r8,r9}
ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4)
str r2, [r0, #9*4]
mov r3, r3, lsr #22 @ c >>= 22
orr r3, r3, r4, asl #10
mov r4, r4, lsr #22
movw r14, field_R1 << 4 @ c += d * (R1 << 4)
umlal r3, r4, r5, r14
movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add)
umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4)
adds r5, r5, r7 @ d.lo += t0
mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4)
adc r6, r6, 0 @ d.hi += carry  (NOTE(review): immediate lacks the '#' prefix used elsewhere in this file; GAS unified syntax accepts both)
bic r2, r5, field_not_M @ r[0] = d & M
str r2, [r0, #0*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add)
umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4)
adds r5, r5, r8 @ d.lo += t1
adc r6, r6, #0 @ d.hi += carry
adds r5, r5, r1 @ d.lo += tmp.lo
mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4)
adc r6, r6, r2 @ d.hi += carry + tmp.hi
bic r2, r5, field_not_M @ r[1] = d & M
str r2, [r0, #1*4]
mov r5, r5, lsr #26 @ d >>= 26 (ignore hi)
orr r5, r5, r6, asl #6
add r5, r5, r9 @ d += t2
str r5, [r0, #2*4] @ r[2] = d (stored unmasked: may exceed 26 bits by the folded carry)
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_mul_inner, .-secp256k1_fe_mul_inner
.align 2
.global secp256k1_fe_sqr_inner
.type secp256k1_fe_sqr_inner, %function
@ Arguments:
@ r0 r Can overlap with a
@ r1 a
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
@ -----------------------------------------------------------------------
@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a)
@ ABI:   AAPCS (ARM EABI). Saves callee-saved r4-r11 and lr via stmfd.
@ In:    r0 = r  output field element, 10 limbs of 26 bits (may alias a)
@        r1 = a  input field element (10x26 limbs)
@ Out:   r[0..9] = a*a reduced into the 10x26 representation; r[9] is
@        masked to 22 bits, and r[2] may carry a few extra bits from the
@        final fold (it is stored without masking — see end of function).
@ Stack: 48 bytes (44-byte frame rounded up to keep 8-byte alignment).
@
@ Same A..J pass structure as secp256k1_fe_mul_inner above, but the
@ squaring exploits symmetry: off-diagonal terms a[i]*a[j] (i != j) are
@ computed once with one factor pre-doubled (the 'mov rX, rX, asl #1'
@ instructions), and only the diagonal a[i]*a[i] terms appear undoubled.
@ -----------------------------------------------------------------------
secp256k1_fe_sqr_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
/******************************************
* Main computation code.
******************************************
Allocation:
r0,r14,r2,r7,r8 scratch
r1 a (pointer)
r3:r4 c
r5:r6 d
r11:r12 c'
r9:r10 d'
Note: do not write to r[] here, it may overlap with a[]
*/
/* A interleaved with B */
ldr r0, [r1, #1*4] @ a[1]*2
ldr r7, [r1, #0*4] @ a[0]
mov r0, r0, asl #1
ldr r14, [r1, #9*4] @ a[9]
umull r3, r4, r7, r7 @ c = a[0] * a[0]
ldr r8, [r1, #8*4] @ a[8]
mov r7, r7, asl #1
umull r5, r6, r7, r14 @ d = a[0]*2 * a[9]
ldr r7, [r1, #2*4] @ a[2]*2
umull r9, r10, r0, r14 @ d' = a[1]*2 * a[9]
ldr r14, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r8 @ d += a[1]*2 * a[8]
mov r7, r7, asl #1
ldr r0, [r1, #3*4] @ a[3]*2
umlal r9, r10, r7, r8 @ d' += a[2]*2 * a[8]
ldr r8, [r1, #6*4] @ a[6]
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[7]
mov r0, r0, asl #1
ldr r7, [r1, #4*4] @ a[4]*2
umlal r9, r10, r0, r14 @ d' += a[3]*2 * a[7]
ldr r14, [r1, #5*4] @ a[5]
mov r7, r7, asl #1
umlal r5, r6, r0, r8 @ d += a[3]*2 * a[6]
umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[6]
umlal r5, r6, r7, r14 @ d += a[4]*2 * a[5]
umlal r9, r10, r14, r14 @ d' += a[5] * a[5]
bic r0, r5, field_not_M @ t9 = d & M
str r0, [sp, #4 + 9*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
/* B */
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u0 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u0 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t0 = c & M
str r14, [sp, #4 + 0*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u0 * R1
umlal r3, r4, r0, r14
/* C interleaved with D */
ldr r0, [r1, #0*4] @ a[0]*2
ldr r14, [r1, #1*4] @ a[1]
mov r0, r0, asl #1
ldr r8, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r14 @ c += a[0]*2 * a[1]
mov r7, r8, asl #1 @ a[2]*2
umull r11, r12, r14, r14 @ c' = a[1] * a[1]
ldr r14, [r1, #9*4] @ a[9]
umlal r11, r12, r0, r8 @ c' += a[0]*2 * a[2]
ldr r0, [r1, #3*4] @ a[3]*2
ldr r8, [r1, #8*4] @ a[8]
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[9]
mov r0, r0, asl #1
ldr r7, [r1, #4*4] @ a[4]*2
umull r9, r10, r0, r14 @ d' = a[3]*2 * a[9]
ldr r14, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r8 @ d += a[3]*2 * a[8]
mov r7, r7, asl #1
ldr r0, [r1, #5*4] @ a[5]*2
umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[8]
ldr r8, [r1, #6*4] @ a[6]
mov r0, r0, asl #1
umlal r5, r6, r7, r14 @ d += a[4]*2 * a[7]
umlal r9, r10, r0, r14 @ d' += a[5]*2 * a[7]
umlal r5, r6, r0, r8 @ d += a[5]*2 * a[6]
umlal r9, r10, r8, r8 @ d' += a[6] * a[6]
bic r0, r5, field_not_M @ u1 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u1 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t1 = c & M
str r14, [sp, #4 + 1*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u1 * R1
umlal r3, r4, r0, r14
/* D */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u2 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u2 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t2 = c & M
str r14, [sp, #4 + 2*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u2 * R1
umlal r3, r4, r0, r14
/* E interleaved with F */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
ldr r14, [r1, #2*4] @ a[2]
mov r7, r7, asl #1
ldr r8, [r1, #3*4] @ a[3]
ldr r2, [r1, #4*4] @ a[4] (doubled to a[4]*2 below)
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[3]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[4]
mov r2, r2, asl #1 @ a[4]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[3]
ldr r8, [r1, #9*4] @ a[9]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[2]
ldr r0, [r1, #5*4] @ a[5]*2
umlal r11, r12, r14, r14 @ c' += a[2] * a[2]
ldr r14, [r1, #8*4] @ a[8]
mov r0, r0, asl #1
umlal r5, r6, r2, r8 @ d += a[4]*2 * a[9]
ldr r7, [r1, #6*4] @ a[6]*2
umull r9, r10, r0, r8 @ d' = a[5]*2 * a[9]
mov r7, r7, asl #1
ldr r8, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r14 @ d += a[5]*2 * a[8]
umlal r9, r10, r7, r14 @ d' += a[6]*2 * a[8]
umlal r5, r6, r7, r8 @ d += a[6]*2 * a[7]
umlal r9, r10, r8, r8 @ d' += a[7] * a[7]
bic r0, r5, field_not_M @ u3 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u3 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t3 = c & M
str r14, [sp, #4 + 3*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u3 * R1
umlal r3, r4, r0, r14
/* F */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u4 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u4 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t4 = c & M
str r14, [sp, #4 + 4*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u4 * R1
umlal r3, r4, r0, r14
/* G interleaved with H */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
mov r7, r7, asl #1
ldr r8, [r1, #5*4] @ a[5]
ldr r2, [r1, #6*4] @ a[6]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[5]
ldr r14, [r1, #4*4] @ a[4]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[6]
ldr r7, [r1, #2*4] @ a[2]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[5]
mov r7, r7, asl #1
ldr r8, [r1, #3*4] @ a[3]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[4]
mov r0, r2, asl #1 @ a[6]*2
umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[4]
ldr r14, [r1, #9*4] @ a[9]
umlal r3, r4, r7, r8 @ c += a[2]*2 * a[3]
ldr r7, [r1, #7*4] @ a[7]*2
umlal r11, r12, r8, r8 @ c' += a[3] * a[3]
mov r7, r7, asl #1
ldr r8, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r14 @ d += a[6]*2 * a[9]
umull r9, r10, r7, r14 @ d' = a[7]*2 * a[9]
umlal r5, r6, r7, r8 @ d += a[7]*2 * a[8]
umlal r9, r10, r8, r8 @ d' += a[8] * a[8]
bic r0, r5, field_not_M @ u5 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u5 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t5 = c & M
str r14, [sp, #4 + 5*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u5 * R1
umlal r3, r4, r0, r14
/* H */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u6 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u6 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t6 = c & M
str r14, [sp, #4 + 6*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u6 * R1
umlal r3, r4, r0, r14
/* I interleaved with J */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
mov r7, r7, asl #1
ldr r8, [r1, #7*4] @ a[7]
ldr r2, [r1, #8*4] @ a[8]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[7]
ldr r14, [r1, #6*4] @ a[6]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[8]
ldr r7, [r1, #2*4] @ a[2]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[7]
ldr r8, [r1, #5*4] @ a[5]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[6]
ldr r0, [r1, #3*4] @ a[3]*2
mov r7, r7, asl #1
umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[6]
ldr r14, [r1, #4*4] @ a[4]
mov r0, r0, asl #1
umlal r3, r4, r7, r8 @ c += a[2]*2 * a[5]
mov r2, r2, asl #1 @ a[8]*2
umlal r11, r12, r0, r8 @ c' += a[3]*2 * a[5]
umlal r3, r4, r0, r14 @ c += a[3]*2 * a[4]
umlal r11, r12, r14, r14 @ c' += a[4] * a[4]
ldr r8, [r1, #9*4] @ a[9]
umlal r5, r6, r2, r8 @ d += a[8]*2 * a[9]
@ r8 will be used in J
bic r0, r5, field_not_M @ u7 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u7 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t7 = c & M
str r14, [sp, #4 + 7*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u7 * R1
umlal r3, r4, r0, r14
/* J */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
umlal r5, r6, r8, r8 @ d += a[9] * a[9]
bic r0, r5, field_not_M @ u8 = d & M
str r0, [sp, #4 + 8*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u8 * R0
umlal r3, r4, r0, r14
/******************************************
* compute and write back result
******************************************
Allocation:
r0 r
r3:r4 c
r5:r6 d
r7 t0
r8 t1
r9 t2
r11 u8
r12 t9
r1,r2,r10,r14 scratch
Note: do not read from a[] after here, it may overlap with r[]
*/
ldr r0, [sp, #0]
add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9
ldmia r1, {r2,r7,r8,r9,r10,r11,r12}
add r1, r0, #3*4
stmia r1, {r2,r7,r8,r9,r10}
bic r2, r3, field_not_M @ r[8] = c & M
str r2, [r0, #8*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u8 * R1
umlal r3, r4, r11, r14
movw r14, field_R0 @ c += d * R0
umlal r3, r4, r5, r14
adds r3, r3, r12 @ c += t9
adc r4, r4, #0
add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2
ldmia r1, {r7,r8,r9}
ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4)
str r2, [r0, #9*4]
mov r3, r3, lsr #22 @ c >>= 22
orr r3, r3, r4, asl #10
mov r4, r4, lsr #22
movw r14, field_R1 << 4 @ c += d * (R1 << 4)
umlal r3, r4, r5, r14
movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add)
umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4)
adds r5, r5, r7 @ d.lo += t0
mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4)
adc r6, r6, 0 @ d.hi += carry  (NOTE(review): immediate lacks the '#' prefix used elsewhere in this file; GAS unified syntax accepts both)
bic r2, r5, field_not_M @ r[0] = d & M
str r2, [r0, #0*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add)
umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4)
adds r5, r5, r8 @ d.lo += t1
adc r6, r6, #0 @ d.hi += carry
adds r5, r5, r1 @ d.lo += tmp.lo
mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4)
adc r6, r6, r2 @ d.hi += carry + tmp.hi
bic r2, r5, field_not_M @ r[1] = d & M
str r2, [r0, #1*4]
mov r5, r5, lsr #26 @ d >>= 26 (ignore hi)
orr r5, r5, r6, asl #6
add r5, r5, r9 @ d += t2
str r5, [r0, #2*4] @ r[2] = d (stored unmasked: may exceed 26 bits by the folded carry)
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner
|
XDagger/xdag | 154,762 | client/algorithms/sha256-mb-x86_64-mac.s | .text
.globl _xsha256_multi_block
.p2align 5
_xsha256_multi_block:
movq _xOPENSSL_ia32cap_P+4(%rip),%rcx
btq $61,%rcx
jc _shaext_shortcut
testl $268435456,%ecx
jnz _avx_shortcut
movq %rsp,%rax
pushq %rbx
pushq %rbp
subq $288,%rsp
andq $-256,%rsp
movq %rax,272(%rsp)
L$body:
leaq K256+128(%rip),%rbp
leaq 256(%rsp),%rbx
leaq 128(%rdi),%rdi
L$oop_grande:
movl %edx,280(%rsp)
xorl %edx,%edx
movq 0(%rsi),%r8
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rbp,%r8
movq 16(%rsi),%r9
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rbp,%r9
movq 32(%rsi),%r10
movl 40(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,8(%rbx)
cmovleq %rbp,%r10
movq 48(%rsi),%r11
movl 56(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,12(%rbx)
cmovleq %rbp,%r11
testl %edx,%edx
jz L$done
movdqu 0-128(%rdi),%xmm8
leaq 128(%rsp),%rax
movdqu 32-128(%rdi),%xmm9
movdqu 64-128(%rdi),%xmm10
movdqu 96-128(%rdi),%xmm11
movdqu 128-128(%rdi),%xmm12
movdqu 160-128(%rdi),%xmm13
movdqu 192-128(%rdi),%xmm14
movdqu 224-128(%rdi),%xmm15
movdqu L$pbswap(%rip),%xmm6
jmp L$oop
.p2align 5
L$oop:
movdqa %xmm10,%xmm4
pxor %xmm9,%xmm4
movd 0(%r8),%xmm5
movd 0(%r9),%xmm0
movd 0(%r10),%xmm1
movd 0(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm12,%xmm7
.byte 102,15,56,0,238
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,0-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movd 4(%r8),%xmm5
movd 4(%r9),%xmm0
movd 4(%r10),%xmm1
movd 4(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm5,16-128(%rax)
paddd %xmm14,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm5,%xmm10
pxor %xmm2,%xmm7
paddd %xmm5,%xmm14
paddd %xmm7,%xmm14
movd 8(%r8),%xmm5
movd 8(%r9),%xmm0
movd 8(%r10),%xmm1
movd 8(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm10,%xmm7
.byte 102,15,56,0,238
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,32-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movd 12(%r8),%xmm5
movd 12(%r9),%xmm0
movd 12(%r10),%xmm1
movd 12(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm5,48-128(%rax)
paddd %xmm12,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm5,%xmm8
pxor %xmm2,%xmm7
paddd %xmm5,%xmm12
paddd %xmm7,%xmm12
movd 16(%r8),%xmm5
movd 16(%r9),%xmm0
movd 16(%r10),%xmm1
movd 16(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm8,%xmm7
.byte 102,15,56,0,238
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,64-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movd 20(%r8),%xmm5
movd 20(%r9),%xmm0
movd 20(%r10),%xmm1
movd 20(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm5,80-128(%rax)
paddd %xmm10,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm5,%xmm14
pxor %xmm2,%xmm7
paddd %xmm5,%xmm10
paddd %xmm7,%xmm10
movd 24(%r8),%xmm5
movd 24(%r9),%xmm0
movd 24(%r10),%xmm1
movd 24(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm14,%xmm7
.byte 102,15,56,0,238
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,96-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movd 28(%r8),%xmm5
movd 28(%r9),%xmm0
movd 28(%r10),%xmm1
movd 28(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm5,112-128(%rax)
paddd %xmm8,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm5,%xmm12
pxor %xmm2,%xmm7
paddd %xmm5,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
movd 32(%r8),%xmm5
movd 32(%r9),%xmm0
movd 32(%r10),%xmm1
movd 32(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm12,%xmm7
.byte 102,15,56,0,238
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,128-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movd 36(%r8),%xmm5
movd 36(%r9),%xmm0
movd 36(%r10),%xmm1
movd 36(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm5,144-128(%rax)
paddd %xmm14,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm5,%xmm10
pxor %xmm2,%xmm7
paddd %xmm5,%xmm14
paddd %xmm7,%xmm14
movd 40(%r8),%xmm5
movd 40(%r9),%xmm0
movd 40(%r10),%xmm1
movd 40(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm10,%xmm7
.byte 102,15,56,0,238
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,160-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movd 44(%r8),%xmm5
movd 44(%r9),%xmm0
movd 44(%r10),%xmm1
movd 44(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm5,176-128(%rax)
paddd %xmm12,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm5,%xmm8
pxor %xmm2,%xmm7
paddd %xmm5,%xmm12
paddd %xmm7,%xmm12
movd 48(%r8),%xmm5
movd 48(%r9),%xmm0
movd 48(%r10),%xmm1
movd 48(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm8,%xmm7
.byte 102,15,56,0,238
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,192-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movd 52(%r8),%xmm5
movd 52(%r9),%xmm0
movd 52(%r10),%xmm1
movd 52(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm5,208-128(%rax)
paddd %xmm10,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm5,%xmm14
pxor %xmm2,%xmm7
paddd %xmm5,%xmm10
paddd %xmm7,%xmm10
movd 56(%r8),%xmm5
movd 56(%r9),%xmm0
movd 56(%r10),%xmm1
movd 56(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm14,%xmm7
.byte 102,15,56,0,238
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,224-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movd 60(%r8),%xmm5
leaq 64(%r8),%r8
movd 60(%r9),%xmm0
leaq 64(%r9),%r9
movd 60(%r10),%xmm1
leaq 64(%r10),%r10
movd 60(%r11),%xmm2
leaq 64(%r11),%r11
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm5,240-128(%rax)
paddd %xmm8,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
prefetcht0 63(%r8)
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
prefetcht0 63(%r9)
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
prefetcht0 63(%r10)
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
prefetcht0 63(%r11)
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm5,%xmm12
pxor %xmm2,%xmm7
paddd %xmm5,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
movdqu 0-128(%rax),%xmm5
movl $3,%ecx
jmp L$oop_16_xx
.p2align 5
L$oop_16_xx:
movdqa 16-128(%rax),%xmm6
paddd 144-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 224-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm12,%xmm7
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,0-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movdqa 32-128(%rax),%xmm5
paddd 160-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 240-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm6,16-128(%rax)
paddd %xmm14,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm6,%xmm10
pxor %xmm2,%xmm7
paddd %xmm6,%xmm14
paddd %xmm7,%xmm14
movdqa 48-128(%rax),%xmm6
paddd 176-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 0-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm10,%xmm7
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,32-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movdqa 64-128(%rax),%xmm5
paddd 192-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 16-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm6,48-128(%rax)
paddd %xmm12,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm6,%xmm8
pxor %xmm2,%xmm7
paddd %xmm6,%xmm12
paddd %xmm7,%xmm12
movdqa 80-128(%rax),%xmm6
paddd 208-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 32-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm8,%xmm7
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,64-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movdqa 96-128(%rax),%xmm5
paddd 224-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 48-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm6,80-128(%rax)
paddd %xmm10,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm6,%xmm14
pxor %xmm2,%xmm7
paddd %xmm6,%xmm10
paddd %xmm7,%xmm10
movdqa 112-128(%rax),%xmm6
paddd 240-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 64-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm14,%xmm7
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,96-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movdqa 128-128(%rax),%xmm5
paddd 0-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 80-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm6,112-128(%rax)
paddd %xmm8,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm6,%xmm12
pxor %xmm2,%xmm7
paddd %xmm6,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
movdqa 144-128(%rax),%xmm6
paddd 16-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 96-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm12,%xmm7
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,128-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movdqa 160-128(%rax),%xmm5
paddd 32-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 112-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm6,144-128(%rax)
paddd %xmm14,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm6,%xmm10
pxor %xmm2,%xmm7
paddd %xmm6,%xmm14
paddd %xmm7,%xmm14
movdqa 176-128(%rax),%xmm6
paddd 48-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 128-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm10,%xmm7
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,160-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movdqa 192-128(%rax),%xmm5
paddd 64-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 144-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm6,176-128(%rax)
paddd %xmm12,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm6,%xmm8
pxor %xmm2,%xmm7
paddd %xmm6,%xmm12
paddd %xmm7,%xmm12
movdqa 208-128(%rax),%xmm6
paddd 80-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 160-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm8,%xmm7
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,192-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movdqa 224-128(%rax),%xmm5
paddd 96-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 176-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm6,208-128(%rax)
paddd %xmm10,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm6,%xmm14
pxor %xmm2,%xmm7
paddd %xmm6,%xmm10
paddd %xmm7,%xmm10
movdqa 240-128(%rax),%xmm6
paddd 112-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 192-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm14,%xmm7
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,224-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movdqa 0-128(%rax),%xmm5
paddd 128-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 208-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm6,240-128(%rax)
paddd %xmm8,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm6,%xmm12
pxor %xmm2,%xmm7
paddd %xmm6,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
decl %ecx
jnz L$oop_16_xx
movl $1,%ecx
leaq K256+128(%rip),%rbp
movdqa (%rbx),%xmm7
cmpl 0(%rbx),%ecx
pxor %xmm0,%xmm0
cmovgeq %rbp,%r8
cmpl 4(%rbx),%ecx
movdqa %xmm7,%xmm6
cmovgeq %rbp,%r9
cmpl 8(%rbx),%ecx
pcmpgtd %xmm0,%xmm6
cmovgeq %rbp,%r10
cmpl 12(%rbx),%ecx
paddd %xmm6,%xmm7
cmovgeq %rbp,%r11
movdqu 0-128(%rdi),%xmm0
pand %xmm6,%xmm8
movdqu 32-128(%rdi),%xmm1
pand %xmm6,%xmm9
movdqu 64-128(%rdi),%xmm2
pand %xmm6,%xmm10
movdqu 96-128(%rdi),%xmm5
pand %xmm6,%xmm11
paddd %xmm0,%xmm8
movdqu 128-128(%rdi),%xmm0
pand %xmm6,%xmm12
paddd %xmm1,%xmm9
movdqu 160-128(%rdi),%xmm1
pand %xmm6,%xmm13
paddd %xmm2,%xmm10
movdqu 192-128(%rdi),%xmm2
pand %xmm6,%xmm14
paddd %xmm5,%xmm11
movdqu 224-128(%rdi),%xmm5
pand %xmm6,%xmm15
paddd %xmm0,%xmm12
paddd %xmm1,%xmm13
movdqu %xmm8,0-128(%rdi)
paddd %xmm2,%xmm14
movdqu %xmm9,32-128(%rdi)
paddd %xmm5,%xmm15
movdqu %xmm10,64-128(%rdi)
movdqu %xmm11,96-128(%rdi)
movdqu %xmm12,128-128(%rdi)
movdqu %xmm13,160-128(%rdi)
movdqu %xmm14,192-128(%rdi)
movdqu %xmm15,224-128(%rdi)
movdqa %xmm7,(%rbx)
movdqa L$pbswap(%rip),%xmm6
decl %edx
jnz L$oop
movl 280(%rsp),%edx
leaq 16(%rdi),%rdi
leaq 64(%rsi),%rsi
decl %edx
jnz L$oop_grande
L$done:
movq 272(%rsp),%rax
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
L$epilogue:
.byte 0xf3,0xc3
.p2align 5
#-----------------------------------------------------------------------
# sha256_multi_block_shaext — SHA-NI (Intel SHA extensions) flavour of
# the multi-buffer SHA-256 block function.  Machine-generated (perlasm)
# code: instructions below are left byte-identical, only comments added.
#
# ABI: SysV AMD64.
#   rdi = interleaved SHA-256 state array (biased by +128 below so the
#         hot loads/stores use short signed displacements)
#   rsi = array of per-lane descriptors, 16 bytes apart: an 8-byte data
#         pointer followed by a 4-byte block count (layout inferred from
#         the 0/8 and 16/24 offsets used below — NOTE(review): confirm
#         against the C prototype of the original module)
#   edx = lane/descriptor count; shifted left by 1 at entry, presumably
#         because this path retires lanes in half-steps — TODO confirm
#
# The `.byte` runs are hand-encoded instructions, emitted numerically
# for assemblers that predate the mnemonics:
#   0F 38 CB = sha256rnds2   0F 38 CC = sha256msg1   0F 38 CD = sha256msg2
#   66 0F 38 00 = pshufb     66 0F 3A 0F = palignr
# Two message streams (r8 and r9) are processed in parallel per pass.
#-----------------------------------------------------------------------
sha256_multi_block_shaext:
_shaext_shortcut:
# Prologue: save caller rsp in rax, preserve rbx/rbp, carve out a
# 288-byte frame and align rsp down to 256; original rsp is also kept
# at 272(%rsp) (stored, though the epilogue below restores via %rax).
movq %rsp,%rax
pushq %rbx
pushq %rbp
subq $288,%rsp
shll $1,%edx
andq $-256,%rsp
leaq 128(%rdi),%rdi
movq %rax,272(%rsp)
L$body_shaext:
# rbx -> per-lane block counters (256(%rsp)); rbp -> K256 table, biased
# by +128 like rdi so round constants sit at small displacements.
leaq 256(%rsp),%rbx
leaq K256_shaext+128(%rip),%rbp
L$oop_grande_shaext:
# Read up to two {ptr, blocks} descriptors.  edx accumulates the max
# block count across lanes; a lane whose count is <= 0 gets its data
# pointer redirected at rsp (harmless dummy reads, result masked later).
movl %edx,280(%rsp)
xorl %edx,%edx
movq 0(%rsi),%r8
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rsp,%r8
movq 16(%rsi),%r9
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rsp,%r9
testl %edx,%edx
jz L$done_shaext
# Load both lanes' state words from the interleaved context and
# transpose into the ABEF/CDGH register layout the SHA-NI instructions
# expect; pshufd $27 reverses the four dwords of each register.
movq 0-128(%rdi),%xmm12
movq 32-128(%rdi),%xmm4
movq 64-128(%rdi),%xmm13
movq 96-128(%rdi),%xmm5
movq 128-128(%rdi),%xmm8
movq 160-128(%rdi),%xmm9
movq 192-128(%rdi),%xmm10
movq 224-128(%rdi),%xmm11
punpckldq %xmm4,%xmm12
punpckldq %xmm5,%xmm13
punpckldq %xmm9,%xmm8
punpckldq %xmm11,%xmm10
# 16 bytes below K256_shaext: used as a pshufb mask below — presumably
# the big-endian byte-swap pattern; verify against the data section.
movdqa K256_shaext-16(%rip),%xmm3
movdqa %xmm12,%xmm14
movdqa %xmm13,%xmm15
punpcklqdq %xmm8,%xmm12
punpcklqdq %xmm10,%xmm13
punpckhqdq %xmm8,%xmm14
punpckhqdq %xmm10,%xmm15
pshufd $27,%xmm12,%xmm12
pshufd $27,%xmm13,%xmm13
pshufd $27,%xmm14,%xmm14
pshufd $27,%xmm15,%xmm15
jmp L$oop_shaext
.p2align 5
L$oop_shaext:
# One 64-byte block per lane per iteration.  xmm4-7 = lane-0 message
# schedule, xmm8-11 = lane-1; byte-swapped via pshufb (%xmm3 mask).
# Rounds proceed in groups of four, interleaving sha256rnds2 on both
# lanes with sha256msg1/msg2 schedule updates and palignr combines.
movdqu 0(%r8),%xmm4
movdqu 0(%r9),%xmm8
movdqu 16(%r8),%xmm5
movdqu 16(%r9),%xmm9
movdqu 32(%r8),%xmm6
.byte 102,15,56,0,227
movdqu 32(%r9),%xmm10
.byte 102,68,15,56,0,195
movdqu 48(%r8),%xmm7
leaq 64(%r8),%r8
movdqu 48(%r9),%xmm11
leaq 64(%r9),%r9
movdqa 0-128(%rbp),%xmm0
.byte 102,15,56,0,235
paddd %xmm4,%xmm0
pxor %xmm12,%xmm4
movdqa %xmm0,%xmm1
movdqa 0-128(%rbp),%xmm2
.byte 102,68,15,56,0,203
paddd %xmm8,%xmm2
# Stash entry-state copies on the stack; they are added back (masked)
# at lane retirement below.
movdqa %xmm13,80(%rsp)
.byte 69,15,56,203,236
pxor %xmm14,%xmm8
movdqa %xmm2,%xmm0
movdqa %xmm15,112(%rsp)
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
pxor %xmm12,%xmm4
movdqa %xmm12,64(%rsp)
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
pxor %xmm14,%xmm8
movdqa %xmm14,96(%rsp)
movdqa 16-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 102,15,56,0,243
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
movdqa 16-128(%rbp),%xmm2
paddd %xmm9,%xmm2
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
prefetcht0 127(%r8)
.byte 102,15,56,0,251
.byte 102,68,15,56,0,211
prefetcht0 127(%r9)
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
.byte 102,68,15,56,0,219
.byte 15,56,204,229
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 32-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
movdqa 32-128(%rbp),%xmm2
paddd %xmm10,%xmm2
.byte 69,15,56,203,236
.byte 69,15,56,204,193
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm3
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
.byte 102,15,58,15,222,4
paddd %xmm3,%xmm4
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
.byte 15,56,204,238
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 48-128(%rbp),%xmm1
paddd %xmm7,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,202
movdqa %xmm1,%xmm0
movdqa 48-128(%rbp),%xmm2
paddd %xmm3,%xmm8
paddd %xmm11,%xmm2
.byte 15,56,205,231
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm4,%xmm3
.byte 102,15,58,15,223,4
.byte 69,15,56,203,254
.byte 69,15,56,205,195
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm5
movdqa %xmm8,%xmm3
.byte 102,65,15,58,15,219,4
.byte 15,56,204,247
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 64-128(%rbp),%xmm1
paddd %xmm4,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,211
movdqa %xmm1,%xmm0
movdqa 64-128(%rbp),%xmm2
paddd %xmm3,%xmm9
paddd %xmm8,%xmm2
.byte 15,56,205,236
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,58,15,220,4
.byte 69,15,56,203,254
.byte 69,15,56,205,200
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm6
movdqa %xmm9,%xmm3
.byte 102,65,15,58,15,216,4
.byte 15,56,204,252
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 80-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,216
movdqa %xmm1,%xmm0
movdqa 80-128(%rbp),%xmm2
paddd %xmm3,%xmm10
paddd %xmm9,%xmm2
.byte 15,56,205,245
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm6,%xmm3
.byte 102,15,58,15,221,4
.byte 69,15,56,203,254
.byte 69,15,56,205,209
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm7
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,217,4
.byte 15,56,204,229
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 96-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,193
movdqa %xmm1,%xmm0
movdqa 96-128(%rbp),%xmm2
paddd %xmm3,%xmm11
paddd %xmm10,%xmm2
.byte 15,56,205,254
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm3
.byte 102,15,58,15,222,4
.byte 69,15,56,203,254
.byte 69,15,56,205,218
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm4
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
.byte 15,56,204,238
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 112-128(%rbp),%xmm1
paddd %xmm7,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,202
movdqa %xmm1,%xmm0
movdqa 112-128(%rbp),%xmm2
paddd %xmm3,%xmm8
paddd %xmm11,%xmm2
.byte 15,56,205,231
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm4,%xmm3
.byte 102,15,58,15,223,4
.byte 69,15,56,203,254
.byte 69,15,56,205,195
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm5
movdqa %xmm8,%xmm3
.byte 102,65,15,58,15,219,4
.byte 15,56,204,247
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 128-128(%rbp),%xmm1
paddd %xmm4,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,211
movdqa %xmm1,%xmm0
movdqa 128-128(%rbp),%xmm2
paddd %xmm3,%xmm9
paddd %xmm8,%xmm2
.byte 15,56,205,236
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,58,15,220,4
.byte 69,15,56,203,254
.byte 69,15,56,205,200
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm6
movdqa %xmm9,%xmm3
.byte 102,65,15,58,15,216,4
.byte 15,56,204,252
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 144-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,216
movdqa %xmm1,%xmm0
movdqa 144-128(%rbp),%xmm2
paddd %xmm3,%xmm10
paddd %xmm9,%xmm2
.byte 15,56,205,245
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm6,%xmm3
.byte 102,15,58,15,221,4
.byte 69,15,56,203,254
.byte 69,15,56,205,209
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm7
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,217,4
.byte 15,56,204,229
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 160-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,193
movdqa %xmm1,%xmm0
movdqa 160-128(%rbp),%xmm2
paddd %xmm3,%xmm11
paddd %xmm10,%xmm2
.byte 15,56,205,254
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm3
.byte 102,15,58,15,222,4
.byte 69,15,56,203,254
.byte 69,15,56,205,218
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm4
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
.byte 15,56,204,238
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 176-128(%rbp),%xmm1
paddd %xmm7,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,202
movdqa %xmm1,%xmm0
movdqa 176-128(%rbp),%xmm2
paddd %xmm3,%xmm8
paddd %xmm11,%xmm2
.byte 15,56,205,231
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm4,%xmm3
.byte 102,15,58,15,223,4
.byte 69,15,56,203,254
.byte 69,15,56,205,195
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm5
movdqa %xmm8,%xmm3
.byte 102,65,15,58,15,219,4
.byte 15,56,204,247
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 192-128(%rbp),%xmm1
paddd %xmm4,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,211
movdqa %xmm1,%xmm0
movdqa 192-128(%rbp),%xmm2
paddd %xmm3,%xmm9
paddd %xmm8,%xmm2
.byte 15,56,205,236
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,58,15,220,4
.byte 69,15,56,203,254
.byte 69,15,56,205,200
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm6
movdqa %xmm9,%xmm3
.byte 102,65,15,58,15,216,4
.byte 15,56,204,252
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 208-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,216
movdqa %xmm1,%xmm0
movdqa 208-128(%rbp),%xmm2
paddd %xmm3,%xmm10
paddd %xmm9,%xmm2
.byte 15,56,205,245
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm6,%xmm3
.byte 102,15,58,15,221,4
.byte 69,15,56,203,254
.byte 69,15,56,205,209
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm7
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,217,4
nop
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# Final round groups (K[56..63]); lane bookkeeping is interleaved with
# the last sha256rnds2 issues to hide latency.
movdqa 224-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
movdqa 224-128(%rbp),%xmm2
paddd %xmm3,%xmm11
paddd %xmm10,%xmm2
.byte 15,56,205,254
nop
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movl $1,%ecx
pxor %xmm6,%xmm6
.byte 69,15,56,203,254
.byte 69,15,56,205,218
pshufd $0x0e,%xmm1,%xmm0
movdqa 240-128(%rbp),%xmm1
paddd %xmm7,%xmm1
movq (%rbx),%xmm7
nop
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 240-128(%rbp),%xmm2
paddd %xmm11,%xmm2
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
# Lane retirement: ecx=1 vs per-lane counters in (%rbx).  Exhausted
# lanes get their data pointer parked at rsp; pcmpgtd builds 0/-1
# masks so only still-active lanes fold the saved entry state back in
# (Davies-Meyer feed-forward), then the counters are decremented as a
# packed subtract of the mask (paddd of -1 per active lane).
cmpl 0(%rbx),%ecx
cmovgeq %rsp,%r8
cmpl 4(%rbx),%ecx
cmovgeq %rsp,%r9
pshufd $0x00,%xmm7,%xmm9
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
pshufd $0x55,%xmm7,%xmm10
movdqa %xmm7,%xmm11
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
pcmpgtd %xmm6,%xmm9
pcmpgtd %xmm6,%xmm10
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
pcmpgtd %xmm6,%xmm11
movdqa K256_shaext-16(%rip),%xmm3
.byte 69,15,56,203,247
pand %xmm9,%xmm13
pand %xmm10,%xmm15
pand %xmm9,%xmm12
pand %xmm10,%xmm14
paddd %xmm7,%xmm11
paddd 80(%rsp),%xmm13
paddd 112(%rsp),%xmm15
paddd 64(%rsp),%xmm12
paddd 96(%rsp),%xmm14
movq %xmm11,(%rbx)
decl %edx
jnz L$oop_shaext
# All blocks consumed for this descriptor pair: undo the transpose and
# write both lanes' state back to the interleaved context.
movl 280(%rsp),%edx
pshufd $27,%xmm12,%xmm12
pshufd $27,%xmm13,%xmm13
pshufd $27,%xmm14,%xmm14
pshufd $27,%xmm15,%xmm15
movdqa %xmm12,%xmm5
movdqa %xmm13,%xmm6
punpckldq %xmm14,%xmm12
punpckhdq %xmm14,%xmm5
punpckldq %xmm15,%xmm13
punpckhdq %xmm15,%xmm6
movq %xmm12,0-128(%rdi)
psrldq $8,%xmm12
movq %xmm5,128-128(%rdi)
psrldq $8,%xmm5
movq %xmm12,32-128(%rdi)
movq %xmm5,160-128(%rdi)
movq %xmm13,64-128(%rdi)
psrldq $8,%xmm13
movq %xmm6,192-128(%rdi)
psrldq $8,%xmm6
movq %xmm13,96-128(%rdi)
movq %xmm6,224-128(%rdi)
# Advance to the next pair of lanes (ctx +8, descriptors +32).
leaq 8(%rdi),%rdi
leaq 32(%rsi),%rsi
decl %edx
jnz L$oop_grande_shaext
L$done_shaext:
# Epilogue: rbx/rbp were pushed right after rsp was copied to rax, so
# they live at -8(%rax)/-16(%rax).  NOTE(review): this relies on %rax
# (entry rsp) surviving the whole body — nothing above writes rax.
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
L$epilogue_shaext:
.byte 0xf3,0xc3
# ^ F3 C3 = "rep ret" (historical two-byte return, avoids a branch
#   predictor penalty on some AMD cores).
.p2align 5
sha256_multi_block_avx:
_avx_shortcut:
shrq $32,%rcx
cmpl $2,%edx
jb L$avx
testl $32,%ecx
jnz _avx2_shortcut
jmp L$avx
.p2align 5
L$avx:
movq %rsp,%rax
pushq %rbx
pushq %rbp
subq $288,%rsp
andq $-256,%rsp
movq %rax,272(%rsp)
L$body_avx:
leaq K256+128(%rip),%rbp
leaq 256(%rsp),%rbx
leaq 128(%rdi),%rdi
L$oop_grande_avx:
movl %edx,280(%rsp)
xorl %edx,%edx
movq 0(%rsi),%r8
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rbp,%r8
movq 16(%rsi),%r9
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rbp,%r9
movq 32(%rsi),%r10
movl 40(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,8(%rbx)
cmovleq %rbp,%r10
movq 48(%rsi),%r11
movl 56(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,12(%rbx)
cmovleq %rbp,%r11
testl %edx,%edx
jz L$done_avx
vmovdqu 0-128(%rdi),%xmm8
leaq 128(%rsp),%rax
vmovdqu 32-128(%rdi),%xmm9
vmovdqu 64-128(%rdi),%xmm10
vmovdqu 96-128(%rdi),%xmm11
vmovdqu 128-128(%rdi),%xmm12
vmovdqu 160-128(%rdi),%xmm13
vmovdqu 192-128(%rdi),%xmm14
vmovdqu 224-128(%rdi),%xmm15
vmovdqu L$pbswap(%rip),%xmm6
jmp L$oop_avx
.p2align 5
L$oop_avx:
vpxor %xmm9,%xmm10,%xmm4
vmovd 0(%r8),%xmm5
vmovd 0(%r9),%xmm0
vpinsrd $1,0(%r10),%xmm5,%xmm5
vpinsrd $1,0(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,0-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovd 4(%r8),%xmm5
vmovd 4(%r9),%xmm0
vpinsrd $1,4(%r10),%xmm5,%xmm5
vpinsrd $1,4(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm5,16-128(%rax)
vpaddd %xmm14,%xmm5,%xmm5
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm5,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovd 8(%r8),%xmm5
vmovd 8(%r9),%xmm0
vpinsrd $1,8(%r10),%xmm5,%xmm5
vpinsrd $1,8(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,32-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovd 12(%r8),%xmm5
vmovd 12(%r9),%xmm0
vpinsrd $1,12(%r10),%xmm5,%xmm5
vpinsrd $1,12(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm5,48-128(%rax)
vpaddd %xmm12,%xmm5,%xmm5
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm5,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovd 16(%r8),%xmm5
vmovd 16(%r9),%xmm0
vpinsrd $1,16(%r10),%xmm5,%xmm5
vpinsrd $1,16(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,64-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovd 20(%r8),%xmm5
vmovd 20(%r9),%xmm0
vpinsrd $1,20(%r10),%xmm5,%xmm5
vpinsrd $1,20(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm5,80-128(%rax)
vpaddd %xmm10,%xmm5,%xmm5
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm5,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovd 24(%r8),%xmm5
vmovd 24(%r9),%xmm0
vpinsrd $1,24(%r10),%xmm5,%xmm5
vpinsrd $1,24(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,96-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovd 28(%r8),%xmm5
vmovd 28(%r9),%xmm0
vpinsrd $1,28(%r10),%xmm5,%xmm5
vpinsrd $1,28(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm5,112-128(%rax)
vpaddd %xmm8,%xmm5,%xmm5
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm5,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
vmovd 32(%r8),%xmm5
vmovd 32(%r9),%xmm0
vpinsrd $1,32(%r10),%xmm5,%xmm5
vpinsrd $1,32(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,128-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovd 36(%r8),%xmm5
vmovd 36(%r9),%xmm0
vpinsrd $1,36(%r10),%xmm5,%xmm5
vpinsrd $1,36(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm5,144-128(%rax)
vpaddd %xmm14,%xmm5,%xmm5
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm5,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovd 40(%r8),%xmm5
vmovd 40(%r9),%xmm0
vpinsrd $1,40(%r10),%xmm5,%xmm5
vpinsrd $1,40(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,160-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovd 44(%r8),%xmm5
vmovd 44(%r9),%xmm0
vpinsrd $1,44(%r10),%xmm5,%xmm5
vpinsrd $1,44(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm5,176-128(%rax)
vpaddd %xmm12,%xmm5,%xmm5
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm5,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovd 48(%r8),%xmm5
vmovd 48(%r9),%xmm0
vpinsrd $1,48(%r10),%xmm5,%xmm5
vpinsrd $1,48(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,192-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovd 52(%r8),%xmm5
vmovd 52(%r9),%xmm0
vpinsrd $1,52(%r10),%xmm5,%xmm5
vpinsrd $1,52(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm5,208-128(%rax)
vpaddd %xmm10,%xmm5,%xmm5
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm5,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovd 56(%r8),%xmm5
vmovd 56(%r9),%xmm0
vpinsrd $1,56(%r10),%xmm5,%xmm5
vpinsrd $1,56(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,224-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovd 60(%r8),%xmm5
leaq 64(%r8),%r8
vmovd 60(%r9),%xmm0
leaq 64(%r9),%r9
vpinsrd $1,60(%r10),%xmm5,%xmm5
leaq 64(%r10),%r10
vpinsrd $1,60(%r11),%xmm0,%xmm0
leaq 64(%r11),%r11
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm5,240-128(%rax)
vpaddd %xmm8,%xmm5,%xmm5
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
prefetcht0 63(%r8)
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
prefetcht0 63(%r9)
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
prefetcht0 63(%r10)
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
prefetcht0 63(%r11)
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm5,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
vmovdqu 0-128(%rax),%xmm5
movl $3,%ecx
jmp L$oop_16_xx_avx
.p2align 5
L$oop_16_xx_avx:
vmovdqu 16-128(%rax),%xmm6
vpaddd 144-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 224-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,0-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovdqu 32-128(%rax),%xmm5
vpaddd 160-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 240-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm6,16-128(%rax)
vpaddd %xmm14,%xmm6,%xmm6
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm6,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovdqu 48-128(%rax),%xmm6
vpaddd 176-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 0-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,32-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovdqu 64-128(%rax),%xmm5
vpaddd 192-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 16-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm6,48-128(%rax)
vpaddd %xmm12,%xmm6,%xmm6
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm6,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovdqu 80-128(%rax),%xmm6
vpaddd 208-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 32-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,64-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovdqu 96-128(%rax),%xmm5
vpaddd 224-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 48-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm6,80-128(%rax)
vpaddd %xmm10,%xmm6,%xmm6
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm6,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovdqu 112-128(%rax),%xmm6
vpaddd 240-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 64-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,96-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovdqu 128-128(%rax),%xmm5
vpaddd 0-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 80-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm6,112-128(%rax)
vpaddd %xmm8,%xmm6,%xmm6
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm6,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
vmovdqu 144-128(%rax),%xmm6
vpaddd 16-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 96-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,128-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovdqu 160-128(%rax),%xmm5
vpaddd 32-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 112-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm6,144-128(%rax)
vpaddd %xmm14,%xmm6,%xmm6
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm6,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovdqu 176-128(%rax),%xmm6
vpaddd 48-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 128-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,160-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovdqu 192-128(%rax),%xmm5
vpaddd 64-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 144-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm6,176-128(%rax)
vpaddd %xmm12,%xmm6,%xmm6
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm6,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovdqu 208-128(%rax),%xmm6
vpaddd 80-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 160-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,192-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovdqu 224-128(%rax),%xmm5
vpaddd 96-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 176-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm6,208-128(%rax)
vpaddd %xmm10,%xmm6,%xmm6
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm6,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovdqu 240-128(%rax),%xmm6
vpaddd 112-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 192-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,224-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovdqu 0-128(%rax),%xmm5
vpaddd 128-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 208-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm6,240-128(%rax)
vpaddd %xmm8,%xmm6,%xmm6
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm6,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
decl %ecx
jnz L$oop_16_xx_avx
movl $1,%ecx
leaq K256+128(%rip),%rbp
cmpl 0(%rbx),%ecx
cmovgeq %rbp,%r8
cmpl 4(%rbx),%ecx
cmovgeq %rbp,%r9
cmpl 8(%rbx),%ecx
cmovgeq %rbp,%r10
cmpl 12(%rbx),%ecx
cmovgeq %rbp,%r11
vmovdqa (%rbx),%xmm7
vpxor %xmm0,%xmm0,%xmm0
vmovdqa %xmm7,%xmm6
vpcmpgtd %xmm0,%xmm6,%xmm6
vpaddd %xmm6,%xmm7,%xmm7
vmovdqu 0-128(%rdi),%xmm0
vpand %xmm6,%xmm8,%xmm8
vmovdqu 32-128(%rdi),%xmm1
vpand %xmm6,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm2
vpand %xmm6,%xmm10,%xmm10
vmovdqu 96-128(%rdi),%xmm5
vpand %xmm6,%xmm11,%xmm11
vpaddd %xmm0,%xmm8,%xmm8
vmovdqu 128-128(%rdi),%xmm0
vpand %xmm6,%xmm12,%xmm12
vpaddd %xmm1,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm1
vpand %xmm6,%xmm13,%xmm13
vpaddd %xmm2,%xmm10,%xmm10
vmovdqu 192-128(%rdi),%xmm2
vpand %xmm6,%xmm14,%xmm14
vpaddd %xmm5,%xmm11,%xmm11
vmovdqu 224-128(%rdi),%xmm5
vpand %xmm6,%xmm15,%xmm15
vpaddd %xmm0,%xmm12,%xmm12
vpaddd %xmm1,%xmm13,%xmm13
vmovdqu %xmm8,0-128(%rdi)
vpaddd %xmm2,%xmm14,%xmm14
vmovdqu %xmm9,32-128(%rdi)
vpaddd %xmm5,%xmm15,%xmm15
vmovdqu %xmm10,64-128(%rdi)
vmovdqu %xmm11,96-128(%rdi)
vmovdqu %xmm12,128-128(%rdi)
vmovdqu %xmm13,160-128(%rdi)
vmovdqu %xmm14,192-128(%rdi)
vmovdqu %xmm15,224-128(%rdi)
vmovdqu %xmm7,(%rbx)
vmovdqu L$pbswap(%rip),%xmm6
decl %edx
jnz L$oop_avx
movl 280(%rsp),%edx
leaq 16(%rdi),%rdi
leaq 64(%rsi),%rsi
decl %edx
jnz L$oop_grande_avx
L$done_avx:
movq 272(%rsp),%rax
vzeroupper
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
L$epilogue_avx:
.byte 0xf3,0xc3
.p2align 5
sha256_multi_block_avx2:
_avx2_shortcut:
movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576,%rsp
andq $-256,%rsp
movq %rax,544(%rsp)
L$body_avx2:
leaq K256+128(%rip),%rbp
leaq 128(%rdi),%rdi
L$oop_grande_avx2:
movl %edx,552(%rsp)
xorl %edx,%edx
leaq 512(%rsp),%rbx
movq 0(%rsi),%r12
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rbp,%r12
movq 16(%rsi),%r13
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rbp,%r13
movq 32(%rsi),%r14
movl 40(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,8(%rbx)
cmovleq %rbp,%r14
movq 48(%rsi),%r15
movl 56(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,12(%rbx)
cmovleq %rbp,%r15
movq 64(%rsi),%r8
movl 72(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,16(%rbx)
cmovleq %rbp,%r8
movq 80(%rsi),%r9
movl 88(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,20(%rbx)
cmovleq %rbp,%r9
movq 96(%rsi),%r10
movl 104(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,24(%rbx)
cmovleq %rbp,%r10
movq 112(%rsi),%r11
movl 120(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,28(%rbx)
cmovleq %rbp,%r11
vmovdqu 0-128(%rdi),%ymm8
leaq 128(%rsp),%rax
vmovdqu 32-128(%rdi),%ymm9
leaq 256+128(%rsp),%rbx
vmovdqu 64-128(%rdi),%ymm10
vmovdqu 96-128(%rdi),%ymm11
vmovdqu 128-128(%rdi),%ymm12
vmovdqu 160-128(%rdi),%ymm13
vmovdqu 192-128(%rdi),%ymm14
vmovdqu 224-128(%rdi),%ymm15
vmovdqu L$pbswap(%rip),%ymm6
jmp L$oop_avx2
.p2align 5
L$oop_avx2:
vpxor %ymm9,%ymm10,%ymm4
vmovd 0(%r12),%xmm5
vmovd 0(%r8),%xmm0
vmovd 0(%r13),%xmm1
vmovd 0(%r9),%xmm2
vpinsrd $1,0(%r14),%xmm5,%xmm5
vpinsrd $1,0(%r10),%xmm0,%xmm0
vpinsrd $1,0(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,0(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,0-128(%rax)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovd 4(%r12),%xmm5
vmovd 4(%r8),%xmm0
vmovd 4(%r13),%xmm1
vmovd 4(%r9),%xmm2
vpinsrd $1,4(%r14),%xmm5,%xmm5
vpinsrd $1,4(%r10),%xmm0,%xmm0
vpinsrd $1,4(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,4(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm5,32-128(%rax)
vpaddd %ymm14,%ymm5,%ymm5
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm5,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovd 8(%r12),%xmm5
vmovd 8(%r8),%xmm0
vmovd 8(%r13),%xmm1
vmovd 8(%r9),%xmm2
vpinsrd $1,8(%r14),%xmm5,%xmm5
vpinsrd $1,8(%r10),%xmm0,%xmm0
vpinsrd $1,8(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,8(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,64-128(%rax)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovd 12(%r12),%xmm5
vmovd 12(%r8),%xmm0
vmovd 12(%r13),%xmm1
vmovd 12(%r9),%xmm2
vpinsrd $1,12(%r14),%xmm5,%xmm5
vpinsrd $1,12(%r10),%xmm0,%xmm0
vpinsrd $1,12(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,12(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm5,96-128(%rax)
vpaddd %ymm12,%ymm5,%ymm5
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm5,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovd 16(%r12),%xmm5
vmovd 16(%r8),%xmm0
vmovd 16(%r13),%xmm1
vmovd 16(%r9),%xmm2
vpinsrd $1,16(%r14),%xmm5,%xmm5
vpinsrd $1,16(%r10),%xmm0,%xmm0
vpinsrd $1,16(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,16(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,128-128(%rax)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovd 20(%r12),%xmm5
vmovd 20(%r8),%xmm0
vmovd 20(%r13),%xmm1
vmovd 20(%r9),%xmm2
vpinsrd $1,20(%r14),%xmm5,%xmm5
vpinsrd $1,20(%r10),%xmm0,%xmm0
vpinsrd $1,20(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,20(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm5,160-128(%rax)
vpaddd %ymm10,%ymm5,%ymm5
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm5,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovd 24(%r12),%xmm5
vmovd 24(%r8),%xmm0
vmovd 24(%r13),%xmm1
vmovd 24(%r9),%xmm2
vpinsrd $1,24(%r14),%xmm5,%xmm5
vpinsrd $1,24(%r10),%xmm0,%xmm0
vpinsrd $1,24(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,24(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,192-128(%rax)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovd 28(%r12),%xmm5
vmovd 28(%r8),%xmm0
vmovd 28(%r13),%xmm1
vmovd 28(%r9),%xmm2
vpinsrd $1,28(%r14),%xmm5,%xmm5
vpinsrd $1,28(%r10),%xmm0,%xmm0
vpinsrd $1,28(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,28(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm5,224-128(%rax)
vpaddd %ymm8,%ymm5,%ymm5
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm9,%ymm1
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm5,%ymm12,%ymm12
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
vmovd 32(%r12),%xmm5
vmovd 32(%r8),%xmm0
vmovd 32(%r13),%xmm1
vmovd 32(%r9),%xmm2
vpinsrd $1,32(%r14),%xmm5,%xmm5
vpinsrd $1,32(%r10),%xmm0,%xmm0
vpinsrd $1,32(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,32(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,256-256-128(%rbx)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovd 36(%r12),%xmm5
vmovd 36(%r8),%xmm0
vmovd 36(%r13),%xmm1
vmovd 36(%r9),%xmm2
vpinsrd $1,36(%r14),%xmm5,%xmm5
vpinsrd $1,36(%r10),%xmm0,%xmm0
vpinsrd $1,36(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,36(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm5,288-256-128(%rbx)
vpaddd %ymm14,%ymm5,%ymm5
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm5,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovd 40(%r12),%xmm5
vmovd 40(%r8),%xmm0
vmovd 40(%r13),%xmm1
vmovd 40(%r9),%xmm2
vpinsrd $1,40(%r14),%xmm5,%xmm5
vpinsrd $1,40(%r10),%xmm0,%xmm0
vpinsrd $1,40(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,40(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,320-256-128(%rbx)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovd 44(%r12),%xmm5
vmovd 44(%r8),%xmm0
vmovd 44(%r13),%xmm1
vmovd 44(%r9),%xmm2
vpinsrd $1,44(%r14),%xmm5,%xmm5
vpinsrd $1,44(%r10),%xmm0,%xmm0
vpinsrd $1,44(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,44(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm5,352-256-128(%rbx)
vpaddd %ymm12,%ymm5,%ymm5
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm5,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovd 48(%r12),%xmm5
vmovd 48(%r8),%xmm0
vmovd 48(%r13),%xmm1
vmovd 48(%r9),%xmm2
vpinsrd $1,48(%r14),%xmm5,%xmm5
vpinsrd $1,48(%r10),%xmm0,%xmm0
vpinsrd $1,48(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,48(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,384-256-128(%rbx)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovd 52(%r12),%xmm5
vmovd 52(%r8),%xmm0
vmovd 52(%r13),%xmm1
vmovd 52(%r9),%xmm2
vpinsrd $1,52(%r14),%xmm5,%xmm5
vpinsrd $1,52(%r10),%xmm0,%xmm0
vpinsrd $1,52(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,52(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm5,416-256-128(%rbx)
vpaddd %ymm10,%ymm5,%ymm5
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm5,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovd 56(%r12),%xmm5
vmovd 56(%r8),%xmm0
vmovd 56(%r13),%xmm1
vmovd 56(%r9),%xmm2
vpinsrd $1,56(%r14),%xmm5,%xmm5
vpinsrd $1,56(%r10),%xmm0,%xmm0
vpinsrd $1,56(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,56(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,448-256-128(%rbx)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovd 60(%r12),%xmm5
leaq 64(%r12),%r12
vmovd 60(%r8),%xmm0
leaq 64(%r8),%r8
vmovd 60(%r13),%xmm1
leaq 64(%r13),%r13
vmovd 60(%r9),%xmm2
leaq 64(%r9),%r9
vpinsrd $1,60(%r14),%xmm5,%xmm5
leaq 64(%r14),%r14
vpinsrd $1,60(%r10),%xmm0,%xmm0
leaq 64(%r10),%r10
vpinsrd $1,60(%r15),%xmm1,%xmm1
leaq 64(%r15),%r15
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,60(%r11),%xmm2,%xmm2
leaq 64(%r11),%r11
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm5,480-256-128(%rbx)
vpaddd %ymm8,%ymm5,%ymm5
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
prefetcht0 63(%r12)
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
prefetcht0 63(%r13)
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
prefetcht0 63(%r14)
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
prefetcht0 63(%r15)
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm9,%ymm1
prefetcht0 63(%r8)
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
prefetcht0 63(%r9)
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
prefetcht0 63(%r10)
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm5,%ymm12,%ymm12
prefetcht0 63(%r11)
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq	$256,%rbp		# advance the round-constant pointer: the preceding 8 rounds consumed slots -128(%rbp)..96(%rbp), 8 x 32 bytes = 256
vmovdqu	0-128(%rax),%ymm5	# preload the first message-schedule vector (8 lanes wide) for the scheduled-round loop below
movl	$3,%ecx			# loop counter: 3 passes of the 16-round body — presumably rounds 16..63 of the standard 64-round SHA-256 schedule (TODO confirm against loop tail, outside this view)
jmp	L$oop_16_xx_avx2
.p2align	5		# align the hot loop head to a 32-byte boundary
L$oop_16_xx_avx2:
vmovdqu 32-128(%rax),%ymm6
vpaddd 288-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 448-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,0-128(%rax)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovdqu 64-128(%rax),%ymm5
vpaddd 320-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 480-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm6,32-128(%rax)
vpaddd %ymm14,%ymm6,%ymm6
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm6,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovdqu 96-128(%rax),%ymm6
vpaddd 352-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 0-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,64-128(%rax)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovdqu 128-128(%rax),%ymm5
vpaddd 384-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 32-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm6,96-128(%rax)
vpaddd %ymm12,%ymm6,%ymm6
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm6,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovdqu 160-128(%rax),%ymm6
vpaddd 416-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 64-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,128-128(%rax)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovdqu 192-128(%rax),%ymm5
vpaddd 448-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 96-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm6,160-128(%rax)
vpaddd %ymm10,%ymm6,%ymm6
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm6,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovdqu 224-128(%rax),%ymm6
vpaddd 480-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 128-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,192-128(%rax)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovdqu 256-256-128(%rbx),%ymm5
vpaddd 0-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 160-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm6,224-128(%rax)
vpaddd %ymm8,%ymm6,%ymm6
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm9,%ymm1
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
vmovdqu 288-256-128(%rbx),%ymm6
vpaddd 32-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 192-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,256-256-128(%rbx)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovdqu 320-256-128(%rbx),%ymm5
vpaddd 64-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 224-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm6,288-256-128(%rbx)
vpaddd %ymm14,%ymm6,%ymm6
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm6,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovdqu 352-256-128(%rbx),%ymm6
vpaddd 96-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 256-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,320-256-128(%rbx)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovdqu 384-256-128(%rbx),%ymm5
vpaddd 128-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 288-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm6,352-256-128(%rbx)
vpaddd %ymm12,%ymm6,%ymm6
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm6,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovdqu 416-256-128(%rbx),%ymm6
vpaddd 160-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 320-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,384-256-128(%rbx)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovdqu 448-256-128(%rbx),%ymm5
vpaddd 192-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 352-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm6,416-256-128(%rbx)
vpaddd %ymm10,%ymm6,%ymm6
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm6,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovdqu 480-256-128(%rbx),%ymm6
vpaddd 224-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 384-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,448-256-128(%rbx)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovdqu 0-128(%rax),%ymm5
vpaddd 256-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 416-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm6,480-256-128(%rbx)
vpaddd %ymm8,%ymm6,%ymm6
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm9,%ymm1
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
decl %ecx
jnz L$oop_16_xx_avx2
movl $1,%ecx
leaq 512(%rsp),%rbx
leaq K256+128(%rip),%rbp
cmpl 0(%rbx),%ecx
cmovgeq %rbp,%r12
cmpl 4(%rbx),%ecx
cmovgeq %rbp,%r13
cmpl 8(%rbx),%ecx
cmovgeq %rbp,%r14
cmpl 12(%rbx),%ecx
cmovgeq %rbp,%r15
cmpl 16(%rbx),%ecx
cmovgeq %rbp,%r8
cmpl 20(%rbx),%ecx
cmovgeq %rbp,%r9
cmpl 24(%rbx),%ecx
cmovgeq %rbp,%r10
cmpl 28(%rbx),%ecx
cmovgeq %rbp,%r11
vmovdqa (%rbx),%ymm7
vpxor %ymm0,%ymm0,%ymm0
vmovdqa %ymm7,%ymm6
vpcmpgtd %ymm0,%ymm6,%ymm6
vpaddd %ymm6,%ymm7,%ymm7
vmovdqu 0-128(%rdi),%ymm0
vpand %ymm6,%ymm8,%ymm8
vmovdqu 32-128(%rdi),%ymm1
vpand %ymm6,%ymm9,%ymm9
vmovdqu 64-128(%rdi),%ymm2
vpand %ymm6,%ymm10,%ymm10
vmovdqu 96-128(%rdi),%ymm5
vpand %ymm6,%ymm11,%ymm11
vpaddd %ymm0,%ymm8,%ymm8
vmovdqu 128-128(%rdi),%ymm0
vpand %ymm6,%ymm12,%ymm12
vpaddd %ymm1,%ymm9,%ymm9
vmovdqu 160-128(%rdi),%ymm1
vpand %ymm6,%ymm13,%ymm13
vpaddd %ymm2,%ymm10,%ymm10
vmovdqu 192-128(%rdi),%ymm2
vpand %ymm6,%ymm14,%ymm14
vpaddd %ymm5,%ymm11,%ymm11
vmovdqu 224-128(%rdi),%ymm5
vpand %ymm6,%ymm15,%ymm15
vpaddd %ymm0,%ymm12,%ymm12
vpaddd %ymm1,%ymm13,%ymm13
vmovdqu %ymm8,0-128(%rdi)
vpaddd %ymm2,%ymm14,%ymm14
vmovdqu %ymm9,32-128(%rdi)
vpaddd %ymm5,%ymm15,%ymm15
vmovdqu %ymm10,64-128(%rdi)
vmovdqu %ymm11,96-128(%rdi)
vmovdqu %ymm12,128-128(%rdi)
vmovdqu %ymm13,160-128(%rdi)
vmovdqu %ymm14,192-128(%rdi)
vmovdqu %ymm15,224-128(%rdi)
vmovdqu %ymm7,(%rbx)
leaq 256+128(%rsp),%rbx
vmovdqu L$pbswap(%rip),%ymm6
decl %edx
jnz L$oop_avx2
L$done_avx2:
movq 544(%rsp),%rax
vzeroupper
movq -48(%rax),%r15
movq -40(%rax),%r14
movq -32(%rax),%r13
movq -24(%rax),%r12
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
L$epilogue_avx2:
.byte 0xf3,0xc3
/*
 * K256: the 64 SHA-256 round constants K[0..63] (FIPS 180-4), written as
 * decimal 32-bit words.  Each constant is repeated 8 times (two .long rows
 * of 4 each), so one aligned vector load broadcasts a round constant across
 * every 32-bit lane — 4 lanes for xmm loads, 8 lanes for ymm loads
 * (see e.g. "vpaddd 32(%rbp),%ymm6,%ymm6" and "paddd -128(%rbp),%xmm5"
 * in the code above/below).  Cross-check: 1116352408 == 0x428a2f98, the
 * first entry of K256_shaext below.
 * The code addresses this table biased by +128 ("leaq K256+128(%rip),%rbp")
 * so that the per-round displacements fit in signed 8-bit offsets,
 * giving shorter instruction encodings.
 */
.p2align 8
K256:
.long 1116352408,1116352408,1116352408,1116352408
.long 1116352408,1116352408,1116352408,1116352408
.long 1899447441,1899447441,1899447441,1899447441
.long 1899447441,1899447441,1899447441,1899447441
.long 3049323471,3049323471,3049323471,3049323471
.long 3049323471,3049323471,3049323471,3049323471
.long 3921009573,3921009573,3921009573,3921009573
.long 3921009573,3921009573,3921009573,3921009573
.long 961987163,961987163,961987163,961987163
.long 961987163,961987163,961987163,961987163
.long 1508970993,1508970993,1508970993,1508970993
.long 1508970993,1508970993,1508970993,1508970993
.long 2453635748,2453635748,2453635748,2453635748
.long 2453635748,2453635748,2453635748,2453635748
.long 2870763221,2870763221,2870763221,2870763221
.long 2870763221,2870763221,2870763221,2870763221
.long 3624381080,3624381080,3624381080,3624381080
.long 3624381080,3624381080,3624381080,3624381080
.long 310598401,310598401,310598401,310598401
.long 310598401,310598401,310598401,310598401
.long 607225278,607225278,607225278,607225278
.long 607225278,607225278,607225278,607225278
.long 1426881987,1426881987,1426881987,1426881987
.long 1426881987,1426881987,1426881987,1426881987
.long 1925078388,1925078388,1925078388,1925078388
.long 1925078388,1925078388,1925078388,1925078388
.long 2162078206,2162078206,2162078206,2162078206
.long 2162078206,2162078206,2162078206,2162078206
.long 2614888103,2614888103,2614888103,2614888103
.long 2614888103,2614888103,2614888103,2614888103
.long 3248222580,3248222580,3248222580,3248222580
.long 3248222580,3248222580,3248222580,3248222580
.long 3835390401,3835390401,3835390401,3835390401
.long 3835390401,3835390401,3835390401,3835390401
.long 4022224774,4022224774,4022224774,4022224774
.long 4022224774,4022224774,4022224774,4022224774
.long 264347078,264347078,264347078,264347078
.long 264347078,264347078,264347078,264347078
.long 604807628,604807628,604807628,604807628
.long 604807628,604807628,604807628,604807628
.long 770255983,770255983,770255983,770255983
.long 770255983,770255983,770255983,770255983
.long 1249150122,1249150122,1249150122,1249150122
.long 1249150122,1249150122,1249150122,1249150122
.long 1555081692,1555081692,1555081692,1555081692
.long 1555081692,1555081692,1555081692,1555081692
.long 1996064986,1996064986,1996064986,1996064986
.long 1996064986,1996064986,1996064986,1996064986
.long 2554220882,2554220882,2554220882,2554220882
.long 2554220882,2554220882,2554220882,2554220882
.long 2821834349,2821834349,2821834349,2821834349
.long 2821834349,2821834349,2821834349,2821834349
.long 2952996808,2952996808,2952996808,2952996808
.long 2952996808,2952996808,2952996808,2952996808
.long 3210313671,3210313671,3210313671,3210313671
.long 3210313671,3210313671,3210313671,3210313671
.long 3336571891,3336571891,3336571891,3336571891
.long 3336571891,3336571891,3336571891,3336571891
.long 3584528711,3584528711,3584528711,3584528711
.long 3584528711,3584528711,3584528711,3584528711
.long 113926993,113926993,113926993,113926993
.long 113926993,113926993,113926993,113926993
.long 338241895,338241895,338241895,338241895
.long 338241895,338241895,338241895,338241895
.long 666307205,666307205,666307205,666307205
.long 666307205,666307205,666307205,666307205
.long 773529912,773529912,773529912,773529912
.long 773529912,773529912,773529912,773529912
.long 1294757372,1294757372,1294757372,1294757372
.long 1294757372,1294757372,1294757372,1294757372
.long 1396182291,1396182291,1396182291,1396182291
.long 1396182291,1396182291,1396182291,1396182291
.long 1695183700,1695183700,1695183700,1695183700
.long 1695183700,1695183700,1695183700,1695183700
.long 1986661051,1986661051,1986661051,1986661051
.long 1986661051,1986661051,1986661051,1986661051
.long 2177026350,2177026350,2177026350,2177026350
.long 2177026350,2177026350,2177026350,2177026350
.long 2456956037,2456956037,2456956037,2456956037
.long 2456956037,2456956037,2456956037,2456956037
.long 2730485921,2730485921,2730485921,2730485921
.long 2730485921,2730485921,2730485921,2730485921
.long 2820302411,2820302411,2820302411,2820302411
.long 2820302411,2820302411,2820302411,2820302411
.long 3259730800,3259730800,3259730800,3259730800
.long 3259730800,3259730800,3259730800,3259730800
.long 3345764771,3345764771,3345764771,3345764771
.long 3345764771,3345764771,3345764771,3345764771
.long 3516065817,3516065817,3516065817,3516065817
.long 3516065817,3516065817,3516065817,3516065817
.long 3600352804,3600352804,3600352804,3600352804
.long 3600352804,3600352804,3600352804,3600352804
.long 4094571909,4094571909,4094571909,4094571909
.long 4094571909,4094571909,4094571909,4094571909
.long 275423344,275423344,275423344,275423344
.long 275423344,275423344,275423344,275423344
.long 430227734,430227734,430227734,430227734
.long 430227734,430227734,430227734,430227734
.long 506948616,506948616,506948616,506948616
.long 506948616,506948616,506948616,506948616
.long 659060556,659060556,659060556,659060556
.long 659060556,659060556,659060556,659060556
.long 883997877,883997877,883997877,883997877
.long 883997877,883997877,883997877,883997877
.long 958139571,958139571,958139571,958139571
.long 958139571,958139571,958139571,958139571
.long 1322822218,1322822218,1322822218,1322822218
.long 1322822218,1322822218,1322822218,1322822218
.long 1537002063,1537002063,1537002063,1537002063
.long 1537002063,1537002063,1537002063,1537002063
.long 1747873779,1747873779,1747873779,1747873779
.long 1747873779,1747873779,1747873779,1747873779
.long 1955562222,1955562222,1955562222,1955562222
.long 1955562222,1955562222,1955562222,1955562222
.long 2024104815,2024104815,2024104815,2024104815
.long 2024104815,2024104815,2024104815,2024104815
.long 2227730452,2227730452,2227730452,2227730452
.long 2227730452,2227730452,2227730452,2227730452
.long 2361852424,2361852424,2361852424,2361852424
.long 2361852424,2361852424,2361852424,2361852424
.long 2428436474,2428436474,2428436474,2428436474
.long 2428436474,2428436474,2428436474,2428436474
.long 2756734187,2756734187,2756734187,2756734187
.long 2756734187,2756734187,2756734187,2756734187
.long 3204031479,3204031479,3204031479,3204031479
.long 3204031479,3204031479,3204031479,3204031479
.long 3329325298,3329325298,3329325298,3329325298
.long 3329325298,3329325298,3329325298,3329325298
/*
 * L$pbswap: shuffle-control mask for [v]pshufb.  The byte indices in each
 * 32-bit lane (0x00010203 stored little-endian = bytes 03,02,01,00) reverse
 * the byte order within every dword, i.e. a per-word endian swap used when
 * loading big-endian SHA-256 message words (loaded into %ymm6 above:
 * "vmovdqu L$pbswap(%rip),%ymm6").  The 16-byte row is given twice so the
 * same 32-byte datum serves both xmm (16-byte) and ymm (32-byte) loads.
 */
L$pbswap:
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
/*
 * K256_shaext: the same 64 SHA-256 round constants as K256 above, but in
 * hexadecimal, one copy each, in natural K[0..63] order (4 constants per
 * row).  The name indicates this layout is intended for the SHA-extension
 * (SHA-NI) code path, which consumes constants 128 bits (4 rounds' worth)
 * at a time — NOTE(review): the consuming code is not in this view; confirm
 * against the shaext routine elsewhere in the file.
 */
K256_shaext:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
/*
 * NUL-terminated ASCII identification string embedded in the binary:
 * "SHA256 multi-block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"
 */
.byte 83,72,65,50,53,54,32,109,117,108,116,105,45,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
/* --- begin concatenated file: client/algorithms/sha256-mb-x86_64.s (XDagger/xdag) --- */
.text
.globl xsha256_multi_block
.type xsha256_multi_block,@function
.align 32
xsha256_multi_block:
movq xOPENSSL_ia32cap_P+4(%rip),%rcx
btq $61,%rcx
jc _shaext_shortcut
testl $268435456,%ecx
jnz _avx_shortcut
movq %rsp,%rax
pushq %rbx
pushq %rbp
subq $288,%rsp
andq $-256,%rsp
movq %rax,272(%rsp)
.Lbody:
leaq K256+128(%rip),%rbp
leaq 256(%rsp),%rbx
leaq 128(%rdi),%rdi
.Loop_grande:
movl %edx,280(%rsp)
xorl %edx,%edx
movq 0(%rsi),%r8
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rbp,%r8
movq 16(%rsi),%r9
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rbp,%r9
movq 32(%rsi),%r10
movl 40(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,8(%rbx)
cmovleq %rbp,%r10
movq 48(%rsi),%r11
movl 56(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,12(%rbx)
cmovleq %rbp,%r11
testl %edx,%edx
jz .Ldone
movdqu 0-128(%rdi),%xmm8
leaq 128(%rsp),%rax
movdqu 32-128(%rdi),%xmm9
movdqu 64-128(%rdi),%xmm10
movdqu 96-128(%rdi),%xmm11
movdqu 128-128(%rdi),%xmm12
movdqu 160-128(%rdi),%xmm13
movdqu 192-128(%rdi),%xmm14
movdqu 224-128(%rdi),%xmm15
movdqu .Lpbswap(%rip),%xmm6
jmp .Loop
.align 32
.Loop:
movdqa %xmm10,%xmm4
pxor %xmm9,%xmm4
movd 0(%r8),%xmm5
movd 0(%r9),%xmm0
movd 0(%r10),%xmm1
movd 0(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm12,%xmm7
.byte 102,15,56,0,238
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,0-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movd 4(%r8),%xmm5
movd 4(%r9),%xmm0
movd 4(%r10),%xmm1
movd 4(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm5,16-128(%rax)
paddd %xmm14,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm5,%xmm10
pxor %xmm2,%xmm7
paddd %xmm5,%xmm14
paddd %xmm7,%xmm14
movd 8(%r8),%xmm5
movd 8(%r9),%xmm0
movd 8(%r10),%xmm1
movd 8(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm10,%xmm7
.byte 102,15,56,0,238
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,32-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movd 12(%r8),%xmm5
movd 12(%r9),%xmm0
movd 12(%r10),%xmm1
movd 12(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm5,48-128(%rax)
paddd %xmm12,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm5,%xmm8
pxor %xmm2,%xmm7
paddd %xmm5,%xmm12
paddd %xmm7,%xmm12
movd 16(%r8),%xmm5
movd 16(%r9),%xmm0
movd 16(%r10),%xmm1
movd 16(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm8,%xmm7
.byte 102,15,56,0,238
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,64-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movd 20(%r8),%xmm5
movd 20(%r9),%xmm0
movd 20(%r10),%xmm1
movd 20(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm5,80-128(%rax)
paddd %xmm10,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm5,%xmm14
pxor %xmm2,%xmm7
paddd %xmm5,%xmm10
paddd %xmm7,%xmm10
movd 24(%r8),%xmm5
movd 24(%r9),%xmm0
movd 24(%r10),%xmm1
movd 24(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm14,%xmm7
.byte 102,15,56,0,238
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,96-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movd 28(%r8),%xmm5
movd 28(%r9),%xmm0
movd 28(%r10),%xmm1
movd 28(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm5,112-128(%rax)
paddd %xmm8,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm5,%xmm12
pxor %xmm2,%xmm7
paddd %xmm5,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
movd 32(%r8),%xmm5
movd 32(%r9),%xmm0
movd 32(%r10),%xmm1
movd 32(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm12,%xmm7
.byte 102,15,56,0,238
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,128-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movd 36(%r8),%xmm5
movd 36(%r9),%xmm0
movd 36(%r10),%xmm1
movd 36(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm5,144-128(%rax)
paddd %xmm14,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm5,%xmm10
pxor %xmm2,%xmm7
paddd %xmm5,%xmm14
paddd %xmm7,%xmm14
movd 40(%r8),%xmm5
movd 40(%r9),%xmm0
movd 40(%r10),%xmm1
movd 40(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm10,%xmm7
.byte 102,15,56,0,238
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,160-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movd 44(%r8),%xmm5
movd 44(%r9),%xmm0
movd 44(%r10),%xmm1
movd 44(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm5,176-128(%rax)
paddd %xmm12,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm5,%xmm8
pxor %xmm2,%xmm7
paddd %xmm5,%xmm12
paddd %xmm7,%xmm12
movd 48(%r8),%xmm5
movd 48(%r9),%xmm0
movd 48(%r10),%xmm1
movd 48(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm8,%xmm7
.byte 102,15,56,0,238
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,192-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movd 52(%r8),%xmm5
movd 52(%r9),%xmm0
movd 52(%r10),%xmm1
movd 52(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm5,208-128(%rax)
paddd %xmm10,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm5,%xmm14
pxor %xmm2,%xmm7
paddd %xmm5,%xmm10
paddd %xmm7,%xmm10
movd 56(%r8),%xmm5
movd 56(%r9),%xmm0
movd 56(%r10),%xmm1
movd 56(%r11),%xmm2
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm14,%xmm7
.byte 102,15,56,0,238
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,224-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movd 60(%r8),%xmm5
leaq 64(%r8),%r8
movd 60(%r9),%xmm0
leaq 64(%r9),%r9
movd 60(%r10),%xmm1
leaq 64(%r10),%r10
movd 60(%r11),%xmm2
leaq 64(%r11),%r11
punpckldq %xmm1,%xmm5
punpckldq %xmm2,%xmm0
punpckldq %xmm0,%xmm5
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
.byte 102,15,56,0,238
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm5,240-128(%rax)
paddd %xmm8,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
prefetcht0 63(%r8)
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
prefetcht0 63(%r9)
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
prefetcht0 63(%r10)
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
prefetcht0 63(%r11)
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm5,%xmm12
pxor %xmm2,%xmm7
paddd %xmm5,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
movdqu 0-128(%rax),%xmm5
movl $3,%ecx
jmp .Loop_16_xx
.align 32
.Loop_16_xx:
movdqa 16-128(%rax),%xmm6
paddd 144-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 224-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm12,%xmm7
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,0-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movdqa 32-128(%rax),%xmm5
paddd 160-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 240-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm6,16-128(%rax)
paddd %xmm14,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm6,%xmm10
pxor %xmm2,%xmm7
paddd %xmm6,%xmm14
paddd %xmm7,%xmm14
movdqa 48-128(%rax),%xmm6
paddd 176-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 0-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm10,%xmm7
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,32-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movdqa 64-128(%rax),%xmm5
paddd 192-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 16-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm6,48-128(%rax)
paddd %xmm12,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm6,%xmm8
pxor %xmm2,%xmm7
paddd %xmm6,%xmm12
paddd %xmm7,%xmm12
movdqa 80-128(%rax),%xmm6
paddd 208-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 32-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm8,%xmm7
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,64-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movdqa 96-128(%rax),%xmm5
paddd 224-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 48-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm6,80-128(%rax)
paddd %xmm10,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm6,%xmm14
pxor %xmm2,%xmm7
paddd %xmm6,%xmm10
paddd %xmm7,%xmm10
movdqa 112-128(%rax),%xmm6
paddd 240-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 64-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm14,%xmm7
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,96-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movdqa 128-128(%rax),%xmm5
paddd 0-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 80-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm6,112-128(%rax)
paddd %xmm8,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm6,%xmm12
pxor %xmm2,%xmm7
paddd %xmm6,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
movdqa 144-128(%rax),%xmm6
paddd 16-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 96-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm12,%xmm7
movdqa %xmm12,%xmm2
psrld $6,%xmm7
movdqa %xmm12,%xmm1
pslld $7,%xmm2
movdqa %xmm5,128-128(%rax)
paddd %xmm15,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -128(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm12,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm3
pslld $26-21,%xmm2
pandn %xmm14,%xmm0
pand %xmm13,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm8,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm9,%xmm3
movdqa %xmm8,%xmm7
pslld $10,%xmm2
pxor %xmm8,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm9,%xmm15
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm15
paddd %xmm5,%xmm11
pxor %xmm2,%xmm7
paddd %xmm5,%xmm15
paddd %xmm7,%xmm15
movdqa 160-128(%rax),%xmm5
paddd 32-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 112-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm11,%xmm7
movdqa %xmm11,%xmm2
psrld $6,%xmm7
movdqa %xmm11,%xmm1
pslld $7,%xmm2
movdqa %xmm6,144-128(%rax)
paddd %xmm14,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm11,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm4
pslld $26-21,%xmm2
pandn %xmm13,%xmm0
pand %xmm12,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm15,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm15,%xmm7
pslld $10,%xmm2
pxor %xmm15,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm8,%xmm14
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm14
paddd %xmm6,%xmm10
pxor %xmm2,%xmm7
paddd %xmm6,%xmm14
paddd %xmm7,%xmm14
movdqa 176-128(%rax),%xmm6
paddd 48-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 128-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm10,%xmm7
movdqa %xmm10,%xmm2
psrld $6,%xmm7
movdqa %xmm10,%xmm1
pslld $7,%xmm2
movdqa %xmm5,160-128(%rax)
paddd %xmm13,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm10,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm3
pslld $26-21,%xmm2
pandn %xmm12,%xmm0
pand %xmm11,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm14,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm15,%xmm3
movdqa %xmm14,%xmm7
pslld $10,%xmm2
pxor %xmm14,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm15,%xmm13
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm13
paddd %xmm5,%xmm9
pxor %xmm2,%xmm7
paddd %xmm5,%xmm13
paddd %xmm7,%xmm13
movdqa 192-128(%rax),%xmm5
paddd 64-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 144-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm9,%xmm7
movdqa %xmm9,%xmm2
psrld $6,%xmm7
movdqa %xmm9,%xmm1
pslld $7,%xmm2
movdqa %xmm6,176-128(%rax)
paddd %xmm12,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd -32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm9,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm4
pslld $26-21,%xmm2
pandn %xmm11,%xmm0
pand %xmm10,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm13,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm14,%xmm4
movdqa %xmm13,%xmm7
pslld $10,%xmm2
pxor %xmm13,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm14,%xmm12
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm12
paddd %xmm6,%xmm8
pxor %xmm2,%xmm7
paddd %xmm6,%xmm12
paddd %xmm7,%xmm12
movdqa 208-128(%rax),%xmm6
paddd 80-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 160-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm8,%xmm7
movdqa %xmm8,%xmm2
psrld $6,%xmm7
movdqa %xmm8,%xmm1
pslld $7,%xmm2
movdqa %xmm5,192-128(%rax)
paddd %xmm11,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 0(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm8,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm8,%xmm3
pslld $26-21,%xmm2
pandn %xmm10,%xmm0
pand %xmm9,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm12,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm12,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm13,%xmm3
movdqa %xmm12,%xmm7
pslld $10,%xmm2
pxor %xmm12,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm13,%xmm11
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm11
paddd %xmm5,%xmm15
pxor %xmm2,%xmm7
paddd %xmm5,%xmm11
paddd %xmm7,%xmm11
movdqa 224-128(%rax),%xmm5
paddd 96-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 176-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm15,%xmm7
movdqa %xmm15,%xmm2
psrld $6,%xmm7
movdqa %xmm15,%xmm1
pslld $7,%xmm2
movdqa %xmm6,208-128(%rax)
paddd %xmm10,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 32(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm15,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm4
pslld $26-21,%xmm2
pandn %xmm9,%xmm0
pand %xmm8,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm11,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm11,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm12,%xmm4
movdqa %xmm11,%xmm7
pslld $10,%xmm2
pxor %xmm11,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm12,%xmm10
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm10
paddd %xmm6,%xmm14
pxor %xmm2,%xmm7
paddd %xmm6,%xmm10
paddd %xmm7,%xmm10
movdqa 240-128(%rax),%xmm6
paddd 112-128(%rax),%xmm5
movdqa %xmm6,%xmm7
movdqa %xmm6,%xmm1
psrld $3,%xmm7
movdqa %xmm6,%xmm2
psrld $7,%xmm1
movdqa 192-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm3
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm3,%xmm1
psrld $17,%xmm3
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
psrld $19-17,%xmm3
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm3,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm5
movdqa %xmm14,%xmm7
movdqa %xmm14,%xmm2
psrld $6,%xmm7
movdqa %xmm14,%xmm1
pslld $7,%xmm2
movdqa %xmm5,224-128(%rax)
paddd %xmm9,%xmm5
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 64(%rbp),%xmm5
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm14,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm14,%xmm3
pslld $26-21,%xmm2
pandn %xmm8,%xmm0
pand %xmm15,%xmm3
pxor %xmm1,%xmm7
movdqa %xmm10,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm10,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm5
pxor %xmm3,%xmm0
movdqa %xmm11,%xmm3
movdqa %xmm10,%xmm7
pslld $10,%xmm2
pxor %xmm10,%xmm3
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm5
pslld $19-10,%xmm2
pand %xmm3,%xmm4
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm11,%xmm9
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm4,%xmm9
paddd %xmm5,%xmm13
pxor %xmm2,%xmm7
paddd %xmm5,%xmm9
paddd %xmm7,%xmm9
movdqa 0-128(%rax),%xmm5
paddd 128-128(%rax),%xmm6
movdqa %xmm5,%xmm7
movdqa %xmm5,%xmm1
psrld $3,%xmm7
movdqa %xmm5,%xmm2
psrld $7,%xmm1
movdqa 208-128(%rax),%xmm0
pslld $14,%xmm2
pxor %xmm1,%xmm7
psrld $18-7,%xmm1
movdqa %xmm0,%xmm4
pxor %xmm2,%xmm7
pslld $25-14,%xmm2
pxor %xmm1,%xmm7
psrld $10,%xmm0
movdqa %xmm4,%xmm1
psrld $17,%xmm4
pxor %xmm2,%xmm7
pslld $13,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
psrld $19-17,%xmm4
pxor %xmm1,%xmm0
pslld $15-13,%xmm1
pxor %xmm4,%xmm0
pxor %xmm1,%xmm0
paddd %xmm0,%xmm6
movdqa %xmm13,%xmm7
movdqa %xmm13,%xmm2
psrld $6,%xmm7
movdqa %xmm13,%xmm1
pslld $7,%xmm2
movdqa %xmm6,240-128(%rax)
paddd %xmm8,%xmm6
psrld $11,%xmm1
pxor %xmm2,%xmm7
pslld $21-7,%xmm2
paddd 96(%rbp),%xmm6
pxor %xmm1,%xmm7
psrld $25-11,%xmm1
movdqa %xmm13,%xmm0
pxor %xmm2,%xmm7
movdqa %xmm13,%xmm4
pslld $26-21,%xmm2
pandn %xmm15,%xmm0
pand %xmm14,%xmm4
pxor %xmm1,%xmm7
movdqa %xmm9,%xmm1
pxor %xmm2,%xmm7
movdqa %xmm9,%xmm2
psrld $2,%xmm1
paddd %xmm7,%xmm6
pxor %xmm4,%xmm0
movdqa %xmm10,%xmm4
movdqa %xmm9,%xmm7
pslld $10,%xmm2
pxor %xmm9,%xmm4
psrld $13,%xmm7
pxor %xmm2,%xmm1
paddd %xmm0,%xmm6
pslld $19-10,%xmm2
pand %xmm4,%xmm3
pxor %xmm7,%xmm1
psrld $22-13,%xmm7
pxor %xmm2,%xmm1
movdqa %xmm10,%xmm8
pslld $30-19,%xmm2
pxor %xmm1,%xmm7
pxor %xmm3,%xmm8
paddd %xmm6,%xmm12
pxor %xmm2,%xmm7
paddd %xmm6,%xmm8
paddd %xmm7,%xmm8
leaq 256(%rbp),%rbp
decl %ecx
jnz .Loop_16_xx
movl $1,%ecx
leaq K256+128(%rip),%rbp
movdqa (%rbx),%xmm7
cmpl 0(%rbx),%ecx
pxor %xmm0,%xmm0
cmovgeq %rbp,%r8
cmpl 4(%rbx),%ecx
movdqa %xmm7,%xmm6
cmovgeq %rbp,%r9
cmpl 8(%rbx),%ecx
pcmpgtd %xmm0,%xmm6
cmovgeq %rbp,%r10
cmpl 12(%rbx),%ecx
paddd %xmm6,%xmm7
cmovgeq %rbp,%r11
movdqu 0-128(%rdi),%xmm0
pand %xmm6,%xmm8
movdqu 32-128(%rdi),%xmm1
pand %xmm6,%xmm9
movdqu 64-128(%rdi),%xmm2
pand %xmm6,%xmm10
movdqu 96-128(%rdi),%xmm5
pand %xmm6,%xmm11
paddd %xmm0,%xmm8
movdqu 128-128(%rdi),%xmm0
pand %xmm6,%xmm12
paddd %xmm1,%xmm9
movdqu 160-128(%rdi),%xmm1
pand %xmm6,%xmm13
paddd %xmm2,%xmm10
movdqu 192-128(%rdi),%xmm2
pand %xmm6,%xmm14
paddd %xmm5,%xmm11
movdqu 224-128(%rdi),%xmm5
pand %xmm6,%xmm15
paddd %xmm0,%xmm12
paddd %xmm1,%xmm13
movdqu %xmm8,0-128(%rdi)
paddd %xmm2,%xmm14
movdqu %xmm9,32-128(%rdi)
paddd %xmm5,%xmm15
movdqu %xmm10,64-128(%rdi)
movdqu %xmm11,96-128(%rdi)
movdqu %xmm12,128-128(%rdi)
movdqu %xmm13,160-128(%rdi)
movdqu %xmm14,192-128(%rdi)
movdqu %xmm15,224-128(%rdi)
movdqa %xmm7,(%rbx)
movdqa .Lpbswap(%rip),%xmm6
decl %edx
jnz .Loop
movl 280(%rsp),%edx
leaq 16(%rdi),%rdi
leaq 64(%rsi),%rsi
decl %edx
jnz .Loop_grande
.Ldone:
movq 272(%rsp),%rax
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
.Lepilogue:
.byte 0xf3,0xc3
.size xsha256_multi_block,.-xsha256_multi_block
#-----------------------------------------------------------------------
# xsha256_multi_block_shaext(ctx, inp, num)
#   SHA-NI (Intel SHA Extensions) path of the SHA-256 multi-buffer hash.
#   Processes up to 2 independent SHA-256 streams per pass using the
#   sha256rnds2/sha256msg1/sha256msg2 instructions (emitted as .byte
#   sequences so that pre-SHA-NI assemblers can still build this file).
#
# In (SysV AMD64):
#   %rdi = ctx: interleaved hash state; the 8 state words of the lanes
#          live at 32-byte strides (offsets 0,32,...,224 after the +128
#          pointer bias below) — layout inferred from the movq loads and
#          stores; confirm against the multi-block context definition.
#   %rsi = array of per-lane descriptors {u64 data_ptr, u32 num_blocks},
#          16 bytes apart; two lanes are consumed here.
#   %edx = lane count (doubled below — presumably because this path
#          covers 2 lanes where the SSE path covers 4; TODO confirm).
#
# Clobbers: %rax,%rbx,%rbp (saved/restored via the %rax frame anchor),
#   %rcx,%rdx,%r8,%r9, and xmm0-xmm15.
#
# Lane/state register map inside .Loop_shaext:
#   xmm12/xmm13 = lane-0 state pair, xmm14/xmm15 = lane-1 state pair
#   (arranged for sha256rnds2 by the punpck/pshufd $27 sequence below);
#   xmm4-xmm7   = lane-0 message words, xmm8-xmm11 = lane-1 message words;
#   xmm0 carries the round-constant+message input consumed by sha256rnds2.
#
# .byte legend (used throughout the round body):
#   69,15,56,203,236     = sha256rnds2 %xmm12,%xmm13
#   69,15,56,203,254     = sha256rnds2 %xmm14,%xmm15
#   69,15,56,203,229     = sha256rnds2 %xmm13,%xmm12
#   69,15,56,203,247     = sha256rnds2 %xmm15,%xmm14
#   15,56,204,...        = sha256msg1 (message schedule, sigma0 part)
#   15,56,205,...        = sha256msg2 (message schedule, sigma1 part)
#   102,15,56,0,... /102,68,15,56,0,... = pshufb (byte reorder via %xmm3)
#   102,15,58,15,...,4 / 102,65,15,58,15,...,4 = palignr $4 (schedule shift)
#-----------------------------------------------------------------------
.type xsha256_multi_block_shaext,@function
.align 32
xsha256_multi_block_shaext:
_shaext_shortcut:
movq %rsp,%rax                          # %rax anchors the caller frame for the epilogue
pushq %rbx
pushq %rbp
subq $288,%rsp                          # local frame: saved state, lane counters, saved %rsp
shll $1,%edx                            # double the lane count for this 2-lane path (see header)
andq $-256,%rsp                         # align frame to 256 bytes
leaq 128(%rdi),%rdi                     # bias ctx by +128 so slots 0..224 encode as disp8 (-128..+96)
movq %rax,272(%rsp)                     # stash original %rsp for the epilogue
.Lbody_shaext:
leaq 256(%rsp),%rbx                     # %rbx -> per-lane block counters
leaq K256_shaext+128(%rip),%rbp         # K table, biased +128 (disp8 again)
.Loop_grande_shaext:
movl %edx,280(%rsp)                     # save outer lane count
xorl %edx,%edx                          # %edx = max(blocks) over active lanes
# Lane 0 descriptor: data ptr -> %r8, block count -> 0(%rbx).
movq 0(%rsi),%r8
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rsp,%r8                        # inactive lane: point at %rsp (harmless readable memory)
# Lane 1 descriptor: data ptr -> %r9, block count -> 4(%rbx).
movq 16(%rsi),%r9
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rsp,%r9
testl %edx,%edx                         # nothing to do at all?
jz .Ldone_shaext
# Gather both lanes' 8 state words (64-bit halves at 32-byte stride) and
# transpose them into the ABEF/CDGH ordering sha256rnds2 expects.
movq 0-128(%rdi),%xmm12
movq 32-128(%rdi),%xmm4
movq 64-128(%rdi),%xmm13
movq 96-128(%rdi),%xmm5
movq 128-128(%rdi),%xmm8
movq 160-128(%rdi),%xmm9
movq 192-128(%rdi),%xmm10
movq 224-128(%rdi),%xmm11
punpckldq %xmm4,%xmm12
punpckldq %xmm5,%xmm13
punpckldq %xmm9,%xmm8
punpckldq %xmm11,%xmm10
movdqa K256_shaext-16(%rip),%xmm3       # %xmm3: pshufb mask (presumably big-endian byte swap — confirm at K256_shaext-16)
movdqa %xmm12,%xmm14
movdqa %xmm13,%xmm15
punpcklqdq %xmm8,%xmm12                 # xmm12/xmm13 = lane-0 state pair
punpcklqdq %xmm10,%xmm13
punpckhqdq %xmm8,%xmm14                 # xmm14/xmm15 = lane-1 state pair
punpckhqdq %xmm10,%xmm15
pshufd $27,%xmm12,%xmm12                # $27 reverses dword order within each register
pshufd $27,%xmm13,%xmm13
pshufd $27,%xmm14,%xmm14
pshufd $27,%xmm15,%xmm15
jmp .Loop_shaext
.align 32
# One 64-byte block per lane per iteration; the two lanes' rounds are
# interleaved instruction-by-instruction to hide sha256rnds2 latency.
.Loop_shaext:
movdqu 0(%r8),%xmm4                     # lane-0 message words W0..W15 -> xmm4..xmm7
movdqu 0(%r9),%xmm8                     # lane-1 message words -> xmm8..xmm11
movdqu 16(%r8),%xmm5
movdqu 16(%r9),%xmm9
movdqu 32(%r8),%xmm6
.byte 102,15,56,0,227                   # pshufb %xmm3,%xmm4 (byte reorder)
movdqu 32(%r9),%xmm10
.byte 102,68,15,56,0,195                # pshufb %xmm3,%xmm8
movdqu 48(%r8),%xmm7
leaq 64(%r8),%r8                        # advance lane-0 input pointer
movdqu 48(%r9),%xmm11
leaq 64(%r9),%r9                        # advance lane-1 input pointer
# --- rounds 0-3 (K[0..3]); also snapshot the input state for the
# --- final Davies-Meyer add at 64/80/96/112(%rsp).
movdqa 0-128(%rbp),%xmm0
.byte 102,15,56,0,235                   # pshufb %xmm3,%xmm5
paddd %xmm4,%xmm0                       # xmm0 = W0..3 + K0..3 (lane 0)
pxor %xmm12,%xmm4                       # NOTE(review): this pxor and the one below cancel out
movdqa %xmm0,%xmm1
movdqa 0-128(%rbp),%xmm2
.byte 102,68,15,56,0,203                # pshufb %xmm3,%xmm9
paddd %xmm8,%xmm2                       # lane-1 W+K
movdqa %xmm13,80(%rsp)                  # save lane-0 input state (half 2)
.byte 69,15,56,203,236                  # sha256rnds2 %xmm12,%xmm13
pxor %xmm14,%xmm8                       # NOTE(review): cancels with the pxor below
movdqa %xmm2,%xmm0
movdqa %xmm15,112(%rsp)                 # save lane-1 input state (half 2)
.byte 69,15,56,203,254                  # sha256rnds2 %xmm14,%xmm15
pshufd $0x0e,%xmm1,%xmm0                # move upper W+K pair into xmm0 low half
pxor %xmm12,%xmm4
movdqa %xmm12,64(%rsp)                  # save lane-0 input state (half 1)
.byte 69,15,56,203,229                  # sha256rnds2 %xmm13,%xmm12
pshufd $0x0e,%xmm2,%xmm0
pxor %xmm14,%xmm8
movdqa %xmm14,96(%rsp)                  # save lane-1 input state (half 1)
# --- rounds 4-7 (K[4..7])
movdqa 16-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 102,15,56,0,243                   # pshufb %xmm3,%xmm6
.byte 69,15,56,203,247                  # sha256rnds2 %xmm15,%xmm14
movdqa %xmm1,%xmm0
movdqa 16-128(%rbp),%xmm2
paddd %xmm9,%xmm2
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
prefetcht0 127(%r8)                     # prefetch next input block, lane 0
.byte 102,15,56,0,251                   # pshufb %xmm3,%xmm7
.byte 102,68,15,56,0,211                # pshufb %xmm3,%xmm10
prefetcht0 127(%r9)                     # prefetch next input block, lane 1
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
.byte 102,68,15,56,0,219                # pshufb %xmm3,%xmm11
.byte 15,56,204,229                     # sha256msg1 %xmm5,%xmm4 (start schedule for W16..)
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 8-11 (K[8..11]); message schedule kicks in from here, the
# --- same palignr/sha256msg1/sha256msg2 pattern repeats for each group.
movdqa 32-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
movdqa 32-128(%rbp),%xmm2
paddd %xmm10,%xmm2
.byte 69,15,56,203,236
.byte 69,15,56,204,193                  # sha256msg1 %xmm9,%xmm8
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm3
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
.byte 102,15,58,15,222,4                # palignr $4,%xmm6,%xmm3 (schedule shift, lane 0)
paddd %xmm3,%xmm4
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4             # palignr $4,%xmm10,%xmm3 (lane 1)
.byte 15,56,204,238                     # sha256msg1 %xmm6,%xmm5
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 12-15 (K[12..15])
movdqa 48-128(%rbp),%xmm1
paddd %xmm7,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,202                  # sha256msg1 %xmm10,%xmm9
movdqa %xmm1,%xmm0
movdqa 48-128(%rbp),%xmm2
paddd %xmm3,%xmm8
paddd %xmm11,%xmm2
.byte 15,56,205,231                     # sha256msg2 %xmm7,%xmm4 (completes W16..19)
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm4,%xmm3
.byte 102,15,58,15,223,4                # palignr $4,%xmm7,%xmm3
.byte 69,15,56,203,254
.byte 69,15,56,205,195                  # sha256msg2 %xmm11,%xmm8
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm5
movdqa %xmm8,%xmm3
.byte 102,65,15,58,15,219,4             # palignr $4,%xmm11,%xmm3
.byte 15,56,204,247                     # sha256msg1 %xmm7,%xmm6
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 16-19 (K[16..19])
movdqa 64-128(%rbp),%xmm1
paddd %xmm4,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,211
movdqa %xmm1,%xmm0
movdqa 64-128(%rbp),%xmm2
paddd %xmm3,%xmm9
paddd %xmm8,%xmm2
.byte 15,56,205,236
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,58,15,220,4
.byte 69,15,56,203,254
.byte 69,15,56,205,200
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm6
movdqa %xmm9,%xmm3
.byte 102,65,15,58,15,216,4
.byte 15,56,204,252
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 20-23 (K[20..23])
movdqa 80-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,216
movdqa %xmm1,%xmm0
movdqa 80-128(%rbp),%xmm2
paddd %xmm3,%xmm10
paddd %xmm9,%xmm2
.byte 15,56,205,245
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm6,%xmm3
.byte 102,15,58,15,221,4
.byte 69,15,56,203,254
.byte 69,15,56,205,209
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm7
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,217,4
.byte 15,56,204,229
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 24-27 (K[24..27])
movdqa 96-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,193
movdqa %xmm1,%xmm0
movdqa 96-128(%rbp),%xmm2
paddd %xmm3,%xmm11
paddd %xmm10,%xmm2
.byte 15,56,205,254
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm3
.byte 102,15,58,15,222,4
.byte 69,15,56,203,254
.byte 69,15,56,205,218
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm4
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
.byte 15,56,204,238
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 28-31 (K[28..31])
movdqa 112-128(%rbp),%xmm1
paddd %xmm7,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,202
movdqa %xmm1,%xmm0
movdqa 112-128(%rbp),%xmm2
paddd %xmm3,%xmm8
paddd %xmm11,%xmm2
.byte 15,56,205,231
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm4,%xmm3
.byte 102,15,58,15,223,4
.byte 69,15,56,203,254
.byte 69,15,56,205,195
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm5
movdqa %xmm8,%xmm3
.byte 102,65,15,58,15,219,4
.byte 15,56,204,247
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 32-35 (K[32..35])
movdqa 128-128(%rbp),%xmm1
paddd %xmm4,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,211
movdqa %xmm1,%xmm0
movdqa 128-128(%rbp),%xmm2
paddd %xmm3,%xmm9
paddd %xmm8,%xmm2
.byte 15,56,205,236
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,58,15,220,4
.byte 69,15,56,203,254
.byte 69,15,56,205,200
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm6
movdqa %xmm9,%xmm3
.byte 102,65,15,58,15,216,4
.byte 15,56,204,252
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 36-39 (K[36..39])
movdqa 144-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,216
movdqa %xmm1,%xmm0
movdqa 144-128(%rbp),%xmm2
paddd %xmm3,%xmm10
paddd %xmm9,%xmm2
.byte 15,56,205,245
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm6,%xmm3
.byte 102,15,58,15,221,4
.byte 69,15,56,203,254
.byte 69,15,56,205,209
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm7
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,217,4
.byte 15,56,204,229
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 40-43 (K[40..43])
movdqa 160-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,193
movdqa %xmm1,%xmm0
movdqa 160-128(%rbp),%xmm2
paddd %xmm3,%xmm11
paddd %xmm10,%xmm2
.byte 15,56,205,254
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm3
.byte 102,15,58,15,222,4
.byte 69,15,56,203,254
.byte 69,15,56,205,218
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm4
movdqa %xmm11,%xmm3
.byte 102,65,15,58,15,218,4
.byte 15,56,204,238
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 44-47 (K[44..47])
movdqa 176-128(%rbp),%xmm1
paddd %xmm7,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,202
movdqa %xmm1,%xmm0
movdqa 176-128(%rbp),%xmm2
paddd %xmm3,%xmm8
paddd %xmm11,%xmm2
.byte 15,56,205,231
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm4,%xmm3
.byte 102,15,58,15,223,4
.byte 69,15,56,203,254
.byte 69,15,56,205,195
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm5
movdqa %xmm8,%xmm3
.byte 102,65,15,58,15,219,4
.byte 15,56,204,247
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 48-51 (K[48..51])
movdqa 192-128(%rbp),%xmm1
paddd %xmm4,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,211
movdqa %xmm1,%xmm0
movdqa 192-128(%rbp),%xmm2
paddd %xmm3,%xmm9
paddd %xmm8,%xmm2
.byte 15,56,205,236
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,58,15,220,4
.byte 69,15,56,203,254
.byte 69,15,56,205,200
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm6
movdqa %xmm9,%xmm3
.byte 102,65,15,58,15,216,4
.byte 15,56,204,252
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 52-55 (K[52..55]); schedule winds down from here
movdqa 208-128(%rbp),%xmm1
paddd %xmm5,%xmm1
.byte 69,15,56,203,247
.byte 69,15,56,204,216
movdqa %xmm1,%xmm0
movdqa 208-128(%rbp),%xmm2
paddd %xmm3,%xmm10
paddd %xmm9,%xmm2
.byte 15,56,205,245
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movdqa %xmm6,%xmm3
.byte 102,15,58,15,221,4
.byte 69,15,56,203,254
.byte 69,15,56,205,209
pshufd $0x0e,%xmm1,%xmm0
paddd %xmm3,%xmm7
movdqa %xmm10,%xmm3
.byte 102,65,15,58,15,217,4
nop
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
# --- rounds 56-59 (K[56..59]); loop bookkeeping interleaved with rounds
movdqa 224-128(%rbp),%xmm1
paddd %xmm6,%xmm1
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
movdqa 224-128(%rbp),%xmm2
paddd %xmm3,%xmm11
paddd %xmm10,%xmm2
.byte 15,56,205,254
nop
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
movl $1,%ecx                            # %ecx=1: threshold for "last block done" checks below
pxor %xmm6,%xmm6                        # zero vector for the pcmpgtd lane masks
.byte 69,15,56,203,254
.byte 69,15,56,205,218
pshufd $0x0e,%xmm1,%xmm0
# --- rounds 60-63 (K[60..63])
movdqa 240-128(%rbp),%xmm1
paddd %xmm7,%xmm1
movq (%rbx),%xmm7                       # reload both lane block counters
nop
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
movdqa 240-128(%rbp),%xmm2
paddd %xmm11,%xmm2
.byte 69,15,56,203,247
movdqa %xmm1,%xmm0
# Per-lane termination: if a lane's counter has reached 1, park its data
# pointer at %rsp so the next iteration's loads are harmless.
cmpl 0(%rbx),%ecx
cmovgeq %rsp,%r8
cmpl 4(%rbx),%ecx
cmovgeq %rsp,%r9
pshufd $0x00,%xmm7,%xmm9                # broadcast lane-0 counter
.byte 69,15,56,203,236
movdqa %xmm2,%xmm0
pshufd $0x55,%xmm7,%xmm10               # broadcast lane-1 counter
movdqa %xmm7,%xmm11
.byte 69,15,56,203,254
pshufd $0x0e,%xmm1,%xmm0
pcmpgtd %xmm6,%xmm9                     # lane-0 active mask (counter > 0)
pcmpgtd %xmm6,%xmm10                    # lane-1 active mask
.byte 69,15,56,203,229
pshufd $0x0e,%xmm2,%xmm0
pcmpgtd %xmm6,%xmm11                    # per-lane "active" mask for counter decrement
movdqa K256_shaext-16(%rip),%xmm3       # reload the pshufb mask for the next block
.byte 69,15,56,203,247
# Masked Davies-Meyer feed-forward: keep the round output only for
# active lanes, then add back the input state saved at rounds 0-3
# (inactive lanes end up with their state unchanged).
pand %xmm9,%xmm13
pand %xmm10,%xmm15
pand %xmm9,%xmm12
pand %xmm10,%xmm14
paddd %xmm7,%xmm11                      # counter += -1 on active lanes (mask is all-ones)
paddd 80(%rsp),%xmm13
paddd 112(%rsp),%xmm15
paddd 64(%rsp),%xmm12
paddd 96(%rsp),%xmm14
movq %xmm11,(%rbx)                      # store decremented counters
decl %edx
jnz .Loop_shaext
# All blocks of this lane pair hashed: undo the input transpose and
# scatter the 8 state words back to the interleaved context.
movl 280(%rsp),%edx                     # restore outer lane count
pshufd $27,%xmm12,%xmm12
pshufd $27,%xmm13,%xmm13
pshufd $27,%xmm14,%xmm14
pshufd $27,%xmm15,%xmm15
movdqa %xmm12,%xmm5
movdqa %xmm13,%xmm6
punpckldq %xmm14,%xmm12
punpckhdq %xmm14,%xmm5
punpckldq %xmm15,%xmm13
punpckhdq %xmm15,%xmm6
movq %xmm12,0-128(%rdi)
psrldq $8,%xmm12
movq %xmm5,128-128(%rdi)
psrldq $8,%xmm5
movq %xmm12,32-128(%rdi)
movq %xmm5,160-128(%rdi)
movq %xmm13,64-128(%rdi)
psrldq $8,%xmm13
movq %xmm6,192-128(%rdi)
psrldq $8,%xmm6
movq %xmm13,96-128(%rdi)
movq %xmm6,224-128(%rdi)
leaq 8(%rdi),%rdi                       # advance to the next lane pair in ctx
leaq 32(%rsi),%rsi                      # advance past the two consumed descriptors
decl %edx
jnz .Loop_grande_shaext
.Ldone_shaext:
# Epilogue: %rax still holds the original %rsp; the two qwords below it
# are the pushed %rbx/%rbp from the prologue.
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
.Lepilogue_shaext:
.byte 0xf3,0xc3                         # rep ret
.size xsha256_multi_block_shaext,.-xsha256_multi_block_shaext
.type xsha256_multi_block_avx,@function
.align 32
xsha256_multi_block_avx:
_avx_shortcut:
shrq $32,%rcx
cmpl $2,%edx
jb .Lavx
testl $32,%ecx
jnz _avx2_shortcut
jmp .Lavx
.align 32
.Lavx:
movq %rsp,%rax
pushq %rbx
pushq %rbp
subq $288,%rsp
andq $-256,%rsp
movq %rax,272(%rsp)
.Lbody_avx:
leaq K256+128(%rip),%rbp
leaq 256(%rsp),%rbx
leaq 128(%rdi),%rdi
.Loop_grande_avx:
movl %edx,280(%rsp)
xorl %edx,%edx
movq 0(%rsi),%r8
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rbp,%r8
movq 16(%rsi),%r9
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rbp,%r9
movq 32(%rsi),%r10
movl 40(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,8(%rbx)
cmovleq %rbp,%r10
movq 48(%rsi),%r11
movl 56(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,12(%rbx)
cmovleq %rbp,%r11
testl %edx,%edx
jz .Ldone_avx
vmovdqu 0-128(%rdi),%xmm8
leaq 128(%rsp),%rax
vmovdqu 32-128(%rdi),%xmm9
vmovdqu 64-128(%rdi),%xmm10
vmovdqu 96-128(%rdi),%xmm11
vmovdqu 128-128(%rdi),%xmm12
vmovdqu 160-128(%rdi),%xmm13
vmovdqu 192-128(%rdi),%xmm14
vmovdqu 224-128(%rdi),%xmm15
vmovdqu .Lpbswap(%rip),%xmm6
jmp .Loop_avx
.align 32
.Loop_avx:
vpxor %xmm9,%xmm10,%xmm4
vmovd 0(%r8),%xmm5
vmovd 0(%r9),%xmm0
vpinsrd $1,0(%r10),%xmm5,%xmm5
vpinsrd $1,0(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,0-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovd 4(%r8),%xmm5
vmovd 4(%r9),%xmm0
vpinsrd $1,4(%r10),%xmm5,%xmm5
vpinsrd $1,4(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm5,16-128(%rax)
vpaddd %xmm14,%xmm5,%xmm5
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm5,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovd 8(%r8),%xmm5
vmovd 8(%r9),%xmm0
vpinsrd $1,8(%r10),%xmm5,%xmm5
vpinsrd $1,8(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,32-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovd 12(%r8),%xmm5
vmovd 12(%r9),%xmm0
vpinsrd $1,12(%r10),%xmm5,%xmm5
vpinsrd $1,12(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm5,48-128(%rax)
vpaddd %xmm12,%xmm5,%xmm5
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm5,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovd 16(%r8),%xmm5
vmovd 16(%r9),%xmm0
vpinsrd $1,16(%r10),%xmm5,%xmm5
vpinsrd $1,16(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,64-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovd 20(%r8),%xmm5
vmovd 20(%r9),%xmm0
vpinsrd $1,20(%r10),%xmm5,%xmm5
vpinsrd $1,20(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm5,80-128(%rax)
vpaddd %xmm10,%xmm5,%xmm5
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm5,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovd 24(%r8),%xmm5
vmovd 24(%r9),%xmm0
vpinsrd $1,24(%r10),%xmm5,%xmm5
vpinsrd $1,24(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,96-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovd 28(%r8),%xmm5
vmovd 28(%r9),%xmm0
vpinsrd $1,28(%r10),%xmm5,%xmm5
vpinsrd $1,28(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm5,112-128(%rax)
vpaddd %xmm8,%xmm5,%xmm5
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm5,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
vmovd 32(%r8),%xmm5
vmovd 32(%r9),%xmm0
vpinsrd $1,32(%r10),%xmm5,%xmm5
vpinsrd $1,32(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,128-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovd 36(%r8),%xmm5
vmovd 36(%r9),%xmm0
vpinsrd $1,36(%r10),%xmm5,%xmm5
vpinsrd $1,36(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm5,144-128(%rax)
vpaddd %xmm14,%xmm5,%xmm5
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm5,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovd 40(%r8),%xmm5
vmovd 40(%r9),%xmm0
vpinsrd $1,40(%r10),%xmm5,%xmm5
vpinsrd $1,40(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,160-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovd 44(%r8),%xmm5
vmovd 44(%r9),%xmm0
vpinsrd $1,44(%r10),%xmm5,%xmm5
vpinsrd $1,44(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm5,176-128(%rax)
vpaddd %xmm12,%xmm5,%xmm5
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm5,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovd 48(%r8),%xmm5
vmovd 48(%r9),%xmm0
vpinsrd $1,48(%r10),%xmm5,%xmm5
vpinsrd $1,48(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,192-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovd 52(%r8),%xmm5
vmovd 52(%r9),%xmm0
vpinsrd $1,52(%r10),%xmm5,%xmm5
vpinsrd $1,52(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm5,208-128(%rax)
vpaddd %xmm10,%xmm5,%xmm5
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm5,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovd 56(%r8),%xmm5
vmovd 56(%r9),%xmm0
vpinsrd $1,56(%r10),%xmm5,%xmm5
vpinsrd $1,56(%r11),%xmm0,%xmm0
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,224-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovd 60(%r8),%xmm5
leaq 64(%r8),%r8
vmovd 60(%r9),%xmm0
leaq 64(%r9),%r9
vpinsrd $1,60(%r10),%xmm5,%xmm5
leaq 64(%r10),%r10
vpinsrd $1,60(%r11),%xmm0,%xmm0
leaq 64(%r11),%r11
vpunpckldq %xmm0,%xmm5,%xmm5
vpshufb %xmm6,%xmm5,%xmm5
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm5,240-128(%rax)
vpaddd %xmm8,%xmm5,%xmm5
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
prefetcht0 63(%r8)
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
prefetcht0 63(%r9)
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
prefetcht0 63(%r10)
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
prefetcht0 63(%r11)
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm5,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
vmovdqu 0-128(%rax),%xmm5
movl $3,%ecx
jmp .Loop_16_xx_avx
.align 32
.Loop_16_xx_avx:
vmovdqu 16-128(%rax),%xmm6
vpaddd 144-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 224-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,0-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovdqu 32-128(%rax),%xmm5
vpaddd 160-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 240-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm6,16-128(%rax)
vpaddd %xmm14,%xmm6,%xmm6
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm6,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovdqu 48-128(%rax),%xmm6
vpaddd 176-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 0-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,32-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovdqu 64-128(%rax),%xmm5
vpaddd 192-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 16-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm6,48-128(%rax)
vpaddd %xmm12,%xmm6,%xmm6
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm6,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovdqu 80-128(%rax),%xmm6
vpaddd 208-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 32-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,64-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovdqu 96-128(%rax),%xmm5
vpaddd 224-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 48-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm6,80-128(%rax)
vpaddd %xmm10,%xmm6,%xmm6
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm6,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovdqu 112-128(%rax),%xmm6
vpaddd 240-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 64-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,96-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovdqu 128-128(%rax),%xmm5
vpaddd 0-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 80-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm6,112-128(%rax)
vpaddd %xmm8,%xmm6,%xmm6
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm6,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
addq $256,%rbp
vmovdqu 144-128(%rax),%xmm6
vpaddd 16-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 96-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm12,%xmm7
vpslld $26,%xmm12,%xmm2
vmovdqu %xmm5,128-128(%rax)
vpaddd %xmm15,%xmm5,%xmm5
vpsrld $11,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm12,%xmm2
vpaddd -128(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm12,%xmm2
vpandn %xmm14,%xmm12,%xmm0
vpand %xmm13,%xmm12,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm8,%xmm15
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm8,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm8,%xmm9,%xmm3
vpxor %xmm1,%xmm15,%xmm15
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm8,%xmm1
vpslld $19,%xmm8,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm15,%xmm7
vpsrld $22,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm8,%xmm2
vpxor %xmm4,%xmm9,%xmm15
vpaddd %xmm5,%xmm11,%xmm11
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm15,%xmm15
vpaddd %xmm7,%xmm15,%xmm15
vmovdqu 160-128(%rax),%xmm5
vpaddd 32-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 112-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm11,%xmm7
vpslld $26,%xmm11,%xmm2
vmovdqu %xmm6,144-128(%rax)
vpaddd %xmm14,%xmm6,%xmm6
vpsrld $11,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm11,%xmm2
vpaddd -96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm11,%xmm2
vpandn %xmm13,%xmm11,%xmm0
vpand %xmm12,%xmm11,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm15,%xmm14
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm15,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm15,%xmm8,%xmm4
vpxor %xmm1,%xmm14,%xmm14
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm15,%xmm1
vpslld $19,%xmm15,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm14,%xmm7
vpsrld $22,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm15,%xmm2
vpxor %xmm3,%xmm8,%xmm14
vpaddd %xmm6,%xmm10,%xmm10
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm14,%xmm14
vpaddd %xmm7,%xmm14,%xmm14
vmovdqu 176-128(%rax),%xmm6
vpaddd 48-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 128-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm10,%xmm7
vpslld $26,%xmm10,%xmm2
vmovdqu %xmm5,160-128(%rax)
vpaddd %xmm13,%xmm5,%xmm5
vpsrld $11,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm10,%xmm2
vpaddd -64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm10,%xmm2
vpandn %xmm12,%xmm10,%xmm0
vpand %xmm11,%xmm10,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm14,%xmm13
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm14,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm14,%xmm15,%xmm3
vpxor %xmm1,%xmm13,%xmm13
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm14,%xmm1
vpslld $19,%xmm14,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm13,%xmm7
vpsrld $22,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm14,%xmm2
vpxor %xmm4,%xmm15,%xmm13
vpaddd %xmm5,%xmm9,%xmm9
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm13,%xmm13
vpaddd %xmm7,%xmm13,%xmm13
vmovdqu 192-128(%rax),%xmm5
vpaddd 64-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 144-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm9,%xmm7
vpslld $26,%xmm9,%xmm2
vmovdqu %xmm6,176-128(%rax)
vpaddd %xmm12,%xmm6,%xmm6
vpsrld $11,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm9,%xmm2
vpaddd -32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm9,%xmm2
vpandn %xmm11,%xmm9,%xmm0
vpand %xmm10,%xmm9,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm13,%xmm12
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm13,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm13,%xmm14,%xmm4
vpxor %xmm1,%xmm12,%xmm12
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm13,%xmm1
vpslld $19,%xmm13,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm12,%xmm7
vpsrld $22,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm13,%xmm2
vpxor %xmm3,%xmm14,%xmm12
vpaddd %xmm6,%xmm8,%xmm8
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm12,%xmm12
vpaddd %xmm7,%xmm12,%xmm12
vmovdqu 208-128(%rax),%xmm6
vpaddd 80-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 160-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm8,%xmm7
vpslld $26,%xmm8,%xmm2
vmovdqu %xmm5,192-128(%rax)
vpaddd %xmm11,%xmm5,%xmm5
vpsrld $11,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm8,%xmm2
vpaddd 0(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm8,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm8,%xmm2
vpandn %xmm10,%xmm8,%xmm0
vpand %xmm9,%xmm8,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm12,%xmm11
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm12,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm12,%xmm13,%xmm3
vpxor %xmm1,%xmm11,%xmm11
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm12,%xmm1
vpslld $19,%xmm12,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm11,%xmm7
vpsrld $22,%xmm12,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm12,%xmm2
vpxor %xmm4,%xmm13,%xmm11
vpaddd %xmm5,%xmm15,%xmm15
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm11,%xmm11
vpaddd %xmm7,%xmm11,%xmm11
vmovdqu 224-128(%rax),%xmm5
vpaddd 96-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 176-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm15,%xmm7
vpslld $26,%xmm15,%xmm2
vmovdqu %xmm6,208-128(%rax)
vpaddd %xmm10,%xmm6,%xmm6
vpsrld $11,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm15,%xmm2
vpaddd 32(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm15,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm15,%xmm2
vpandn %xmm9,%xmm15,%xmm0
vpand %xmm8,%xmm15,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm11,%xmm10
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm11,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm11,%xmm12,%xmm4
vpxor %xmm1,%xmm10,%xmm10
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm11,%xmm1
vpslld $19,%xmm11,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm10,%xmm7
vpsrld $22,%xmm11,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm11,%xmm2
vpxor %xmm3,%xmm12,%xmm10
vpaddd %xmm6,%xmm14,%xmm14
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm10,%xmm10
vpaddd %xmm7,%xmm10,%xmm10
vmovdqu 240-128(%rax),%xmm6
vpaddd 112-128(%rax),%xmm5,%xmm5
vpsrld $3,%xmm6,%xmm7
vpsrld $7,%xmm6,%xmm1
vpslld $25,%xmm6,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm6,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm6,%xmm2
vmovdqu 192-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm5,%xmm5
vpxor %xmm1,%xmm3,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $6,%xmm14,%xmm7
vpslld $26,%xmm14,%xmm2
vmovdqu %xmm5,224-128(%rax)
vpaddd %xmm9,%xmm5,%xmm5
vpsrld $11,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm14,%xmm2
vpaddd 64(%rbp),%xmm5,%xmm5
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm14,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm14,%xmm2
vpandn %xmm8,%xmm14,%xmm0
vpand %xmm15,%xmm14,%xmm3
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm10,%xmm9
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm10,%xmm1
vpxor %xmm3,%xmm0,%xmm0
vpxor %xmm10,%xmm11,%xmm3
vpxor %xmm1,%xmm9,%xmm9
vpaddd %xmm7,%xmm5,%xmm5
vpsrld $13,%xmm10,%xmm1
vpslld $19,%xmm10,%xmm2
vpaddd %xmm0,%xmm5,%xmm5
vpand %xmm3,%xmm4,%xmm4
vpxor %xmm1,%xmm9,%xmm7
vpsrld $22,%xmm10,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm10,%xmm2
vpxor %xmm4,%xmm11,%xmm9
vpaddd %xmm5,%xmm13,%xmm13
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm5,%xmm9,%xmm9
vpaddd %xmm7,%xmm9,%xmm9
vmovdqu 0-128(%rax),%xmm5
vpaddd 128-128(%rax),%xmm6,%xmm6
vpsrld $3,%xmm5,%xmm7
vpsrld $7,%xmm5,%xmm1
vpslld $25,%xmm5,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpsrld $18,%xmm5,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $14,%xmm5,%xmm2
vmovdqu 208-128(%rax),%xmm0
vpsrld $10,%xmm0,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $17,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $15,%xmm0,%xmm2
vpaddd %xmm7,%xmm6,%xmm6
vpxor %xmm1,%xmm4,%xmm7
vpsrld $19,%xmm0,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $13,%xmm0,%xmm2
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $6,%xmm13,%xmm7
vpslld $26,%xmm13,%xmm2
vmovdqu %xmm6,240-128(%rax)
vpaddd %xmm8,%xmm6,%xmm6
vpsrld $11,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $21,%xmm13,%xmm2
vpaddd 96(%rbp),%xmm6,%xmm6
vpxor %xmm1,%xmm7,%xmm7
vpsrld $25,%xmm13,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $7,%xmm13,%xmm2
vpandn %xmm15,%xmm13,%xmm0
vpand %xmm14,%xmm13,%xmm4
vpxor %xmm1,%xmm7,%xmm7
vpsrld $2,%xmm9,%xmm8
vpxor %xmm2,%xmm7,%xmm7
vpslld $30,%xmm9,%xmm1
vpxor %xmm4,%xmm0,%xmm0
vpxor %xmm9,%xmm10,%xmm4
vpxor %xmm1,%xmm8,%xmm8
vpaddd %xmm7,%xmm6,%xmm6
vpsrld $13,%xmm9,%xmm1
vpslld $19,%xmm9,%xmm2
vpaddd %xmm0,%xmm6,%xmm6
vpand %xmm4,%xmm3,%xmm3
vpxor %xmm1,%xmm8,%xmm7
vpsrld $22,%xmm9,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpslld $10,%xmm9,%xmm2
vpxor %xmm3,%xmm10,%xmm8
vpaddd %xmm6,%xmm12,%xmm12
vpxor %xmm1,%xmm7,%xmm7
vpxor %xmm2,%xmm7,%xmm7
vpaddd %xmm6,%xmm8,%xmm8
vpaddd %xmm7,%xmm8,%xmm8
# ----------------------------------------------------------------------
# Tail of xsha256_multi_block_avx (4-lane multi-buffer SHA-256, 128-bit
# vectors): 16-round loop control, per-lane completion bookkeeping, and
# the function epilogue.
#
# Register roles at this point (established earlier in the function):
#   xmm8..xmm15  = the 8 SHA-256 working variables a..h, one 32-bit
#                  lane per input stream (4 streams)
#   %rbx         -> 4 per-lane remaining-block counters (16 bytes)
#   %rdi         -> transposed digest context, biased by +128 so that
#                  -128..224-128 offsets address the 8 state rows
#   %rbp         -> K256 round-constant table (biased by +128)
#   %r8..%r11    = per-lane input data pointers
# ----------------------------------------------------------------------
addq $256,%rbp                  # advance constant pointer past the 16 rounds just done (16 x 16B broadcast constants)
decl %ecx                       # one fewer 16-round chunk left
jnz .Loop_16_xx_avx             # rounds 16..63 take 3 passes of the schedule loop
movl $1,%ecx
leaq K256+128(%rip),%rbp        # reset constant pointer; also reused as a safe dummy data block below
cmpl 0(%rbx),%ecx               # lane 0 exhausted (counter <= 0)?
cmovgeq %rbp,%r8                # ...then park its data pointer on K256 so future loads stay valid
cmpl 4(%rbx),%ecx               # same per-lane test for lanes 1..3
cmovgeq %rbp,%r9
cmpl 8(%rbx),%ecx
cmovgeq %rbp,%r10
cmpl 12(%rbx),%ecx
cmovgeq %rbp,%r11
vmovdqa (%rbx),%xmm7            # xmm7 = the 4 per-lane block counters
vpxor %xmm0,%xmm0,%xmm0
vmovdqa %xmm7,%xmm6
vpcmpgtd %xmm0,%xmm6,%xmm6      # xmm6 = all-ones in lanes whose counter > 0 (still active)
vpaddd %xmm6,%xmm7,%xmm7        # decrement active counters (active-lane mask is -1)
vmovdqu 0-128(%rdi),%xmm0       # interleaved below: load saved digest rows a..h ...
vpand %xmm6,%xmm8,%xmm8         # ... and zero the working state in finished lanes, so the
vmovdqu 32-128(%rdi),%xmm1      # adds that follow leave a finished lane's digest unchanged
vpand %xmm6,%xmm9,%xmm9
vmovdqu 64-128(%rdi),%xmm2
vpand %xmm6,%xmm10,%xmm10
vmovdqu 96-128(%rdi),%xmm5
vpand %xmm6,%xmm11,%xmm11
vpaddd %xmm0,%xmm8,%xmm8        # state += saved digest (standard SHA-256 feed-forward)
vmovdqu 128-128(%rdi),%xmm0
vpand %xmm6,%xmm12,%xmm12
vpaddd %xmm1,%xmm9,%xmm9
vmovdqu 160-128(%rdi),%xmm1
vpand %xmm6,%xmm13,%xmm13
vpaddd %xmm2,%xmm10,%xmm10
vmovdqu 192-128(%rdi),%xmm2
vpand %xmm6,%xmm14,%xmm14
vpaddd %xmm5,%xmm11,%xmm11
vmovdqu 224-128(%rdi),%xmm5
vpand %xmm6,%xmm15,%xmm15
vpaddd %xmm0,%xmm12,%xmm12
vpaddd %xmm1,%xmm13,%xmm13
vmovdqu %xmm8,0-128(%rdi)       # write the updated digest rows back to the context
vpaddd %xmm2,%xmm14,%xmm14
vmovdqu %xmm9,32-128(%rdi)
vpaddd %xmm5,%xmm15,%xmm15
vmovdqu %xmm10,64-128(%rdi)
vmovdqu %xmm11,96-128(%rdi)
vmovdqu %xmm12,128-128(%rdi)
vmovdqu %xmm13,160-128(%rdi)
vmovdqu %xmm14,192-128(%rdi)
vmovdqu %xmm15,224-128(%rdi)
vmovdqu %xmm7,(%rbx)            # store the decremented lane counters
vmovdqu .Lpbswap(%rip),%xmm6    # reload byte-swap mask (xmm6 was clobbered above)
decl %edx
jnz .Loop_avx                   # at least one lane still has 64-byte blocks pending
movl 280(%rsp),%edx             # reload outer counter saved at loop entry (avx frame slot; cf. 552(%rsp) in the avx2 variant)
leaq 16(%rdi),%rdi              # advance to the next group of 4 digest columns
leaq 64(%rsi),%rsi              # advance to the next 4 input descriptors (16 bytes each)
decl %edx
jnz .Loop_grande_avx
.Ldone_avx:
movq 272(%rsp),%rax             # %rax = original %rsp saved in the prologue
vzeroupper                      # avoid AVX->SSE transition penalties in the caller
movq -16(%rax),%rbp             # restore callee-saved registers pushed below the saved rsp
movq -8(%rax),%rbx
leaq (%rax),%rsp                # unwind the aligned frame in one step
.Lepilogue_avx:
.byte 0xf3,0xc3                 # rep ret
.size xsha256_multi_block_avx,.-xsha256_multi_block_avx
# ----------------------------------------------------------------------
# xsha256_multi_block_avx2 - 8-lane multi-buffer SHA-256 using 256-bit
# AVX2 vectors (one 32-bit lane per input stream).
#
# Arguments (SysV AMD64, inferred from usage below - confirm at caller):
#   %rdi = digest context: 8 transposed state rows, 32 bytes each
#   %rsi = array of per-stream descriptors, 16 bytes each:
#          { data pointer (8B), block count (4B), pad }
#   %edx = number of 8-lane descriptor groups to process
#
# Frame layout after alignment (rsp is 256-byte aligned):
#   0..511    two 256-byte halves of the 16-entry message schedule
#             (%rax -> first half at +128 bias, %rbx -> second half)
#   512..543  8 x 4-byte per-lane remaining-block counters
#   544       caller's %rsp (for the epilogue)
#   552       saved outer-loop counter (%edx)
# ----------------------------------------------------------------------
.type xsha256_multi_block_avx2,@function
.align 32
xsha256_multi_block_avx2:
_avx2_shortcut:
movq %rsp,%rax                  # keep the unaligned entry rsp for the epilogue
pushq %rbx                      # save all callee-saved GPRs this function uses
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
subq $576,%rsp
andq $-256,%rsp                 # 256-byte-align the schedule area
movq %rax,544(%rsp)
.Lbody_avx2:
leaq K256+128(%rip),%rbp        # round-constant table, biased so -128..+96 offsets cover 16 rounds
leaq 128(%rdi),%rdi             # bias context pointer: state rows addressed at -128..224-128
.Loop_grande_avx2:
movl %edx,552(%rsp)             # save outer counter across the block loop
xorl %edx,%edx                  # edx will become max(block count) over the 8 lanes
leaq 512(%rsp),%rbx             # rbx -> per-lane counter array (reassigned later)
# Per lane i = 0..7: load data pointer and block count from descriptor
# i, record the count, track the running maximum in %edx, and point any
# empty lane (count <= 0) at K256 so its loads read harmless data.
movq 0(%rsi),%r12
movl 8(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,0(%rbx)
cmovleq %rbp,%r12
movq 16(%rsi),%r13
movl 24(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,4(%rbx)
cmovleq %rbp,%r13
movq 32(%rsi),%r14
movl 40(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,8(%rbx)
cmovleq %rbp,%r14
movq 48(%rsi),%r15
movl 56(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,12(%rbx)
cmovleq %rbp,%r15
movq 64(%rsi),%r8
movl 72(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,16(%rbx)
cmovleq %rbp,%r8
movq 80(%rsi),%r9
movl 88(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,20(%rbx)
cmovleq %rbp,%r9
movq 96(%rsi),%r10
movl 104(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,24(%rbx)
cmovleq %rbp,%r10
movq 112(%rsi),%r11
movl 120(%rsi),%ecx
cmpl %edx,%ecx
cmovgl %ecx,%edx
testl %ecx,%ecx
movl %ecx,28(%rbx)
cmovleq %rbp,%r11
vmovdqu 0-128(%rdi),%ymm8       # load the 8 transposed digest rows a..h into ymm8..ymm15
leaq 128(%rsp),%rax             # rax -> first half of the message-schedule area (+128 bias)
vmovdqu 32-128(%rdi),%ymm9
leaq 256+128(%rsp),%rbx         # rbx -> second half of the schedule area
vmovdqu 64-128(%rdi),%ymm10
vmovdqu 96-128(%rdi),%ymm11
vmovdqu 128-128(%rdi),%ymm12
vmovdqu 160-128(%rdi),%ymm13
vmovdqu 192-128(%rdi),%ymm14
vmovdqu 224-128(%rdi),%ymm15
vmovdqu .Lpbswap(%rip),%ymm6    # big-endian byte-swap mask for message loads
jmp .Loop_avx2
.align 32
.Loop_avx2:
vpxor %ymm9,%ymm10,%ymm4
vmovd 0(%r12),%xmm5
vmovd 0(%r8),%xmm0
vmovd 0(%r13),%xmm1
vmovd 0(%r9),%xmm2
vpinsrd $1,0(%r14),%xmm5,%xmm5
vpinsrd $1,0(%r10),%xmm0,%xmm0
vpinsrd $1,0(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,0(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,0-128(%rax)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovd 4(%r12),%xmm5
vmovd 4(%r8),%xmm0
vmovd 4(%r13),%xmm1
vmovd 4(%r9),%xmm2
vpinsrd $1,4(%r14),%xmm5,%xmm5
vpinsrd $1,4(%r10),%xmm0,%xmm0
vpinsrd $1,4(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,4(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm5,32-128(%rax)
vpaddd %ymm14,%ymm5,%ymm5
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm5,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovd 8(%r12),%xmm5
vmovd 8(%r8),%xmm0
vmovd 8(%r13),%xmm1
vmovd 8(%r9),%xmm2
vpinsrd $1,8(%r14),%xmm5,%xmm5
vpinsrd $1,8(%r10),%xmm0,%xmm0
vpinsrd $1,8(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,8(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,64-128(%rax)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovd 12(%r12),%xmm5
vmovd 12(%r8),%xmm0
vmovd 12(%r13),%xmm1
vmovd 12(%r9),%xmm2
vpinsrd $1,12(%r14),%xmm5,%xmm5
vpinsrd $1,12(%r10),%xmm0,%xmm0
vpinsrd $1,12(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,12(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm5,96-128(%rax)
vpaddd %ymm12,%ymm5,%ymm5
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm5,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovd 16(%r12),%xmm5
vmovd 16(%r8),%xmm0
vmovd 16(%r13),%xmm1
vmovd 16(%r9),%xmm2
vpinsrd $1,16(%r14),%xmm5,%xmm5
vpinsrd $1,16(%r10),%xmm0,%xmm0
vpinsrd $1,16(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,16(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,128-128(%rax)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovd 20(%r12),%xmm5
vmovd 20(%r8),%xmm0
vmovd 20(%r13),%xmm1
vmovd 20(%r9),%xmm2
vpinsrd $1,20(%r14),%xmm5,%xmm5
vpinsrd $1,20(%r10),%xmm0,%xmm0
vpinsrd $1,20(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,20(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm5,160-128(%rax)
vpaddd %ymm10,%ymm5,%ymm5
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm5,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovd 24(%r12),%xmm5
vmovd 24(%r8),%xmm0
vmovd 24(%r13),%xmm1
vmovd 24(%r9),%xmm2
vpinsrd $1,24(%r14),%xmm5,%xmm5
vpinsrd $1,24(%r10),%xmm0,%xmm0
vpinsrd $1,24(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,24(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,192-128(%rax)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovd 28(%r12),%xmm5
vmovd 28(%r8),%xmm0
vmovd 28(%r13),%xmm1
vmovd 28(%r9),%xmm2
vpinsrd $1,28(%r14),%xmm5,%xmm5
vpinsrd $1,28(%r10),%xmm0,%xmm0
vpinsrd $1,28(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,28(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm5,224-128(%rax)
vpaddd %ymm8,%ymm5,%ymm5
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm9,%ymm1
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm5,%ymm12,%ymm12
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
vmovd 32(%r12),%xmm5
vmovd 32(%r8),%xmm0
vmovd 32(%r13),%xmm1
vmovd 32(%r9),%xmm2
vpinsrd $1,32(%r14),%xmm5,%xmm5
vpinsrd $1,32(%r10),%xmm0,%xmm0
vpinsrd $1,32(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,32(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,256-256-128(%rbx)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovd 36(%r12),%xmm5
vmovd 36(%r8),%xmm0
vmovd 36(%r13),%xmm1
vmovd 36(%r9),%xmm2
vpinsrd $1,36(%r14),%xmm5,%xmm5
vpinsrd $1,36(%r10),%xmm0,%xmm0
vpinsrd $1,36(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,36(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm5,288-256-128(%rbx)
vpaddd %ymm14,%ymm5,%ymm5
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm5,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovd 40(%r12),%xmm5
vmovd 40(%r8),%xmm0
vmovd 40(%r13),%xmm1
vmovd 40(%r9),%xmm2
vpinsrd $1,40(%r14),%xmm5,%xmm5
vpinsrd $1,40(%r10),%xmm0,%xmm0
vpinsrd $1,40(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,40(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,320-256-128(%rbx)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovd 44(%r12),%xmm5
vmovd 44(%r8),%xmm0
vmovd 44(%r13),%xmm1
vmovd 44(%r9),%xmm2
vpinsrd $1,44(%r14),%xmm5,%xmm5
vpinsrd $1,44(%r10),%xmm0,%xmm0
vpinsrd $1,44(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,44(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm5,352-256-128(%rbx)
vpaddd %ymm12,%ymm5,%ymm5
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm5,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovd 48(%r12),%xmm5
vmovd 48(%r8),%xmm0
vmovd 48(%r13),%xmm1
vmovd 48(%r9),%xmm2
vpinsrd $1,48(%r14),%xmm5,%xmm5
vpinsrd $1,48(%r10),%xmm0,%xmm0
vpinsrd $1,48(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,48(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,384-256-128(%rbx)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovd 52(%r12),%xmm5
vmovd 52(%r8),%xmm0
vmovd 52(%r13),%xmm1
vmovd 52(%r9),%xmm2
vpinsrd $1,52(%r14),%xmm5,%xmm5
vpinsrd $1,52(%r10),%xmm0,%xmm0
vpinsrd $1,52(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,52(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm5,416-256-128(%rbx)
vpaddd %ymm10,%ymm5,%ymm5
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm5,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovd 56(%r12),%xmm5
vmovd 56(%r8),%xmm0
vmovd 56(%r13),%xmm1
vmovd 56(%r9),%xmm2
vpinsrd $1,56(%r14),%xmm5,%xmm5
vpinsrd $1,56(%r10),%xmm0,%xmm0
vpinsrd $1,56(%r15),%xmm1,%xmm1
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,56(%r11),%xmm2,%xmm2
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,448-256-128(%rbx)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovd 60(%r12),%xmm5
leaq 64(%r12),%r12
vmovd 60(%r8),%xmm0
leaq 64(%r8),%r8
vmovd 60(%r13),%xmm1
leaq 64(%r13),%r13
vmovd 60(%r9),%xmm2
leaq 64(%r9),%r9
vpinsrd $1,60(%r14),%xmm5,%xmm5
leaq 64(%r14),%r14
vpinsrd $1,60(%r10),%xmm0,%xmm0
leaq 64(%r10),%r10
vpinsrd $1,60(%r15),%xmm1,%xmm1
leaq 64(%r15),%r15
vpunpckldq %ymm1,%ymm5,%ymm5
vpinsrd $1,60(%r11),%xmm2,%xmm2
leaq 64(%r11),%r11
vpunpckldq %ymm2,%ymm0,%ymm0
vinserti128 $1,%xmm0,%ymm5,%ymm5
vpshufb %ymm6,%ymm5,%ymm5
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm5,480-256-128(%rbx)
vpaddd %ymm8,%ymm5,%ymm5
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
prefetcht0 63(%r12)
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
prefetcht0 63(%r13)
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
prefetcht0 63(%r14)
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
prefetcht0 63(%r15)
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm9,%ymm1
prefetcht0 63(%r8)
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm4,%ymm3,%ymm3
prefetcht0 63(%r9)
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
prefetcht0 63(%r10)
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm5,%ymm12,%ymm12
prefetcht0 63(%r11)
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
vmovdqu 0-128(%rax),%ymm5
movl $3,%ecx
jmp .Loop_16_xx_avx2
.align 32
.Loop_16_xx_avx2:
vmovdqu 32-128(%rax),%ymm6
vpaddd 288-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 448-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,0-128(%rax)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovdqu 64-128(%rax),%ymm5
vpaddd 320-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 480-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm6,32-128(%rax)
vpaddd %ymm14,%ymm6,%ymm6
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm6,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovdqu 96-128(%rax),%ymm6
vpaddd 352-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 0-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,64-128(%rax)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovdqu 128-128(%rax),%ymm5
vpaddd 384-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 32-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm6,96-128(%rax)
vpaddd %ymm12,%ymm6,%ymm6
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm6,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovdqu 160-128(%rax),%ymm6
vpaddd 416-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 64-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,128-128(%rax)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovdqu 192-128(%rax),%ymm5
vpaddd 448-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 96-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm6,160-128(%rax)
vpaddd %ymm10,%ymm6,%ymm6
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm6,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovdqu 224-128(%rax),%ymm6
vpaddd 480-256-128(%rbx),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 128-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,192-128(%rax)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovdqu 256-256-128(%rbx),%ymm5
vpaddd 0-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 160-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm6,224-128(%rax)
vpaddd %ymm8,%ymm6,%ymm6
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm9,%ymm1
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
vmovdqu 288-256-128(%rbx),%ymm6
vpaddd 32-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 192-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm12,%ymm7
vpslld $26,%ymm12,%ymm2
vmovdqu %ymm5,256-256-128(%rbx)
vpaddd %ymm15,%ymm5,%ymm5
vpsrld $11,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm12,%ymm2
vpaddd -128(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm12,%ymm2
vpandn %ymm14,%ymm12,%ymm0
vpand %ymm13,%ymm12,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm8,%ymm15
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm8,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm8,%ymm9,%ymm3
vpxor %ymm1,%ymm15,%ymm15
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm8,%ymm1
vpslld $19,%ymm8,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm15,%ymm7
vpsrld $22,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm8,%ymm2
vpxor %ymm4,%ymm9,%ymm15
vpaddd %ymm5,%ymm11,%ymm11
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm15,%ymm15
vpaddd %ymm7,%ymm15,%ymm15
vmovdqu 320-256-128(%rbx),%ymm5
vpaddd 64-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 224-128(%rax),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm11,%ymm7
vpslld $26,%ymm11,%ymm2
vmovdqu %ymm6,288-256-128(%rbx)
vpaddd %ymm14,%ymm6,%ymm6
vpsrld $11,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm11,%ymm2
vpaddd -96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm11,%ymm2
vpandn %ymm13,%ymm11,%ymm0
vpand %ymm12,%ymm11,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm15,%ymm14
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm15,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm15,%ymm8,%ymm4
vpxor %ymm1,%ymm14,%ymm14
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm15,%ymm1
vpslld $19,%ymm15,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm14,%ymm7
vpsrld $22,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm15,%ymm2
vpxor %ymm3,%ymm8,%ymm14
vpaddd %ymm6,%ymm10,%ymm10
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm14,%ymm14
vpaddd %ymm7,%ymm14,%ymm14
vmovdqu 352-256-128(%rbx),%ymm6
vpaddd 96-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 256-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm10,%ymm7
vpslld $26,%ymm10,%ymm2
vmovdqu %ymm5,320-256-128(%rbx)
vpaddd %ymm13,%ymm5,%ymm5
vpsrld $11,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm10,%ymm2
vpaddd -64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm10,%ymm2
vpandn %ymm12,%ymm10,%ymm0
vpand %ymm11,%ymm10,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm14,%ymm13
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm14,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm14,%ymm15,%ymm3
vpxor %ymm1,%ymm13,%ymm13
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm14,%ymm1
vpslld $19,%ymm14,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm13,%ymm7
vpsrld $22,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm14,%ymm2
vpxor %ymm4,%ymm15,%ymm13
vpaddd %ymm5,%ymm9,%ymm9
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm13,%ymm13
vpaddd %ymm7,%ymm13,%ymm13
vmovdqu 384-256-128(%rbx),%ymm5
vpaddd 128-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 288-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm9,%ymm7
vpslld $26,%ymm9,%ymm2
vmovdqu %ymm6,352-256-128(%rbx)
vpaddd %ymm12,%ymm6,%ymm6
vpsrld $11,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm9,%ymm2
vpaddd -32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm9,%ymm2
vpandn %ymm11,%ymm9,%ymm0
vpand %ymm10,%ymm9,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm13,%ymm12
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm13,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm13,%ymm14,%ymm4
vpxor %ymm1,%ymm12,%ymm12
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm13,%ymm1
vpslld $19,%ymm13,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm12,%ymm7
vpsrld $22,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm13,%ymm2
vpxor %ymm3,%ymm14,%ymm12
vpaddd %ymm6,%ymm8,%ymm8
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm12,%ymm12
vpaddd %ymm7,%ymm12,%ymm12
vmovdqu 416-256-128(%rbx),%ymm6
vpaddd 160-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 320-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm8,%ymm7
vpslld $26,%ymm8,%ymm2
vmovdqu %ymm5,384-256-128(%rbx)
vpaddd %ymm11,%ymm5,%ymm5
vpsrld $11,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm8,%ymm2
vpaddd 0(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm8,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm8,%ymm2
vpandn %ymm10,%ymm8,%ymm0
vpand %ymm9,%ymm8,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm12,%ymm11
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm12,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm12,%ymm13,%ymm3
vpxor %ymm1,%ymm11,%ymm11
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm12,%ymm1
vpslld $19,%ymm12,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm11,%ymm7
vpsrld $22,%ymm12,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm12,%ymm2
vpxor %ymm4,%ymm13,%ymm11
vpaddd %ymm5,%ymm15,%ymm15
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm11,%ymm11
vpaddd %ymm7,%ymm11,%ymm11
vmovdqu 448-256-128(%rbx),%ymm5
vpaddd 192-128(%rax),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 352-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm15,%ymm7
vpslld $26,%ymm15,%ymm2
vmovdqu %ymm6,416-256-128(%rbx)
vpaddd %ymm10,%ymm6,%ymm6
vpsrld $11,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm15,%ymm2
vpaddd 32(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm15,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm15,%ymm2
vpandn %ymm9,%ymm15,%ymm0
vpand %ymm8,%ymm15,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm11,%ymm10
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm11,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm11,%ymm12,%ymm4
vpxor %ymm1,%ymm10,%ymm10
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm11,%ymm1
vpslld $19,%ymm11,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm10,%ymm7
vpsrld $22,%ymm11,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm11,%ymm2
vpxor %ymm3,%ymm12,%ymm10
vpaddd %ymm6,%ymm14,%ymm14
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm10,%ymm10
vpaddd %ymm7,%ymm10,%ymm10
vmovdqu 480-256-128(%rbx),%ymm6
vpaddd 224-128(%rax),%ymm5,%ymm5
vpsrld $3,%ymm6,%ymm7
vpsrld $7,%ymm6,%ymm1
vpslld $25,%ymm6,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm6,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm6,%ymm2
vmovdqu 384-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm5,%ymm5
vpxor %ymm1,%ymm3,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $6,%ymm14,%ymm7
vpslld $26,%ymm14,%ymm2
vmovdqu %ymm5,448-256-128(%rbx)
vpaddd %ymm9,%ymm5,%ymm5
vpsrld $11,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm14,%ymm2
vpaddd 64(%rbp),%ymm5,%ymm5
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm14,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm14,%ymm2
vpandn %ymm8,%ymm14,%ymm0
vpand %ymm15,%ymm14,%ymm3
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm10,%ymm9
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm10,%ymm1
vpxor %ymm3,%ymm0,%ymm0
vpxor %ymm10,%ymm11,%ymm3
vpxor %ymm1,%ymm9,%ymm9
vpaddd %ymm7,%ymm5,%ymm5
vpsrld $13,%ymm10,%ymm1
vpslld $19,%ymm10,%ymm2
vpaddd %ymm0,%ymm5,%ymm5
vpand %ymm3,%ymm4,%ymm4
vpxor %ymm1,%ymm9,%ymm7
vpsrld $22,%ymm10,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm10,%ymm2
vpxor %ymm4,%ymm11,%ymm9
vpaddd %ymm5,%ymm13,%ymm13
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm5,%ymm9,%ymm9
vpaddd %ymm7,%ymm9,%ymm9
vmovdqu 0-128(%rax),%ymm5
vpaddd 256-256-128(%rbx),%ymm6,%ymm6
vpsrld $3,%ymm5,%ymm7
vpsrld $7,%ymm5,%ymm1
vpslld $25,%ymm5,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpsrld $18,%ymm5,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $14,%ymm5,%ymm2
vmovdqu 416-256-128(%rbx),%ymm0
vpsrld $10,%ymm0,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $17,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $15,%ymm0,%ymm2
vpaddd %ymm7,%ymm6,%ymm6
vpxor %ymm1,%ymm4,%ymm7
vpsrld $19,%ymm0,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $13,%ymm0,%ymm2
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $6,%ymm13,%ymm7
vpslld $26,%ymm13,%ymm2
vmovdqu %ymm6,480-256-128(%rbx)
vpaddd %ymm8,%ymm6,%ymm6
vpsrld $11,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $21,%ymm13,%ymm2
vpaddd 96(%rbp),%ymm6,%ymm6
vpxor %ymm1,%ymm7,%ymm7
vpsrld $25,%ymm13,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $7,%ymm13,%ymm2
vpandn %ymm15,%ymm13,%ymm0
vpand %ymm14,%ymm13,%ymm4
vpxor %ymm1,%ymm7,%ymm7
vpsrld $2,%ymm9,%ymm8
vpxor %ymm2,%ymm7,%ymm7
vpslld $30,%ymm9,%ymm1
vpxor %ymm4,%ymm0,%ymm0
vpxor %ymm9,%ymm10,%ymm4
vpxor %ymm1,%ymm8,%ymm8
vpaddd %ymm7,%ymm6,%ymm6
vpsrld $13,%ymm9,%ymm1
vpslld $19,%ymm9,%ymm2
vpaddd %ymm0,%ymm6,%ymm6
vpand %ymm4,%ymm3,%ymm3
vpxor %ymm1,%ymm8,%ymm7
vpsrld $22,%ymm9,%ymm1
vpxor %ymm2,%ymm7,%ymm7
vpslld $10,%ymm9,%ymm2
vpxor %ymm3,%ymm10,%ymm8
vpaddd %ymm6,%ymm12,%ymm12
vpxor %ymm1,%ymm7,%ymm7
vpxor %ymm2,%ymm7,%ymm7
vpaddd %ymm6,%ymm8,%ymm8
vpaddd %ymm7,%ymm8,%ymm8
addq $256,%rbp
decl %ecx
jnz .Loop_16_xx_avx2
movl $1,%ecx
leaq 512(%rsp),%rbx
leaq K256+128(%rip),%rbp
cmpl 0(%rbx),%ecx
cmovgeq %rbp,%r12
cmpl 4(%rbx),%ecx
cmovgeq %rbp,%r13
cmpl 8(%rbx),%ecx
cmovgeq %rbp,%r14
cmpl 12(%rbx),%ecx
cmovgeq %rbp,%r15
cmpl 16(%rbx),%ecx
cmovgeq %rbp,%r8
cmpl 20(%rbx),%ecx
cmovgeq %rbp,%r9
cmpl 24(%rbx),%ecx
cmovgeq %rbp,%r10
cmpl 28(%rbx),%ecx
cmovgeq %rbp,%r11
vmovdqa (%rbx),%ymm7
vpxor %ymm0,%ymm0,%ymm0
vmovdqa %ymm7,%ymm6
vpcmpgtd %ymm0,%ymm6,%ymm6
vpaddd %ymm6,%ymm7,%ymm7
vmovdqu 0-128(%rdi),%ymm0
vpand %ymm6,%ymm8,%ymm8
vmovdqu 32-128(%rdi),%ymm1
vpand %ymm6,%ymm9,%ymm9
vmovdqu 64-128(%rdi),%ymm2
vpand %ymm6,%ymm10,%ymm10
vmovdqu 96-128(%rdi),%ymm5
vpand %ymm6,%ymm11,%ymm11
vpaddd %ymm0,%ymm8,%ymm8
vmovdqu 128-128(%rdi),%ymm0
vpand %ymm6,%ymm12,%ymm12
vpaddd %ymm1,%ymm9,%ymm9
vmovdqu 160-128(%rdi),%ymm1
vpand %ymm6,%ymm13,%ymm13
vpaddd %ymm2,%ymm10,%ymm10
vmovdqu 192-128(%rdi),%ymm2
vpand %ymm6,%ymm14,%ymm14
vpaddd %ymm5,%ymm11,%ymm11
vmovdqu 224-128(%rdi),%ymm5
vpand %ymm6,%ymm15,%ymm15
vpaddd %ymm0,%ymm12,%ymm12
vpaddd %ymm1,%ymm13,%ymm13
vmovdqu %ymm8,0-128(%rdi)
vpaddd %ymm2,%ymm14,%ymm14
vmovdqu %ymm9,32-128(%rdi)
vpaddd %ymm5,%ymm15,%ymm15
vmovdqu %ymm10,64-128(%rdi)
vmovdqu %ymm11,96-128(%rdi)
vmovdqu %ymm12,128-128(%rdi)
vmovdqu %ymm13,160-128(%rdi)
vmovdqu %ymm14,192-128(%rdi)
vmovdqu %ymm15,224-128(%rdi)
vmovdqu %ymm7,(%rbx)
leaq 256+128(%rsp),%rbx
vmovdqu .Lpbswap(%rip),%ymm6
decl %edx
jnz .Loop_avx2
.Ldone_avx2:
movq 544(%rsp),%rax
vzeroupper
movq -48(%rax),%r15
movq -40(%rax),%r14
movq -32(%rax),%r13
movq -24(%rax),%r12
movq -16(%rax),%rbp
movq -8(%rax),%rbx
leaq (%rax),%rsp
.Lepilogue_avx2:
.byte 0xf3,0xc3
.size xsha256_multi_block_avx2,.-xsha256_multi_block_avx2
.align 256
# SHA-256 round constants K[0..63] (FIPS 180-4), in round order.
# Each constant occupies two consecutive .long lines of four copies
# each, i.e. one 256-bit vector with the constant broadcast to all
# eight 32-bit lanes, matching the 8-way interleaved AVX2 kernel
# above, which consumes one 32-byte broadcast constant per round
# (loaded via %rbp with a -128 bias).
K256:
.long 1116352408,1116352408,1116352408,1116352408
.long 1116352408,1116352408,1116352408,1116352408
.long 1899447441,1899447441,1899447441,1899447441
.long 1899447441,1899447441,1899447441,1899447441
.long 3049323471,3049323471,3049323471,3049323471
.long 3049323471,3049323471,3049323471,3049323471
.long 3921009573,3921009573,3921009573,3921009573
.long 3921009573,3921009573,3921009573,3921009573
.long 961987163,961987163,961987163,961987163
.long 961987163,961987163,961987163,961987163
.long 1508970993,1508970993,1508970993,1508970993
.long 1508970993,1508970993,1508970993,1508970993
.long 2453635748,2453635748,2453635748,2453635748
.long 2453635748,2453635748,2453635748,2453635748
.long 2870763221,2870763221,2870763221,2870763221
.long 2870763221,2870763221,2870763221,2870763221
.long 3624381080,3624381080,3624381080,3624381080
.long 3624381080,3624381080,3624381080,3624381080
.long 310598401,310598401,310598401,310598401
.long 310598401,310598401,310598401,310598401
.long 607225278,607225278,607225278,607225278
.long 607225278,607225278,607225278,607225278
.long 1426881987,1426881987,1426881987,1426881987
.long 1426881987,1426881987,1426881987,1426881987
.long 1925078388,1925078388,1925078388,1925078388
.long 1925078388,1925078388,1925078388,1925078388
.long 2162078206,2162078206,2162078206,2162078206
.long 2162078206,2162078206,2162078206,2162078206
.long 2614888103,2614888103,2614888103,2614888103
.long 2614888103,2614888103,2614888103,2614888103
.long 3248222580,3248222580,3248222580,3248222580
.long 3248222580,3248222580,3248222580,3248222580
.long 3835390401,3835390401,3835390401,3835390401
.long 3835390401,3835390401,3835390401,3835390401
.long 4022224774,4022224774,4022224774,4022224774
.long 4022224774,4022224774,4022224774,4022224774
.long 264347078,264347078,264347078,264347078
.long 264347078,264347078,264347078,264347078
.long 604807628,604807628,604807628,604807628
.long 604807628,604807628,604807628,604807628
.long 770255983,770255983,770255983,770255983
.long 770255983,770255983,770255983,770255983
.long 1249150122,1249150122,1249150122,1249150122
.long 1249150122,1249150122,1249150122,1249150122
.long 1555081692,1555081692,1555081692,1555081692
.long 1555081692,1555081692,1555081692,1555081692
.long 1996064986,1996064986,1996064986,1996064986
.long 1996064986,1996064986,1996064986,1996064986
.long 2554220882,2554220882,2554220882,2554220882
.long 2554220882,2554220882,2554220882,2554220882
.long 2821834349,2821834349,2821834349,2821834349
.long 2821834349,2821834349,2821834349,2821834349
.long 2952996808,2952996808,2952996808,2952996808
.long 2952996808,2952996808,2952996808,2952996808
.long 3210313671,3210313671,3210313671,3210313671
.long 3210313671,3210313671,3210313671,3210313671
.long 3336571891,3336571891,3336571891,3336571891
.long 3336571891,3336571891,3336571891,3336571891
.long 3584528711,3584528711,3584528711,3584528711
.long 3584528711,3584528711,3584528711,3584528711
.long 113926993,113926993,113926993,113926993
.long 113926993,113926993,113926993,113926993
.long 338241895,338241895,338241895,338241895
.long 338241895,338241895,338241895,338241895
.long 666307205,666307205,666307205,666307205
.long 666307205,666307205,666307205,666307205
.long 773529912,773529912,773529912,773529912
.long 773529912,773529912,773529912,773529912
.long 1294757372,1294757372,1294757372,1294757372
.long 1294757372,1294757372,1294757372,1294757372
.long 1396182291,1396182291,1396182291,1396182291
.long 1396182291,1396182291,1396182291,1396182291
.long 1695183700,1695183700,1695183700,1695183700
.long 1695183700,1695183700,1695183700,1695183700
.long 1986661051,1986661051,1986661051,1986661051
.long 1986661051,1986661051,1986661051,1986661051
.long 2177026350,2177026350,2177026350,2177026350
.long 2177026350,2177026350,2177026350,2177026350
.long 2456956037,2456956037,2456956037,2456956037
.long 2456956037,2456956037,2456956037,2456956037
.long 2730485921,2730485921,2730485921,2730485921
.long 2730485921,2730485921,2730485921,2730485921
.long 2820302411,2820302411,2820302411,2820302411
.long 2820302411,2820302411,2820302411,2820302411
.long 3259730800,3259730800,3259730800,3259730800
.long 3259730800,3259730800,3259730800,3259730800
.long 3345764771,3345764771,3345764771,3345764771
.long 3345764771,3345764771,3345764771,3345764771
.long 3516065817,3516065817,3516065817,3516065817
.long 3516065817,3516065817,3516065817,3516065817
.long 3600352804,3600352804,3600352804,3600352804
.long 3600352804,3600352804,3600352804,3600352804
.long 4094571909,4094571909,4094571909,4094571909
.long 4094571909,4094571909,4094571909,4094571909
.long 275423344,275423344,275423344,275423344
.long 275423344,275423344,275423344,275423344
.long 430227734,430227734,430227734,430227734
.long 430227734,430227734,430227734,430227734
.long 506948616,506948616,506948616,506948616
.long 506948616,506948616,506948616,506948616
.long 659060556,659060556,659060556,659060556
.long 659060556,659060556,659060556,659060556
.long 883997877,883997877,883997877,883997877
.long 883997877,883997877,883997877,883997877
.long 958139571,958139571,958139571,958139571
.long 958139571,958139571,958139571,958139571
.long 1322822218,1322822218,1322822218,1322822218
.long 1322822218,1322822218,1322822218,1322822218
.long 1537002063,1537002063,1537002063,1537002063
.long 1537002063,1537002063,1537002063,1537002063
.long 1747873779,1747873779,1747873779,1747873779
.long 1747873779,1747873779,1747873779,1747873779
.long 1955562222,1955562222,1955562222,1955562222
.long 1955562222,1955562222,1955562222,1955562222
.long 2024104815,2024104815,2024104815,2024104815
.long 2024104815,2024104815,2024104815,2024104815
.long 2227730452,2227730452,2227730452,2227730452
.long 2227730452,2227730452,2227730452,2227730452
.long 2361852424,2361852424,2361852424,2361852424
.long 2361852424,2361852424,2361852424,2361852424
.long 2428436474,2428436474,2428436474,2428436474
.long 2428436474,2428436474,2428436474,2428436474
.long 2756734187,2756734187,2756734187,2756734187
.long 2756734187,2756734187,2756734187,2756734187
.long 3204031479,3204031479,3204031479,3204031479
.long 3204031479,3204031479,3204031479,3204031479
.long 3329325298,3329325298,3329325298,3329325298
.long 3329325298,3329325298,3329325298,3329325298
.Lpbswap:
# Byte-shuffle mask (used with vpshufb via the vmovdqu .Lpbswap(%rip)
# load above): value 0x00010203... reverses byte order within each
# 32-bit word, converting big-endian SHA-256 message words to host
# order. The pattern is repeated so both 128-bit lanes of a 256-bit
# register get the same per-lane shuffle.
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
# Same SHA-256 round constants as K256 but stored once each (not
# broadcast), grouped four per 128-bit row, for the SHA-extension
# (shaext) code path elsewhere in this file.
K256_shaext:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
# NUL-terminated ASCII identification string:
# "SHA256 multi-block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,50,53,54,32,109,117,108,116,105,45,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
|
xddcore/OpenNNA2.0 | 29,390 | platform/OpenNNA_STM32H7A3/Test Speed(CMSIS-DSP)/OpenNNA_STM32H7A3_Demo_Example/Core/Startup/startup_stm32h7a3zitxq.s | /**
******************************************************************************
* @file startup_stm32h7a3xxq.s
* @author MCD Application Team
* @brief STM32H7A3xxQ Devices vector table for GCC based toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified                 /* unified (UAL) assembler syntax */
.cpu cortex-m7
.fpu softvfp                    /* software FP ABI for startup code */
.thumb                          /* Cortex-M executes Thumb only */
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/* NOTE(review): the .word directives above emit the linker symbols at the
   current location (stock ST template behavior) — they are reference data,
   not executed code. */
/**
 * @brief  Entry point executed immediately after a reset event.
 *         Sets up the stack, runs SystemInit, initializes .data and .bss,
 *         runs static constructors, then transfers control to main().
 * @param  None
 * @retval None
 */
  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
Reset_Handler:
  ldr   sp, =_estack        /* main stack pointer <- top of RAM */

  /* Low-level clock/power configuration must run before any C code. */
  bl    SystemInit

  /* Copy .data initializers from flash (_sidata) to RAM [_sdata.._edata). */
  ldr   r0, =_sdata         /* r0 = destination base (RAM)   */
  ldr   r1, =_edata         /* r1 = destination end          */
  ldr   r2, =_sidata        /* r2 = source base (flash)      */
  movs  r3, #0              /* r3 = running byte offset      */
CopyLoop:
  adds  r4, r0, r3          /* r4 = next destination address */
  cmp   r4, r1
  bcs   CopyDone            /* stop once dst+offset reaches _edata */
  ldr   r4, [r2, r3]
  str   r4, [r0, r3]
  adds  r3, r3, #4
  b     CopyLoop
CopyDone:

  /* Zero-fill the .bss region [_sbss.._ebss). */
  ldr   r2, =_sbss          /* r2 = write pointer */
  ldr   r4, =_ebss          /* r4 = end address   */
  movs  r3, #0              /* r3 = zero value    */
ZeroLoop:
  cmp   r2, r4
  bcs   ZeroDone            /* stop once pointer reaches _ebss */
  str   r3, [r2]
  adds  r2, r2, #4
  b     ZeroLoop
ZeroDone:

  bl    __libc_init_array   /* run C/C++ static constructors */
  bl    main                /* hand control to the application */
  bx    lr                  /* main() is not expected to return */
.size Reset_Handler, .-Reset_Handler
/**
 * @brief  Fallback handler for any exception or interrupt that has no
 *         dedicated ISR. Spins in place so the halted system state stays
 *         available for inspection by a debugger.
 * @param  None
 * @retval None
 */
  .section .text.Default_Handler,"ax",%progbits
Default_Handler:
  b Default_Handler                     /* loop forever */
  .size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_PVM_IRQHandler /* PVD/PVM through EXTI Line detection */
.word RTC_TAMP_STAMP_CSS_LSE_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word FDCAN1_IT0_IRQHandler /* FDCAN1 interrupt line 0 */
.word FDCAN2_IT0_IRQHandler /* FDCAN2 interrupt line 0 */
.word FDCAN1_IT1_IRQHandler /* FDCAN1 interrupt line 1 */
.word FDCAN2_IT1_IRQHandler /* FDCAN2 interrupt line 1 */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_IRQHandler /* TIM1 Break interrupt */
.word TIM1_UP_IRQHandler /* TIM1 Update interrupt */
.word TIM1_TRG_COM_IRQHandler /* TIM1 Trigger and Commutation interrupt */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word DFSDM2_IRQHandler /* DFSDM2 Interrupt */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FMC_IRQHandler /* FMC */
.word SDMMC1_IRQHandler /* SDMMC1 */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FDCAN_CAL_IRQHandler /* FDCAN calibration unit interrupt*/
.word DFSDM1_FLT4_IRQHandler /* DFSDM Filter4 Interrupt */
.word DFSDM1_FLT5_IRQHandler /* DFSDM Filter5 Interrupt */
.word DFSDM1_FLT6_IRQHandler /* DFSDM Filter6 Interrupt */
.word DFSDM1_FLT7_IRQHandler /* DFSDM Filter7 Interrupt */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_PSSI_IRQHandler /* DCMI, PSSI */
.word 0 /* Reserved */
.word RNG_IRQHandler /* RNG */
.word FPU_IRQHandler /* FPU */
.word UART7_IRQHandler /* UART7 */
.word UART8_IRQHandler /* UART8 */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
.word SPI6_IRQHandler /* SPI6 */
.word SAI1_IRQHandler /* SAI1 */
.word LTDC_IRQHandler /* LTDC */
.word LTDC_ER_IRQHandler /* LTDC error */
.word DMA2D_IRQHandler /* DMA2D */
.word SAI2_IRQHandler /* SAI2 */
.word OCTOSPI1_IRQHandler /* OCTOSPI1 */
.word LPTIM1_IRQHandler /* LPTIM1 */
.word CEC_IRQHandler /* HDMI_CEC */
.word I2C4_EV_IRQHandler /* I2C4 Event */
.word I2C4_ER_IRQHandler /* I2C4 Error */
.word SPDIF_RX_IRQHandler /* SPDIF_RX */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMAMUX1_OVR_IRQHandler /* DMAMUX1 Overrun interrupt */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DFSDM1_FLT0_IRQHandler /* DFSDM Filter0 Interrupt */
.word DFSDM1_FLT1_IRQHandler /* DFSDM Filter1 Interrupt */
.word DFSDM1_FLT2_IRQHandler /* DFSDM Filter2 Interrupt */
.word DFSDM1_FLT3_IRQHandler /* DFSDM Filter3 Interrupt */
.word 0 /* Reserved */
.word SWPMI1_IRQHandler /* Serial Wire Interface 1 global interrupt */
.word TIM15_IRQHandler /* TIM15 global Interrupt */
.word TIM16_IRQHandler /* TIM16 global Interrupt */
.word TIM17_IRQHandler /* TIM17 global Interrupt */
.word MDIOS_WKUP_IRQHandler /* MDIOS Wakeup Interrupt */
.word MDIOS_IRQHandler /* MDIOS global Interrupt */
.word JPEG_IRQHandler /* JPEG global Interrupt */
.word MDMA_IRQHandler /* MDMA global Interrupt */
.word 0 /* Reserved */
.word SDMMC2_IRQHandler /* SDMMC2 global Interrupt */
.word HSEM1_IRQHandler /* HSEM1 global Interrupt */
.word 0 /* Reserved */
.word DAC2_IRQHandler /* DAC2 global Interrupt */
.word DMAMUX2_OVR_IRQHandler /* DMAMUX Overrun interrupt */
.word BDMA2_Channel0_IRQHandler /* BDMA2 Channel 0 global Interrupt */
.word BDMA2_Channel1_IRQHandler /* BDMA2 Channel 1 global Interrupt */
.word BDMA2_Channel2_IRQHandler /* BDMA2 Channel 2 global Interrupt */
.word BDMA2_Channel3_IRQHandler /* BDMA2 Channel 3 global Interrupt */
.word BDMA2_Channel4_IRQHandler /* BDMA2 Channel 4 global Interrupt */
.word BDMA2_Channel5_IRQHandler /* BDMA2 Channel 5 global Interrupt */
.word BDMA2_Channel6_IRQHandler /* BDMA2 Channel 6 global Interrupt */
.word BDMA2_Channel7_IRQHandler /* BDMA2 Channel 7 global Interrupt */
.word COMP_IRQHandler /* COMP global Interrupt */
.word LPTIM2_IRQHandler /* LP TIM2 global interrupt */
.word LPTIM3_IRQHandler /* LP TIM3 global interrupt */
.word UART9_IRQHandler /* UART9 global interrupt */
.word USART10_IRQHandler /* USART10 global interrupt */
.word LPUART1_IRQHandler /* LP UART1 interrupt */
.word 0 /* Reserved */
.word CRS_IRQHandler /* Clock Recovery Global Interrupt */
.word ECC_IRQHandler /* ECC diagnostic Global Interrupt */
.word 0 /* Reserved */
.word DTS_IRQHandler /* DTS */
.word 0 /* Reserved */
.word WAKEUP_PIN_IRQHandler /* Interrupt for all 6 wake-up pins */
.word OCTOSPI2_IRQHandler /* OCTOSPI2 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word GFXMMU_IRQHandler /* GFXMMU */
.word BDMA1_IRQHandler /* BDMA1 */
/* .size is placed AFTER the table so ".-g_pfnVectors" measures the emitted
   words; before the label it evaluated to 0, giving the symbol a wrong
   (zero) size in the ELF symbol table. Table contents are unchanged. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_PVM_IRQHandler
.thumb_set PVD_PVM_IRQHandler,Default_Handler
.weak RTC_TAMP_STAMP_CSS_LSE_IRQHandler
.thumb_set RTC_TAMP_STAMP_CSS_LSE_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak FDCAN1_IT0_IRQHandler
.thumb_set FDCAN1_IT0_IRQHandler,Default_Handler
.weak FDCAN2_IT0_IRQHandler
.thumb_set FDCAN2_IT0_IRQHandler,Default_Handler
.weak FDCAN1_IT1_IRQHandler
.thumb_set FDCAN1_IT1_IRQHandler,Default_Handler
.weak FDCAN2_IT1_IRQHandler
.thumb_set FDCAN2_IT1_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak DFSDM2_IRQHandler
.thumb_set DFSDM2_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SDMMC1_IRQHandler
.thumb_set SDMMC1_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak FDCAN_CAL_IRQHandler
.thumb_set FDCAN_CAL_IRQHandler,Default_Handler
.weak DFSDM1_FLT4_IRQHandler
.thumb_set DFSDM1_FLT4_IRQHandler,Default_Handler
.weak DFSDM1_FLT5_IRQHandler
.thumb_set DFSDM1_FLT5_IRQHandler,Default_Handler
.weak DFSDM1_FLT6_IRQHandler
.thumb_set DFSDM1_FLT6_IRQHandler,Default_Handler
.weak DFSDM1_FLT7_IRQHandler
.thumb_set DFSDM1_FLT7_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_PSSI_IRQHandler
.thumb_set DCMI_PSSI_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak UART7_IRQHandler
.thumb_set UART7_IRQHandler,Default_Handler
.weak UART8_IRQHandler
.thumb_set UART8_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
.weak SPI6_IRQHandler
.thumb_set SPI6_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak LTDC_IRQHandler
.thumb_set LTDC_IRQHandler,Default_Handler
.weak LTDC_ER_IRQHandler
.thumb_set LTDC_ER_IRQHandler,Default_Handler
.weak DMA2D_IRQHandler
.thumb_set DMA2D_IRQHandler,Default_Handler
.weak SAI2_IRQHandler
.thumb_set SAI2_IRQHandler,Default_Handler
.weak OCTOSPI1_IRQHandler
.thumb_set OCTOSPI1_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak CEC_IRQHandler
.thumb_set CEC_IRQHandler,Default_Handler
.weak I2C4_EV_IRQHandler
.thumb_set I2C4_EV_IRQHandler,Default_Handler
.weak I2C4_ER_IRQHandler
.thumb_set I2C4_ER_IRQHandler,Default_Handler
.weak SPDIF_RX_IRQHandler
.thumb_set SPDIF_RX_IRQHandler,Default_Handler
.weak DMAMUX1_OVR_IRQHandler
.thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler
.weak DFSDM1_FLT0_IRQHandler
.thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler
.weak DFSDM1_FLT1_IRQHandler
.thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler
.weak DFSDM1_FLT2_IRQHandler
.thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler
.weak DFSDM1_FLT3_IRQHandler
.thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler
.weak SWPMI1_IRQHandler
.thumb_set SWPMI1_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak MDIOS_WKUP_IRQHandler
.thumb_set MDIOS_WKUP_IRQHandler,Default_Handler
.weak MDIOS_IRQHandler
.thumb_set MDIOS_IRQHandler,Default_Handler
.weak JPEG_IRQHandler
.thumb_set JPEG_IRQHandler,Default_Handler
.weak MDMA_IRQHandler
.thumb_set MDMA_IRQHandler,Default_Handler
.weak SDMMC2_IRQHandler
.thumb_set SDMMC2_IRQHandler,Default_Handler
.weak HSEM1_IRQHandler
.thumb_set HSEM1_IRQHandler,Default_Handler
.weak DAC2_IRQHandler
.thumb_set DAC2_IRQHandler,Default_Handler
.weak DMAMUX2_OVR_IRQHandler
.thumb_set DMAMUX2_OVR_IRQHandler,Default_Handler
.weak BDMA2_Channel0_IRQHandler
.thumb_set BDMA2_Channel0_IRQHandler,Default_Handler
.weak BDMA2_Channel1_IRQHandler
.thumb_set BDMA2_Channel1_IRQHandler,Default_Handler
.weak BDMA2_Channel2_IRQHandler
.thumb_set BDMA2_Channel2_IRQHandler,Default_Handler
.weak BDMA2_Channel3_IRQHandler
.thumb_set BDMA2_Channel3_IRQHandler,Default_Handler
.weak BDMA2_Channel4_IRQHandler
.thumb_set BDMA2_Channel4_IRQHandler,Default_Handler
.weak BDMA2_Channel5_IRQHandler
.thumb_set BDMA2_Channel5_IRQHandler,Default_Handler
.weak BDMA2_Channel6_IRQHandler
.thumb_set BDMA2_Channel6_IRQHandler,Default_Handler
.weak BDMA2_Channel7_IRQHandler
.thumb_set BDMA2_Channel7_IRQHandler,Default_Handler
.weak COMP_IRQHandler
.thumb_set COMP_IRQHandler,Default_Handler
.weak LPTIM2_IRQHandler
.thumb_set LPTIM2_IRQHandler,Default_Handler
.weak LPTIM3_IRQHandler
.thumb_set LPTIM3_IRQHandler,Default_Handler
/* NOTE(review): LPTIM4/LPTIM5 aliases below have no corresponding entry in
   g_pfnVectors above; they appear to be leftovers from a related device
   template (harmless, but confirm against the device reference manual). */
.weak LPTIM4_IRQHandler
.thumb_set LPTIM4_IRQHandler,Default_Handler
.weak LPTIM5_IRQHandler
.thumb_set LPTIM5_IRQHandler,Default_Handler
.weak UART9_IRQHandler
.thumb_set UART9_IRQHandler,Default_Handler
.weak USART10_IRQHandler
.thumb_set USART10_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak CRS_IRQHandler
.thumb_set CRS_IRQHandler,Default_Handler
.weak ECC_IRQHandler
.thumb_set ECC_IRQHandler,Default_Handler
.weak DTS_IRQHandler
.thumb_set DTS_IRQHandler,Default_Handler
.weak WAKEUP_PIN_IRQHandler
.thumb_set WAKEUP_PIN_IRQHandler,Default_Handler
.weak OCTOSPI2_IRQHandler
.thumb_set OCTOSPI2_IRQHandler,Default_Handler
.weak GFXMMU_IRQHandler
.thumb_set GFXMMU_IRQHandler,Default_Handler
.weak BDMA1_IRQHandler
.thumb_set BDMA1_IRQHandler,Default_Handler
|
xddcore/OpenNNA2.0 | 29,390 | platform/OpenNNA_STM32H7A3/Test Speed(FPU)/OpenNNA_STM32H7A3_Demo_Example/Core/Startup/startup_stm32h7a3zitxq.s | /**
******************************************************************************
* @file startup_stm32h7a3xxq.s
* @author MCD Application Team
* @brief STM32H7A3xxQ Devices vector table for GCC based toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified                 /* unified (UAL) assembler syntax */
.cpu cortex-m7
.fpu softvfp                    /* software FP ABI for startup code */
.thumb                          /* Cortex-M executes Thumb only */
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/* NOTE(review): the .word directives above emit the linker symbols at the
   current location (stock ST template behavior) — they are reference data,
   not executed code. */
/**
 * @brief  Entry point executed immediately after a reset event.
 *         Sets up the stack, runs SystemInit, initializes .data and .bss,
 *         runs static constructors, then transfers control to main().
 * @param  None
 * @retval None
 */
  .section .text.Reset_Handler
  .weak Reset_Handler
  .type Reset_Handler, %function
Reset_Handler:
  ldr   sp, =_estack        /* main stack pointer <- top of RAM */

  /* Low-level clock/power configuration must run before any C code. */
  bl    SystemInit

  /* Copy .data initializers from flash (_sidata) to RAM [_sdata.._edata). */
  ldr   r0, =_sdata         /* r0 = destination base (RAM)   */
  ldr   r1, =_edata         /* r1 = destination end          */
  ldr   r2, =_sidata        /* r2 = source base (flash)      */
  movs  r3, #0              /* r3 = running byte offset      */
CopyLoop:
  adds  r4, r0, r3          /* r4 = next destination address */
  cmp   r4, r1
  bcs   CopyDone            /* stop once dst+offset reaches _edata */
  ldr   r4, [r2, r3]
  str   r4, [r0, r3]
  adds  r3, r3, #4
  b     CopyLoop
CopyDone:

  /* Zero-fill the .bss region [_sbss.._ebss). */
  ldr   r2, =_sbss          /* r2 = write pointer */
  ldr   r4, =_ebss          /* r4 = end address   */
  movs  r3, #0              /* r3 = zero value    */
ZeroLoop:
  cmp   r2, r4
  bcs   ZeroDone            /* stop once pointer reaches _ebss */
  str   r3, [r2]
  adds  r2, r2, #4
  b     ZeroLoop
ZeroDone:

  bl    __libc_init_array   /* run C/C++ static constructors */
  bl    main                /* hand control to the application */
  bx    lr                  /* main() is not expected to return */
.size Reset_Handler, .-Reset_Handler
/**
 * @brief  Fallback handler for any exception or interrupt that has no
 *         dedicated ISR. Spins in place so the halted system state stays
 *         available for inspection by a debugger.
 * @param  None
 * @retval None
 */
  .section .text.Default_Handler,"ax",%progbits
Default_Handler:
  b Default_Handler                     /* loop forever */
  .size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_PVM_IRQHandler /* PVD/PVM through EXTI Line detection */
.word RTC_TAMP_STAMP_CSS_LSE_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word FDCAN1_IT0_IRQHandler /* FDCAN1 interrupt line 0 */
.word FDCAN2_IT0_IRQHandler /* FDCAN2 interrupt line 0 */
.word FDCAN1_IT1_IRQHandler /* FDCAN1 interrupt line 1 */
.word FDCAN2_IT1_IRQHandler /* FDCAN2 interrupt line 1 */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_IRQHandler /* TIM1 Break interrupt */
.word TIM1_UP_IRQHandler /* TIM1 Update interrupt */
.word TIM1_TRG_COM_IRQHandler /* TIM1 Trigger and Commutation interrupt */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word DFSDM2_IRQHandler /* DFSDM2 Interrupt */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FMC_IRQHandler /* FMC */
.word SDMMC1_IRQHandler /* SDMMC1 */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FDCAN_CAL_IRQHandler /* FDCAN calibration unit interrupt*/
.word DFSDM1_FLT4_IRQHandler /* DFSDM Filter4 Interrupt */
.word DFSDM1_FLT5_IRQHandler /* DFSDM Filter5 Interrupt */
.word DFSDM1_FLT6_IRQHandler /* DFSDM Filter6 Interrupt */
.word DFSDM1_FLT7_IRQHandler /* DFSDM Filter7 Interrupt */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_PSSI_IRQHandler /* DCMI, PSSI */
.word 0 /* Reserved */
.word RNG_IRQHandler /* RNG */
.word FPU_IRQHandler /* FPU */
.word UART7_IRQHandler /* UART7 */
.word UART8_IRQHandler /* UART8 */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
.word SPI6_IRQHandler /* SPI6 */
.word SAI1_IRQHandler /* SAI1 */
.word LTDC_IRQHandler /* LTDC */
.word LTDC_ER_IRQHandler /* LTDC error */
.word DMA2D_IRQHandler /* DMA2D */
.word SAI2_IRQHandler /* SAI2 */
.word OCTOSPI1_IRQHandler /* OCTOSPI1 */
.word LPTIM1_IRQHandler /* LPTIM1 */
.word CEC_IRQHandler /* HDMI_CEC */
.word I2C4_EV_IRQHandler /* I2C4 Event */
.word I2C4_ER_IRQHandler /* I2C4 Error */
.word SPDIF_RX_IRQHandler /* SPDIF_RX */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMAMUX1_OVR_IRQHandler /* DMAMUX1 Overrun interrupt */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DFSDM1_FLT0_IRQHandler /* DFSDM Filter0 Interrupt */
.word DFSDM1_FLT1_IRQHandler /* DFSDM Filter1 Interrupt */
.word DFSDM1_FLT2_IRQHandler /* DFSDM Filter2 Interrupt */
.word DFSDM1_FLT3_IRQHandler /* DFSDM Filter3 Interrupt */
.word 0 /* Reserved */
.word SWPMI1_IRQHandler /* Serial Wire Interface 1 global interrupt */
.word TIM15_IRQHandler /* TIM15 global Interrupt */
.word TIM16_IRQHandler /* TIM16 global Interrupt */
.word TIM17_IRQHandler /* TIM17 global Interrupt */
.word MDIOS_WKUP_IRQHandler /* MDIOS Wakeup Interrupt */
.word MDIOS_IRQHandler /* MDIOS global Interrupt */
.word JPEG_IRQHandler /* JPEG global Interrupt */
.word MDMA_IRQHandler /* MDMA global Interrupt */
.word 0 /* Reserved */
.word SDMMC2_IRQHandler /* SDMMC2 global Interrupt */
.word HSEM1_IRQHandler /* HSEM1 global Interrupt */
.word 0 /* Reserved */
.word DAC2_IRQHandler /* DAC2 global Interrupt */
.word DMAMUX2_OVR_IRQHandler /* DMAMUX Overrun interrupt */
.word BDMA2_Channel0_IRQHandler /* BDMA2 Channel 0 global Interrupt */
.word BDMA2_Channel1_IRQHandler /* BDMA2 Channel 1 global Interrupt */
.word BDMA2_Channel2_IRQHandler /* BDMA2 Channel 2 global Interrupt */
.word BDMA2_Channel3_IRQHandler /* BDMA2 Channel 3 global Interrupt */
.word BDMA2_Channel4_IRQHandler /* BDMA2 Channel 4 global Interrupt */
.word BDMA2_Channel5_IRQHandler /* BDMA2 Channel 5 global Interrupt */
.word BDMA2_Channel6_IRQHandler /* BDMA2 Channel 6 global Interrupt */
.word BDMA2_Channel7_IRQHandler /* BDMA2 Channel 7 global Interrupt */
.word COMP_IRQHandler /* COMP global Interrupt */
.word LPTIM2_IRQHandler /* LP TIM2 global interrupt */
.word LPTIM3_IRQHandler /* LP TIM3 global interrupt */
.word UART9_IRQHandler /* UART9 global interrupt */
.word USART10_IRQHandler /* USART10 global interrupt */
.word LPUART1_IRQHandler /* LP UART1 interrupt */
.word 0 /* Reserved */
.word CRS_IRQHandler /* Clock Recovery Global Interrupt */
.word ECC_IRQHandler /* ECC diagnostic Global Interrupt */
.word 0 /* Reserved */
.word DTS_IRQHandler /* DTS */
.word 0 /* Reserved */
.word WAKEUP_PIN_IRQHandler /* Interrupt for all 6 wake-up pins */
.word OCTOSPI2_IRQHandler /* OCTOSPI2 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word GFXMMU_IRQHandler /* GFXMMU */
.word BDMA1_IRQHandler /* BDMA1 */
/* .size is placed AFTER the table so ".-g_pfnVectors" measures the emitted
   words; before the label it evaluated to 0, giving the symbol a wrong
   (zero) size in the ELF symbol table. Table contents are unchanged. */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_PVM_IRQHandler
.thumb_set PVD_PVM_IRQHandler,Default_Handler
.weak RTC_TAMP_STAMP_CSS_LSE_IRQHandler
.thumb_set RTC_TAMP_STAMP_CSS_LSE_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak FDCAN1_IT0_IRQHandler
.thumb_set FDCAN1_IT0_IRQHandler,Default_Handler
.weak FDCAN2_IT0_IRQHandler
.thumb_set FDCAN2_IT0_IRQHandler,Default_Handler
.weak FDCAN1_IT1_IRQHandler
.thumb_set FDCAN1_IT1_IRQHandler,Default_Handler
.weak FDCAN2_IT1_IRQHandler
.thumb_set FDCAN2_IT1_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak DFSDM2_IRQHandler
.thumb_set DFSDM2_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SDMMC1_IRQHandler
.thumb_set SDMMC1_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak FDCAN_CAL_IRQHandler
.thumb_set FDCAN_CAL_IRQHandler,Default_Handler
.weak DFSDM1_FLT4_IRQHandler
.thumb_set DFSDM1_FLT4_IRQHandler,Default_Handler
.weak DFSDM1_FLT5_IRQHandler
.thumb_set DFSDM1_FLT5_IRQHandler,Default_Handler
.weak DFSDM1_FLT6_IRQHandler
.thumb_set DFSDM1_FLT6_IRQHandler,Default_Handler
.weak DFSDM1_FLT7_IRQHandler
.thumb_set DFSDM1_FLT7_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_PSSI_IRQHandler
.thumb_set DCMI_PSSI_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak UART7_IRQHandler
.thumb_set UART7_IRQHandler,Default_Handler
.weak UART8_IRQHandler
.thumb_set UART8_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
.weak SPI6_IRQHandler
.thumb_set SPI6_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak LTDC_IRQHandler
.thumb_set LTDC_IRQHandler,Default_Handler
.weak LTDC_ER_IRQHandler
.thumb_set LTDC_ER_IRQHandler,Default_Handler
.weak DMA2D_IRQHandler
.thumb_set DMA2D_IRQHandler,Default_Handler
.weak SAI2_IRQHandler
.thumb_set SAI2_IRQHandler,Default_Handler
.weak OCTOSPI1_IRQHandler
.thumb_set OCTOSPI1_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak CEC_IRQHandler
.thumb_set CEC_IRQHandler,Default_Handler
.weak I2C4_EV_IRQHandler
.thumb_set I2C4_EV_IRQHandler,Default_Handler
.weak I2C4_ER_IRQHandler
.thumb_set I2C4_ER_IRQHandler,Default_Handler
.weak SPDIF_RX_IRQHandler
.thumb_set SPDIF_RX_IRQHandler,Default_Handler
.weak DMAMUX1_OVR_IRQHandler
.thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler
.weak DFSDM1_FLT0_IRQHandler
.thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler
.weak DFSDM1_FLT1_IRQHandler
.thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler
.weak DFSDM1_FLT2_IRQHandler
.thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler
.weak DFSDM1_FLT3_IRQHandler
.thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler
.weak SWPMI1_IRQHandler
.thumb_set SWPMI1_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak MDIOS_WKUP_IRQHandler
.thumb_set MDIOS_WKUP_IRQHandler,Default_Handler
.weak MDIOS_IRQHandler
.thumb_set MDIOS_IRQHandler,Default_Handler
.weak JPEG_IRQHandler
.thumb_set JPEG_IRQHandler,Default_Handler
.weak MDMA_IRQHandler
.thumb_set MDMA_IRQHandler,Default_Handler
.weak SDMMC2_IRQHandler
.thumb_set SDMMC2_IRQHandler,Default_Handler
.weak HSEM1_IRQHandler
.thumb_set HSEM1_IRQHandler,Default_Handler
.weak DAC2_IRQHandler
.thumb_set DAC2_IRQHandler,Default_Handler
.weak DMAMUX2_OVR_IRQHandler
.thumb_set DMAMUX2_OVR_IRQHandler,Default_Handler
.weak BDMA2_Channel0_IRQHandler
.thumb_set BDMA2_Channel0_IRQHandler,Default_Handler
.weak BDMA2_Channel1_IRQHandler
.thumb_set BDMA2_Channel1_IRQHandler,Default_Handler
.weak BDMA2_Channel2_IRQHandler
.thumb_set BDMA2_Channel2_IRQHandler,Default_Handler
.weak BDMA2_Channel3_IRQHandler
.thumb_set BDMA2_Channel3_IRQHandler,Default_Handler
.weak BDMA2_Channel4_IRQHandler
.thumb_set BDMA2_Channel4_IRQHandler,Default_Handler
.weak BDMA2_Channel5_IRQHandler
.thumb_set BDMA2_Channel5_IRQHandler,Default_Handler
.weak BDMA2_Channel6_IRQHandler
.thumb_set BDMA2_Channel6_IRQHandler,Default_Handler
.weak BDMA2_Channel7_IRQHandler
.thumb_set BDMA2_Channel7_IRQHandler,Default_Handler
.weak COMP_IRQHandler
.thumb_set COMP_IRQHandler,Default_Handler
.weak LPTIM2_IRQHandler
.thumb_set LPTIM2_IRQHandler,Default_Handler
.weak LPTIM3_IRQHandler
.thumb_set LPTIM3_IRQHandler,Default_Handler
.weak LPTIM4_IRQHandler
.thumb_set LPTIM4_IRQHandler,Default_Handler
.weak LPTIM5_IRQHandler
.thumb_set LPTIM5_IRQHandler,Default_Handler
.weak UART9_IRQHandler
.thumb_set UART9_IRQHandler,Default_Handler
.weak USART10_IRQHandler
.thumb_set USART10_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak CRS_IRQHandler
.thumb_set CRS_IRQHandler,Default_Handler
.weak ECC_IRQHandler
.thumb_set ECC_IRQHandler,Default_Handler
.weak DTS_IRQHandler
.thumb_set DTS_IRQHandler,Default_Handler
.weak WAKEUP_PIN_IRQHandler
.thumb_set WAKEUP_PIN_IRQHandler,Default_Handler
.weak OCTOSPI2_IRQHandler
.thumb_set OCTOSPI2_IRQHandler,Default_Handler
.weak GFXMMU_IRQHandler
.thumb_set GFXMMU_IRQHandler,Default_Handler
.weak BDMA1_IRQHandler
.thumb_set BDMA1_IRQHandler,Default_Handler
|
xddcore/OpenNNA2.0 | 29,390 | platform/OpenNNA_STM32H7A3/OpenNNA_STM32H7A3_Demo_Example/Core/Startup/startup_stm32h7a3zitxq.s | /**
******************************************************************************
* @file startup_stm32h7a3xxq.s
* @author MCD Application Team
* @brief STM32H7B3xx Devices vector table for GCC based toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m7
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_PVM_IRQHandler /* PVD/PVM through EXTI Line detection */
.word RTC_TAMP_STAMP_CSS_LSE_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word FDCAN1_IT0_IRQHandler /* FDCAN1 interrupt line 0 */
.word FDCAN2_IT0_IRQHandler /* FDCAN2 interrupt line 0 */
.word FDCAN1_IT1_IRQHandler /* FDCAN1 interrupt line 1 */
.word FDCAN2_IT1_IRQHandler /* FDCAN2 interrupt line 1 */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_IRQHandler /* TIM1 Break interrupt */
.word TIM1_UP_IRQHandler /* TIM1 Update interrupt */
.word TIM1_TRG_COM_IRQHandler /* TIM1 Trigger and Commutation interrupt */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word DFSDM2_IRQHandler /* DFSDM2 Interrupt */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FMC_IRQHandler /* FMC */
.word SDMMC1_IRQHandler /* SDMMC1 */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FDCAN_CAL_IRQHandler /* FDCAN calibration unit interrupt*/
.word DFSDM1_FLT4_IRQHandler /* DFSDM Filter4 Interrupt */
.word DFSDM1_FLT5_IRQHandler /* DFSDM Filter5 Interrupt */
.word DFSDM1_FLT6_IRQHandler /* DFSDM Filter6 Interrupt */
.word DFSDM1_FLT7_IRQHandler /* DFSDM Filter7 Interrupt */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_PSSI_IRQHandler /* DCMI, PSSI */
.word 0 /* Reserved */
.word RNG_IRQHandler /* RNG */
.word FPU_IRQHandler /* FPU */
.word UART7_IRQHandler /* UART7 */
.word UART8_IRQHandler /* UART8 */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
.word SPI6_IRQHandler /* SPI6 */
.word SAI1_IRQHandler /* SAI1 */
.word LTDC_IRQHandler /* LTDC */
.word LTDC_ER_IRQHandler /* LTDC error */
.word DMA2D_IRQHandler /* DMA2D */
.word SAI2_IRQHandler /* SAI2 */
.word OCTOSPI1_IRQHandler /* OCTOSPI1 */
.word LPTIM1_IRQHandler /* LPTIM1 */
.word CEC_IRQHandler /* HDMI_CEC */
.word I2C4_EV_IRQHandler /* I2C4 Event */
.word I2C4_ER_IRQHandler /* I2C4 Error */
.word SPDIF_RX_IRQHandler /* SPDIF_RX */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMAMUX1_OVR_IRQHandler /* DMAMUX1 Overrun interrupt */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DFSDM1_FLT0_IRQHandler /* DFSDM Filter0 Interrupt */
.word DFSDM1_FLT1_IRQHandler /* DFSDM Filter1 Interrupt */
.word DFSDM1_FLT2_IRQHandler /* DFSDM Filter2 Interrupt */
.word DFSDM1_FLT3_IRQHandler /* DFSDM Filter3 Interrupt */
.word 0 /* Reserved */
.word SWPMI1_IRQHandler /* Serial Wire Interface 1 global interrupt */
.word TIM15_IRQHandler /* TIM15 global Interrupt */
.word TIM16_IRQHandler /* TIM16 global Interrupt */
.word TIM17_IRQHandler /* TIM17 global Interrupt */
.word MDIOS_WKUP_IRQHandler /* MDIOS Wakeup Interrupt */
.word MDIOS_IRQHandler /* MDIOS global Interrupt */
.word JPEG_IRQHandler /* JPEG global Interrupt */
.word MDMA_IRQHandler /* MDMA global Interrupt */
.word 0 /* Reserved */
.word SDMMC2_IRQHandler /* SDMMC2 global Interrupt */
.word HSEM1_IRQHandler /* HSEM1 global Interrupt */
.word 0 /* Reserved */
.word DAC2_IRQHandler /* DAC2 global Interrupt */
.word DMAMUX2_OVR_IRQHandler /* DMAMUX Overrun interrupt */
.word BDMA2_Channel0_IRQHandler /* BDMA2 Channel 0 global Interrupt */
.word BDMA2_Channel1_IRQHandler /* BDMA2 Channel 1 global Interrupt */
.word BDMA2_Channel2_IRQHandler /* BDMA2 Channel 2 global Interrupt */
.word BDMA2_Channel3_IRQHandler /* BDMA2 Channel 3 global Interrupt */
.word BDMA2_Channel4_IRQHandler /* BDMA2 Channel 4 global Interrupt */
.word BDMA2_Channel5_IRQHandler /* BDMA2 Channel 5 global Interrupt */
.word BDMA2_Channel6_IRQHandler /* BDMA2 Channel 6 global Interrupt */
.word BDMA2_Channel7_IRQHandler /* BDMA2 Channel 7 global Interrupt */
.word COMP_IRQHandler /* COMP global Interrupt */
.word LPTIM2_IRQHandler /* LP TIM2 global interrupt */
.word LPTIM3_IRQHandler /* LP TIM3 global interrupt */
.word UART9_IRQHandler /* UART9 global interrupt */
.word USART10_IRQHandler /* USART10 global interrupt */
.word LPUART1_IRQHandler /* LP UART1 interrupt */
.word 0 /* Reserved */
.word CRS_IRQHandler /* Clock Recovery Global Interrupt */
.word ECC_IRQHandler /* ECC diagnostic Global Interrupt */
.word 0 /* Reserved */
.word DTS_IRQHandler /* DTS */
.word 0 /* Reserved */
.word WAKEUP_PIN_IRQHandler /* Interrupt for all 6 wake-up pins */
.word OCTOSPI2_IRQHandler /* OCTOSPI2 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word GFXMMU_IRQHandler /* GFXMMU */
.word BDMA1_IRQHandler /* BDMA1 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_PVM_IRQHandler
.thumb_set PVD_PVM_IRQHandler,Default_Handler
.weak RTC_TAMP_STAMP_CSS_LSE_IRQHandler
.thumb_set RTC_TAMP_STAMP_CSS_LSE_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak FDCAN1_IT0_IRQHandler
.thumb_set FDCAN1_IT0_IRQHandler,Default_Handler
.weak FDCAN2_IT0_IRQHandler
.thumb_set FDCAN2_IT0_IRQHandler,Default_Handler
.weak FDCAN1_IT1_IRQHandler
.thumb_set FDCAN1_IT1_IRQHandler,Default_Handler
.weak FDCAN2_IT1_IRQHandler
.thumb_set FDCAN2_IT1_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak DFSDM2_IRQHandler
.thumb_set DFSDM2_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SDMMC1_IRQHandler
.thumb_set SDMMC1_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak FDCAN_CAL_IRQHandler
.thumb_set FDCAN_CAL_IRQHandler,Default_Handler
.weak DFSDM1_FLT4_IRQHandler
.thumb_set DFSDM1_FLT4_IRQHandler,Default_Handler
.weak DFSDM1_FLT5_IRQHandler
.thumb_set DFSDM1_FLT5_IRQHandler,Default_Handler
.weak DFSDM1_FLT6_IRQHandler
.thumb_set DFSDM1_FLT6_IRQHandler,Default_Handler
.weak DFSDM1_FLT7_IRQHandler
.thumb_set DFSDM1_FLT7_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_PSSI_IRQHandler
.thumb_set DCMI_PSSI_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak UART7_IRQHandler
.thumb_set UART7_IRQHandler,Default_Handler
.weak UART8_IRQHandler
.thumb_set UART8_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
.weak SPI6_IRQHandler
.thumb_set SPI6_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak LTDC_IRQHandler
.thumb_set LTDC_IRQHandler,Default_Handler
.weak LTDC_ER_IRQHandler
.thumb_set LTDC_ER_IRQHandler,Default_Handler
.weak DMA2D_IRQHandler
.thumb_set DMA2D_IRQHandler,Default_Handler
.weak SAI2_IRQHandler
.thumb_set SAI2_IRQHandler,Default_Handler
.weak OCTOSPI1_IRQHandler
.thumb_set OCTOSPI1_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak CEC_IRQHandler
.thumb_set CEC_IRQHandler,Default_Handler
.weak I2C4_EV_IRQHandler
.thumb_set I2C4_EV_IRQHandler,Default_Handler
.weak I2C4_ER_IRQHandler
.thumb_set I2C4_ER_IRQHandler,Default_Handler
.weak SPDIF_RX_IRQHandler
.thumb_set SPDIF_RX_IRQHandler,Default_Handler
.weak DMAMUX1_OVR_IRQHandler
.thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler
.weak DFSDM1_FLT0_IRQHandler
.thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler
.weak DFSDM1_FLT1_IRQHandler
.thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler
.weak DFSDM1_FLT2_IRQHandler
.thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler
.weak DFSDM1_FLT3_IRQHandler
.thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler
.weak SWPMI1_IRQHandler
.thumb_set SWPMI1_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak MDIOS_WKUP_IRQHandler
.thumb_set MDIOS_WKUP_IRQHandler,Default_Handler
.weak MDIOS_IRQHandler
.thumb_set MDIOS_IRQHandler,Default_Handler
.weak JPEG_IRQHandler
.thumb_set JPEG_IRQHandler,Default_Handler
.weak MDMA_IRQHandler
.thumb_set MDMA_IRQHandler,Default_Handler
.weak SDMMC2_IRQHandler
.thumb_set SDMMC2_IRQHandler,Default_Handler
.weak HSEM1_IRQHandler
.thumb_set HSEM1_IRQHandler,Default_Handler
.weak DAC2_IRQHandler
.thumb_set DAC2_IRQHandler,Default_Handler
.weak DMAMUX2_OVR_IRQHandler
.thumb_set DMAMUX2_OVR_IRQHandler,Default_Handler
.weak BDMA2_Channel0_IRQHandler
.thumb_set BDMA2_Channel0_IRQHandler,Default_Handler
.weak BDMA2_Channel1_IRQHandler
.thumb_set BDMA2_Channel1_IRQHandler,Default_Handler
.weak BDMA2_Channel2_IRQHandler
.thumb_set BDMA2_Channel2_IRQHandler,Default_Handler
.weak BDMA2_Channel3_IRQHandler
.thumb_set BDMA2_Channel3_IRQHandler,Default_Handler
.weak BDMA2_Channel4_IRQHandler
.thumb_set BDMA2_Channel4_IRQHandler,Default_Handler
.weak BDMA2_Channel5_IRQHandler
.thumb_set BDMA2_Channel5_IRQHandler,Default_Handler
.weak BDMA2_Channel6_IRQHandler
.thumb_set BDMA2_Channel6_IRQHandler,Default_Handler
.weak BDMA2_Channel7_IRQHandler
.thumb_set BDMA2_Channel7_IRQHandler,Default_Handler
.weak COMP_IRQHandler
.thumb_set COMP_IRQHandler,Default_Handler
.weak LPTIM2_IRQHandler
.thumb_set LPTIM2_IRQHandler,Default_Handler
.weak LPTIM3_IRQHandler
.thumb_set LPTIM3_IRQHandler,Default_Handler
.weak LPTIM4_IRQHandler
.thumb_set LPTIM4_IRQHandler,Default_Handler
.weak LPTIM5_IRQHandler
.thumb_set LPTIM5_IRQHandler,Default_Handler
.weak UART9_IRQHandler
.thumb_set UART9_IRQHandler,Default_Handler
.weak USART10_IRQHandler
.thumb_set USART10_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak CRS_IRQHandler
.thumb_set CRS_IRQHandler,Default_Handler
.weak ECC_IRQHandler
.thumb_set ECC_IRQHandler,Default_Handler
.weak DTS_IRQHandler
.thumb_set DTS_IRQHandler,Default_Handler
.weak WAKEUP_PIN_IRQHandler
.thumb_set WAKEUP_PIN_IRQHandler,Default_Handler
.weak OCTOSPI2_IRQHandler
.thumb_set OCTOSPI2_IRQHandler,Default_Handler
.weak GFXMMU_IRQHandler
.thumb_set GFXMMU_IRQHandler,Default_Handler
.weak BDMA1_IRQHandler
.thumb_set BDMA1_IRQHandler,Default_Handler
|
;ChipId: PT3220
;Stack Configuration------------------------------------------------------------
; Reserve an uninitialized (NOINIT), 8-byte aligned 0x600-byte region for the
; main stack.  __initial_sp labels the address just past the region, i.e. the
; initial MSP value placed in vector slot 0 (the stack grows downward).
Stack_Size EQU 0x600
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
;-------------------------------------------------------------------------------
;Heap Configuration-------------------------------------------------------------
; Reserve an uninitialized 0x200-byte heap region, bracketed by __heap_base and
; __heap_limit for the C library's heap manager (see the stack/heap init below).
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
;-------------------------------------------------------------------------------
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset-------------------------------------
; Cortex-M core exception vectors (slots 0-15, unused slots reserved as 0)
; followed by the 22 PT3220 peripheral interrupt vectors (external IRQs 0-21).
; Reset_Handler later passes the table's address to core_vector (see below).
AREA RESET, DATA, READONLY
EXPORT __Vectors
__Vectors DCD __initial_sp ; 0, load top of stack
DCD Reset_Handler ; 1, Reset Handler
DCD NMI_Handler ; 2, NMI Handler
DCD HardFault_Handler ; 3, Hard Fault Handler
DCD 0 ; 4, Reserved
DCD 0 ; 5, Reserved
DCD 0 ; 6, Reserved
DCD 0 ; 7, Reserved
DCD 0 ; 8, Reserved
DCD 0 ; 9, Reserved
DCD 0 ; 10, Reserved
DCD SVCall_Handler ; 11, SVCall Handler
DCD 0 ; 12, Reserved
DCD 0 ; 13, Reserved
DCD PendSV_Handler ; 14, PendSV Handler
DCD SysTick_Handler ; 15, SysTick Handler
; External interrupts
DCD EXTI_IRQHandler ; 0, EXTI
DCD IWDT_IRQHandler ; 1, IWDT
DCD BLE_IRQHandler ; 2, BB
DCD DMAC_IRQHandler ; 3, DMAChannel
DCD BB_LP_IRQHandler ; 4, BB_LowPower
DCD BTMR_IRQHandler ; 5, BTMR
DCD CTMR_IRQHandler ; 6, CTMR
DCD ATMR_IRQHandler ; 7, ATMR
DCD RTC_IRQHandler ; 8, RTC
DCD I2C_IRQHandler ; 9, I2C
DCD SPIM_IRQHandler ; 10, SPI Master
DCD SPIS_IRQHandler ; 11, SPI Slave
DCD UART1_IRQHandler ; 12, UART1
DCD UART2_IRQHandler ; 13, UART2
DCD AON_PMU_IRQHandler ; 14, PMU
DCD LVD33_IRQHandler ; 15, LVD
DCD BOD12_IRQHandler ; 16, BOD
DCD USB_IRQHandler ; 17, USB
DCD USB_SOF_IRQHandler ; 18, USB_SOF
DCD FSHC_IRQHandler ; 19, FSHC
DCD MDM_IRQHandler ; 20, MODEM
DCD RF_IRQHandler ; 21, RF
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
;-------------------------------------------------------------------------------
AREA |.INT|, CODE, READONLY
;Reset Handler------------------------------------------------------------------
; Entry point after reset.  Unless CACHE_USE_SRAM is defined, configures and
; enables the cache controller; calls core_vector with the flash vector table
; address; programs the 16 MHz crystal oscillator control register; calls
; trim_load to apply factory trim values; then jumps to the C library entry
; point __main (which eventually calls main()).  Never returns.
; Declared WEAK so an application may supply its own reset handler.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT core_vector
IMPORT trim_load
; IMPORT sys_pre_init
;#ifndef CACHE_USE_SRAM
IF :LNOT::DEF:CACHE_USE_SRAM
;cache cfg
;SYSCFG->CACHSRAM_CFG = 0;
MOVS R0, #0x00
LDR R1, =0x4000102C
STR R0, [R1]
;CACHE->CRCR0.Word = 0x18004025;
LDR R0, =0x18004025
LDR R1, =0x1900000C
STR R0, [R1]
;CACHE->CCFR.CACHE_INST_DATA = 1;
;MOVS R0, #0x08
;LDR R1, =0x19000004
;STR R0, [R1]
;CACHE->CCR.CACHE_EN = 1;
MOVS R0, #0x01
LDR R1, =0x19000000
STR R0, [R1]
ENDIF
; copy vector to sram
; NOTE(review): R0 carries the flash vector table address into core_vector,
; which presumably relocates it to SRAM -- confirm against core_vector's code.
LDR R0, =__Vectors
LDR R1, =core_vector
BLX R1
;APBMISC->XOSC16M_CTRL.Word = 0x00014894;
;.XOSC16M_LP=0 .XOSC16M_CAP_TR=0x14
LDR R0, =0x00014894
LDR R1, =0x40031054
STR R0, [R1]
; load ft trim
LDR R1, =trim_load
BLX R1
; LDR R1, =sys_pre_init
; BLX R1
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops here, can be modified)
; Each core exception gets a WEAK default that spins forever, so a fault is
; observable under a debugger.  An application overrides one by defining a
; non-weak symbol with the same name.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVCall_Handler PROC
EXPORT SVCall_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
;peripheral module int ---------------------------------------------------------
; Shared WEAK default for all 22 peripheral IRQs: every handler label below
; aliases the same infinite loop.  Define a strong symbol with the matching
; name elsewhere to service a given interrupt.
Default_Handler PROC
EXPORT EXTI_IRQHandler [WEAK]
EXPORT IWDT_IRQHandler [WEAK]
EXPORT BLE_IRQHandler [WEAK]
EXPORT DMAC_IRQHandler [WEAK]
EXPORT BB_LP_IRQHandler [WEAK]
EXPORT BTMR_IRQHandler [WEAK]
EXPORT CTMR_IRQHandler [WEAK]
EXPORT ATMR_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT I2C_IRQHandler [WEAK]
EXPORT SPIM_IRQHandler [WEAK]
EXPORT SPIS_IRQHandler [WEAK]
EXPORT UART1_IRQHandler [WEAK]
EXPORT UART2_IRQHandler [WEAK]
EXPORT AON_PMU_IRQHandler [WEAK]
EXPORT LVD33_IRQHandler [WEAK]
EXPORT BOD12_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
EXPORT USB_SOF_IRQHandler [WEAK]
EXPORT FSHC_IRQHandler [WEAK]
EXPORT MDM_IRQHandler [WEAK]
EXPORT RF_IRQHandler [WEAK]
EXTI_IRQHandler
IWDT_IRQHandler
BLE_IRQHandler
DMAC_IRQHandler
BB_LP_IRQHandler
BTMR_IRQHandler
CTMR_IRQHandler
ATMR_IRQHandler
RTC_IRQHandler
I2C_IRQHandler
SPIM_IRQHandler
SPIS_IRQHandler
UART1_IRQHandler
UART2_IRQHandler
AON_PMU_IRQHandler
LVD33_IRQHandler
BOD12_IRQHandler
USB_IRQHandler
USB_SOF_IRQHandler
FSHC_IRQHandler
MDM_IRQHandler
RF_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the library uses __initial_sp / __heap_base / __heap_limit
; directly.  Otherwise __user_initial_stackheap reports the two-region layout
; to the standard C library: R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack base.
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, = (Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
|
//
// Copyright (c) 2001, Dr Brian Gladman <brg@gladman.uk.net>, Worcester, UK.
// All rights reserved.
//
// TERMS
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The copyright holder's name must not be used to endorse or promote
// any products derived from this software without his specific prior
// written permission.
//
// This software is provided 'as is' with no express or implied warranties
// of correctness or fitness for purpose.
// Modified by Jari Ruusu, December 24 2001
// - Converted syntax to GNU CPP/assembler syntax
// - C programming interface converted back to "old" API
// - Minor portability cleanups and speed optimizations
// An AES (Rijndael) implementation for the Pentium. This version only
// implements the standard AES block length (128 bits, 16 bytes). This code
// does not preserve the eax, ecx or edx registers or the artihmetic status
// flags. However, the ebx, esi, edi, and ebp registers are preserved across
// calls.
// void aes_set_key(aes_context *cx, const unsigned char key[], const int key_len, const int f)
// void aes_encrypt(const aes_context *cx, const unsigned char in_blk[], unsigned char out_blk[])
// void aes_decrypt(const aes_context *cx, const unsigned char in_blk[], unsigned char out_blk[])
// Optionally prefix the exported names with an underscore for object formats
// whose C symbols carry a leading '_' (define USE_UNDERLINE when assembling).
#if defined(USE_UNDERLINE)
# define aes_set_key _aes_set_key
# define aes_encrypt _aes_encrypt
# define aes_decrypt _aes_decrypt
#endif
#if !defined(ALIGN32BYTES)
# define ALIGN32BYTES 32
#endif
.file "aes-i586.S"
.globl aes_set_key
.globl aes_encrypt
.globl aes_decrypt
#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
// offsets to parameters with one register pushed onto stack
// (return address at 4(%esp), saved %ebp/%ebx at 0/4 depending on function)
#define ctx 8 // AES context structure
#define in_blk 12 // input byte array address parameter
#define out_blk 16 // output byte array address parameter
// offsets in context structure -- must match the C aes_context layout
#define nkey 0 // key length, size 4
#define nrnd 4 // number of rounds, size 4
#define ekey 8 // encryption key schedule base address, size 256
#define dkey 264 // decryption key schedule base address, size 256
// This macro performs a forward encryption cycle. It is entered with
// the first previous round column values in %eax, %ebx, %esi and %edi and
// exits with the final values in the same registers.
// p1 = base of the four contiguous 1 KB (tlen) lookup tables for this round
//      type (aes_ft_tab for inner rounds, aes_fl_tab for the last round);
// p2 = byte offset of this round's key within the schedule, %ebp-relative.
// Uses (%esp) and 4(%esp) as scratch slots (caller reserved 8 bytes) and
// clobbers %ecx/%edx.  No comments inside the body: they would break the
// backslash line continuations of the C-preprocessor macro.
#define fwd_rnd(p1,p2) \
mov %ebx,(%esp) ;\
movzbl %al,%edx ;\
mov %eax,%ecx ;\
mov p2(%ebp),%eax ;\
mov %edi,4(%esp) ;\
mov p2+12(%ebp),%edi ;\
xor p1(,%edx,4),%eax ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
mov p2+4(%ebp),%ebx ;\
xor p1+tlen(,%edx,4),%edi ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+3*tlen(,%ecx,4),%ebx ;\
mov %esi,%ecx ;\
mov p1+2*tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%esi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%ebx ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%eax ;\
mov (%esp),%edx ;\
xor p1+3*tlen(,%ecx,4),%edi ;\
movzbl %dl,%ecx ;\
xor p2+8(%ebp),%esi ;\
xor p1(,%ecx,4),%ebx ;\
movzbl %dh,%ecx ;\
shr $16,%edx ;\
xor p1+tlen(,%ecx,4),%eax ;\
movzbl %dl,%ecx ;\
movzbl %dh,%edx ;\
xor p1+2*tlen(,%ecx,4),%edi ;\
mov 4(%esp),%ecx ;\
xor p1+3*tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%edi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%ebx ;\
xor p1+3*tlen(,%ecx,4),%eax
// This macro performs an inverse encryption cycle. It is entered with
// the first previous round column values in %eax, %ebx, %esi and %edi and
// exits with the final values in the same registers.
// Mirror image of fwd_rnd with the byte/column routing reversed for
// decryption: p1 = base of the four 1 KB tables (aes_it_tab for inner
// rounds, aes_il_tab for the last), p2 = %ebp-relative round key offset.
// Same scratch/clobber contract as fwd_rnd.
#define inv_rnd(p1,p2) \
movzbl %al,%edx ;\
mov %ebx,(%esp) ;\
mov %eax,%ecx ;\
mov p2(%ebp),%eax ;\
mov %edi,4(%esp) ;\
mov p2+4(%ebp),%ebx ;\
xor p1(,%edx,4),%eax ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
mov p2+12(%ebp),%edi ;\
xor p1+tlen(,%edx,4),%ebx ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+3*tlen(,%ecx,4),%edi ;\
mov %esi,%ecx ;\
mov p1+2*tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%esi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%edi ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%eax ;\
mov (%esp),%edx ;\
xor p1+3*tlen(,%ecx,4),%ebx ;\
movzbl %dl,%ecx ;\
xor p2+8(%ebp),%esi ;\
xor p1(,%ecx,4),%ebx ;\
movzbl %dh,%ecx ;\
shr $16,%edx ;\
xor p1+tlen(,%ecx,4),%esi ;\
movzbl %dl,%ecx ;\
movzbl %dh,%edx ;\
xor p1+2*tlen(,%ecx,4),%edi ;\
mov 4(%esp),%ecx ;\
xor p1+3*tlen(,%edx,4),%eax ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%edi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%eax ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%ebx ;\
xor p1+3*tlen(,%ecx,4),%esi
// AES (Rijndael) Encryption Subroutine
//-----------------------------------------------------------------------
// void aes_encrypt(const aes_context *cx, const unsigned char in_blk[],
//                  unsigned char out_blk[])
// cdecl, i386.  Encrypts one 16-byte block from in_blk into out_blk using
// the expanded encryption schedule in cx.  Preserves %ebx, %esi, %edi,
// %ebp; clobbers %eax, %ecx, %edx and the arithmetic flags (per the
// file-header contract).
//-----------------------------------------------------------------------
.text
.align ALIGN32BYTES
aes_encrypt:
push %ebp
mov ctx(%esp),%ebp // pointer to context
mov in_blk(%esp),%ecx
push %ebx
push %esi
push %edi
mov nrnd(%ebp),%edx // number of rounds
lea ekey+16(%ebp),%ebp // key pointer
// input four columns and xor in first round key
mov (%ecx),%eax
mov 4(%ecx),%ebx
mov 8(%ecx),%esi
mov 12(%ecx),%edi
xor -16(%ebp),%eax
xor -12(%ebp),%ebx
xor -8(%ebp),%esi
xor -4(%ebp),%edi
sub $8,%esp // space for register saves on stack
// dispatch on round count: fall through for 14 rounds (256-bit key),
// enter at aes_13 for 12 rounds (192-bit), at aes_15 for 10 (128-bit);
// %ebp is advanced so the p2 offsets below line up for each entry point
sub $10,%edx
je aes_15
add $32,%ebp
sub $2,%edx
je aes_13
add $32,%ebp
fwd_rnd(aes_ft_tab,-64) // 14 rounds for 256-bit key
fwd_rnd(aes_ft_tab,-48)
aes_13: fwd_rnd(aes_ft_tab,-32) // 12 rounds for 192-bit key
fwd_rnd(aes_ft_tab,-16)
aes_15: fwd_rnd(aes_ft_tab,0) // 10 rounds for 128-bit key
fwd_rnd(aes_ft_tab,16)
fwd_rnd(aes_ft_tab,32)
fwd_rnd(aes_ft_tab,48)
fwd_rnd(aes_ft_tab,64)
fwd_rnd(aes_ft_tab,80)
fwd_rnd(aes_ft_tab,96)
fwd_rnd(aes_ft_tab,112)
fwd_rnd(aes_ft_tab,128)
fwd_rnd(aes_fl_tab,144) // last round uses a different table
// move final values to the output array.
mov out_blk+20(%esp),%ebp // +20 = 3 pushes + 8-byte scratch still on stack
add $8,%esp
mov %eax,(%ebp)
mov %ebx,4(%ebp)
mov %esi,8(%ebp)
mov %edi,12(%ebp)
pop %edi
pop %esi
pop %ebx
pop %ebp
ret
// AES (Rijndael) Decryption Subroutine
//-----------------------------------------------------------------------
// void aes_decrypt(const aes_context *cx, const unsigned char in_blk[],
//                  unsigned char out_blk[])
// cdecl, i386.  Decrypts one 16-byte block from in_blk into out_blk using
// the decryption schedule (dkey) in cx.  Structurally identical to
// aes_encrypt but driven by inv_rnd and the inverse tables.  Preserves
// %ebx, %esi, %edi, %ebp; clobbers %eax, %ecx, %edx and flags.
//-----------------------------------------------------------------------
.align ALIGN32BYTES
aes_decrypt:
push %ebp
mov ctx(%esp),%ebp // pointer to context
mov in_blk(%esp),%ecx
push %ebx
push %esi
push %edi
mov nrnd(%ebp),%edx // number of rounds
lea dkey+16(%ebp),%ebp // key pointer
// input four columns and xor in first round key
mov (%ecx),%eax
mov 4(%ecx),%ebx
mov 8(%ecx),%esi
mov 12(%ecx),%edi
xor -16(%ebp),%eax
xor -12(%ebp),%ebx
xor -8(%ebp),%esi
xor -4(%ebp),%edi
sub $8,%esp // space for register saves on stack
// dispatch on round count, exactly as in aes_encrypt
sub $10,%edx
je aes_25
add $32,%ebp
sub $2,%edx
je aes_23
add $32,%ebp
inv_rnd(aes_it_tab,-64) // 14 rounds for 256-bit key
inv_rnd(aes_it_tab,-48)
aes_23: inv_rnd(aes_it_tab,-32) // 12 rounds for 192-bit key
inv_rnd(aes_it_tab,-16)
aes_25: inv_rnd(aes_it_tab,0) // 10 rounds for 128-bit key
inv_rnd(aes_it_tab,16)
inv_rnd(aes_it_tab,32)
inv_rnd(aes_it_tab,48)
inv_rnd(aes_it_tab,64)
inv_rnd(aes_it_tab,80)
inv_rnd(aes_it_tab,96)
inv_rnd(aes_it_tab,112)
inv_rnd(aes_it_tab,128)
inv_rnd(aes_il_tab,144) // last round uses a different table
// move final values to the output array.
mov out_blk+20(%esp),%ebp // +20 = 3 pushes + 8-byte scratch still on stack
add $8,%esp
mov %eax,(%ebp)
mov %ebx,4(%ebp)
mov %esi,8(%ebp)
mov %edi,12(%ebp)
pop %edi
pop %esi
pop %ebx
pop %ebp
ret
// AES (Rijndael) Key Schedule Subroutine
// input/output parameters (aes_set_key pushes %ebp after pushfl, hence +12)
#define aes_cx 12 // AES context
#define in_key 16 // key input array address
#define key_ln 20 // key length, bytes (16,24,32) or bits (128,192,256)
#define ed_flg 24 // 0=create both encr/decr keys, 1=create encr key only
// offsets for locals (relative to the %ebp frame set up in aes_set_key)
#define cnt -4
#define kpf -8
#define slen 8
// This macro performs a column mixing operation on an input 32-bit
// word to give a 32-bit result. It uses each of the 4 bytes in
// the input column to index 4 different tables of 256 32-bit words
// that are xored together to form the output value.
// In: %ebx = input column (restored by the full rotate). Out: %eax.
// Clobbers %ecx.  No comments inside the macro bodies below: they would
// break the backslash line continuations.
#define mix_col(p1) \
movzbl %bl,%ecx ;\
mov p1(,%ecx,4),%eax ;\
movzbl %bh,%ecx ;\
ror $16,%ebx ;\
xor p1+tlen(,%ecx,4),%eax ;\
movzbl %bl,%ecx ;\
xor p1+2*tlen(,%ecx,4),%eax ;\
movzbl %bh,%ecx ;\
xor p1+3*tlen(,%ecx,4),%eax
// Key Schedule Macros
// ksc4/ksc6/ksc8 expand one key-schedule iteration for 4-, 6- and 8-word
// (128/192/256-bit) keys.  p1 is the iteration number: it selects the
// round constant (aes_rcon_tab) and the destination offset from %edi.
// Live registers across iterations: %esi,%ebp,%edx,%ebx = trailing words
// of the previous expansion (loaded by aes_set_key before the first call).
#define ksc4(p1) \
rol $24,%ebx ;\
mix_col(aes_fl_tab) ;\
ror $8,%ebx ;\
xor 4*p1+aes_rcon_tab,%eax ;\
xor %eax,%esi ;\
xor %esi,%ebp ;\
mov %esi,16*p1(%edi) ;\
mov %ebp,16*p1+4(%edi) ;\
xor %ebp,%edx ;\
xor %edx,%ebx ;\
mov %edx,16*p1+8(%edi) ;\
mov %ebx,16*p1+12(%edi)
#define ksc6(p1) \
rol $24,%ebx ;\
mix_col(aes_fl_tab) ;\
ror $8,%ebx ;\
xor 4*p1+aes_rcon_tab,%eax ;\
xor 24*p1-24(%edi),%eax ;\
mov %eax,24*p1(%edi) ;\
xor 24*p1-20(%edi),%eax ;\
mov %eax,24*p1+4(%edi) ;\
xor %eax,%esi ;\
xor %esi,%ebp ;\
mov %esi,24*p1+8(%edi) ;\
mov %ebp,24*p1+12(%edi) ;\
xor %ebp,%edx ;\
xor %edx,%ebx ;\
mov %edx,24*p1+16(%edi) ;\
mov %ebx,24*p1+20(%edi)
#define ksc8(p1) \
rol $24,%ebx ;\
mix_col(aes_fl_tab) ;\
ror $8,%ebx ;\
xor 4*p1+aes_rcon_tab,%eax ;\
xor 32*p1-32(%edi),%eax ;\
mov %eax,32*p1(%edi) ;\
xor 32*p1-28(%edi),%eax ;\
mov %eax,32*p1+4(%edi) ;\
xor 32*p1-24(%edi),%eax ;\
mov %eax,32*p1+8(%edi) ;\
xor 32*p1-20(%edi),%eax ;\
mov %eax,32*p1+12(%edi) ;\
push %ebx ;\
mov %eax,%ebx ;\
mix_col(aes_fl_tab) ;\
pop %ebx ;\
xor %eax,%esi ;\
xor %esi,%ebp ;\
mov %esi,32*p1+16(%edi) ;\
mov %ebp,32*p1+20(%edi) ;\
xor %ebp,%edx ;\
xor %edx,%ebx ;\
mov %edx,32*p1+24(%edi) ;\
mov %ebx,32*p1+28(%edi)
//-----------------------------------------------------------------------
// void aes_set_key(aes_context *cx, const unsigned char key[],
//                  const int key_len, const int f)
// cdecl, i386.  Expands the user key into cx's encryption schedule and,
// when f == 0, also derives the decryption schedule (reversed round keys
// with InvMixColumns applied to all but the first and last).
// key_len may be given in bytes (16/24/32) or bits (128/192/256); any
// other value is treated as a 128-bit key.  Preserves %ebx/%esi/%edi/%ebp
// and (via pushfl/popfl) the flags, including the direction flag it sets.
//-----------------------------------------------------------------------
.align ALIGN32BYTES
aes_set_key:
pushfl
push %ebp
mov %esp,%ebp
sub $slen,%esp
push %ebx
push %esi
push %edi
mov aes_cx(%ebp),%edx // edx -> AES context
mov key_ln(%ebp),%ecx // key length
// normalize key_len: values >= 128 are in bits, convert to bytes;
// then clamp anything other than 24/32 to 16 bytes
cmpl $128,%ecx
jb aes_30
shr $3,%ecx
aes_30: cmpl $32,%ecx
je aes_32
cmpl $24,%ecx
je aes_32
mov $16,%ecx
aes_32: shr $2,%ecx
mov %ecx,nkey(%edx)
lea 6(%ecx),%eax // 10/12/14 for 4/6/8 32-bit key length
mov %eax,nrnd(%edx)
mov in_key(%ebp),%esi // key input array
lea ekey(%edx),%edi // key position in AES context
cld
push %ebp
mov %ecx,%eax // save key length in eax
rep ; movsl // words in the key schedule
// keep the trailing four words of the copied key in registers for the
// key-schedule macros (%ebp is reused as data until the pop at aes_37)
mov -4(%esi),%ebx // put some values in registers
mov -8(%esi),%edx // to allow faster code
mov -12(%esi),%ebp
mov -16(%esi),%esi
cmpl $4,%eax // jump on key size
je aes_36
cmpl $6,%eax
je aes_35
ksc8(0)
ksc8(1)
ksc8(2)
ksc8(3)
ksc8(4)
ksc8(5)
ksc8(6)
jmp aes_37
aes_35: ksc6(0)
ksc6(1)
ksc6(2)
ksc6(3)
ksc6(4)
ksc6(5)
ksc6(6)
ksc6(7)
jmp aes_37
aes_36: ksc4(0)
ksc4(1)
ksc4(2)
ksc4(3)
ksc4(4)
ksc4(5)
ksc4(6)
ksc4(7)
ksc4(8)
ksc4(9)
aes_37: pop %ebp
mov aes_cx(%ebp),%edx // edx -> AES context
cmpl $0,ed_flg(%ebp)
jne aes_39
// compile decryption key schedule from encryption schedule - reverse
// order and do mix_column operation on round keys except first and last
mov nrnd(%edx),%eax // kt = cx->d_key + nc * cx->Nrnd
shl $2,%eax
lea dkey(%edx,%eax,4),%edi
lea ekey(%edx),%esi // kf = cx->e_key
movsl // copy first round key (unmodified)
movsl
movsl
movsl
sub $32,%edi // step %edi back one round: writes go in reverse order
movl $1,cnt(%ebp)
aes_38: // do mix column on each column of
lodsl // each round key
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
lodsl
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
lodsl
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
lodsl
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
sub $32,%edi
incl cnt(%ebp)
mov cnt(%ebp),%eax
cmp nrnd(%edx),%eax
jb aes_38
movsl // copy last round key (unmodified)
movsl
movsl
movsl
aes_39: pop %edi
pop %esi
pop %ebx
mov %ebp,%esp
pop %ebp
popfl
ret
// finite field multiplies by {02}, {04} and {08}
// GF(2^8) doublings modulo the AES polynomial 0x11b: each bit shifted out
// of the top is folded back in by a conditional xor with a multiple of
// 0x11b.  Evaluated by the assembler at table-generation time only.
#define f2(x) ((x<<1)^(((x>>7)&1)*0x11b))
#define f4(x) ((x<<2)^(((x>>6)&1)*0x11b)^(((x>>6)&2)*0x11b))
#define f8(x) ((x<<3)^(((x>>5)&1)*0x11b)^(((x>>5)&2)*0x11b)^(((x>>5)&4)*0x11b))
// finite field multiplies required in table generation
#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
// These defines generate the forward table entries
// (the four byte-rotations of the MixColumns coefficients 03,01,01,02)
#define u0(x) ((f3(x) << 24) | (x << 16) | (x << 8) | f2(x))
#define u1(x) ((x << 24) | (x << 16) | (f2(x) << 8) | f3(x))
#define u2(x) ((x << 24) | (f2(x) << 16) | (f3(x) << 8) | x)
#define u3(x) ((f2(x) << 24) | (f3(x) << 16) | (x << 8) | x)
// These defines generate the inverse table entries
// (rotations of the InvMixColumns coefficients 0b,0d,09,0e)
#define v0(x) ((fb(x) << 24) | (fd(x) << 16) | (f9(x) << 8) | fe(x))
#define v1(x) ((fd(x) << 24) | (f9(x) << 16) | (fe(x) << 8) | fb(x))
#define v2(x) ((f9(x) << 24) | (fe(x) << 16) | (fb(x) << 8) | fd(x))
#define v3(x) ((fe(x) << 24) | (fb(x) << 16) | (fd(x) << 8) | f9(x))
// These defines generate entries for the last round tables
// (no column mixing: just the substituted byte in one of the four lanes)
#define w0(x) (x)
#define w1(x) (x << 8)
#define w2(x) (x << 16)
#define w3(x) (x << 24)
// macro to generate inverse mix column tables (needed for the key schedule)
#define im_data0(p1) \
.long p1(0x00),p1(0x01),p1(0x02),p1(0x03),p1(0x04),p1(0x05),p1(0x06),p1(0x07) ;\
.long p1(0x08),p1(0x09),p1(0x0a),p1(0x0b),p1(0x0c),p1(0x0d),p1(0x0e),p1(0x0f) ;\
.long p1(0x10),p1(0x11),p1(0x12),p1(0x13),p1(0x14),p1(0x15),p1(0x16),p1(0x17) ;\
.long p1(0x18),p1(0x19),p1(0x1a),p1(0x1b),p1(0x1c),p1(0x1d),p1(0x1e),p1(0x1f)
#define im_data1(p1) \
.long p1(0x20),p1(0x21),p1(0x22),p1(0x23),p1(0x24),p1(0x25),p1(0x26),p1(0x27) ;\
.long p1(0x28),p1(0x29),p1(0x2a),p1(0x2b),p1(0x2c),p1(0x2d),p1(0x2e),p1(0x2f) ;\
.long p1(0x30),p1(0x31),p1(0x32),p1(0x33),p1(0x34),p1(0x35),p1(0x36),p1(0x37) ;\
.long p1(0x38),p1(0x39),p1(0x3a),p1(0x3b),p1(0x3c),p1(0x3d),p1(0x3e),p1(0x3f)
#define im_data2(p1) \
.long p1(0x40),p1(0x41),p1(0x42),p1(0x43),p1(0x44),p1(0x45),p1(0x46),p1(0x47) ;\
.long p1(0x48),p1(0x49),p1(0x4a),p1(0x4b),p1(0x4c),p1(0x4d),p1(0x4e),p1(0x4f) ;\
.long p1(0x50),p1(0x51),p1(0x52),p1(0x53),p1(0x54),p1(0x55),p1(0x56),p1(0x57) ;\
.long p1(0x58),p1(0x59),p1(0x5a),p1(0x5b),p1(0x5c),p1(0x5d),p1(0x5e),p1(0x5f)
#define im_data3(p1) \
.long p1(0x60),p1(0x61),p1(0x62),p1(0x63),p1(0x64),p1(0x65),p1(0x66),p1(0x67) ;\
.long p1(0x68),p1(0x69),p1(0x6a),p1(0x6b),p1(0x6c),p1(0x6d),p1(0x6e),p1(0x6f) ;\
.long p1(0x70),p1(0x71),p1(0x72),p1(0x73),p1(0x74),p1(0x75),p1(0x76),p1(0x77) ;\
.long p1(0x78),p1(0x79),p1(0x7a),p1(0x7b),p1(0x7c),p1(0x7d),p1(0x7e),p1(0x7f)
#define im_data4(p1) \
.long p1(0x80),p1(0x81),p1(0x82),p1(0x83),p1(0x84),p1(0x85),p1(0x86),p1(0x87) ;\
.long p1(0x88),p1(0x89),p1(0x8a),p1(0x8b),p1(0x8c),p1(0x8d),p1(0x8e),p1(0x8f) ;\
.long p1(0x90),p1(0x91),p1(0x92),p1(0x93),p1(0x94),p1(0x95),p1(0x96),p1(0x97) ;\
.long p1(0x98),p1(0x99),p1(0x9a),p1(0x9b),p1(0x9c),p1(0x9d),p1(0x9e),p1(0x9f)
#define im_data5(p1) \
.long p1(0xa0),p1(0xa1),p1(0xa2),p1(0xa3),p1(0xa4),p1(0xa5),p1(0xa6),p1(0xa7) ;\
.long p1(0xa8),p1(0xa9),p1(0xaa),p1(0xab),p1(0xac),p1(0xad),p1(0xae),p1(0xaf) ;\
.long p1(0xb0),p1(0xb1),p1(0xb2),p1(0xb3),p1(0xb4),p1(0xb5),p1(0xb6),p1(0xb7) ;\
.long p1(0xb8),p1(0xb9),p1(0xba),p1(0xbb),p1(0xbc),p1(0xbd),p1(0xbe),p1(0xbf)
#define im_data6(p1) \
.long p1(0xc0),p1(0xc1),p1(0xc2),p1(0xc3),p1(0xc4),p1(0xc5),p1(0xc6),p1(0xc7) ;\
.long p1(0xc8),p1(0xc9),p1(0xca),p1(0xcb),p1(0xcc),p1(0xcd),p1(0xce),p1(0xcf) ;\
.long p1(0xd0),p1(0xd1),p1(0xd2),p1(0xd3),p1(0xd4),p1(0xd5),p1(0xd6),p1(0xd7) ;\
.long p1(0xd8),p1(0xd9),p1(0xda),p1(0xdb),p1(0xdc),p1(0xdd),p1(0xde),p1(0xdf)
#define im_data7(p1) \
.long p1(0xe0),p1(0xe1),p1(0xe2),p1(0xe3),p1(0xe4),p1(0xe5),p1(0xe6),p1(0xe7) ;\
.long p1(0xe8),p1(0xe9),p1(0xea),p1(0xeb),p1(0xec),p1(0xed),p1(0xee),p1(0xef) ;\
.long p1(0xf0),p1(0xf1),p1(0xf2),p1(0xf3),p1(0xf4),p1(0xf5),p1(0xf6),p1(0xf7) ;\
.long p1(0xf8),p1(0xf9),p1(0xfa),p1(0xfb),p1(0xfc),p1(0xfd),p1(0xfe),p1(0xff)
// S-box data - 256 entries
#define sb_data0(p1) \
.long p1(0x63),p1(0x7c),p1(0x77),p1(0x7b),p1(0xf2),p1(0x6b),p1(0x6f),p1(0xc5) ;\
.long p1(0x30),p1(0x01),p1(0x67),p1(0x2b),p1(0xfe),p1(0xd7),p1(0xab),p1(0x76) ;\
.long p1(0xca),p1(0x82),p1(0xc9),p1(0x7d),p1(0xfa),p1(0x59),p1(0x47),p1(0xf0) ;\
.long p1(0xad),p1(0xd4),p1(0xa2),p1(0xaf),p1(0x9c),p1(0xa4),p1(0x72),p1(0xc0)
#define sb_data1(p1) \
.long p1(0xb7),p1(0xfd),p1(0x93),p1(0x26),p1(0x36),p1(0x3f),p1(0xf7),p1(0xcc) ;\
.long p1(0x34),p1(0xa5),p1(0xe5),p1(0xf1),p1(0x71),p1(0xd8),p1(0x31),p1(0x15) ;\
.long p1(0x04),p1(0xc7),p1(0x23),p1(0xc3),p1(0x18),p1(0x96),p1(0x05),p1(0x9a) ;\
.long p1(0x07),p1(0x12),p1(0x80),p1(0xe2),p1(0xeb),p1(0x27),p1(0xb2),p1(0x75)
#define sb_data2(p1) \
.long p1(0x09),p1(0x83),p1(0x2c),p1(0x1a),p1(0x1b),p1(0x6e),p1(0x5a),p1(0xa0) ;\
.long p1(0x52),p1(0x3b),p1(0xd6),p1(0xb3),p1(0x29),p1(0xe3),p1(0x2f),p1(0x84) ;\
.long p1(0x53),p1(0xd1),p1(0x00),p1(0xed),p1(0x20),p1(0xfc),p1(0xb1),p1(0x5b) ;\
.long p1(0x6a),p1(0xcb),p1(0xbe),p1(0x39),p1(0x4a),p1(0x4c),p1(0x58),p1(0xcf)
#define sb_data3(p1) \
.long p1(0xd0),p1(0xef),p1(0xaa),p1(0xfb),p1(0x43),p1(0x4d),p1(0x33),p1(0x85) ;\
.long p1(0x45),p1(0xf9),p1(0x02),p1(0x7f),p1(0x50),p1(0x3c),p1(0x9f),p1(0xa8) ;\
.long p1(0x51),p1(0xa3),p1(0x40),p1(0x8f),p1(0x92),p1(0x9d),p1(0x38),p1(0xf5) ;\
.long p1(0xbc),p1(0xb6),p1(0xda),p1(0x21),p1(0x10),p1(0xff),p1(0xf3),p1(0xd2)
#define sb_data4(p1) \
.long p1(0xcd),p1(0x0c),p1(0x13),p1(0xec),p1(0x5f),p1(0x97),p1(0x44),p1(0x17) ;\
.long p1(0xc4),p1(0xa7),p1(0x7e),p1(0x3d),p1(0x64),p1(0x5d),p1(0x19),p1(0x73) ;\
.long p1(0x60),p1(0x81),p1(0x4f),p1(0xdc),p1(0x22),p1(0x2a),p1(0x90),p1(0x88) ;\
.long p1(0x46),p1(0xee),p1(0xb8),p1(0x14),p1(0xde),p1(0x5e),p1(0x0b),p1(0xdb)
#define sb_data5(p1) \
.long p1(0xe0),p1(0x32),p1(0x3a),p1(0x0a),p1(0x49),p1(0x06),p1(0x24),p1(0x5c) ;\
.long p1(0xc2),p1(0xd3),p1(0xac),p1(0x62),p1(0x91),p1(0x95),p1(0xe4),p1(0x79) ;\
.long p1(0xe7),p1(0xc8),p1(0x37),p1(0x6d),p1(0x8d),p1(0xd5),p1(0x4e),p1(0xa9) ;\
.long p1(0x6c),p1(0x56),p1(0xf4),p1(0xea),p1(0x65),p1(0x7a),p1(0xae),p1(0x08)
#define sb_data6(p1) \
.long p1(0xba),p1(0x78),p1(0x25),p1(0x2e),p1(0x1c),p1(0xa6),p1(0xb4),p1(0xc6) ;\
.long p1(0xe8),p1(0xdd),p1(0x74),p1(0x1f),p1(0x4b),p1(0xbd),p1(0x8b),p1(0x8a) ;\
.long p1(0x70),p1(0x3e),p1(0xb5),p1(0x66),p1(0x48),p1(0x03),p1(0xf6),p1(0x0e) ;\
.long p1(0x61),p1(0x35),p1(0x57),p1(0xb9),p1(0x86),p1(0xc1),p1(0x1d),p1(0x9e)
#define sb_data7(p1) \
.long p1(0xe1),p1(0xf8),p1(0x98),p1(0x11),p1(0x69),p1(0xd9),p1(0x8e),p1(0x94) ;\
.long p1(0x9b),p1(0x1e),p1(0x87),p1(0xe9),p1(0xce),p1(0x55),p1(0x28),p1(0xdf) ;\
.long p1(0x8c),p1(0xa1),p1(0x89),p1(0x0d),p1(0xbf),p1(0xe6),p1(0x42),p1(0x68) ;\
.long p1(0x41),p1(0x99),p1(0x2d),p1(0x0f),p1(0xb0),p1(0x54),p1(0xbb),p1(0x16)
// Inverse S-box data - 256 entries
#define ib_data0(p1) \
.long p1(0x52),p1(0x09),p1(0x6a),p1(0xd5),p1(0x30),p1(0x36),p1(0xa5),p1(0x38) ;\
.long p1(0xbf),p1(0x40),p1(0xa3),p1(0x9e),p1(0x81),p1(0xf3),p1(0xd7),p1(0xfb) ;\
.long p1(0x7c),p1(0xe3),p1(0x39),p1(0x82),p1(0x9b),p1(0x2f),p1(0xff),p1(0x87) ;\
.long p1(0x34),p1(0x8e),p1(0x43),p1(0x44),p1(0xc4),p1(0xde),p1(0xe9),p1(0xcb)
#define ib_data1(p1) \
.long p1(0x54),p1(0x7b),p1(0x94),p1(0x32),p1(0xa6),p1(0xc2),p1(0x23),p1(0x3d) ;\
.long p1(0xee),p1(0x4c),p1(0x95),p1(0x0b),p1(0x42),p1(0xfa),p1(0xc3),p1(0x4e) ;\
.long p1(0x08),p1(0x2e),p1(0xa1),p1(0x66),p1(0x28),p1(0xd9),p1(0x24),p1(0xb2) ;\
.long p1(0x76),p1(0x5b),p1(0xa2),p1(0x49),p1(0x6d),p1(0x8b),p1(0xd1),p1(0x25)
#define ib_data2(p1) \
.long p1(0x72),p1(0xf8),p1(0xf6),p1(0x64),p1(0x86),p1(0x68),p1(0x98),p1(0x16) ;\
.long p1(0xd4),p1(0xa4),p1(0x5c),p1(0xcc),p1(0x5d),p1(0x65),p1(0xb6),p1(0x92) ;\
.long p1(0x6c),p1(0x70),p1(0x48),p1(0x50),p1(0xfd),p1(0xed),p1(0xb9),p1(0xda) ;\
.long p1(0x5e),p1(0x15),p1(0x46),p1(0x57),p1(0xa7),p1(0x8d),p1(0x9d),p1(0x84)
#define ib_data3(p1) \
.long p1(0x90),p1(0xd8),p1(0xab),p1(0x00),p1(0x8c),p1(0xbc),p1(0xd3),p1(0x0a) ;\
.long p1(0xf7),p1(0xe4),p1(0x58),p1(0x05),p1(0xb8),p1(0xb3),p1(0x45),p1(0x06) ;\
.long p1(0xd0),p1(0x2c),p1(0x1e),p1(0x8f),p1(0xca),p1(0x3f),p1(0x0f),p1(0x02) ;\
.long p1(0xc1),p1(0xaf),p1(0xbd),p1(0x03),p1(0x01),p1(0x13),p1(0x8a),p1(0x6b)
#define ib_data4(p1) \
.long p1(0x3a),p1(0x91),p1(0x11),p1(0x41),p1(0x4f),p1(0x67),p1(0xdc),p1(0xea) ;\
.long p1(0x97),p1(0xf2),p1(0xcf),p1(0xce),p1(0xf0),p1(0xb4),p1(0xe6),p1(0x73) ;\
.long p1(0x96),p1(0xac),p1(0x74),p1(0x22),p1(0xe7),p1(0xad),p1(0x35),p1(0x85) ;\
.long p1(0xe2),p1(0xf9),p1(0x37),p1(0xe8),p1(0x1c),p1(0x75),p1(0xdf),p1(0x6e)
#define ib_data5(p1) \
.long p1(0x47),p1(0xf1),p1(0x1a),p1(0x71),p1(0x1d),p1(0x29),p1(0xc5),p1(0x89) ;\
.long p1(0x6f),p1(0xb7),p1(0x62),p1(0x0e),p1(0xaa),p1(0x18),p1(0xbe),p1(0x1b) ;\
.long p1(0xfc),p1(0x56),p1(0x3e),p1(0x4b),p1(0xc6),p1(0xd2),p1(0x79),p1(0x20) ;\
.long p1(0x9a),p1(0xdb),p1(0xc0),p1(0xfe),p1(0x78),p1(0xcd),p1(0x5a),p1(0xf4)
#define ib_data6(p1) \
.long p1(0x1f),p1(0xdd),p1(0xa8),p1(0x33),p1(0x88),p1(0x07),p1(0xc7),p1(0x31) ;\
.long p1(0xb1),p1(0x12),p1(0x10),p1(0x59),p1(0x27),p1(0x80),p1(0xec),p1(0x5f) ;\
.long p1(0x60),p1(0x51),p1(0x7f),p1(0xa9),p1(0x19),p1(0xb5),p1(0x4a),p1(0x0d) ;\
.long p1(0x2d),p1(0xe5),p1(0x7a),p1(0x9f),p1(0x93),p1(0xc9),p1(0x9c),p1(0xef)
#define ib_data7(p1) \
.long p1(0xa0),p1(0xe0),p1(0x3b),p1(0x4d),p1(0xae),p1(0x2a),p1(0xf5),p1(0xb0) ;\
.long p1(0xc8),p1(0xeb),p1(0xbb),p1(0x3c),p1(0x83),p1(0x53),p1(0x99),p1(0x61) ;\
.long p1(0x17),p1(0x2b),p1(0x04),p1(0x7e),p1(0xba),p1(0x77),p1(0xd6),p1(0x26) ;\
.long p1(0xe1),p1(0x69),p1(0x14),p1(0x63),p1(0x55),p1(0x21),p1(0x0c),p1(0x7d)
// The rcon_table (needed for the key schedule)
//
// Here is original Dr Brian Gladman's source code:
// _rcon_tab:
// %assign x 1
// %rep 29
// dd x
// %assign x f2(x)
// %endrep
//
// Here is precomputed output (it's more portable this way):
// 29 AES round constants: successive GF(2^8) doublings (f2) of 0x01.
.align ALIGN32BYTES
aes_rcon_tab:
.long 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
.long 0x1b,0x36,0x6c,0xd8,0xab,0x4d,0x9a,0x2f
.long 0x5e,0xbc,0x63,0xc6,0x97,0x35,0x6a,0xd4
.long 0xb3,0x7d,0xfa,0xef,0xc5
// The forward xor tables
// aes_ft_tab: four contiguous 1 KB tables (S-box combined with MixColumns,
// one byte-rotation per table) used by fwd_rnd for the inner rounds.
.align ALIGN32BYTES
aes_ft_tab:
sb_data0(u0)
sb_data1(u0)
sb_data2(u0)
sb_data3(u0)
sb_data4(u0)
sb_data5(u0)
sb_data6(u0)
sb_data7(u0)
sb_data0(u1)
sb_data1(u1)
sb_data2(u1)
sb_data3(u1)
sb_data4(u1)
sb_data5(u1)
sb_data6(u1)
sb_data7(u1)
sb_data0(u2)
sb_data1(u2)
sb_data2(u2)
sb_data3(u2)
sb_data4(u2)
sb_data5(u2)
sb_data6(u2)
sb_data7(u2)
sb_data0(u3)
sb_data1(u3)
sb_data2(u3)
sb_data3(u3)
sb_data4(u3)
sb_data5(u3)
sb_data6(u3)
sb_data7(u3)
// aes_fl_tab: four 1 KB tables of plain S-box values (w0..w3 place the
// byte in each of the four lanes) used for the final encryption round
// and by the key-schedule macros.
.align ALIGN32BYTES
aes_fl_tab:
sb_data0(w0)
sb_data1(w0)
sb_data2(w0)
sb_data3(w0)
sb_data4(w0)
sb_data5(w0)
sb_data6(w0)
sb_data7(w0)
sb_data0(w1)
sb_data1(w1)
sb_data2(w1)
sb_data3(w1)
sb_data4(w1)
sb_data5(w1)
sb_data6(w1)
sb_data7(w1)
sb_data0(w2)
sb_data1(w2)
sb_data2(w2)
sb_data3(w2)
sb_data4(w2)
sb_data5(w2)
sb_data6(w2)
sb_data7(w2)
sb_data0(w3)
sb_data1(w3)
sb_data2(w3)
sb_data3(w3)
sb_data4(w3)
sb_data5(w3)
sb_data6(w3)
sb_data7(w3)
// The inverse xor tables
// aes_it_tab: inverse S-box combined with InvMixColumns (v0..v3 rotations),
// used by inv_rnd for the inner decryption rounds.
.align ALIGN32BYTES
aes_it_tab:
ib_data0(v0)
ib_data1(v0)
ib_data2(v0)
ib_data3(v0)
ib_data4(v0)
ib_data5(v0)
ib_data6(v0)
ib_data7(v0)
ib_data0(v1)
ib_data1(v1)
ib_data2(v1)
ib_data3(v1)
ib_data4(v1)
ib_data5(v1)
ib_data6(v1)
ib_data7(v1)
ib_data0(v2)
ib_data1(v2)
ib_data2(v2)
ib_data3(v2)
ib_data4(v2)
ib_data5(v2)
ib_data6(v2)
ib_data7(v2)
ib_data0(v3)
ib_data1(v3)
ib_data2(v3)
ib_data3(v3)
ib_data4(v3)
ib_data5(v3)
ib_data6(v3)
ib_data7(v3)
// aes_il_tab: plain inverse S-box values (w0..w3 lane placements) for the
// final decryption round.
.align ALIGN32BYTES
aes_il_tab:
ib_data0(w0)
ib_data1(w0)
ib_data2(w0)
ib_data3(w0)
ib_data4(w0)
ib_data5(w0)
ib_data6(w0)
ib_data7(w0)
ib_data0(w1)
ib_data1(w1)
ib_data2(w1)
ib_data3(w1)
ib_data4(w1)
ib_data5(w1)
ib_data6(w1)
ib_data7(w1)
ib_data0(w2)
ib_data1(w2)
ib_data2(w2)
ib_data3(w2)
ib_data4(w2)
ib_data5(w2)
ib_data6(w2)
ib_data7(w2)
ib_data0(w3)
ib_data1(w3)
ib_data2(w3)
ib_data3(w3)
ib_data4(w3)
ib_data5(w3)
ib_data6(w3)
ib_data7(w3)
// The inverse mix column tables
// aes_im_tab: InvMixColumns without the S-box (identity input, v0..v3
// rotations) -- used only by aes_set_key's mix_col when deriving the
// decryption key schedule.
.align ALIGN32BYTES
aes_im_tab:
im_data0(v0)
im_data1(v0)
im_data2(v0)
im_data3(v0)
im_data4(v0)
im_data5(v0)
im_data6(v0)
im_data7(v0)
im_data0(v1)
im_data1(v1)
im_data2(v1)
im_data3(v1)
im_data4(v1)
im_data5(v1)
im_data6(v1)
im_data7(v1)
im_data0(v2)
im_data1(v2)
im_data2(v2)
im_data3(v2)
im_data4(v2)
im_data5(v2)
im_data6(v2)
im_data7(v2)
im_data0(v3)
im_data1(v3)
im_data2(v3)
im_data3(v3)
im_data4(v3)
im_data5(v3)
im_data6(v3)
im_data7(v3)
|
/* match.s -- Pentium-optimized version of longest_match()
* Written for zlib 1.1.2
* Copyright (C) 1998 Brian Raiter <breadbox@muppetlabs.com>
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License.
*/
/* By default the exported symbols carry a leading underscore (a.out-style
 * C name mangling); define NO_UNDERLINE for plain ELF-style names.  Both
 * variants are namespaced with an ipcomp_ prefix for this kernel build. */
#ifndef NO_UNDERLINE
#define match_init _ipcomp_match_init
#define longest_match _ipcomp_longest_match
#else
#define match_init ipcomp_match_init
#define longest_match ipcomp_longest_match
#endif
/* zlib deflate constants, mirroring deflate.h */
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
#define wmask 0 /* local copy of s->wmask */
#define window 4 /* local copy of s->window */
#define windowbestlen 8 /* s->window + bestlen */
#define chainlenscanend 12 /* high word: current chain len */
/* low word: last bytes sought */
#define scanstart 16 /* first two bytes of string */
#define scanalign 20 /* dword-misalignment of string */
#define nicematch 24 /* a good enough match size */
#define bestlen 28 /* size of best match so far */
#define scan 32 /* ptr to string wanting match */
#define LocalVarsSize (36)
/* saved ebx 36 */
/* saved edi 40 */
/* saved esi 44 */
/* saved ebp 48 */
/* return address 52 */
#define deflatestate 56 /* the function arguments */
#define curmatch 60
/* Offsets for fields in the deflate_state structure. These numbers
 * are calculated from the definition of deflate_state, with the
 * assumption that the compiler will dword-align the fields. (Thus,
 * changing the definition of deflate_state could easily cause this
 * program to crash horribly, without so much as a warning at
 * compile time. Sigh.)
 * NOTE(review): these hard-coded offsets must be re-verified against the
 * deflate_state definition actually compiled into this tree.
 */
#define dsWSize 36
#define dsWMask 44
#define dsWindow 48
#define dsPrev 56
#define dsMatchLen 88
#define dsPrevMatch 92
#define dsStrStart 100
#define dsMatchStart 104
#define dsLookahead 108
#define dsPrevLen 112
#define dsMaxChainLen 116
#define dsGoodMatch 132
#define dsNiceMatch 136
.file "match.S"
.globl match_init, longest_match
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
longest_match:
/* Save registers that the compiler may be using, and adjust %esp to */
/* make room for our stack frame. */
pushl %ebp
pushl %edi
pushl %esi
pushl %ebx
subl $LocalVarsSize, %esp
/* Retrieve the function arguments. %ecx will hold cur_match */
/* throughout the entire function. %edx will hold the pointer to the */
/* deflate_state structure during the function's setup (before */
/* entering the main loop). */
movl deflatestate(%esp), %edx
movl curmatch(%esp), %ecx
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch(%edx), %eax
movl dsLookahead(%edx), %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, nicematch(%esp)
/* register Bytef *scan = s->window + s->strstart; */
movl dsWindow(%edx), %esi
movl %esi, window(%esp)
movl dsStrStart(%edx), %ebp
lea (%esi,%ebp), %edi
movl %edi, scan(%esp)
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
movl %edi, %eax
negl %eax
andl $3, %eax
movl %eax, scanalign(%esp)
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/* s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize(%edx), %eax
subl $MIN_LOOKAHEAD, %eax
subl %eax, %ebp
jg LimitPositive
xorl %ebp, %ebp
LimitPositive:
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/* chain_length >>= 2; */
/* } */
movl dsPrevLen(%edx), %eax
movl dsGoodMatch(%edx), %ebx
cmpl %ebx, %eax
movl dsMaxChainLen(%edx), %ebx
jl LastMatchGood
shrl $2, %ebx
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the scanend */
/* scanend value, which it will always accompany. */
decl %ebx
shll $16, %ebx
/* int best_len = s->prev_length; */
movl dsPrevLen(%edx), %eax
movl %eax, bestlen(%esp)
/* Store the sum of s->window + best_len in %esi locally, and in %esi. */
addl %eax, %esi
movl %esi, windowbestlen(%esp)
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
movw (%edi), %bx
movw %bx, scanstart(%esp)
movw -1(%edi,%eax), %bx
movl %ebx, chainlenscanend(%esp)
/* Posf *prev = s->prev; */
/* uInt wmask = s->w_mask; */
movl dsPrev(%edx), %edi
movl dsWMask(%edx), %edx
mov %edx, wmask(%esp)
/* Jump into the main loop. */
jmp LoopEntry
.balign 16
/* do {
* match = s->window + cur_match;
* if (*(ushf*)(match+best_len-1) != scan_end ||
* *(ushf*)match != scan_start) continue;
* [...]
* } while ((cur_match = prev[cur_match & wmask]) > limit
* && --chain_length != 0);
*
* Here is the inner loop of the function. The function will spend the
* majority of its time in this loop, and majority of that time will
* be spent in the first ten instructions.
*
* Within this loop:
* %ebx = chainlenscanend - i.e., ((chainlen << 16) | scanend)
* %ecx = curmatch
* %edx = curmatch & wmask
* %esi = windowbestlen - i.e., (window + bestlen)
* %edi = prev
* %ebp = limit
*
* Two optimization notes on the choice of instructions:
*
* The first instruction uses a 16-bit address, which costs an extra,
* unpairable cycle. This is cheaper than doing a 32-bit access and
* zeroing the high word, due to the 3-cycle misalignment penalty which
* would occur half the time. This also turns out to be cheaper than
* doing two separate 8-bit accesses, as the memory is so rarely in the
* L1 cache.
*
* The window buffer, however, apparently spends a lot of time in the
* cache, and so it is faster to retrieve the word at the end of the
* match string with two 8-bit loads. The instructions that test the
* word at the beginning of the match string, however, are executed
* much less frequently, and there it was cheaper to use 16-bit
* instructions, which avoided the necessity of saving off and
* subsequently reloading one of the other registers.
*/
LookupLoop:
/* 1 U & V */
movw (%edi,%edx,2), %cx /* 2 U pipe */
movl wmask(%esp), %edx /* 2 V pipe */
cmpl %ebp, %ecx /* 3 U pipe */
jbe LeaveNow /* 3 V pipe */
subl $0x00010000, %ebx /* 4 U pipe */
js LeaveNow /* 4 V pipe */
LoopEntry: movb -1(%esi,%ecx), %al /* 5 U pipe */
andl %ecx, %edx /* 5 V pipe */
cmpb %bl, %al /* 6 U pipe */
jnz LookupLoop /* 6 V pipe */
movb (%esi,%ecx), %ah
cmpb %bh, %ah
jnz LookupLoop
movl window(%esp), %eax
movw (%eax,%ecx), %ax
cmpw scanstart(%esp), %ax
jnz LookupLoop
/* Store the current value of chainlen. */
movl %ebx, chainlenscanend(%esp)
/* Point %edi to the string under scrutiny, and %esi to the string we */
/* are hoping to match it up with. In actuality, %esi and %edi are */
/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */
/* initialized to -(MAX_MATCH_8 - scanalign). */
movl window(%esp), %esi
movl scan(%esp), %edi
addl %ecx, %esi
movl scanalign(%esp), %eax
movl $(-MAX_MATCH_8), %edx
lea MAX_MATCH_8(%edi,%eax), %edi
lea MAX_MATCH_8(%esi,%eax), %esi
/* Test the strings for equality, 8 bytes at a time. At the end,
* adjust %edx so that it is offset to the exact byte that mismatched.
*
* We already know at this point that the first three bytes of the
* strings match each other, and they can be safely passed over before
* starting the compare loop. So what this code does is skip over 0-3
* bytes, as much as necessary in order to dword-align the %edi
* pointer. (%esi will still be misaligned three times out of four.)
*
* It should be confessed that this loop usually does not represent
* much of the total running time. Replacing it with a more
* straightforward "rep cmpsb" would not drastically degrade
* performance.
*/
LoopCmps:
movl (%esi,%edx), %eax
movl (%edi,%edx), %ebx
xorl %ebx, %eax
jnz LeaveLoopCmps
movl 4(%esi,%edx), %eax
movl 4(%edi,%edx), %ebx
xorl %ebx, %eax
jnz LeaveLoopCmps4
addl $8, %edx
jnz LoopCmps
jmp LenMaximum
LeaveLoopCmps4: addl $4, %edx
LeaveLoopCmps: testl $0x0000FFFF, %eax
jnz LenLower
addl $2, %edx
shrl $16, %eax
LenLower: subb $1, %al
adcl $0, %edx
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%edi,%edx), %eax
movl scan(%esp), %edi
subl %edi, %eax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
cmpl %ebx, %eax
jg LongerMatch
movl chainlenscanend(%esp), %ebx
movl windowbestlen(%esp), %esi
movl dsPrev(%edx), %edi
movl wmask(%esp), %edx
andl %ecx, %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch: movl nicematch(%esp), %ebx
movl %eax, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
cmpl %ebx, %eax
jge LeaveNow
movl window(%esp), %esi
addl %eax, %esi
movl %esi, windowbestlen(%esp)
movl chainlenscanend(%esp), %ebx
movw -1(%edi,%eax), %bx
movl dsPrev(%edx), %edi
movl %ebx, chainlenscanend(%esp)
movl wmask(%esp), %edx
andl %ecx, %edx
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum: movl deflatestate(%esp), %edx
movl $MAX_MATCH, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
movl dsLookahead(%edx), %eax
cmpl %eax, %ebx
jg LookaheadRet
movl %ebx, %eax
LookaheadRet:
/* Restore the stack and return from whence we came. */
addl $LocalVarsSize, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
match_init: ret
|
xelerance/Openswan | 9,109 | linux/net/ipsec/match686.S | /* match.s -- Pentium-Pro-optimized version of longest_match()
* Written for zlib 1.1.2
* Copyright (C) 1998 Brian Raiter <breadbox@muppetlabs.com>
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License.
*/
#ifndef NO_UNDERLINE
#define match_init _ipcomp_match_init
#define longest_match _ipcomp_longest_match
#else
#define match_init ipcomp_match_init
#define longest_match ipcomp_longest_match
#endif
#define MAX_MATCH (258)
#define MIN_MATCH (3)
#define MIN_LOOKAHEAD (MAX_MATCH + MIN_MATCH + 1)
#define MAX_MATCH_8 ((MAX_MATCH + 7) & ~7)
/* stack frame offsets */
#define chainlenwmask 0 /* high word: current chain len */
/* low word: s->wmask */
#define window 4 /* local copy of s->window */
#define windowbestlen 8 /* s->window + bestlen */
#define scanstart 16 /* first two bytes of string */
#define scanend 12 /* last two bytes of string */
#define scanalign 20 /* dword-misalignment of string */
#define nicematch 24 /* a good enough match size */
#define bestlen 28 /* size of best match so far */
#define scan 32 /* ptr to string wanting match */
#define LocalVarsSize (36)
/* saved ebx 36 */
/* saved edi 40 */
/* saved esi 44 */
/* saved ebp 48 */
/* return address 52 */
#define deflatestate 56 /* the function arguments */
#define curmatch 60
/* Offsets for fields in the deflate_state structure. These numbers
* are calculated from the definition of deflate_state, with the
* assumption that the compiler will dword-align the fields. (Thus,
* changing the definition of deflate_state could easily cause this
* program to crash horribly, without so much as a warning at
* compile time. Sigh.)
*/
#define dsWSize 36
#define dsWMask 44
#define dsWindow 48
#define dsPrev 56
#define dsMatchLen 88
#define dsPrevMatch 92
#define dsStrStart 100
#define dsMatchStart 104
#define dsLookahead 108
#define dsPrevLen 112
#define dsMaxChainLen 116
#define dsGoodMatch 132
#define dsNiceMatch 136
.file "match.S"
.globl match_init, longest_match
.text
/* uInt longest_match(deflate_state *deflatestate, IPos curmatch) */
longest_match:
/* Save registers that the compiler may be using, and adjust %esp to */
/* make room for our stack frame. */
pushl %ebp
pushl %edi
pushl %esi
pushl %ebx
subl $LocalVarsSize, %esp
/* Retrieve the function arguments. %ecx will hold cur_match */
/* throughout the entire function. %edx will hold the pointer to the */
/* deflate_state structure during the function's setup (before */
/* entering the main loop). */
movl deflatestate(%esp), %edx
movl curmatch(%esp), %ecx
/* uInt wmask = s->w_mask; */
/* unsigned chain_length = s->max_chain_length; */
/* if (s->prev_length >= s->good_match) { */
/* chain_length >>= 2; */
/* } */
movl dsPrevLen(%edx), %eax
movl dsGoodMatch(%edx), %ebx
cmpl %ebx, %eax
movl dsWMask(%edx), %eax
movl dsMaxChainLen(%edx), %ebx
jl LastMatchGood
shrl $2, %ebx
LastMatchGood:
/* chainlen is decremented once beforehand so that the function can */
/* use the sign flag instead of the zero flag for the exit test. */
/* It is then shifted into the high word, to make room for the wmask */
/* value, which it will always accompany. */
decl %ebx
shll $16, %ebx
orl %eax, %ebx
movl %ebx, chainlenwmask(%esp)
/* if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; */
movl dsNiceMatch(%edx), %eax
movl dsLookahead(%edx), %ebx
cmpl %eax, %ebx
jl LookaheadLess
movl %eax, %ebx
LookaheadLess: movl %ebx, nicematch(%esp)
/* register Bytef *scan = s->window + s->strstart; */
movl dsWindow(%edx), %esi
movl %esi, window(%esp)
movl dsStrStart(%edx), %ebp
lea (%esi,%ebp), %edi
movl %edi, scan(%esp)
/* Determine how many bytes the scan ptr is off from being */
/* dword-aligned. */
movl %edi, %eax
negl %eax
andl $3, %eax
movl %eax, scanalign(%esp)
/* IPos limit = s->strstart > (IPos)MAX_DIST(s) ? */
/* s->strstart - (IPos)MAX_DIST(s) : NIL; */
movl dsWSize(%edx), %eax
subl $MIN_LOOKAHEAD, %eax
subl %eax, %ebp
jg LimitPositive
xorl %ebp, %ebp
LimitPositive:
/* int best_len = s->prev_length; */
movl dsPrevLen(%edx), %eax
movl %eax, bestlen(%esp)
/* Store the sum of s->window + best_len in %esi locally, and in %esi. */
addl %eax, %esi
movl %esi, windowbestlen(%esp)
/* register ush scan_start = *(ushf*)scan; */
/* register ush scan_end = *(ushf*)(scan+best_len-1); */
/* Posf *prev = s->prev; */
movzwl (%edi), %ebx
movl %ebx, scanstart(%esp)
movzwl -1(%edi,%eax), %ebx
movl %ebx, scanend(%esp)
movl dsPrev(%edx), %edi
/* Jump into the main loop. */
movl chainlenwmask(%esp), %edx
jmp LoopEntry
.balign 16
/* do {
* match = s->window + cur_match;
* if (*(ushf*)(match+best_len-1) != scan_end ||
* *(ushf*)match != scan_start) continue;
* [...]
* } while ((cur_match = prev[cur_match & wmask]) > limit
* && --chain_length != 0);
*
* Here is the inner loop of the function. The function will spend the
* majority of its time in this loop, and majority of that time will
* be spent in the first ten instructions.
*
* Within this loop:
* %ebx = scanend
* %ecx = curmatch
* %edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
* %esi = windowbestlen - i.e., (window + bestlen)
* %edi = prev
* %ebp = limit
*/
LookupLoop:
andl %edx, %ecx
movzwl (%edi,%ecx,2), %ecx
cmpl %ebp, %ecx
jbe LeaveNow
subl $0x00010000, %edx
js LeaveNow
LoopEntry: movzwl -1(%esi,%ecx), %eax
cmpl %ebx, %eax
jnz LookupLoop
movl window(%esp), %eax
movzwl (%eax,%ecx), %eax
cmpl scanstart(%esp), %eax
jnz LookupLoop
/* Store the current value of chainlen. */
movl %edx, chainlenwmask(%esp)
/* Point %edi to the string under scrutiny, and %esi to the string we */
/* are hoping to match it up with. In actuality, %esi and %edi are */
/* both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and %edx is */
/* initialized to -(MAX_MATCH_8 - scanalign). */
movl window(%esp), %esi
movl scan(%esp), %edi
addl %ecx, %esi
movl scanalign(%esp), %eax
movl $(-MAX_MATCH_8), %edx
lea MAX_MATCH_8(%edi,%eax), %edi
lea MAX_MATCH_8(%esi,%eax), %esi
/* Test the strings for equality, 8 bytes at a time. At the end,
* adjust %edx so that it is offset to the exact byte that mismatched.
*
* We already know at this point that the first three bytes of the
* strings match each other, and they can be safely passed over before
* starting the compare loop. So what this code does is skip over 0-3
* bytes, as much as necessary in order to dword-align the %edi
* pointer. (%esi will still be misaligned three times out of four.)
*
* It should be confessed that this loop usually does not represent
* much of the total running time. Replacing it with a more
* straightforward "rep cmpsb" would not drastically degrade
* performance.
*/
LoopCmps:
movl (%esi,%edx), %eax
xorl (%edi,%edx), %eax
jnz LeaveLoopCmps
movl 4(%esi,%edx), %eax
xorl 4(%edi,%edx), %eax
jnz LeaveLoopCmps4
addl $8, %edx
jnz LoopCmps
jmp LenMaximum
LeaveLoopCmps4: addl $4, %edx
LeaveLoopCmps: testl $0x0000FFFF, %eax
jnz LenLower
addl $2, %edx
shrl $16, %eax
LenLower: subb $1, %al
adcl $0, %edx
/* Calculate the length of the match. If it is longer than MAX_MATCH, */
/* then automatically accept it as the best possible match and leave. */
lea (%edi,%edx), %eax
movl scan(%esp), %edi
subl %edi, %eax
cmpl $MAX_MATCH, %eax
jge LenMaximum
/* If the length of the match is not longer than the best match we */
/* have so far, then forget it and return to the lookup loop. */
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
cmpl %ebx, %eax
jg LongerMatch
movl windowbestlen(%esp), %esi
movl dsPrev(%edx), %edi
movl scanend(%esp), %ebx
movl chainlenwmask(%esp), %edx
jmp LookupLoop
/* s->match_start = cur_match; */
/* best_len = len; */
/* if (len >= nice_match) break; */
/* scan_end = *(ushf*)(scan+best_len-1); */
LongerMatch: movl nicematch(%esp), %ebx
movl %eax, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
cmpl %ebx, %eax
jge LeaveNow
movl window(%esp), %esi
addl %eax, %esi
movl %esi, windowbestlen(%esp)
movzwl -1(%edi,%eax), %ebx
movl dsPrev(%edx), %edi
movl %ebx, scanend(%esp)
movl chainlenwmask(%esp), %edx
jmp LookupLoop
/* Accept the current string, with the maximum possible length. */
LenMaximum: movl deflatestate(%esp), %edx
movl $MAX_MATCH, bestlen(%esp)
movl %ecx, dsMatchStart(%edx)
/* if ((uInt)best_len <= s->lookahead) return (uInt)best_len; */
/* return s->lookahead; */
LeaveNow:
movl deflatestate(%esp), %edx
movl bestlen(%esp), %ebx
movl dsLookahead(%edx), %eax
cmpl %eax, %ebx
jg LookaheadRet
movl %ebx, %eax
LookaheadRet:
/* Restore the stack and return from whence we came. */
addl $LocalVarsSize, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
match_init: ret
|
xelerance/Openswan | 24,774 | linux/net/ipsec/aes/aes-i586.S | //
// Copyright (c) 2001, Dr Brian Gladman <brg@gladman.uk.net>, Worcester, UK.
// All rights reserved.
//
// TERMS
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The copyright holder's name must not be used to endorse or promote
// any products derived from this software without his specific prior
// written permission.
//
// This software is provided 'as is' with no express or implied warranties
// of correctness or fitness for purpose.
// Modified by Jari Ruusu, December 24 2001
// - Converted syntax to GNU CPP/assembler syntax
// - C programming interface converted back to "old" API
// - Minor portability cleanups and speed optimizations
// An AES (Rijndael) implementation for the Pentium. This version only
// implements the standard AES block length (128 bits, 16 bytes). This code
// does not preserve the eax, ecx or edx registers or the artihmetic status
// flags. However, the ebx, esi, edi, and ebp registers are preserved across
// calls.
// void aes_set_key(aes_context *cx, const unsigned char key[], const int key_len, const int f)
// void aes_encrypt(const aes_context *cx, const unsigned char in_blk[], unsigned char out_blk[])
// void aes_decrypt(const aes_context *cx, const unsigned char in_blk[], unsigned char out_blk[])
#if defined(USE_UNDERLINE)
# define aes_set_key _aes_set_key
# define aes_encrypt _aes_encrypt
# define aes_decrypt _aes_decrypt
#endif
#if !defined(ALIGN32BYTES)
# define ALIGN32BYTES 32
#endif
.file "aes-i586.S"
.globl aes_set_key
.globl aes_encrypt
.globl aes_decrypt
#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
// offsets to parameters with one register pushed onto stack
#define ctx 8 // AES context structure
#define in_blk 12 // input byte array address parameter
#define out_blk 16 // output byte array address parameter
// offsets in context structure
#define nkey 0 // key length, size 4
#define nrnd 4 // number of rounds, size 4
#define ekey 8 // encryption key schedule base address, size 256
#define dkey 264 // decryption key schedule base address, size 256
// This macro performs a forward encryption cycle. It is entered with
// the first previous round column values in %eax, %ebx, %esi and %edi and
// exits with the final values in the same registers.
#define fwd_rnd(p1,p2) \
mov %ebx,(%esp) ;\
movzbl %al,%edx ;\
mov %eax,%ecx ;\
mov p2(%ebp),%eax ;\
mov %edi,4(%esp) ;\
mov p2+12(%ebp),%edi ;\
xor p1(,%edx,4),%eax ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
mov p2+4(%ebp),%ebx ;\
xor p1+tlen(,%edx,4),%edi ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+3*tlen(,%ecx,4),%ebx ;\
mov %esi,%ecx ;\
mov p1+2*tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%esi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%ebx ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%eax ;\
mov (%esp),%edx ;\
xor p1+3*tlen(,%ecx,4),%edi ;\
movzbl %dl,%ecx ;\
xor p2+8(%ebp),%esi ;\
xor p1(,%ecx,4),%ebx ;\
movzbl %dh,%ecx ;\
shr $16,%edx ;\
xor p1+tlen(,%ecx,4),%eax ;\
movzbl %dl,%ecx ;\
movzbl %dh,%edx ;\
xor p1+2*tlen(,%ecx,4),%edi ;\
mov 4(%esp),%ecx ;\
xor p1+3*tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%edi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%ebx ;\
xor p1+3*tlen(,%ecx,4),%eax
// This macro performs an inverse encryption cycle. It is entered with
// the first previous round column values in %eax, %ebx, %esi and %edi and
// exits with the final values in the same registers.
#define inv_rnd(p1,p2) \
movzbl %al,%edx ;\
mov %ebx,(%esp) ;\
mov %eax,%ecx ;\
mov p2(%ebp),%eax ;\
mov %edi,4(%esp) ;\
mov p2+4(%ebp),%ebx ;\
xor p1(,%edx,4),%eax ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
mov p2+12(%ebp),%edi ;\
xor p1+tlen(,%edx,4),%ebx ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+3*tlen(,%ecx,4),%edi ;\
mov %esi,%ecx ;\
mov p1+2*tlen(,%edx,4),%esi ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%esi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%edi ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%eax ;\
mov (%esp),%edx ;\
xor p1+3*tlen(,%ecx,4),%ebx ;\
movzbl %dl,%ecx ;\
xor p2+8(%ebp),%esi ;\
xor p1(,%ecx,4),%ebx ;\
movzbl %dh,%ecx ;\
shr $16,%edx ;\
xor p1+tlen(,%ecx,4),%esi ;\
movzbl %dl,%ecx ;\
movzbl %dh,%edx ;\
xor p1+2*tlen(,%ecx,4),%edi ;\
mov 4(%esp),%ecx ;\
xor p1+3*tlen(,%edx,4),%eax ;\
movzbl %cl,%edx ;\
xor p1(,%edx,4),%edi ;\
movzbl %ch,%edx ;\
shr $16,%ecx ;\
xor p1+tlen(,%edx,4),%eax ;\
movzbl %cl,%edx ;\
movzbl %ch,%ecx ;\
xor p1+2*tlen(,%edx,4),%ebx ;\
xor p1+3*tlen(,%ecx,4),%esi
// AES (Rijndael) Encryption Subroutine
.text
.align ALIGN32BYTES
aes_encrypt:
push %ebp
mov ctx(%esp),%ebp // pointer to context
mov in_blk(%esp),%ecx
push %ebx
push %esi
push %edi
mov nrnd(%ebp),%edx // number of rounds
lea ekey+16(%ebp),%ebp // key pointer
// input four columns and xor in first round key
mov (%ecx),%eax
mov 4(%ecx),%ebx
mov 8(%ecx),%esi
mov 12(%ecx),%edi
xor -16(%ebp),%eax
xor -12(%ebp),%ebx
xor -8(%ebp),%esi
xor -4(%ebp),%edi
sub $8,%esp // space for register saves on stack
sub $10,%edx
je aes_15
add $32,%ebp
sub $2,%edx
je aes_13
add $32,%ebp
fwd_rnd(aes_ft_tab,-64) // 14 rounds for 256-bit key
fwd_rnd(aes_ft_tab,-48)
aes_13: fwd_rnd(aes_ft_tab,-32) // 12 rounds for 192-bit key
fwd_rnd(aes_ft_tab,-16)
aes_15: fwd_rnd(aes_ft_tab,0) // 10 rounds for 128-bit key
fwd_rnd(aes_ft_tab,16)
fwd_rnd(aes_ft_tab,32)
fwd_rnd(aes_ft_tab,48)
fwd_rnd(aes_ft_tab,64)
fwd_rnd(aes_ft_tab,80)
fwd_rnd(aes_ft_tab,96)
fwd_rnd(aes_ft_tab,112)
fwd_rnd(aes_ft_tab,128)
fwd_rnd(aes_fl_tab,144) // last round uses a different table
// move final values to the output array.
mov out_blk+20(%esp),%ebp
add $8,%esp
mov %eax,(%ebp)
mov %ebx,4(%ebp)
mov %esi,8(%ebp)
mov %edi,12(%ebp)
pop %edi
pop %esi
pop %ebx
pop %ebp
ret
// AES (Rijndael) Decryption Subroutine
.align ALIGN32BYTES
aes_decrypt:
push %ebp
mov ctx(%esp),%ebp // pointer to context
mov in_blk(%esp),%ecx
push %ebx
push %esi
push %edi
mov nrnd(%ebp),%edx // number of rounds
lea dkey+16(%ebp),%ebp // key pointer
// input four columns and xor in first round key
mov (%ecx),%eax
mov 4(%ecx),%ebx
mov 8(%ecx),%esi
mov 12(%ecx),%edi
xor -16(%ebp),%eax
xor -12(%ebp),%ebx
xor -8(%ebp),%esi
xor -4(%ebp),%edi
sub $8,%esp // space for register saves on stack
sub $10,%edx
je aes_25
add $32,%ebp
sub $2,%edx
je aes_23
add $32,%ebp
inv_rnd(aes_it_tab,-64) // 14 rounds for 256-bit key
inv_rnd(aes_it_tab,-48)
aes_23: inv_rnd(aes_it_tab,-32) // 12 rounds for 192-bit key
inv_rnd(aes_it_tab,-16)
aes_25: inv_rnd(aes_it_tab,0) // 10 rounds for 128-bit key
inv_rnd(aes_it_tab,16)
inv_rnd(aes_it_tab,32)
inv_rnd(aes_it_tab,48)
inv_rnd(aes_it_tab,64)
inv_rnd(aes_it_tab,80)
inv_rnd(aes_it_tab,96)
inv_rnd(aes_it_tab,112)
inv_rnd(aes_it_tab,128)
inv_rnd(aes_il_tab,144) // last round uses a different table
// move final values to the output array.
mov out_blk+20(%esp),%ebp
add $8,%esp
mov %eax,(%ebp)
mov %ebx,4(%ebp)
mov %esi,8(%ebp)
mov %edi,12(%ebp)
pop %edi
pop %esi
pop %ebx
pop %ebp
ret
// AES (Rijndael) Key Schedule Subroutine
// input/output parameters
#define aes_cx 12 // AES context
#define in_key 16 // key input array address
#define key_ln 20 // key length, bytes (16,24,32) or bits (128,192,256)
#define ed_flg 24 // 0=create both encr/decr keys, 1=create encr key only
// offsets for locals
#define cnt -4
#define kpf -8
#define slen 8
// This macro performs a column mixing operation on an input 32-bit
// word to give a 32-bit result. It uses each of the 4 bytes in the
// the input column to index 4 different tables of 256 32-bit words
// that are xored together to form the output value.
#define mix_col(p1) \
movzbl %bl,%ecx ;\
mov p1(,%ecx,4),%eax ;\
movzbl %bh,%ecx ;\
ror $16,%ebx ;\
xor p1+tlen(,%ecx,4),%eax ;\
movzbl %bl,%ecx ;\
xor p1+2*tlen(,%ecx,4),%eax ;\
movzbl %bh,%ecx ;\
xor p1+3*tlen(,%ecx,4),%eax
// Key Schedule Macros
#define ksc4(p1) \
rol $24,%ebx ;\
mix_col(aes_fl_tab) ;\
ror $8,%ebx ;\
xor 4*p1+aes_rcon_tab,%eax ;\
xor %eax,%esi ;\
xor %esi,%ebp ;\
mov %esi,16*p1(%edi) ;\
mov %ebp,16*p1+4(%edi) ;\
xor %ebp,%edx ;\
xor %edx,%ebx ;\
mov %edx,16*p1+8(%edi) ;\
mov %ebx,16*p1+12(%edi)
#define ksc6(p1) \
rol $24,%ebx ;\
mix_col(aes_fl_tab) ;\
ror $8,%ebx ;\
xor 4*p1+aes_rcon_tab,%eax ;\
xor 24*p1-24(%edi),%eax ;\
mov %eax,24*p1(%edi) ;\
xor 24*p1-20(%edi),%eax ;\
mov %eax,24*p1+4(%edi) ;\
xor %eax,%esi ;\
xor %esi,%ebp ;\
mov %esi,24*p1+8(%edi) ;\
mov %ebp,24*p1+12(%edi) ;\
xor %ebp,%edx ;\
xor %edx,%ebx ;\
mov %edx,24*p1+16(%edi) ;\
mov %ebx,24*p1+20(%edi)
#define ksc8(p1) \
rol $24,%ebx ;\
mix_col(aes_fl_tab) ;\
ror $8,%ebx ;\
xor 4*p1+aes_rcon_tab,%eax ;\
xor 32*p1-32(%edi),%eax ;\
mov %eax,32*p1(%edi) ;\
xor 32*p1-28(%edi),%eax ;\
mov %eax,32*p1+4(%edi) ;\
xor 32*p1-24(%edi),%eax ;\
mov %eax,32*p1+8(%edi) ;\
xor 32*p1-20(%edi),%eax ;\
mov %eax,32*p1+12(%edi) ;\
push %ebx ;\
mov %eax,%ebx ;\
mix_col(aes_fl_tab) ;\
pop %ebx ;\
xor %eax,%esi ;\
xor %esi,%ebp ;\
mov %esi,32*p1+16(%edi) ;\
mov %ebp,32*p1+20(%edi) ;\
xor %ebp,%edx ;\
xor %edx,%ebx ;\
mov %edx,32*p1+24(%edi) ;\
mov %ebx,32*p1+28(%edi)
.align ALIGN32BYTES
aes_set_key:
pushfl
push %ebp
mov %esp,%ebp
sub $slen,%esp
push %ebx
push %esi
push %edi
mov aes_cx(%ebp),%edx // edx -> AES context
mov key_ln(%ebp),%ecx // key length
cmpl $128,%ecx
jb aes_30
shr $3,%ecx
aes_30: cmpl $32,%ecx
je aes_32
cmpl $24,%ecx
je aes_32
mov $16,%ecx
aes_32: shr $2,%ecx
mov %ecx,nkey(%edx)
lea 6(%ecx),%eax // 10/12/14 for 4/6/8 32-bit key length
mov %eax,nrnd(%edx)
mov in_key(%ebp),%esi // key input array
lea ekey(%edx),%edi // key position in AES context
cld
push %ebp
mov %ecx,%eax // save key length in eax
rep ; movsl // words in the key schedule
mov -4(%esi),%ebx // put some values in registers
mov -8(%esi),%edx // to allow faster code
mov -12(%esi),%ebp
mov -16(%esi),%esi
cmpl $4,%eax // jump on key size
je aes_36
cmpl $6,%eax
je aes_35
ksc8(0)
ksc8(1)
ksc8(2)
ksc8(3)
ksc8(4)
ksc8(5)
ksc8(6)
jmp aes_37
aes_35: ksc6(0)
ksc6(1)
ksc6(2)
ksc6(3)
ksc6(4)
ksc6(5)
ksc6(6)
ksc6(7)
jmp aes_37
aes_36: ksc4(0)
ksc4(1)
ksc4(2)
ksc4(3)
ksc4(4)
ksc4(5)
ksc4(6)
ksc4(7)
ksc4(8)
ksc4(9)
aes_37: pop %ebp
mov aes_cx(%ebp),%edx // edx -> AES context
cmpl $0,ed_flg(%ebp)
jne aes_39
// compile decryption key schedule from encryption schedule - reverse
// order and do mix_column operation on round keys except first and last
mov nrnd(%edx),%eax // kt = cx->d_key + nc * cx->Nrnd
shl $2,%eax
lea dkey(%edx,%eax,4),%edi
lea ekey(%edx),%esi // kf = cx->e_key
movsl // copy first round key (unmodified)
movsl
movsl
movsl
sub $32,%edi
movl $1,cnt(%ebp)
aes_38: // do mix column on each column of
lodsl // each round key
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
lodsl
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
lodsl
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
lodsl
mov %eax,%ebx
mix_col(aes_im_tab)
stosl
sub $32,%edi
incl cnt(%ebp)
mov cnt(%ebp),%eax
cmp nrnd(%edx),%eax
jb aes_38
movsl // copy last round key (unmodified)
movsl
movsl
movsl
aes_39: pop %edi
pop %esi
pop %ebx
mov %ebp,%esp
pop %ebp
popfl
ret
// finite field multiplies by {02}, {04} and {08}
#define f2(x) ((x<<1)^(((x>>7)&1)*0x11b))
#define f4(x) ((x<<2)^(((x>>6)&1)*0x11b)^(((x>>6)&2)*0x11b))
#define f8(x) ((x<<3)^(((x>>5)&1)*0x11b)^(((x>>5)&2)*0x11b)^(((x>>5)&4)*0x11b))
// finite field multiplies required in table generation
#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
// These defines generate the forward table entries
#define u0(x) ((f3(x) << 24) | (x << 16) | (x << 8) | f2(x))
#define u1(x) ((x << 24) | (x << 16) | (f2(x) << 8) | f3(x))
#define u2(x) ((x << 24) | (f2(x) << 16) | (f3(x) << 8) | x)
#define u3(x) ((f2(x) << 24) | (f3(x) << 16) | (x << 8) | x)
// These defines generate the inverse table entries
#define v0(x) ((fb(x) << 24) | (fd(x) << 16) | (f9(x) << 8) | fe(x))
#define v1(x) ((fd(x) << 24) | (f9(x) << 16) | (fe(x) << 8) | fb(x))
#define v2(x) ((f9(x) << 24) | (fe(x) << 16) | (fb(x) << 8) | fd(x))
#define v3(x) ((fe(x) << 24) | (fb(x) << 16) | (fd(x) << 8) | f9(x))
// These defines generate entries for the last round tables
#define w0(x) (x)
#define w1(x) (x << 8)
#define w2(x) (x << 16)
#define w3(x) (x << 24)
// macro to generate inverse mix column tables (needed for the key schedule)
#define im_data0(p1) \
.long p1(0x00),p1(0x01),p1(0x02),p1(0x03),p1(0x04),p1(0x05),p1(0x06),p1(0x07) ;\
.long p1(0x08),p1(0x09),p1(0x0a),p1(0x0b),p1(0x0c),p1(0x0d),p1(0x0e),p1(0x0f) ;\
.long p1(0x10),p1(0x11),p1(0x12),p1(0x13),p1(0x14),p1(0x15),p1(0x16),p1(0x17) ;\
.long p1(0x18),p1(0x19),p1(0x1a),p1(0x1b),p1(0x1c),p1(0x1d),p1(0x1e),p1(0x1f)
#define im_data1(p1) \
.long p1(0x20),p1(0x21),p1(0x22),p1(0x23),p1(0x24),p1(0x25),p1(0x26),p1(0x27) ;\
.long p1(0x28),p1(0x29),p1(0x2a),p1(0x2b),p1(0x2c),p1(0x2d),p1(0x2e),p1(0x2f) ;\
.long p1(0x30),p1(0x31),p1(0x32),p1(0x33),p1(0x34),p1(0x35),p1(0x36),p1(0x37) ;\
.long p1(0x38),p1(0x39),p1(0x3a),p1(0x3b),p1(0x3c),p1(0x3d),p1(0x3e),p1(0x3f)
#define im_data2(p1) \
.long p1(0x40),p1(0x41),p1(0x42),p1(0x43),p1(0x44),p1(0x45),p1(0x46),p1(0x47) ;\
.long p1(0x48),p1(0x49),p1(0x4a),p1(0x4b),p1(0x4c),p1(0x4d),p1(0x4e),p1(0x4f) ;\
.long p1(0x50),p1(0x51),p1(0x52),p1(0x53),p1(0x54),p1(0x55),p1(0x56),p1(0x57) ;\
.long p1(0x58),p1(0x59),p1(0x5a),p1(0x5b),p1(0x5c),p1(0x5d),p1(0x5e),p1(0x5f)
#define im_data3(p1) \
.long p1(0x60),p1(0x61),p1(0x62),p1(0x63),p1(0x64),p1(0x65),p1(0x66),p1(0x67) ;\
.long p1(0x68),p1(0x69),p1(0x6a),p1(0x6b),p1(0x6c),p1(0x6d),p1(0x6e),p1(0x6f) ;\
.long p1(0x70),p1(0x71),p1(0x72),p1(0x73),p1(0x74),p1(0x75),p1(0x76),p1(0x77) ;\
.long p1(0x78),p1(0x79),p1(0x7a),p1(0x7b),p1(0x7c),p1(0x7d),p1(0x7e),p1(0x7f)
#define im_data4(p1) \
.long p1(0x80),p1(0x81),p1(0x82),p1(0x83),p1(0x84),p1(0x85),p1(0x86),p1(0x87) ;\
.long p1(0x88),p1(0x89),p1(0x8a),p1(0x8b),p1(0x8c),p1(0x8d),p1(0x8e),p1(0x8f) ;\
.long p1(0x90),p1(0x91),p1(0x92),p1(0x93),p1(0x94),p1(0x95),p1(0x96),p1(0x97) ;\
.long p1(0x98),p1(0x99),p1(0x9a),p1(0x9b),p1(0x9c),p1(0x9d),p1(0x9e),p1(0x9f)
#define im_data5(p1) \
.long p1(0xa0),p1(0xa1),p1(0xa2),p1(0xa3),p1(0xa4),p1(0xa5),p1(0xa6),p1(0xa7) ;\
.long p1(0xa8),p1(0xa9),p1(0xaa),p1(0xab),p1(0xac),p1(0xad),p1(0xae),p1(0xaf) ;\
.long p1(0xb0),p1(0xb1),p1(0xb2),p1(0xb3),p1(0xb4),p1(0xb5),p1(0xb6),p1(0xb7) ;\
.long p1(0xb8),p1(0xb9),p1(0xba),p1(0xbb),p1(0xbc),p1(0xbd),p1(0xbe),p1(0xbf)
#define im_data6(p1) \
.long p1(0xc0),p1(0xc1),p1(0xc2),p1(0xc3),p1(0xc4),p1(0xc5),p1(0xc6),p1(0xc7) ;\
.long p1(0xc8),p1(0xc9),p1(0xca),p1(0xcb),p1(0xcc),p1(0xcd),p1(0xce),p1(0xcf) ;\
.long p1(0xd0),p1(0xd1),p1(0xd2),p1(0xd3),p1(0xd4),p1(0xd5),p1(0xd6),p1(0xd7) ;\
.long p1(0xd8),p1(0xd9),p1(0xda),p1(0xdb),p1(0xdc),p1(0xdd),p1(0xde),p1(0xdf)
#define im_data7(p1) \
.long p1(0xe0),p1(0xe1),p1(0xe2),p1(0xe3),p1(0xe4),p1(0xe5),p1(0xe6),p1(0xe7) ;\
.long p1(0xe8),p1(0xe9),p1(0xea),p1(0xeb),p1(0xec),p1(0xed),p1(0xee),p1(0xef) ;\
.long p1(0xf0),p1(0xf1),p1(0xf2),p1(0xf3),p1(0xf4),p1(0xf5),p1(0xf6),p1(0xf7) ;\
.long p1(0xf8),p1(0xf9),p1(0xfa),p1(0xfb),p1(0xfc),p1(0xfd),p1(0xfe),p1(0xff)
// S-box data - 256 entries
// Rijndael forward S-box bytes, split into eight 32-entry chunks
// (sb_data0..sb_data7).  Each chunk applies the entry-generator macro
// p1 to every raw S-box byte, so one set of constants can be emitted
// packed several different ways (see the aes_ft_tab and aes_fl_tab
// emissions later in this file).
#define sb_data0(p1) \
.long p1(0x63),p1(0x7c),p1(0x77),p1(0x7b),p1(0xf2),p1(0x6b),p1(0x6f),p1(0xc5) ;\
.long p1(0x30),p1(0x01),p1(0x67),p1(0x2b),p1(0xfe),p1(0xd7),p1(0xab),p1(0x76) ;\
.long p1(0xca),p1(0x82),p1(0xc9),p1(0x7d),p1(0xfa),p1(0x59),p1(0x47),p1(0xf0) ;\
.long p1(0xad),p1(0xd4),p1(0xa2),p1(0xaf),p1(0x9c),p1(0xa4),p1(0x72),p1(0xc0)
#define sb_data1(p1) \
.long p1(0xb7),p1(0xfd),p1(0x93),p1(0x26),p1(0x36),p1(0x3f),p1(0xf7),p1(0xcc) ;\
.long p1(0x34),p1(0xa5),p1(0xe5),p1(0xf1),p1(0x71),p1(0xd8),p1(0x31),p1(0x15) ;\
.long p1(0x04),p1(0xc7),p1(0x23),p1(0xc3),p1(0x18),p1(0x96),p1(0x05),p1(0x9a) ;\
.long p1(0x07),p1(0x12),p1(0x80),p1(0xe2),p1(0xeb),p1(0x27),p1(0xb2),p1(0x75)
#define sb_data2(p1) \
.long p1(0x09),p1(0x83),p1(0x2c),p1(0x1a),p1(0x1b),p1(0x6e),p1(0x5a),p1(0xa0) ;\
.long p1(0x52),p1(0x3b),p1(0xd6),p1(0xb3),p1(0x29),p1(0xe3),p1(0x2f),p1(0x84) ;\
.long p1(0x53),p1(0xd1),p1(0x00),p1(0xed),p1(0x20),p1(0xfc),p1(0xb1),p1(0x5b) ;\
.long p1(0x6a),p1(0xcb),p1(0xbe),p1(0x39),p1(0x4a),p1(0x4c),p1(0x58),p1(0xcf)
#define sb_data3(p1) \
.long p1(0xd0),p1(0xef),p1(0xaa),p1(0xfb),p1(0x43),p1(0x4d),p1(0x33),p1(0x85) ;\
.long p1(0x45),p1(0xf9),p1(0x02),p1(0x7f),p1(0x50),p1(0x3c),p1(0x9f),p1(0xa8) ;\
.long p1(0x51),p1(0xa3),p1(0x40),p1(0x8f),p1(0x92),p1(0x9d),p1(0x38),p1(0xf5) ;\
.long p1(0xbc),p1(0xb6),p1(0xda),p1(0x21),p1(0x10),p1(0xff),p1(0xf3),p1(0xd2)
#define sb_data4(p1) \
.long p1(0xcd),p1(0x0c),p1(0x13),p1(0xec),p1(0x5f),p1(0x97),p1(0x44),p1(0x17) ;\
.long p1(0xc4),p1(0xa7),p1(0x7e),p1(0x3d),p1(0x64),p1(0x5d),p1(0x19),p1(0x73) ;\
.long p1(0x60),p1(0x81),p1(0x4f),p1(0xdc),p1(0x22),p1(0x2a),p1(0x90),p1(0x88) ;\
.long p1(0x46),p1(0xee),p1(0xb8),p1(0x14),p1(0xde),p1(0x5e),p1(0x0b),p1(0xdb)
#define sb_data5(p1) \
.long p1(0xe0),p1(0x32),p1(0x3a),p1(0x0a),p1(0x49),p1(0x06),p1(0x24),p1(0x5c) ;\
.long p1(0xc2),p1(0xd3),p1(0xac),p1(0x62),p1(0x91),p1(0x95),p1(0xe4),p1(0x79) ;\
.long p1(0xe7),p1(0xc8),p1(0x37),p1(0x6d),p1(0x8d),p1(0xd5),p1(0x4e),p1(0xa9) ;\
.long p1(0x6c),p1(0x56),p1(0xf4),p1(0xea),p1(0x65),p1(0x7a),p1(0xae),p1(0x08)
#define sb_data6(p1) \
.long p1(0xba),p1(0x78),p1(0x25),p1(0x2e),p1(0x1c),p1(0xa6),p1(0xb4),p1(0xc6) ;\
.long p1(0xe8),p1(0xdd),p1(0x74),p1(0x1f),p1(0x4b),p1(0xbd),p1(0x8b),p1(0x8a) ;\
.long p1(0x70),p1(0x3e),p1(0xb5),p1(0x66),p1(0x48),p1(0x03),p1(0xf6),p1(0x0e) ;\
.long p1(0x61),p1(0x35),p1(0x57),p1(0xb9),p1(0x86),p1(0xc1),p1(0x1d),p1(0x9e)
#define sb_data7(p1) \
.long p1(0xe1),p1(0xf8),p1(0x98),p1(0x11),p1(0x69),p1(0xd9),p1(0x8e),p1(0x94) ;\
.long p1(0x9b),p1(0x1e),p1(0x87),p1(0xe9),p1(0xce),p1(0x55),p1(0x28),p1(0xdf) ;\
.long p1(0x8c),p1(0xa1),p1(0x89),p1(0x0d),p1(0xbf),p1(0xe6),p1(0x42),p1(0x68) ;\
.long p1(0x41),p1(0x99),p1(0x2d),p1(0x0f),p1(0xb0),p1(0x54),p1(0xbb),p1(0x16)
// Inverse S-box data - 256 entries
// Rijndael inverse S-box bytes (InvSubBytes), split into eight
// 32-entry chunks (ib_data0..ib_data7).  As with sb_data*, the
// entry-generator macro p1 is applied to every raw byte so the same
// constants can build differently-packed tables (see aes_it_tab and
// aes_il_tab later in this file).
#define ib_data0(p1) \
.long p1(0x52),p1(0x09),p1(0x6a),p1(0xd5),p1(0x30),p1(0x36),p1(0xa5),p1(0x38) ;\
.long p1(0xbf),p1(0x40),p1(0xa3),p1(0x9e),p1(0x81),p1(0xf3),p1(0xd7),p1(0xfb) ;\
.long p1(0x7c),p1(0xe3),p1(0x39),p1(0x82),p1(0x9b),p1(0x2f),p1(0xff),p1(0x87) ;\
.long p1(0x34),p1(0x8e),p1(0x43),p1(0x44),p1(0xc4),p1(0xde),p1(0xe9),p1(0xcb)
#define ib_data1(p1) \
.long p1(0x54),p1(0x7b),p1(0x94),p1(0x32),p1(0xa6),p1(0xc2),p1(0x23),p1(0x3d) ;\
.long p1(0xee),p1(0x4c),p1(0x95),p1(0x0b),p1(0x42),p1(0xfa),p1(0xc3),p1(0x4e) ;\
.long p1(0x08),p1(0x2e),p1(0xa1),p1(0x66),p1(0x28),p1(0xd9),p1(0x24),p1(0xb2) ;\
.long p1(0x76),p1(0x5b),p1(0xa2),p1(0x49),p1(0x6d),p1(0x8b),p1(0xd1),p1(0x25)
#define ib_data2(p1) \
.long p1(0x72),p1(0xf8),p1(0xf6),p1(0x64),p1(0x86),p1(0x68),p1(0x98),p1(0x16) ;\
.long p1(0xd4),p1(0xa4),p1(0x5c),p1(0xcc),p1(0x5d),p1(0x65),p1(0xb6),p1(0x92) ;\
.long p1(0x6c),p1(0x70),p1(0x48),p1(0x50),p1(0xfd),p1(0xed),p1(0xb9),p1(0xda) ;\
.long p1(0x5e),p1(0x15),p1(0x46),p1(0x57),p1(0xa7),p1(0x8d),p1(0x9d),p1(0x84)
#define ib_data3(p1) \
.long p1(0x90),p1(0xd8),p1(0xab),p1(0x00),p1(0x8c),p1(0xbc),p1(0xd3),p1(0x0a) ;\
.long p1(0xf7),p1(0xe4),p1(0x58),p1(0x05),p1(0xb8),p1(0xb3),p1(0x45),p1(0x06) ;\
.long p1(0xd0),p1(0x2c),p1(0x1e),p1(0x8f),p1(0xca),p1(0x3f),p1(0x0f),p1(0x02) ;\
.long p1(0xc1),p1(0xaf),p1(0xbd),p1(0x03),p1(0x01),p1(0x13),p1(0x8a),p1(0x6b)
#define ib_data4(p1) \
.long p1(0x3a),p1(0x91),p1(0x11),p1(0x41),p1(0x4f),p1(0x67),p1(0xdc),p1(0xea) ;\
.long p1(0x97),p1(0xf2),p1(0xcf),p1(0xce),p1(0xf0),p1(0xb4),p1(0xe6),p1(0x73) ;\
.long p1(0x96),p1(0xac),p1(0x74),p1(0x22),p1(0xe7),p1(0xad),p1(0x35),p1(0x85) ;\
.long p1(0xe2),p1(0xf9),p1(0x37),p1(0xe8),p1(0x1c),p1(0x75),p1(0xdf),p1(0x6e)
#define ib_data5(p1) \
.long p1(0x47),p1(0xf1),p1(0x1a),p1(0x71),p1(0x1d),p1(0x29),p1(0xc5),p1(0x89) ;\
.long p1(0x6f),p1(0xb7),p1(0x62),p1(0x0e),p1(0xaa),p1(0x18),p1(0xbe),p1(0x1b) ;\
.long p1(0xfc),p1(0x56),p1(0x3e),p1(0x4b),p1(0xc6),p1(0xd2),p1(0x79),p1(0x20) ;\
.long p1(0x9a),p1(0xdb),p1(0xc0),p1(0xfe),p1(0x78),p1(0xcd),p1(0x5a),p1(0xf4)
#define ib_data6(p1) \
.long p1(0x1f),p1(0xdd),p1(0xa8),p1(0x33),p1(0x88),p1(0x07),p1(0xc7),p1(0x31) ;\
.long p1(0xb1),p1(0x12),p1(0x10),p1(0x59),p1(0x27),p1(0x80),p1(0xec),p1(0x5f) ;\
.long p1(0x60),p1(0x51),p1(0x7f),p1(0xa9),p1(0x19),p1(0xb5),p1(0x4a),p1(0x0d) ;\
.long p1(0x2d),p1(0xe5),p1(0x7a),p1(0x9f),p1(0x93),p1(0xc9),p1(0x9c),p1(0xef)
#define ib_data7(p1) \
.long p1(0xa0),p1(0xe0),p1(0x3b),p1(0x4d),p1(0xae),p1(0x2a),p1(0xf5),p1(0xb0) ;\
.long p1(0xc8),p1(0xeb),p1(0xbb),p1(0x3c),p1(0x83),p1(0x53),p1(0x99),p1(0x61) ;\
.long p1(0x17),p1(0x2b),p1(0x04),p1(0x7e),p1(0xba),p1(0x77),p1(0xd6),p1(0x26) ;\
.long p1(0xe1),p1(0x69),p1(0x14),p1(0x63),p1(0x55),p1(0x21),p1(0x0c),p1(0x7d)
// The rcon_table (needed for the key schedule)
//
// Here is original Dr Brian Gladman's source code:
// _rcon_tab:
// %assign x 1
// %rep 29
// dd x
// %assign x f2(x)
// %endrep
//
// Here is precomputed output (it's more portable this way):
// 29 round constants following x(i+1) = f2(x(i)), i.e. doubling in
// GF(2^8) reduced by the AES polynomial 0x11b (hence 0x80 -> 0x1b).
.align ALIGN32BYTES
aes_rcon_tab:
.long 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
.long 0x1b,0x36,0x6c,0xd8,0xab,0x4d,0x9a,0x2f
.long 0x5e,0xbc,0x63,0xc6,0x97,0x35,0x6a,0xd4
.long 0xb3,0x7d,0xfa,0xef,0xc5
// The forward xor tables
// aes_ft_tab: 4 x 256 x 32-bit words for the main encryption rounds --
// the forward S-box constants emitted once per generator macro
// u0..u3, giving one 256-entry sub-table per output-column rotation.
// NOTE(review): u0..u3 are defined earlier in this file; they
// presumably pack S[x] together with the MixColumns coefficients --
// confirm against the macro definitions.
.align ALIGN32BYTES
aes_ft_tab:
sb_data0(u0)
sb_data1(u0)
sb_data2(u0)
sb_data3(u0)
sb_data4(u0)
sb_data5(u0)
sb_data6(u0)
sb_data7(u0)
sb_data0(u1)
sb_data1(u1)
sb_data2(u1)
sb_data3(u1)
sb_data4(u1)
sb_data5(u1)
sb_data6(u1)
sb_data7(u1)
sb_data0(u2)
sb_data1(u2)
sb_data2(u2)
sb_data3(u2)
sb_data4(u2)
sb_data5(u2)
sb_data6(u2)
sb_data7(u2)
sb_data0(u3)
sb_data1(u3)
sb_data2(u3)
sb_data3(u3)
sb_data4(u3)
sb_data5(u3)
sb_data6(u3)
sb_data7(u3)
// aes_fl_tab: 4 x 256 x 32-bit words -- the forward S-box emitted
// through the w0..w3 generator macros (defined earlier in this file).
// NOTE(review): the w-packing appears to place the plain S-box byte
// per column position (no MixColumns weighting), which would make this
// the last-round table -- confirm against the w0..w3 definitions.
.align ALIGN32BYTES
aes_fl_tab:
sb_data0(w0)
sb_data1(w0)
sb_data2(w0)
sb_data3(w0)
sb_data4(w0)
sb_data5(w0)
sb_data6(w0)
sb_data7(w0)
sb_data0(w1)
sb_data1(w1)
sb_data2(w1)
sb_data3(w1)
sb_data4(w1)
sb_data5(w1)
sb_data6(w1)
sb_data7(w1)
sb_data0(w2)
sb_data1(w2)
sb_data2(w2)
sb_data3(w2)
sb_data4(w2)
sb_data5(w2)
sb_data6(w2)
sb_data7(w2)
sb_data0(w3)
sb_data1(w3)
sb_data2(w3)
sb_data3(w3)
sb_data4(w3)
sb_data5(w3)
sb_data6(w3)
sb_data7(w3)
// The inverse xor tables
// aes_it_tab: 4 x 256 x 32-bit words for the main decryption rounds --
// the inverse S-box constants emitted once per generator macro v0..v3,
// one 256-entry sub-table per output-column rotation.
// NOTE(review): v0..v3 are defined earlier in this file; they
// presumably pack InvS[x] with the inverse-MixColumns coefficients --
// confirm against the macro definitions.
.align ALIGN32BYTES
aes_it_tab:
ib_data0(v0)
ib_data1(v0)
ib_data2(v0)
ib_data3(v0)
ib_data4(v0)
ib_data5(v0)
ib_data6(v0)
ib_data7(v0)
ib_data0(v1)
ib_data1(v1)
ib_data2(v1)
ib_data3(v1)
ib_data4(v1)
ib_data5(v1)
ib_data6(v1)
ib_data7(v1)
ib_data0(v2)
ib_data1(v2)
ib_data2(v2)
ib_data3(v2)
ib_data4(v2)
ib_data5(v2)
ib_data6(v2)
ib_data7(v2)
ib_data0(v3)
ib_data1(v3)
ib_data2(v3)
ib_data3(v3)
ib_data4(v3)
ib_data5(v3)
ib_data6(v3)
ib_data7(v3)
// aes_il_tab: 4 x 256 x 32-bit words -- the inverse S-box emitted
// through the same w0..w3 generator macros as aes_fl_tab.
// NOTE(review): by symmetry with aes_fl_tab this would be the
// last-round decryption table (no inverse-MixColumns weighting) --
// confirm against the w0..w3 definitions earlier in this file.
.align ALIGN32BYTES
aes_il_tab:
ib_data0(w0)
ib_data1(w0)
ib_data2(w0)
ib_data3(w0)
ib_data4(w0)
ib_data5(w0)
ib_data6(w0)
ib_data7(w0)
ib_data0(w1)
ib_data1(w1)
ib_data2(w1)
ib_data3(w1)
ib_data4(w1)
ib_data5(w1)
ib_data6(w1)
ib_data7(w1)
ib_data0(w2)
ib_data1(w2)
ib_data2(w2)
ib_data3(w2)
ib_data4(w2)
ib_data5(w2)
ib_data6(w2)
ib_data7(w2)
ib_data0(w3)
ib_data1(w3)
ib_data2(w3)
ib_data3(w3)
ib_data4(w3)
ib_data5(w3)
ib_data6(w3)
ib_data7(w3)
// The inverse mix column tables
// aes_im_tab: 4 x 256 x 32-bit words -- unlike the tables above, the
// im_data* chunks apply the v0..v3 generators to the raw byte values
// 0x00..0xff (identity, no S-box), i.e. a table of the v-packed
// transform of every byte.  NOTE(review): presumably used to apply
// inverse MixColumns alone, e.g. when converting an encryption key
// schedule for decryption -- confirm at the call sites.
.align ALIGN32BYTES
aes_im_tab:
im_data0(v0)
im_data1(v0)
im_data2(v0)
im_data3(v0)
im_data4(v0)
im_data5(v0)
im_data6(v0)
im_data7(v0)
im_data0(v1)
im_data1(v1)
im_data2(v1)
im_data3(v1)
im_data4(v1)
im_data5(v1)
im_data6(v1)
im_data7(v1)
im_data0(v2)
im_data1(v2)
im_data2(v2)
im_data3(v2)
im_data4(v2)
im_data5(v2)
im_data6(v2)
im_data7(v2)
im_data0(v3)
im_data1(v3)
im_data2(v3)
im_data3(v3)
im_data4(v3)
im_data5(v3)
im_data6(v3)
im_data7(v3)
/*
 * linux/net/ipsec/des/dx86unix.S (Openswan)
* This file was originally generated by Michael Richardson <mcr@freeswan.org>
* via the perl scripts found in the ASM subdir. It remains copyright of
* Eric Young, see the file COPYRIGHT.
*
* This was last done on October 9, 2002.
*
* While this file does not need to go through cpp, we pass it through
* CPP by naming it dx86unix.S instead of dx86unix.s because there is
* a bug in Rules.make for .s builds - specifically it references EXTRA_CFLAGS
* which may contain stuff that AS doesn't understand instead of
* referencing EXTRA_AFLAGS.
*/
.file "dx86unix.S"
.version "01.01"
.text
.align 16
/*
 * des_encrypt(data, ks, enc)
 *
 * In-place single-block DES.  The first argument points at the 64-bit
 * block held as two 32-bit words (read at entry, written back at the
 * end); the second points at 128 bytes of round subkeys (16 rounds x
 * two 32-bit halves, at offsets 0..124); the third is a flag: non-zero
 * runs the subkeys forward (encrypt), zero runs them in reverse
 * (decrypt).
 *
 * Machine-generated code (desasm.pl, Eric Young's libdes -- see the
 * file header): the initial permutation, all 16 Feistel rounds against
 * the des_SPtrans combined S/P lookup tables, and the final permutation
 * are fully unrolled and kept in registers.
 *
 * Register roles: %esi/%edi hold the two block halves throughout;
 * %eax/%edx stage the key-mixed round input; %ebx/%ecx index into
 * des_SPtrans one byte at a time; %ebp alternates between the subkey
 * pointer and fetched table words, so it is reloaded from 24(%esp)
 * inside every round.
 *
 * Stack after the four pushes below:
 *   20(%esp) = arg1 (data), 24(%esp) = arg2 (ks), 28(%esp) = arg3 (enc).
 */
.globl des_encrypt
.type des_encrypt , @function
des_encrypt:
pushl %esi
pushl %edi
movl 12(%esp), %esi
xorl %ecx, %ecx
pushl %ebx
pushl %ebp
movl (%esi), %eax
movl 28(%esp), %ebx
movl 4(%esi), %edi
/*
 * Initial permutation (IP): a fixed network of rotate + masked-xor
 * swap steps that exchanges bit groups between the two halves.
 */
roll $4, %eax
movl %eax, %esi
xorl %edi, %eax
andl $0xf0f0f0f0, %eax
xorl %eax, %esi
xorl %eax, %edi
roll $20, %edi
movl %edi, %eax
xorl %esi, %edi
andl $0xfff0000f, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $14, %eax
movl %eax, %edi
xorl %esi, %eax
andl $0x33333333, %eax
xorl %eax, %edi
xorl %eax, %esi
roll $22, %esi
movl %esi, %eax
xorl %edi, %esi
andl $0x03fc03fc, %esi
xorl %esi, %eax
xorl %esi, %edi
roll $9, %eax
movl %eax, %esi
xorl %edi, %eax
andl $0xaaaaaaaa, %eax
xorl %eax, %esi
xorl %eax, %edi
/* .byte 0xd1,0xc7 encodes "roll $1, %edi" (hand-encoded by the generator). */
.byte 209
.byte 199
movl 24(%esp), %ebp
/* enc flag (loaded into %ebx above): zero means decrypt, subkeys 15..0. */
cmpl $0, %ebx
je .L000start_decrypt
/*
 * Encryption: 16 unrolled Feistel rounds, subkeys 0..15.  Each round
 * xors the subkey pair into one half, splits the result into 6-bit
 * groups via the byte masks, and xors eight des_SPtrans words into the
 * other half.  The target half alternates %edi / %esi each round.
 */
/* round 1: subkeys at 0/4(%ebp) -> %edi */
movl (%ebp), %eax
xorl %ebx, %ebx
movl 4(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 2: subkeys at 8/12(%ebp) -> %esi */
movl 8(%ebp), %eax
xorl %ebx, %ebx
movl 12(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 3: subkeys at 16/20(%ebp) -> %edi */
movl 16(%ebp), %eax
xorl %ebx, %ebx
movl 20(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 4: subkeys at 24/28(%ebp) -> %esi */
movl 24(%ebp), %eax
xorl %ebx, %ebx
movl 28(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 5: subkeys at 32/36(%ebp) -> %edi */
movl 32(%ebp), %eax
xorl %ebx, %ebx
movl 36(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 6: subkeys at 40/44(%ebp) -> %esi */
movl 40(%ebp), %eax
xorl %ebx, %ebx
movl 44(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 7: subkeys at 48/52(%ebp) -> %edi */
movl 48(%ebp), %eax
xorl %ebx, %ebx
movl 52(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 8: subkeys at 56/60(%ebp) -> %esi */
movl 56(%ebp), %eax
xorl %ebx, %ebx
movl 60(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 9: subkeys at 64/68(%ebp) -> %edi */
movl 64(%ebp), %eax
xorl %ebx, %ebx
movl 68(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 10: subkeys at 72/76(%ebp) -> %esi */
movl 72(%ebp), %eax
xorl %ebx, %ebx
movl 76(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 11: subkeys at 80/84(%ebp) -> %edi */
movl 80(%ebp), %eax
xorl %ebx, %ebx
movl 84(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 12: subkeys at 88/92(%ebp) -> %esi */
movl 88(%ebp), %eax
xorl %ebx, %ebx
movl 92(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 13: subkeys at 96/100(%ebp) -> %edi */
movl 96(%ebp), %eax
xorl %ebx, %ebx
movl 100(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 14: subkeys at 104/108(%ebp) -> %esi */
movl 104(%ebp), %eax
xorl %ebx, %ebx
movl 108(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 15: subkeys at 112/116(%ebp) -> %edi */
movl 112(%ebp), %eax
xorl %ebx, %ebx
movl 116(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 16: subkeys at 120/124(%ebp) -> %esi */
movl 120(%ebp), %eax
xorl %ebx, %ebx
movl 124(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
jmp .L001end
/*
 * Decryption: the same 16 rounds, but the subkeys are applied in
 * reverse order (offsets 120 down to 0).
 */
.L000start_decrypt:
/* round 1: subkeys at 120/124(%ebp) -> %edi */
movl 120(%ebp), %eax
xorl %ebx, %ebx
movl 124(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 2: subkeys at 112/116(%ebp) -> %esi */
movl 112(%ebp), %eax
xorl %ebx, %ebx
movl 116(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 3: subkeys at 104/108(%ebp) -> %edi */
movl 104(%ebp), %eax
xorl %ebx, %ebx
movl 108(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 4: subkeys at 96/100(%ebp) -> %esi */
movl 96(%ebp), %eax
xorl %ebx, %ebx
movl 100(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 5: subkeys at 88/92(%ebp) -> %edi */
movl 88(%ebp), %eax
xorl %ebx, %ebx
movl 92(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 6: subkeys at 80/84(%ebp) -> %esi */
movl 80(%ebp), %eax
xorl %ebx, %ebx
movl 84(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 7: subkeys at 72/76(%ebp) -> %edi */
movl 72(%ebp), %eax
xorl %ebx, %ebx
movl 76(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 8: subkeys at 64/68(%ebp) -> %esi */
movl 64(%ebp), %eax
xorl %ebx, %ebx
movl 68(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 9: subkeys at 56/60(%ebp) -> %edi */
movl 56(%ebp), %eax
xorl %ebx, %ebx
movl 60(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 10: subkeys at 48/52(%ebp) -> %esi */
movl 48(%ebp), %eax
xorl %ebx, %ebx
movl 52(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 11: subkeys at 40/44(%ebp) -> %edi */
movl 40(%ebp), %eax
xorl %ebx, %ebx
movl 44(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 12: subkeys at 32/36(%ebp) -> %esi */
movl 32(%ebp), %eax
xorl %ebx, %ebx
movl 36(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 13: subkeys at 24/28(%ebp) -> %edi */
movl 24(%ebp), %eax
xorl %ebx, %ebx
movl 28(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 14: subkeys at 16/20(%ebp) -> %esi */
movl 16(%ebp), %eax
xorl %ebx, %ebx
movl 20(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/* round 15: subkeys at 8/12(%ebp) -> %edi */
movl 8(%ebp), %eax
xorl %ebx, %ebx
movl 12(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
/* round 16: subkeys at 0/4(%ebp) -> %esi */
movl (%ebp), %eax
xorl %ebx, %ebx
movl 4(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
/*
 * Final permutation (inverse of IP above), then write the two half
 * words back through the data pointer (arg1, at 20(%esp) here).
 */
.L001end:
movl 20(%esp), %edx
/* .byte 0xd1,0xce encodes "rorl $1, %esi" (undoes the rol-by-1 in IP). */
.byte 209
.byte 206
movl %edi, %eax
xorl %esi, %edi
andl $0xaaaaaaaa, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $23, %eax
movl %eax, %edi
xorl %esi, %eax
andl $0x03fc03fc, %eax
xorl %eax, %edi
xorl %eax, %esi
roll $10, %edi
movl %edi, %eax
xorl %esi, %edi
andl $0x33333333, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $18, %esi
movl %esi, %edi
xorl %eax, %esi
andl $0xfff0000f, %esi
xorl %esi, %edi
xorl %esi, %eax
roll $12, %edi
movl %edi, %esi
xorl %eax, %edi
andl $0xf0f0f0f0, %edi
xorl %edi, %esi
xorl %edi, %eax
rorl $4, %eax
movl %eax, (%edx)
movl %esi, 4(%edx)
/* Restore callee-saved registers in reverse push order. */
popl %ebp
popl %ebx
popl %edi
popl %esi
ret
.des_encrypt_end:
.size des_encrypt , .des_encrypt_end-des_encrypt
.ident "desasm.pl"
.text
.align 16
.globl des_encrypt2
.type des_encrypt2 , @function
des_encrypt2:
pushl %esi
pushl %edi
movl 12(%esp), %eax
xorl %ecx, %ecx
pushl %ebx
pushl %ebp
movl (%eax), %esi
movl 28(%esp), %ebx
roll $3, %esi
movl 4(%eax), %edi
roll $3, %edi
movl 24(%esp), %ebp
cmpl $0, %ebx
je .L002start_decrypt
movl (%ebp), %eax
xorl %ebx, %ebx
movl 4(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 8(%ebp), %eax
xorl %ebx, %ebx
movl 12(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 16(%ebp), %eax
xorl %ebx, %ebx
movl 20(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 24(%ebp), %eax
xorl %ebx, %ebx
movl 28(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 32(%ebp), %eax
xorl %ebx, %ebx
movl 36(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 40(%ebp), %eax
xorl %ebx, %ebx
movl 44(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 48(%ebp), %eax
xorl %ebx, %ebx
movl 52(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 56(%ebp), %eax
xorl %ebx, %ebx
movl 60(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 64(%ebp), %eax
xorl %ebx, %ebx
movl 68(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 72(%ebp), %eax
xorl %ebx, %ebx
movl 76(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 80(%ebp), %eax
xorl %ebx, %ebx
movl 84(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 88(%ebp), %eax
xorl %ebx, %ebx
movl 92(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 96(%ebp), %eax
xorl %ebx, %ebx
movl 100(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 104(%ebp), %eax
xorl %ebx, %ebx
movl 108(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 112(%ebp), %eax
xorl %ebx, %ebx
movl 116(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 120(%ebp), %eax
xorl %ebx, %ebx
movl 124(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
jmp .L003end
.L002start_decrypt:
movl 120(%ebp), %eax
xorl %ebx, %ebx
movl 124(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 112(%ebp), %eax
xorl %ebx, %ebx
movl 116(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 104(%ebp), %eax
xorl %ebx, %ebx
movl 108(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 96(%ebp), %eax
xorl %ebx, %ebx
movl 100(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 88(%ebp), %eax
xorl %ebx, %ebx
movl 92(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 80(%ebp), %eax
xorl %ebx, %ebx
movl 84(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 72(%ebp), %eax
xorl %ebx, %ebx
movl 76(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 64(%ebp), %eax
xorl %ebx, %ebx
movl 68(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 56(%ebp), %eax
xorl %ebx, %ebx
movl 60(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 48(%ebp), %eax
xorl %ebx, %ebx
movl 52(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 40(%ebp), %eax
xorl %ebx, %ebx
movl 44(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 32(%ebp), %eax
xorl %ebx, %ebx
movl 36(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 24(%ebp), %eax
xorl %ebx, %ebx
movl 28(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl 16(%ebp), %eax
xorl %ebx, %ebx
movl 20(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
movl 8(%ebp), %eax
xorl %ebx, %ebx
movl 12(%ebp), %edx
xorl %esi, %eax
xorl %esi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %edi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %edi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %edi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %edi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %edi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %edi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %edi
movl (%ebp), %eax
xorl %ebx, %ebx
movl 4(%ebp), %edx
xorl %edi, %eax
xorl %edi, %edx
andl $0xfcfcfcfc, %eax
andl $0xcfcfcfcf, %edx
movb %al, %bl
movb %ah, %cl
rorl $4, %edx
movl des_SPtrans(%ebx),%ebp
movb %dl, %bl
xorl %ebp, %esi
movl 0x200+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movb %dh, %cl
shrl $16, %eax
movl 0x100+des_SPtrans(%ebx),%ebp
xorl %ebp, %esi
movb %ah, %bl
shrl $16, %edx
movl 0x300+des_SPtrans(%ecx),%ebp
xorl %ebp, %esi
movl 24(%esp), %ebp
movb %dh, %cl
andl $0xff, %eax
andl $0xff, %edx
movl 0x600+des_SPtrans(%ebx),%ebx
xorl %ebx, %esi
movl 0x700+des_SPtrans(%ecx),%ebx
xorl %ebx, %esi
movl 0x400+des_SPtrans(%eax),%ebx
xorl %ebx, %esi
movl 0x500+des_SPtrans(%edx),%ebx
xorl %ebx, %esi
.L003end:
rorl $3, %edi
movl 20(%esp), %eax
rorl $3, %esi
movl %edi, (%eax)
movl %esi, 4(%eax)
popl %ebp
popl %ebx
popl %edi
popl %esi
ret
.des_encrypt2_end:
.size des_encrypt2 , .des_encrypt2_end-des_encrypt2
.ident "desasm.pl"
.text
.align 16
.globl des_encrypt3
.type des_encrypt3 , @function
#-----------------------------------------------------------------------
# des_encrypt3(data, ks1, ks2, ks3)  -- 3DES EDE encrypt of one 8-byte
# block, generated by desasm.pl.  cdecl, i386.
# In:  4(%esp)=data (two 32-bit halves), 8/12/16(%esp)=key schedules
# Does IP on the block in-register, runs des_encrypt2 three times
# (ks1 enc=1, ks2 enc=0, ks3 enc=1), then FP, writing back through data.
#-----------------------------------------------------------------------
des_encrypt3:
pushl %ebx
movl 8(%esp), %ebx                      # ebx = data pointer (after 1 push)
pushl %ebp
pushl %esi
pushl %edi
movl (%ebx), %edi                       # edi = left half
movl 4(%ebx), %esi                      # esi = right half
subl $12, %esp                          # arg space for des_encrypt2 calls
# --- initial permutation (IP), computed as a chain of rotate/XOR/mask
# bit-swaps between the two halves (standard desasm.pl IP sequence;
# verify masks against crypto/des/des_enc.c if modifying) ---
roll $4, %edi
movl %edi, %edx
xorl %esi, %edi
andl $0xf0f0f0f0, %edi
xorl %edi, %edx
xorl %edi, %esi
roll $20, %esi
movl %esi, %edi
xorl %edx, %esi
andl $0xfff0000f, %esi
xorl %esi, %edi
xorl %esi, %edx
roll $14, %edi
movl %edi, %esi
xorl %edx, %edi
andl $0x33333333, %edi
xorl %edi, %esi
xorl %edi, %edx
roll $22, %edx
movl %edx, %edi
xorl %esi, %edx
andl $0x03fc03fc, %edx
xorl %edx, %edi
xorl %edx, %esi
roll $9, %edi
movl %edi, %edx
xorl %esi, %edi
andl $0xaaaaaaaa, %edi
xorl %edi, %edx
xorl %edi, %esi
rorl $3, %edx
rorl $2, %esi
movl %esi, 4(%ebx)                      # store permuted halves back so
movl 36(%esp), %eax                     # eax = ks1
movl %edx, (%ebx)                       # des_encrypt2 works in-place
movl 40(%esp), %edi                     # edi = ks2
movl 44(%esp), %esi                     # esi = ks3
# E-D-E: encrypt(ks1), decrypt(ks2), encrypt(ks3)
movl $1, 8(%esp)                        # enc flag = 1
movl %eax, 4(%esp)
movl %ebx, (%esp)
call des_encrypt2
movl $0, 8(%esp)                        # enc flag = 0 (decrypt pass)
movl %edi, 4(%esp)
movl %ebx, (%esp)
call des_encrypt2
movl $1, 8(%esp)                        # enc flag = 1
movl %esi, 4(%esp)
movl %ebx, (%esp)
call des_encrypt2
addl $12, %esp
movl (%ebx), %edi
movl 4(%ebx), %esi
# --- final permutation (FP = IP^-1), inverse bit-swap chain ---
roll $2, %esi
roll $3, %edi
movl %edi, %eax
xorl %esi, %edi
andl $0xaaaaaaaa, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $23, %eax
movl %eax, %edi
xorl %esi, %eax
andl $0x03fc03fc, %eax
xorl %eax, %edi
xorl %eax, %esi
roll $10, %edi
movl %edi, %eax
xorl %esi, %edi
andl $0x33333333, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $18, %esi
movl %esi, %edi
xorl %eax, %esi
andl $0xfff0000f, %esi
xorl %esi, %edi
xorl %esi, %eax
roll $12, %edi
movl %edi, %esi
xorl %eax, %edi
andl $0xf0f0f0f0, %edi
xorl %edi, %esi
xorl %edi, %eax
rorl $4, %eax
movl %eax, (%ebx)                       # write final block back to data
movl %esi, 4(%ebx)
popl %edi
popl %esi
popl %ebp
popl %ebx
ret
.des_encrypt3_end:
.size des_encrypt3 , .des_encrypt3_end-des_encrypt3
.ident "desasm.pl"
.text
.align 16
.globl des_decrypt3
.type des_decrypt3 , @function
#-----------------------------------------------------------------------
# des_decrypt3(data, ks1, ks2, ks3)  -- 3DES EDE decrypt of one 8-byte
# block, generated by desasm.pl.  cdecl, i386.
# Mirror of des_encrypt3: same IP/FP, but the middle passes run as
# decrypt(ks3), encrypt(ks2), decrypt(ks1) -- i.e. reverse key order
# with inverted enc flags.
#-----------------------------------------------------------------------
des_decrypt3:
pushl %ebx
movl 8(%esp), %ebx                      # ebx = data pointer
pushl %ebp
pushl %esi
pushl %edi
movl (%ebx), %edi
movl 4(%ebx), %esi
subl $12, %esp                          # arg space for des_encrypt2 calls
# --- initial permutation (IP), identical to des_encrypt3 ---
roll $4, %edi
movl %edi, %edx
xorl %esi, %edi
andl $0xf0f0f0f0, %edi
xorl %edi, %edx
xorl %edi, %esi
roll $20, %esi
movl %esi, %edi
xorl %edx, %esi
andl $0xfff0000f, %esi
xorl %esi, %edi
xorl %esi, %edx
roll $14, %edi
movl %edi, %esi
xorl %edx, %edi
andl $0x33333333, %edi
xorl %edi, %esi
xorl %edi, %edx
roll $22, %edx
movl %edx, %edi
xorl %esi, %edx
andl $0x03fc03fc, %edx
xorl %edx, %edi
xorl %edx, %esi
roll $9, %edi
movl %edi, %edx
xorl %esi, %edi
andl $0xaaaaaaaa, %edi
xorl %edi, %edx
xorl %edi, %esi
rorl $3, %edx
rorl $2, %esi
movl %esi, 4(%ebx)                      # store permuted halves in-place
movl 36(%esp), %esi                     # esi = ks1 (used last)
movl %edx, (%ebx)
movl 40(%esp), %edi                     # edi = ks2
movl 44(%esp), %eax                     # eax = ks3 (used first)
# D-E-D with keys in reverse order: decrypt(ks3), encrypt(ks2), decrypt(ks1)
movl $0, 8(%esp)                        # enc flag = 0
movl %eax, 4(%esp)
movl %ebx, (%esp)
call des_encrypt2
movl $1, 8(%esp)                        # enc flag = 1
movl %edi, 4(%esp)
movl %ebx, (%esp)
call des_encrypt2
movl $0, 8(%esp)                        # enc flag = 0
movl %esi, 4(%esp)
movl %ebx, (%esp)
call des_encrypt2
addl $12, %esp
movl (%ebx), %edi
movl 4(%ebx), %esi
# --- final permutation (FP = IP^-1) ---
roll $2, %esi
roll $3, %edi
movl %edi, %eax
xorl %esi, %edi
andl $0xaaaaaaaa, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $23, %eax
movl %eax, %edi
xorl %esi, %eax
andl $0x03fc03fc, %eax
xorl %eax, %edi
xorl %eax, %esi
roll $10, %edi
movl %edi, %eax
xorl %esi, %edi
andl $0x33333333, %edi
xorl %edi, %eax
xorl %edi, %esi
roll $18, %esi
movl %esi, %edi
xorl %eax, %esi
andl $0xfff0000f, %esi
xorl %esi, %edi
xorl %esi, %eax
roll $12, %edi
movl %edi, %esi
xorl %eax, %edi
andl $0xf0f0f0f0, %edi
xorl %edi, %esi
xorl %edi, %eax
rorl $4, %eax
movl %eax, (%ebx)                       # write plaintext block back
movl %esi, 4(%ebx)
popl %edi
popl %esi
popl %ebp
popl %ebx
ret
.des_decrypt3_end:
.size des_decrypt3 , .des_decrypt3_end-des_decrypt3
.ident "desasm.pl"
.text
.align 16
.globl des_ncbc_encrypt
.type des_ncbc_encrypt , @function
#-----------------------------------------------------------------------
# des_ncbc_encrypt(in, out, length, ks, ivec, enc)  -- DES-CBC over a
# byte string, generated by desasm.pl.  cdecl, i386.
# Whole 8-byte blocks are processed in the main loops; a trailing
# partial block (length % 8) is handled through the ej*/dj* jump-table
# tails.  The updated IV is written back to ivec on exit.
#
# Fixes applied relative to the generated original:
#   - decrypt tail now dispatches through .L027cbc_dec_jmp_table (the
#     table was defined but never referenced, so every partial length
#     fell into the 7-byte path)
#   - .L024dj3 uses shrl (not sall) so dj2/dj1 still see bytes 0-1
#   - dj2/dj1 store to the output (%edi), not the input (%esi)
#-----------------------------------------------------------------------
des_ncbc_encrypt:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 28(%esp), %ebp                     # ebp = length
movl 36(%esp), %ebx                     # ebx = ivec
movl (%ebx), %esi
movl 4(%ebx), %edi
pushl %edi                              # two working IV copies on stack
pushl %esi
pushl %edi
pushl %esi
movl %esp, %ebx                         # ebx = &iv scratch (des_encrypt arg)
movl 36(%esp), %esi                     # esi = in
movl 40(%esp), %edi                     # edi = out
movl 56(%esp), %ecx                     # ecx = enc flag
pushl %ecx                              # build des_encrypt(block, ks, enc) frame
movl 52(%esp), %eax                     # eax = ks
pushl %eax
pushl %ebx
cmpl $0, %ecx
jz .L004decrypt
# ---------------- encrypt ----------------
andl $4294967288, %ebp                  # round length down to multiple of 8
movl 12(%esp), %eax                     # eax:ebx = IV
movl 16(%esp), %ebx
jz .L005encrypt_finish
.L006encrypt_loop:
movl (%esi), %ecx                       # plaintext block
movl 4(%esi), %edx
xorl %ecx, %eax                         # CBC: xor with previous ciphertext/IV
xorl %edx, %ebx
movl %eax, 12(%esp)
movl %ebx, 16(%esp)
call des_encrypt
movl 12(%esp), %eax                     # eax:ebx = ciphertext (next IV)
movl 16(%esp), %ebx
movl %eax, (%edi)
movl %ebx, 4(%edi)
addl $8, %esi
addl $8, %edi
subl $8, %ebp
jnz .L006encrypt_loop
.L005encrypt_finish:
movl 56(%esp), %ebp                     # ebp = length % 8
andl $7, %ebp
jz .L007finish
# gather the 1..7 trailing input bytes into ecx:edx (rest zero-padded)
xorl %ecx, %ecx
xorl %edx, %edx
movl .L008cbc_enc_jmp_table(,%ebp,4),%ebp
jmp *%ebp
.L009ej7:
movb 6(%esi), %dh
sall $8, %edx
.L010ej6:
movb 5(%esi), %dh
.L011ej5:
movb 4(%esi), %dl
.L012ej4:
movl (%esi), %ecx
jmp .L013ejend
.L014ej3:
movb 2(%esi), %ch
sall $8, %ecx
.L015ej2:
movb 1(%esi), %ch
.L016ej1:
movb (%esi), %cl
.L013ejend:
xorl %ecx, %eax
xorl %edx, %ebx
movl %eax, 12(%esp)
movl %ebx, 16(%esp)
call des_encrypt
movl 12(%esp), %eax
movl 16(%esp), %ebx
movl %eax, (%edi)                       # final block always written whole
movl %ebx, 4(%edi)
jmp .L007finish
# ---------------- decrypt ----------------
.align 16
.L004decrypt:
andl $4294967288, %ebp
movl 20(%esp), %eax                     # eax:ebx = IV (decrypt-side copy)
movl 24(%esp), %ebx
jz .L017decrypt_finish
.L018decrypt_loop:
movl (%esi), %eax                       # ciphertext block
movl 4(%esi), %ebx
movl %eax, 12(%esp)
movl %ebx, 16(%esp)
call des_encrypt                        # enc flag on stack selects decrypt
movl 12(%esp), %eax
movl 16(%esp), %ebx
movl 20(%esp), %ecx
movl 24(%esp), %edx
xorl %eax, %ecx                         # xor with previous ciphertext/IV
xorl %ebx, %edx
movl (%esi), %eax                       # current ciphertext becomes next IV
movl 4(%esi), %ebx
movl %ecx, (%edi)
movl %edx, 4(%edi)
movl %eax, 20(%esp)
movl %ebx, 24(%esp)
addl $8, %esi
addl $8, %edi
subl $8, %ebp
jnz .L018decrypt_loop
.L017decrypt_finish:
movl 56(%esp), %ebp                     # ebp = length % 8
andl $7, %ebp
jz .L007finish
movl (%esi), %eax
movl 4(%esi), %ebx
movl %eax, 12(%esp)
movl %ebx, 16(%esp)
call des_encrypt
movl 12(%esp), %eax
movl 16(%esp), %ebx
movl 20(%esp), %ecx                     # ecx:edx = recovered plaintext
movl 24(%esp), %edx
xorl %eax, %ecx
xorl %ebx, %edx
movl (%esi), %eax                       # eax:ebx = last ciphertext (new IV)
movl 4(%esi), %ebx
movl .L027cbc_dec_jmp_table(,%ebp,4),%ebp  # FIX: dispatch on tail length
jmp *%ebp
# store only the 1..7 requested output bytes
.L019dj7:
rorl $16, %edx
movb %dl, 6(%edi)
shrl $16, %edx
.L020dj6:
movb %dh, 5(%edi)
.L021dj5:
movb %dl, 4(%edi)
.L022dj4:
movl %ecx, (%edi)
jmp .L023djend
.L024dj3:
rorl $16, %ecx
movb %cl, 2(%edi)
shrl $16, %ecx                          # FIX: was sall, which zeroed bytes 0-1
.L025dj2:
movb %ch, 1(%edi)                       # FIX: was 1(%esi) -- wrote into input
.L026dj1:
movb %cl, (%edi)                        # FIX: was (%esi)
.L023djend:
jmp .L007finish
.align 16
.L007finish:
movl 64(%esp), %ecx                     # ecx = ivec
addl $28, %esp                          # drop arg frame + IV scratch
movl %eax, (%ecx)                       # store updated IV
movl %ebx, 4(%ecx)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 16
.L008cbc_enc_jmp_table:
.long 0
.long .L016ej1
.long .L015ej2
.long .L014ej3
.long .L012ej4
.long .L011ej5
.long .L010ej6
.long .L009ej7
.align 16
.L027cbc_dec_jmp_table:
.long 0
.long .L026dj1
.long .L025dj2
.long .L024dj3
.long .L022dj4
.long .L021dj5
.long .L020dj6
.long .L019dj7
.des_ncbc_encrypt_end:
.size des_ncbc_encrypt , .des_ncbc_encrypt_end-des_ncbc_encrypt
.ident "desasm.pl"
.text
.align 16
.globl des_ede3_cbc_encrypt
.type des_ede3_cbc_encrypt , @function
#-----------------------------------------------------------------------
# des_ede3_cbc_encrypt(in, out, length, ks1, ks2, ks3, ivec, enc)
# 3DES-EDE CBC over a byte string, generated by desasm.pl.  cdecl, i386.
# Same shape as des_ncbc_encrypt but calls des_encrypt3/des_decrypt3
# and carries three key schedules on the stack.
#
# Fixes applied relative to the generated original (same trio as
# des_ncbc_encrypt):
#   - decrypt tail dispatches through .L051cbc_dec_jmp_table
#     (previously defined but unreferenced)
#   - .L048dj3 uses shrl (not sall)
#   - dj2/dj1 store to the output (%edi), not the input (%esi)
#-----------------------------------------------------------------------
des_ede3_cbc_encrypt:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 28(%esp), %ebp                     # ebp = length
movl 44(%esp), %ebx                     # ebx = ivec
movl (%ebx), %esi
movl 4(%ebx), %edi
pushl %edi                              # two working IV copies on stack
pushl %esi
pushl %edi
pushl %esi
movl %esp, %ebx                         # ebx = &iv scratch (block pointer)
movl 36(%esp), %esi                     # esi = in
movl 40(%esp), %edi                     # edi = out
movl 64(%esp), %ecx                     # ecx = enc flag
# push ks3, ks2, ks1 for the des_encrypt3/des_decrypt3 frame
movl 56(%esp), %eax
pushl %eax
movl 56(%esp), %eax
pushl %eax
movl 56(%esp), %eax
pushl %eax
pushl %ebx
cmpl $0, %ecx
jz .L028decrypt
# ---------------- encrypt ----------------
andl $4294967288, %ebp                  # round length down to multiple of 8
movl 16(%esp), %eax                     # eax:ebx = IV
movl 20(%esp), %ebx
jz .L029encrypt_finish
.L030encrypt_loop:
movl (%esi), %ecx                       # plaintext block
movl 4(%esi), %edx
xorl %ecx, %eax                         # CBC chaining xor
xorl %edx, %ebx
movl %eax, 16(%esp)
movl %ebx, 20(%esp)
call des_encrypt3
movl 16(%esp), %eax                     # ciphertext = next IV
movl 20(%esp), %ebx
movl %eax, (%edi)
movl %ebx, 4(%edi)
addl $8, %esi
addl $8, %edi
subl $8, %ebp
jnz .L030encrypt_loop
.L029encrypt_finish:
movl 60(%esp), %ebp                     # ebp = length % 8
andl $7, %ebp
jz .L031finish
# gather the 1..7 trailing input bytes into ecx:edx (rest zero-padded)
xorl %ecx, %ecx
xorl %edx, %edx
movl .L032cbc_enc_jmp_table(,%ebp,4),%ebp
jmp *%ebp
.L033ej7:
movb 6(%esi), %dh
sall $8, %edx
.L034ej6:
movb 5(%esi), %dh
.L035ej5:
movb 4(%esi), %dl
.L036ej4:
movl (%esi), %ecx
jmp .L037ejend
.L038ej3:
movb 2(%esi), %ch
sall $8, %ecx
.L039ej2:
movb 1(%esi), %ch
.L040ej1:
movb (%esi), %cl
.L037ejend:
xorl %ecx, %eax
xorl %edx, %ebx
movl %eax, 16(%esp)
movl %ebx, 20(%esp)
call des_encrypt3
movl 16(%esp), %eax
movl 20(%esp), %ebx
movl %eax, (%edi)                       # final block always written whole
movl %ebx, 4(%edi)
jmp .L031finish
# ---------------- decrypt ----------------
.align 16
.L028decrypt:
andl $4294967288, %ebp
movl 24(%esp), %eax                     # eax:ebx = IV (decrypt-side copy)
movl 28(%esp), %ebx
jz .L041decrypt_finish
.L042decrypt_loop:
movl (%esi), %eax                       # ciphertext block
movl 4(%esi), %ebx
movl %eax, 16(%esp)
movl %ebx, 20(%esp)
call des_decrypt3
movl 16(%esp), %eax
movl 20(%esp), %ebx
movl 24(%esp), %ecx
movl 28(%esp), %edx
xorl %eax, %ecx                         # xor with previous ciphertext/IV
xorl %ebx, %edx
movl (%esi), %eax                       # current ciphertext becomes next IV
movl 4(%esi), %ebx
movl %ecx, (%edi)
movl %edx, 4(%edi)
movl %eax, 24(%esp)
movl %ebx, 28(%esp)
addl $8, %esi
addl $8, %edi
subl $8, %ebp
jnz .L042decrypt_loop
.L041decrypt_finish:
movl 60(%esp), %ebp                     # ebp = length % 8
andl $7, %ebp
jz .L031finish
movl (%esi), %eax
movl 4(%esi), %ebx
movl %eax, 16(%esp)
movl %ebx, 20(%esp)
call des_decrypt3
movl 16(%esp), %eax
movl 20(%esp), %ebx
movl 24(%esp), %ecx                     # ecx:edx = recovered plaintext
movl 28(%esp), %edx
xorl %eax, %ecx
xorl %ebx, %edx
movl (%esi), %eax                       # eax:ebx = last ciphertext (new IV)
movl 4(%esi), %ebx
movl .L051cbc_dec_jmp_table(,%ebp,4),%ebp  # FIX: dispatch on tail length
jmp *%ebp
# store only the 1..7 requested output bytes
.L043dj7:
rorl $16, %edx
movb %dl, 6(%edi)
shrl $16, %edx
.L044dj6:
movb %dh, 5(%edi)
.L045dj5:
movb %dl, 4(%edi)
.L046dj4:
movl %ecx, (%edi)
jmp .L047djend
.L048dj3:
rorl $16, %ecx
movb %cl, 2(%edi)
shrl $16, %ecx                          # FIX: was sall, which zeroed bytes 0-1
.L049dj2:
movb %ch, 1(%edi)                       # FIX: was 1(%esi) -- wrote into input
.L050dj1:
movb %cl, (%edi)                        # FIX: was (%esi)
.L047djend:
jmp .L031finish
.align 16
.L031finish:
movl 76(%esp), %ecx                     # ecx = ivec
addl $32, %esp                          # drop arg frame + IV scratch
movl %eax, (%ecx)                       # store updated IV
movl %ebx, 4(%ecx)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 16
.L032cbc_enc_jmp_table:
.long 0
.long .L040ej1
.long .L039ej2
.long .L038ej3
.long .L036ej4
.long .L035ej5
.long .L034ej6
.long .L033ej7
.align 16
.L051cbc_dec_jmp_table:
.long 0
.long .L050dj1
.long .L049dj2
.long .L048dj3
.long .L046dj4
.long .L045dj5
.long .L044dj6
.long .L043dj7
.des_ede3_cbc_encrypt_end:
.size des_ede3_cbc_encrypt , .des_ede3_cbc_encrypt_end-des_ede3_cbc_encrypt
.ident "desasm.pl"
PPUCTRL = $2000
PPUMASK = $2001
PPUSTATUS = $2002
PPUSCROLL = $2005
PPUADDR = $2006
PPUDATA = $2007
P1 = $4016
P2 = $4017
.segment "ZEROPAGE"
retraces: .res 3
isPAL: .res 1
palstate: .res 1
joy1: .res 1
last_joy1: .res 1
joy1new: .res 1
.segment "VECTORS"
.addr nmi, reset, irq
.segment "INESHDR"
.byt "NES", 26
.byt 1 ; number of 16 KB program segments
.byt 1 ; number of 8 KB chr segments
.byt 0 ; mapper, mirroring, etc
.byt 0 ; extended mapper info
.byt 0,0,0,0,0,0,0,0 ; f you DiskDude
.segment "CODE"
; NMI handler: count vblanks so the mainline can synchronise to the
; frame (see the retraces polling loop in tvLoop).
nmi:
inc retraces
; IRQ handler: unused -- NMI simply falls through to the shared rti.
irq:
rti
; Reset handler: standard NES power-on init, then show the health
; warning screen before entering the TV tests.
reset:
sei
lda #0
sta PPUCTRL             ; NMI off
sta PPUMASK             ; rendering off
cld
ldy #$40
sty P2                  ; $4017: inhibit APU frame IRQ
ldx #$FF
txs                     ; stack at $01FF
@warmup1:
bit PPUSTATUS           ; first vblank wait of the PPU warm-up
bpl @warmup1
; we have nearly 29000 cycles to init other parts of the NES
; so do it while waiting for the PPU to signal that it's warming up
@clearZP:
sta $00,x               ; A is still 0 here; clears $FF down to $01
dex
bne @clearZP            ; NOTE(review): $00 (retraces) is never cleared -- confirm intentional
; done with tasks; wait for warmup
@warmup2:
bit PPUSTATUS
bpl @warmup2
jsr detectPAL           ; sets isPAL (defined outside this chunk)
; Display television test
; Set palette
ldx #0
stx PPUMASK
jsr setGrayPalette
.segment "RODATA"
healthScreen:
.incbin "health.nam"    ; full nametable image for the health screen
.segment "CODE"
ldx #<healthScreen
ldy #>healthScreen
lda #$20                ; destination PPU address high byte ($2000)
jsr copyNT
jsr displayLoop         ; show screen until user input (defined elsewhere)
; NTSC color-cycling test screen; loops until A or Start is pressed.
tvTest:
; Skip this test on PAL
lda isPAL
beq :+
jmp aspectRatioTest
:
; Set up initial palette
ldx #0
stx PPUMASK
ldy #$3F
sty PPUADDR
stx PPUADDR        ; palette base $3F00
sty PPUDATA        ; entries $3F00-$3F04 = $3F
sty PPUDATA
sty PPUDATA
sty PPUDATA
sty PPUDATA
stx PPUDATA        ; $00
lda #$10
sta PPUDATA
lda #$30
sta PPUDATA
; Set up test screen
.segment "RODATA"
tvScreen:
.incbin "tv.nam"
.segment "CODE"
ldx #<tvScreen
ldy #>tvScreen
lda #$20
jsr copyNT
tvLoop:
; wait for vertical blanking NMI
lda PPUSTATUS
lda #$80
sta PPUCTRL
lda retraces
:
cmp retraces
beq :-
lda PPUSTATUS ; acknowledge NMI
; Rewrite palette
lda #0
sta PPUMASK
inc palstate
lda palstate
cmp #192
bcc :+
lda #0
sta palstate
:
lsr a              ; palstate/16 selects a hue 0..11
lsr a
lsr a
lsr a
clc
adc #$21           ; base color $21 + hue
ldy #$3F
sty PPUADDR
ldy #1
sty PPUADDR        ; palette entry $3F01
sta PPUDATA
clc
adc #4             ; next entry: 4 hues further, wrapped into $21..$2C
cmp #$2D
bcc :+
sbc #$0C           ; carry known set here (bcc not taken)
:
sta PPUDATA
clc
adc #4
cmp #$2D
bcc :+
sbc #$0C
:
sta PPUDATA
; Turn on screen
lda #$00
sta PPUSCROLL
sta PPUSCROLL
lda #$80
sta PPUCTRL
lda #%00001010
sta PPUMASK        ; BG on, sprites off
; Read controller
jsr readPad
lda joy1new
and #$90           ; A or Start newly pressed?
beq tvLoop
; Shows the NTSC or PAL aspect-ratio square, then cycles back to tvTest.
aspectRatioTest:
; Set palette
ldx #0
stx PPUMASK
jsr setGrayPalette
.segment "RODATA"
ntscAspectScreen:
.incbin "square.nam"
palAspectScreen:
.incbin "palsquare.nam"
.segment "CODE"
lda isPAL
beq @copyNTSC
ldx #<palAspectScreen
ldy #>palAspectScreen
bne @gotoCopy      ; branch always: Y holds a nonzero page number
@copyNTSC:
ldx #<ntscAspectScreen
ldy #>ntscAspectScreen
@gotoCopy:
lda #$20
jsr copyNT
aspectLoop:
jsr displayLoop    ; wait for A/Start
jmp tvTest
;;;
; Loads a four-entry grayscale palette at $3F00:
; $3F, $00, $10, $30.
.proc setGrayPalette
ldy #$3F
ldx #0
sty PPUADDR
stx PPUADDR        ; VRAM address = $3F00
sty PPUDATA        ; $3F
stx PPUDATA        ; $00
lda #$10
sta PPUDATA
lda #$30
sta PPUDATA
rts
.endproc
;;;
; Runs the game loop for one display,
; waiting for the player to press A.
; (Mask $90 also accepts Start.)
.proc displayLoop
; wait for vertical blanking NMI
lda PPUSTATUS
lda #$80
sta PPUCTRL        ; enable NMI
lda retraces
waitForNMI:
cmp retraces
beq waitForNMI
lda PPUSTATUS ; acknowledge NMI
; Turn on screen
lda #$00
sta PPUSCROLL
sta PPUSCROLL
lda #$80
sta PPUCTRL
lda #%00001010
sta PPUMASK        ; BG on, sprites off
; Read controller
jsr readPad
lda joy1new
and #$90           ; A or Start newly pressed?
beq displayLoop
rts
.endproc
;;;
; Copies 1024 bytes of nametable data from PRG ROM to VRAM.
; @param X bits 7-0 of source address
; @param Y bits 15-8 of source address
; @param A bits 15-8 of destination address
; Clobbers A, X, Y and zero-page $00-$01 (used as the source pointer;
; $01 is incremented past the end of the data).
; Call only with rendering disabled.
copyNT:
stx 0              ; source pointer low
sty 1              ; source pointer high
sta PPUADDR
ldy #0
sty PPUADDR        ; destination = A:00
ldx #4             ; 4 pages of 256 bytes
                   ; (the original had this instruction duplicated; the
                   ; redundant second LDX #4 has been removed)
@loop:
lda (0),y
sta PPUDATA
iny
bne @loop
inc 1
dex
bne @loop
rts
;;;
; Polls controller 1; updates joy1 (current), last_joy1 (previous) and
; joy1new (rising edges).  Bit order after the 8 serial reads:
; A,B,Select,Start,Up,Down,Left,Right = bits 7..0.
readPad:
; Strobe the joypad and set up the last frame joy state.
lda #1
sta P1
lda joy1
sta last_joy1
ldx #8
lda #0
sta P1
; Read the joypad out serially.
@loop:
lda P1
lsr a              ; serial bit -> carry
rol joy1           ; shift into result
dex
bne @loop
; Compute which buttons were newly pressed.
lda last_joy1
eor #$FF
and joy1
sta joy1new
rts
;;;
; Sets isPAL to nonzero if the machine is on 50 Hz timing.
; Counts NMIs (NMI fires regardless of SEI) across a fixed delay that is
; longer than one NTSC frame but shorter than one PAL frame.
.proc detectPAL
lda PPUSTATUS ; acknowledge any pending NMI
; wait for vblank with the screen turned off
lda #$80
sta PPUCTRL        ; NMI on
asl a              ; A = 0
sta PPUMASK        ; rendering off
lda retraces
:
cmp retraces
beq :-
lda retraces
; wait 24 * 1285 cycles, longer than the NTSC
; frame period but shorter than that of PAL
ldx #24
ldy #0
:
dey
bne :-
dex
bne :-
; compare to retrace count before waiting
; if we've had a vblank, then we're NTSC
; otherwise we're PAL
cmp retraces
bne notPAL
lda #1
sta isPAL
notPAL:
rts
.endproc
|
xem/nes | 2,547 | nes-test-roms/nmi_sync/demo_pal.s | ; Uses nmi_sync to manually display line on screen
; using timed write. See readme.txt.
;
; PAL NES only. Tested on hardware.
;
; ca65 -o rom.o demo_pal.s
; ld65 -C unrom.cfg rom.o -o demo_pal.nes
;
; Shay Green <gblargg@gmail.com>
.include "nmi_sync.s"
; Entry point: set up graphics, sync to the PPU (PAL variant), then idle
; forever; all drawing work happens in the cycle-timed NMI handler.
reset:
; Initialize PPU and palette
jsr init_graphics
; Synchronize with PPU and enable NMI
jsr init_nmi_sync_pal
; Loop endlessly
loop: jsr wait_nmi
; You could run normal code between NMIs here,
; as long as it completes BEFORE NMI. If it
; takes too long, synchronization may be off
; by a few cycles for that frame.
; ...
jmp loop
.align 256 ; branches must not cross page
; Cycle-timed NMI handler (PAL): DMAs sprites, then draws a one-scanline
; mark by briefly setting the PPU monochrome bit at an exact frame cycle.
; Do NOT reorder or add instructions here without re-counting cycles.
nmi:
pha
; Do this sometime before you DMA sprites
jsr begin_nmi_sync
; DMA then enable sprites. Instructions before
; STA $4014 (excluding begin_nmi_sync) must take
; an even number of cycles. The only required
; instruction here is STA $4014.
bit <0 ; to make cycle count even
lda #0
sta $2003
lda #>sprites
sta $4014
lda #$10
sta $2001
; Our instructions up to this point MUST total
; 6900 cycles, so we'll burn the rest in a loop.
; delay 6900 - 30
lda #9
sec
nmi_1: pha
lda #150
nmi_2: sbc #1
bne nmi_2
pla
sbc #1
bne nmi_1
jsr end_nmi_sync
; We're now synchronized exactly to 7471 cycles
; after beginning of frame.
; delay 20486 - 7471 - 5
nop
lda #85
sec
nmi_3: pha
lda #28
nmi_4: sbc #1
bne nmi_4
pla
sbc #1
bne nmi_3
; Draw short line using monochrome mode bit
lda #$11
sta $2001 ; writes 20486 cycles into frame
lda #$10
sta $2001
pla
rti
.align 256         ; page-aligned so a single OAM DMA page covers it
sprites:
; Reference sprites around manually-drawn line
.byte 118, 0, 0, 82
.byte 118, 0, 0, 82+8
.byte 118, 1, 0, 82+16
.byte 122, 0, 0, 84
.byte 122, 0, 0, 84+8
.res 256 - 5*4, $FF
; One-time PPU setup: wait out warm-up with rendering/NMI off, then fill
; the palette region at $3FE0 with alternating $3F/$30 entries.
init_graphics:
sei
; Init PPU
bit $2002
init_graphics_1:
bit $2002
bpl init_graphics_1
init_graphics_2:
bit $2002
bpl init_graphics_2
lda #0
sta $2000
sta $2001
; Load alternating black and white palette
lda #$3F
sta $2006
ldy #$E0
sty $2006          ; VRAM address = $3FE0
init_graphics_3:
sta $2007
eor #$0F           ; toggle $3F <-> $30
iny
bne init_graphics_3
rts
; Freeze program if this somehow gets triggered, rather
; than silently messing up timing
irq: jmp irq
.segment "HEADER"
.byte "NES",26, 2,1, 0,0 ; 32K PRG, 8K CHR, UNROM
.byte 0,0,0,0,0,0,0,0
.segment "VECTORS"
.word 0,0,0, nmi, reset, irq
.segment "CHARS"
; Characters for sprites
.byte $FF,0,0,0,0,0,0,0
.byte 0,0,0,0,0,0,0,0
.byte $FF,$FF,$FF,$FF,$FF,0,0,0
.byte 0,0,0,0,0,0,0,0
.res $2000 - $20   ; pad CHR bank to 8 KB
|
xem/nes | 2,543 | nes-test-roms/nmi_sync/demo_ntsc.s | ; Uses nmi_sync to manually display line on screen
; using timed write. See readme.txt.
;
; NTSC NES only. Tested on hardware.
;
; ca65 -o rom.o demo_ntsc.s
; ld65 -C unrom.cfg rom.o -o demo_ntsc.nes
;
; Shay Green <gblargg@gmail.com>
.include "nmi_sync.s"
; Entry point: set up graphics, sync to the PPU (NTSC variant), then idle
; forever; all drawing work happens in the cycle-timed NMI handler.
reset:
; Initialize PPU and palette
jsr init_graphics
; Synchronize to PPU and enable NMI
jsr init_nmi_sync
; Loop endlessly
loop: jsr wait_nmi
; You could run normal code between NMIs here,
; as long as it completes BEFORE NMI. If it
; takes too long, synchronization may be off
; by a few cycles for that frame.
; ...
jmp loop
.align 256 ; branches must not cross page
; Cycle-timed NMI handler (NTSC): DMAs sprites, then draws a one-scanline
; mark by briefly setting the PPU monochrome bit at an exact frame cycle.
; Do NOT reorder or add instructions here without re-counting cycles.
nmi:
pha
; Do this sometime before you DMA sprites
jsr begin_nmi_sync
; DMA then enable sprites. Instructions before
; STA $4014 (excluding begin_nmi_sync) must take
; an even number of cycles. The only required
; instruction here is STA $4014.
bit <0 ; to make cycle count even
lda #0
sta $2003
lda #>sprites
sta $4014
lda #$10
sta $2001
; Our instructions up to this point MUST total
; 1715 cycles, so we'll burn the rest in a loop.
; delay 1715 - 30
lda #29
sec
nmi_1: pha
lda #9
nmi_2: sbc #1
bne nmi_2
pla
sbc #1
bne nmi_1
jsr end_nmi_sync
; We're now synchronized exactly to 2286 cycles
; after beginning of frame.
; delay 16168 - 2286 - 5
nop
lda #24
sec
nmi_3: pha
lda #113
nmi_4: sbc #1
bne nmi_4
pla
sbc #1
bne nmi_3
; Draw short line using monochrome mode bit
lda #$11
sta $2001 ; writes 16168 cycles into frame
lda #$10
sta $2001
pla
rti
.align 256         ; page-aligned so a single OAM DMA page covers it
sprites:
; Reference sprites around manually-drawn line
.byte 118, 0, 0, 80
.byte 118, 0, 0, 80+8
.byte 118, 1, 0, 80+16
.byte 122, 0, 0, 80
.byte 122, 0, 0, 80+8
.res 256 - 5*4, $FF
; One-time PPU setup: wait out the warm-up period with rendering and NMI
; disabled, then fill the palette region starting at $3FE0 (which wraps
; into $3F00) with alternating black ($3F) and white ($30) entries.
init_graphics:
sei
; First read clears any stale vblank flag; then wait for two
; vertical blanks so the PPU accepts register writes reliably.
bit $2002
@vbl1:
bit $2002
bpl @vbl1
@vbl2:
bit $2002
bpl @vbl2
lda #0
sta $2000          ; NMI off for now
sta $2001          ; rendering off
; Point VRAM at $3FE0 and write 32 alternating palette bytes.
ldy #$20           ; 32 entries to write
lda #$3F
sta $2006
lda #$E0
sta $2006
lda #$3F
@pal:
sta $2007
eor #$0F           ; toggle $3F <-> $30
dey
bne @pal
rts
; Freeze program if this somehow gets triggered, rather
; than silently messing up timing
irq: jmp irq
.segment "HEADER"
.byte "NES",26, 2,1, 0,0 ; 32K PRG, 8K CHR, UNROM
.byte 0,0,0,0,0,0,0,0
.segment "VECTORS"
.word 0,0,0, nmi, reset, irq
.segment "CHARS"
; Characters for sprites
.byte $FF,0,0,0,0,0,0,0
.byte 0,0,0,0,0,0,0,0
.byte $FF,$FF,$FF,$FF,$FF,0,0,0
.byte 0,0,0,0,0,0,0,0
.res $2000 - $20   ; pad CHR bank to 8 KB
|
xem/nes | 3,120 | nes-test-roms/nmi_sync/nmi_sync.s | ; Allows precise PPU synchronization in NMI handler, without
; having to cycle-time code outside NMI handler.
.zeropage
nmi_sync_count: .res 1   ; frame sequence/parity counter shared by all routines
.code
.align 256 ; branches must not cross page
; Initializes synchronization and enables NMI
; Preserved: X, Y
; Time: 15 frames average, 28 frames max
; (Cycle-timed: do not reorder or add instructions.)
init_nmi_sync:
; Disable interrupts and rendering
sei
lda #0
sta $2000
sta $2001
; Coarse synchronize
bit $2002
init_nmi_sync_1:
bit $2002
bpl init_nmi_sync_1
; Synchronize to odd CPU cycle
sta $4014
; Fine synchronize
lda #3
init_nmi_sync_2:
sta nmi_sync_count
bit $2002
bit $2002
php
eor #$02
nop
nop
plp
bpl init_nmi_sync_2
; Delay one frame
init_nmi_sync_3:
bit $2002
bpl init_nmi_sync_3
; Enable rendering long enough for frame to
; be shortened if it's a short one, but not long
; enough that background will get displayed.
lda #$08
sta $2001
; Can reduce delay by up to 5 and this still works,
; so there's a good margin.
; delay 2377
lda #216
init_nmi_sync_4:
nop
nop
sec
sbc #1
bne init_nmi_sync_4
sta $2001
lda nmi_sync_count
; Wait for this and next frame to finish.
; If this frame was short, loop ends. If it was
; long, loop runs for a third frame.
init_nmi_sync_5:
bit $2002
bit $2002
php
eor #$02
sta nmi_sync_count
nop
nop
plp
bpl init_nmi_sync_5
; Enable NMI
lda #$80
sta $2000
rts
; Initializes synchronization and enables NMI on PAL NES
; Preserved: X, Y
; Time: about 20 frames
; (Simpler than the NTSC variant; presumably because PAL frames have a
; constant length - confirm against readme before changing.)
init_nmi_sync_pal:
; NMI will first occur within frame 2 after
; synchronization
lda #2
sta nmi_sync_count
; Disable interrupts and rendering
sei
lda #0
sta $2000
sta $2001
; Coarse synchronize
bit $2002
init_nmi_sync_pal_1:
bit $2002
bpl init_nmi_sync_pal_1
; Synchronize to odd CPU cycle
sta $4014
bit <0
; Fine synchronize
init_nmi_sync_pal_2:
bit <0
nop
bit $2002
bit $2002
bpl init_nmi_sync_pal_2
; Enable NMI
lda #$80
sta $2000
rts
; Waits until NMI occurs.
; Preserved: A, X, Y
; Spins until the NMI handler changes nmi_sync_count.
wait_nmi:
pha
; Reset high/low flag so NMI can depend on it
bit $2002
; NMI must not occur during taken branch, so we
; only use branch to get out of loop.
lda nmi_sync_count
wait_nmi_1:
cmp nmi_sync_count
bne wait_nmi_2
jmp wait_nmi_1
wait_nmi_2:
pla
rts
; Must be called in NMI handler, before sprite DMA.
; Preserved: X, Y
; The branch targets the very next instruction: one extra cycle when
; taken, based on the counter's parity bit.
; NOTE(review): this looks like frame-parity cycle compensation - verify
; against the accompanying readme before altering.
begin_nmi_sync:
lda nmi_sync_count
and #$02
beq begin_nmi_sync_1
begin_nmi_sync_1:
rts
; Must be called after sprite DMA. Instructions before this
; must total 1715 (NTSC)/6900 (PAL) cycles, treating
; JSR begin_nmi_sync and STA $4014 as taking 10 cycles total)
; Next instruction will begin 2286 (NTSC)/7471 (PAL) cycles
; after the cycle that the frame began in.
; Preserved: X, Y
; (The BMI pairs below also branch to the next instruction; BMI does not
; change flags, so both take the same path - again parity-dependent
; single-cycle adjustments.)
end_nmi_sync:
lda nmi_sync_count
inc nmi_sync_count
and #$02
bne end_nmi_sync_1
end_nmi_sync_1:
lda $2002
bmi end_nmi_sync_2
end_nmi_sync_2:
bmi end_nmi_sync_3
end_nmi_sync_3:
rts
; Keeps track of synchronization on frames where no
; synchronization is needed (where begin_nmi_sync/end_nmi_sync
; aren't called).
; Preserved: A, X, Y
track_nmi_sync:
inc nmi_sync_count
rts
|
xem/nes | 2,988 | nes-test-roms/full_palette/full_palette.s | ; Displays entire 400+ color NTSC NES palette on screen.
; Disables PPU rendering so that current scanline color can be
; set directly by VRAM address, then uses cycle-timed code to
; cycle through all colors in a clean grid.
;
; ca65 -o full_palette.o full_palette.s
; ld65 -t nes full_palette.o -o full_palette.nes
;
; Shay Green <gblargg@gmail.com>
.segment "HEADER"
.byte "NES",26, 2,1, 0,0
.segment "VECTORS"
.word 0,0,0, nmi, reset, irq
.segment "CHARS"
.res 8192
.segment "STARTUP" ; avoids warning
.segment "CODE"
even_frame = $200  ; frame-parity counter (RAM)
irq:
nmi: rti
; Waits for the start of the next vertical blank.
wait_vbl:
bit $2002
: bit $2002
bpl :-
rts
blacken_palette:
; Fill palette with black. Starts at $3FE0 so that VRAM
; address will wrap around to 0 afterwards, so that BG
; rendering will work correctly.
lda #$3F
sta $2006
lda #$E0
sta $2006
lda #$0F
ldy #$20
: sta $2007
dey
bne :-
rts
; Entry point: init PPU, clear the nametable, then cycle-synchronize the
; CPU to vertical blank before entering the timed display loop.
reset:
sei
ldx #$FF
txs
; Init PPU
jsr wait_vbl
jsr wait_vbl
lda #0
sta $2000
sta $2001
jsr blacken_palette
; Clear nametable
lda #$20
sta $2006
lda #$00
sta $2006
ldx #4
ldy #0
: sta $2007
iny
bne :-
dex
bne :-
; Synchronize precisely to VBL. VBL occurs every 29780.67
; CPU clocks. Loop takes 27 clocks. Every 1103 iterations,
; the second LDA $2002 will read exactly 29781 clocks
; after a previous read. Thus, the loop will effectively
; read $2002 one PPU clock later each frame. It starts out
; with VBL beginning sometime after this read, so that
; eventually VBL will begin just before the $2002 read,
; and thus leave CPU exactly synchronized to VBL.
jsr wait_vbl
nop
: nop
lda $2002
lda $2002
pha
pla
pha
pla
bpl :-
lda #0
sta even_frame
; Cycle-timed per-frame loop: with rendering disabled, steers the VRAM
; address through the palette each scanline so every color is displayed.
; Do NOT reorder or add instructions without re-counting cycles.
begin_frame:
jsr blacken_palette
; Enable BG so that PPU will make every other frame
; shorter by one PPU clock. This allows our code to
; synchronize better and reduce horizontal shaking.
lda #$08
sta $2001
; Delay 4739 cycles, well into frame
ldx #4
ldy #176
: dey
bne :-
dex
bne :-
nop
; Disable BG. Now electron beam color can be set by
; VRAM address pointing into palette.
lda #0
sta $2001
; Draw palette
ldy #0 ; Y = color
triplet:
; Draws one scanline of palette. Takes 106 cycles.
.macro draw_row
nop
nop
nop
tya
and #$18
asl a
ldx #$3F
stx $2006
stx $2006
tax
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
.endmacro
draw_row
; Palette writes are delayed a line, since VRAM address
; increments just after $2007 write. So we don't set
; color tint until after first row of triplet
tya
and #$E0
sta $2001
draw_row
iny
iny
iny
nop
draw_row
iny
beq :+ ; loop is more than 128 bytes, argh
jmp triplet
:
nop
; Delay 2869 cycles
ldy #239
: pha
pla
dey
bne :-
; Delay extra cycle every other frame
inc even_frame
lda even_frame
lsr a
bcs :+
: jmp begin_frame
|
xem/nes | 3,135 | nes-test-roms/full_palette/full_palette_smooth.s | ; Displays entire 400+ color NTSC NES palette on screen.
; Disables PPU rendering so that current scanline color can be
; set directly by VRAM address, then uses cycle-timed code to
; cycle through all colors in a clean grid.
;
; ca65 -o full_palette_smooth.o full_palette_smooth.s
; ld65 -t nes full_palette_smooth.o -o full_palette_smooth.nes
;
; Shay Green <gblargg@gmail.com>
.segment "HEADER"
.byte "NES",26, 2,1, 0,0
.segment "VECTORS"
.word 0,0,0, nmi, reset, irq
.segment "CHARS"
.res 8192
.segment "STARTUP" ; avoids warning
.segment "CODE"
even_frame = $200  ; frame-parity counter (RAM)
irq:
nmi: rti
; Waits for the start of the next vertical blank.
wait_vbl:
bit $2002
: bit $2002
bpl :-
rts
blacken_palette:
; Fill palette with black. Starts at $3FE0 so that VRAM
; address will wrap around to 0 afterwards, so that BG
; rendering will work correctly.
lda #$3F
sta $2006
lda #$E0
sta $2006
lda #$0F
ldy #$20
: sta $2007
dey
bne :-
rts
; Entry point: init PPU, clear the nametable, then cycle-synchronize the
; CPU to vertical blank before entering the timed display loop.
reset:
sei
ldx #$FF
txs
; Init PPU
jsr wait_vbl
jsr wait_vbl
lda #0
sta $2000
sta $2001
jsr blacken_palette
; Clear nametable
lda #$20
sta $2006
lda #$00
sta $2006
ldx #4
ldy #0
: sta $2007
iny
bne :-
dex
bne :-
; Synchronize precisely to VBL. VBL occurs every 29780.67
; CPU clocks. Loop takes 27 clocks. Every 1103 iterations,
; the second LDA $2002 will read exactly 29781 clocks
; after a previous read. Thus, the loop will effectively
; read $2002 one PPU clock later each frame. It starts out
; with VBL beginning sometime after this read, so that
; eventually VBL will begin just before the $2002 read,
; and thus leave CPU exactly synchronized to VBL.
jsr wait_vbl
nop
: nop
lda $2002
lda $2002
pha
pla
pha
pla
bpl :-
lda #0
sta even_frame
; Cycle-timed per-frame loop (smooth variant: hue order reordered via the
; tints table below).  Do NOT reorder or add instructions without
; re-counting cycles.
begin_frame:
jsr blacken_palette
; Enable BG so that PPU will make every other frame
; shorter by one PPU clock. This allows our code to
; synchronize better and reduce horizontal shaking.
lda #$08
sta $2001
; Delay 4739 cycles, well into frame
ldx #4
ldy #176
: dey
bne :-
dex
bne :-
nop
; Disable BG. Now electron beam color can be set by
; VRAM address pointing into palette.
lda #0
sta $2001
; Draw palette
ldy #$C0 ; Y = color
triplet:
; Draws one scanline of palette. Takes 98 cycles.
.macro draw_row
tya
and #$30
ldx #$3F
stx $2006
stx $2006
tax
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
inx
stx $2007
.endmacro
nop
nop
nop
nop
draw_row
; Palette writes are delayed a line, since VRAM address
; increments just after $2007 write. So we don't set
; color tint until after first row of triplet
tya
lsr a
and #$07
tax
lda tints,x
sta $2001
draw_row
pha
pla
pha
pla
nop
draw_row
iny
beq :+ ; loop is more than 128 bytes, argh
jmp triplet
:
nop
; Delay 2869 cycles
ldy #239
: pha
pla
dey
bne :-
; Delay extra cycle every other frame
inc even_frame
lda even_frame
lsr a
bcs :+
: jmp begin_frame
; Reorder color tints to be most gradual
tints: .byte $E0,$C0,$A0,$60,$20,$40,$80,$00
|
xem/nes | 4,095 | nes-test-roms/nrom368/crt0.s | ; Startup code for cc65 and Shiru's NES library
; based on code by Groepaz/Hitmen <groepaz@gmx.net>, Ullrich von Bassewitz <uz@cc65.org>
; Build-time configuration, zero-page layout, and iNES header for the
; cc65/neslib runtime (NROM-368 variant).
NES_MAPPER =0 ;mapper number
NES_PRG_BANKS =3 ;number of 16K PRG banks, change to 2 for NROM256
NES_CHR_BANKS =1 ;number of 8K chr banks
NES_MIRRORING =0 ;0 horizontal, 1 vertical, 8 four screen
FT_DPCM_OFF =$ff00 ;samples offset, $c000 or higher, 64-byte steps
FT_SFX_STREAMS =4 ;number of sound effects played at once, can be 4 or less (faster)
.define FT_DPCM_ENABLE 0 ;zero to exclude all the DMC code
.define FT_SFX_ENABLE 1 ;zero to exclude all the sound effects code
.define SPEED_FIX 1 ;zero if you want to handle PAL/NTSC speed difference by yourself
.export _exit,__STARTUP__:absolute=1
.import initlib,push0,popa,popax,_main,zerobss,copydata
; Linker generated symbols
.import __RAM_START__ ,__RAM_SIZE__
.import __ROM0_START__ ,__ROM0_SIZE__
.import __STARTUP_LOAD__,__STARTUP_RUN__,__STARTUP_SIZE__
.import __CODE_LOAD__ ,__CODE_RUN__ ,__CODE_SIZE__
.import __RODATA_LOAD__ ,__RODATA_RUN__ ,__RODATA_SIZE__
.include "zeropage.inc"
; PPU / APU / controller registers
PPU_CTRL =$2000
PPU_MASK =$2001
PPU_STATUS =$2002
PPU_OAM_ADDR=$2003
PPU_OAM_DATA=$2004
PPU_SCROLL =$2005
PPU_ADDR =$2006
PPU_DATA =$2007
PPU_OAM_DMA =$4014
PPU_FRAMECNT=$4017
DMC_FREQ =$4010
CTRL_PORT1 =$4016
CTRL_PORT2 =$4017
OAM_BUF =$0200
PAL_BUF =$01c0
; Zero-page variables used by neslib.s
FRAMECNT1 =$00
FRAMECNT2 =$01
NTSCMODE =$02
VRAMUPDATE =$03
PAD_STATE =$04 ;2 bytes, one per controller
PAD_STATEP =$06 ;2 bytes
PAD_STATET =$08 ;2 bytes
FT_TEMP =$0a ;7 bytes in zeropage
SCROLL_X =$11
SCROLL_Y =$12
PPU_CTRL_VAR=$13
PPU_MASK_VAR=$14
NAME_UPD_ADR=$15 ;word
NAME_UPD_LEN=$17
PAL_PTR =$18 ;word
RAND_SEED =$1a ;word
TEMP =$1c
PAD_BUF =TEMP+1
; Aliases into the TEMP scratch area
PTR =TEMP ;word
LEN =TEMP+2 ;word
NEXTSPR =TEMP+4
SCRX =TEMP+5
SCRY =TEMP+6
SRC =TEMP+7 ;word
DST =TEMP+9 ;word
RLE_LOW =TEMP
RLE_HIGH =TEMP+1
RLE_TAG =TEMP+2
RLE_BYTE =TEMP+3
FT_BASE_ADR =$0100 ;page in RAM, should be $xx00
FT_DPCM_PTR =(FT_DPCM_OFF&$3fff)>>6
.define FT_THREAD 1;undefine if you call sound effects in the same thread as sound update
.segment "HEADER"
.byte $4e,$45,$53,$1a
.byte NES_PRG_BANKS
.byte NES_CHR_BANKS
.byte NES_MIRRORING|(NES_MAPPER<<4)
.byte NES_MAPPER&$f0
.res 8,0
.segment "STARTUP"
; Reset entry: clear hardware and RAM, init the cc65 runtime and
; FamiTone, detect NTSC/PAL, then jump to _main.
start:
_exit:
sei
ldx #$ff
txs
inx
stx PPU_MASK       ; rendering off
stx DMC_FREQ       ; DMC IRQ off
stx PPU_CTRL ;no NMI
bit PPU_STATUS
@1:
bit PPU_STATUS
bpl @1
txa
@clearRAM:
sta $000,x
sta $100,x
sta $200,x
sta $300,x
sta $400,x
sta $500,x
sta $600,x
sta $700,x
inx
bne @clearRAM
@clearVRAM:
lda #$20
sta PPU_ADDR
txa
sta PPU_ADDR       ; VRAM $2000, clear $1000 bytes
ldy #$10
@3:
sta PPU_DATA
inx
bne @3
dey
bne @3
lda #4
jsr _pal_bright    ; normal brightness
lda #0
jsr _pal_clear
jsr _oam_clear
bit PPU_STATUS
@2:
bit PPU_STATUS
bpl @2
jsr zerobss
jsr copydata
lda #<(__RAM_START__+__RAM_SIZE__)
sta sp
lda #>(__RAM_START__+__RAM_SIZE__)
sta sp+1 ; Set argument stack ptr
jsr initlib
lda #%10000000
sta <PPU_CTRL_VAR
sta PPU_CTRL ;enable NMI
lda #%00000110
sta <PPU_MASK_VAR
lda <FRAMECNT1
@wait:
cmp <FRAMECNT1
beq @wait
ldx #52 ;blargg's code
ldy #24
@detectNTSC:
dex
bne @detectNTSC
dey
bne @detectNTSC
lda PPU_STATUS     ; vblank flag still set after the delay => PAL timing
and #$80
sta <NTSCMODE
jsr _ppu_off
lda <NTSCMODE
jsr FamiToneInit
.if(FT_DPCM_ENABLE)
ldx #<music_dpcm
ldy #>music_dpcm
jsr FamiToneSampleInit
.endif
.if(FT_SFX_ENABLE)
ldx #<sounds_data
ldy #>sounds_data
jsr FamiToneSfxInit
.endif
.if(!SPEED_FIX)
lda #0
sta <NTSCMODE
.endif
lda #$fd           ; non-zero PRNG seed
sta <RAND_SEED
sta <RAND_SEED+1
lda #0
sta PPU_SCROLL
sta PPU_SCROLL
jmp _main ;no parameters
; Library code, data segments and interrupt vectors.
.include "neslib.s"
.segment "RODATA"
.include "music.s"
.if(FT_SFX_ENABLE)
sounds_data:
.include "sounds.s"
.endif
.segment "SAMPLES"
;.incbin "music_dpcm.bin"
.segment "VECTORS"
.word nmi ;$fffa vblank nmi
.word start ;$fffc reset
.word irq ;$fffe irq / brk
.segment "CHARS"
.incbin "tileset.chr"
.res 2048,0 |
xem/nes | 12,004 | nes-test-roms/nrom368/neslib.s | ;NES hardware-dependent functions by Shiru (shiru@mail.ru)
;Feel free to do anything you want with this code, consider it Public Domain
.export _pal_all,_pal_bg,_pal_spr,_pal_col,_pal_clear,_pal_bright
.export _ppu_off,_ppu_on_all,_ppu_on_bg,_ppu_on_spr,_ppu_mask
.export _oam_clear,_oam_spr,_oam_meta_spr,_oam_hide_rest
.export _ppu_waitnmi
.export _unrle_vram
.export _scroll
.export _bank_spr,_bank_bg
.export _vram_read,_vram_write
.export _music_play,_music_stop,_music_pause
.export _sfx_play
.export _pad_poll,_pad_trigger,_pad_state
.export _rand8,_rand16,_set_rand
.export _set_vram_update,_vram_adr,_vram_put,_vram_fill,_vram_inc
.export _memcpy
;NMI handler
; When VRAMUPDATE is set: sprite DMA, palette upload through the
; brightness table at PAL_PTR, optional nametable update list, then
; scroll/ctrl restore, frame counters, and the FamiTone sound update.
nmi:
pha
txa
pha
tya
pha
lda <VRAMUPDATE
bne @upd
jmp @skipUpd
@upd:
ldx #$00
stx PPU_OAM_ADDR
lda #>OAM_BUF
sta PPU_OAM_DMA
lda #$3f
sta PPU_ADDR
stx PPU_ADDR
.repeat 4,I
ldy PAL_BUF+I
lda (PAL_PTR),y
sta PPU_DATA
.endrepeat
.repeat 7,J
lda PPU_DATA ;skip background color
.repeat 3,I
ldy PAL_BUF+5+(J*4)+I
lda (PAL_PTR),y
sta PPU_DATA
.endrepeat
.endrepeat
ldx <NAME_UPD_LEN
beq @skipUpd
ldy #0
@updName:
lda (NAME_UPD_ADR),y      ; address high
iny
sta PPU_ADDR
lda (NAME_UPD_ADR),y      ; address low
iny
sta PPU_ADDR
lda (NAME_UPD_ADR),y      ; data byte
iny
sta PPU_DATA
dex
bne @updName
@skipUpd:
lda #0
sta PPU_ADDR
sta PPU_ADDR
lda <SCROLL_X
sta PPU_SCROLL
lda <SCROLL_Y
sta PPU_SCROLL
lda <PPU_CTRL_VAR
sta PPU_CTRL
inc <FRAMECNT1
inc <FRAMECNT2            ; FRAMECNT2 counts 0..5 for the PAL speed fix
lda <FRAMECNT2
cmp #6
bne @skipNtsc
lda #0
sta <FRAMECNT2
@skipNtsc:
jsr FamiToneUpdate
pla
tay
pla
tax
pla
irq:
rti
;void __fastcall__ pal_all(const char *data);
; Copies 32 ($20) bytes into the shadow palette buffer.
_pal_all:
sta <PTR
stx <PTR+1
ldx #$00
lda #$20
; Shared tail: copy A bytes from (PTR) into PAL_BUF starting at X.
pal_copy:
sta <LEN
ldy #$00
@0:
lda (PTR),y
sta PAL_BUF,x
inx
iny
dec <LEN
bne @0
rts
;void __fastcall__ pal_bg(const char *data);
; Copies 16 bytes into the background half of the shadow palette.
_pal_bg:
sta <PTR
stx <PTR+1
ldx #$00
lda #$10
bne pal_copy ;bra
;void __fastcall__ pal_spr(const char *data);
; Copies 16 bytes into the sprite half of the shadow palette.
_pal_spr:
sta <PTR
stx <PTR+1
ldx #$10
txa
bne pal_copy ;bra
;void __fastcall__ pal_col(unsigned char index,unsigned char color);
; Sets one shadow-palette entry (index masked to 0..31).
_pal_col:
sta <PTR
jsr popa
and #$1f
tax
lda <PTR
sta PAL_BUF,x
rts
;void __fastcall__ pal_clear(void);
; Fills the shadow palette with $0F (black).
_pal_clear:
lda #$0f
ldx #0
@1:
sta PAL_BUF,x
inx
cpx #$20
bne @1
rts
;void __fastcall__ pal_bright(unsigned char bright);
; Points PAL_PTR at palBrightTable + bright*64 (one 64-byte remap row
; per brightness level); the NMI handler translates colors through it.
_pal_bright:
asl a
asl a
asl a
asl a
sta <PAL_PTR       ; bright*16, then *4 more via 16-bit shifts
asl <PAL_PTR
rol <PAL_PTR+1
asl <PAL_PTR
rol <PAL_PTR+1
lda <PAL_PTR
clc
adc #<palBrightTable
sta <PAL_PTR
lda <PAL_PTR+1
and #$03           ; keep only the carried-out high bits
adc #>palBrightTable
sta <PAL_PTR+1
rts
;void __fastcall__ ppu_off(void);
; Waits for a frame boundary, then disables rendering and NMI.
_ppu_off:
jsr _ppu_waitnmi
lda <PPU_MASK_VAR
and #%11100111
sta <PPU_MASK_VAR
sta PPU_MASK
lda #0
sta PPU_CTRL
rts
;void __fastcall__ ppu_on_all(void);
; Enables background and sprites.
_ppu_on_all:
lda <PPU_MASK_VAR
ora #%00011000
; Shared tail: apply the mask, re-enable NMI, and reset scroll latches.
ppu_onoff:
sta <PPU_MASK_VAR
sta PPU_MASK
lda #$80
sta PPU_CTRL
jsr _ppu_waitnmi
lda #$00
sta PPU_ADDR
sta PPU_ADDR
lda <PPU_CTRL_VAR
sta PPU_CTRL
rts
;void __fastcall__ ppu_on_bg(void);
_ppu_on_bg:
lda <PPU_MASK_VAR
ora #%00001000
bne ppu_onoff ;bra
;void __fastcall__ ppu_on_spr(void);
_ppu_on_spr:
lda <PPU_MASK_VAR
ora #%00010000
bne ppu_onoff ;bra
;void __fastcall__ ppu_mask(unsigned char mask);
; Sets PPU_MASK directly from the argument.
_ppu_mask:
sta <PPU_MASK_VAR
sta PPU_MASK
rts
;void __fastcall__ oam_clear(void);
; Hides all 64 sprites by setting every Y coordinate to $FF.
_oam_clear:
ldx #0
lda #$ff
@1:
sta OAM_BUF,x
inx
inx
inx
inx
bne @1
rts
;unsigned char __fastcall__ oam_spr(unsigned char x,unsigned char y,unsigned char chrnum,unsigned char attr,unsigned char sprid);
; Writes one sprite entry; returns sprid+4 (next free OAM offset).
_oam_spr:
tax                ; X = sprid (byte offset into OAM_BUF)
jsr popa
sta OAM_BUF+2,x    ; attribute
jsr popa
sta OAM_BUF+1,x    ; tile number
jsr popa
sta OAM_BUF+0,x    ; Y coordinate
jsr popa
sta OAM_BUF+3,x    ; X coordinate
txa
clc
adc #4
rts
;unsigned char __fastcall__ oam_meta_spr(unsigned char x,unsigned char y,unsigned char sprid,const unsigned char *data);
; Writes a metasprite list (x,y,tile,attr records, $80-terminated);
; returns the next free OAM offset.
_oam_meta_spr:
sta <PTR
stx <PTR+1
jsr popa
tax                ; X = sprid
jsr popa
sta <SCRY
jsr popa
sta <SCRX
ldy #0
@1:
lda (PTR),y ;x offset
cmp #$80           ; $80 terminates the list
beq @2
iny
clc
adc <SCRX
sta OAM_BUF+3,x
lda (PTR),y ;y offset
iny
clc
adc <SCRY
sta OAM_BUF+0,x
lda (PTR),y ;tile
iny
sta OAM_BUF+1,x
lda (PTR),y ;attribute
iny
sta OAM_BUF+2,x
inx
inx
inx
inx
jmp @1
@2:
txa
rts
;void __fastcall__ oam_hide_rest(unsigned char sprid);
; Moves all sprites from sprid onward off-screen (Y = 240).
_oam_hide_rest:
tax
lda #240
@1:
sta OAM_BUF,x
inx
inx
inx
inx
bne @1
rts
;void __fastcall__ ppu_waitnmi(void);
; Flags a VRAM update and blocks until the NMI handler runs; on NTSC
; (NTSCMODE nonzero) it also skips every 6th frame to match PAL speed.
_ppu_waitnmi:
lda #1
sta <VRAMUPDATE
lda <FRAMECNT1
@1:
cmp <FRAMECNT1
beq @1
lda <NTSCMODE
beq @3
@2:
lda <FRAMECNT2
cmp #5
beq @2
@3:
lda #0
sta <VRAMUPDATE
rts
;void __fastcall__ unrle_vram(const unsigned char *data,unsigned int vram);
; Decompresses RLE data (first byte = tag; tag,count = run) to VRAM.
; Call only with rendering disabled.
_unrle_vram:
stx PPU_ADDR
sta PPU_ADDR
jsr popax
sta <RLE_LOW
stx <RLE_HIGH
ldy #0
jsr rle_byte
sta <RLE_TAG
@1:
jsr rle_byte
cmp <RLE_TAG
beq @2
sta PPU_DATA
sta <RLE_BYTE
bne @1             ; branch always: A != tag here
@2:
jsr rle_byte       ; run length; 0 terminates the stream
cmp #0
beq @4
tax
lda <RLE_BYTE
@3:
sta PPU_DATA
dex
bne @3
beq @1             ; branch always
@4:
rts
; Fetches the next stream byte and advances the 16-bit pointer.
rle_byte:
lda (RLE_LOW),y
inc <RLE_LOW
bne @1
inc <RLE_HIGH
@1:
rts
;void __fastcall__ scroll(unsigned int x,unsigned int y);
; Stores the scroll position; y >= 240 selects the lower nametable.
; Nametable bits go into PPU_CTRL_VAR; applied by the NMI handler.
_scroll:
sta <TEMP
txa
bne @1
lda <TEMP
cmp #240
bcs @1
sta <SCROLL_Y
lda #0
sta <TEMP
beq @2 ;bra
@1:
sec
lda <TEMP
sbc #240
sta <SCROLL_Y
lda #2             ; nametable select bit (vertical)
sta <TEMP
@2:
jsr popax
sta <SCROLL_X
txa
and #$01           ; horizontal nametable select
ora <TEMP
sta <TEMP
lda <PPU_CTRL_VAR
and #$fc
ora <TEMP
sta <PPU_CTRL_VAR
rts
;void __fastcall__ bank_spr(unsigned char n);
; Selects pattern table 0/1 for sprites (PPU_CTRL bit 3).
_bank_spr:
and #$01
asl a
asl a
asl a
sta <TEMP
lda <PPU_CTRL_VAR
and #%11110111
ora <TEMP
sta <PPU_CTRL_VAR
rts
;void __fastcall__ bank_bg(unsigned char n);
; Selects pattern table 0/1 for the background (PPU_CTRL bit 4).
_bank_bg:
and #$01
asl a
asl a
asl a
asl a
sta <TEMP
lda <PPU_CTRL_VAR
and #%11101111
ora <TEMP
sta <PPU_CTRL_VAR
rts
;void __fastcall__ vram_read(unsigned char *dst,unsigned int adr,unsigned int size);
; Copies size bytes from VRAM to RAM (discards the buffered first read).
; Call only with rendering disabled.
_vram_read:
sta <TEMP          ; TEMP/TEMP+1 = 16-bit byte count
stx <TEMP+1
jsr popax
stx PPU_ADDR
sta PPU_ADDR
lda PPU_DATA       ; prime the $2007 read buffer
jsr popax
sta <TEMP+2        ; TEMP+2/+3 = destination pointer
stx <TEMP+3
ldy #0
@1:
lda PPU_DATA
sta (TEMP+2),y
inc <TEMP+2
bne @2
inc <TEMP+3
@2:
lda <TEMP          ; 16-bit counter decrement
bne @3
dec <TEMP+1
@3:
dec <TEMP
lda <TEMP
ora <TEMP+1
bne @1
rts
;void __fastcall__ vram_write(unsigned char *src,unsigned int adr,unsigned int size);
; Copies size bytes from RAM to VRAM.  Call only with rendering disabled.
_vram_write:
sta <TEMP
stx <TEMP+1
jsr popax
stx PPU_ADDR
sta PPU_ADDR
jsr popax
sta <TEMP+2        ; TEMP+2/+3 = source pointer
stx <TEMP+3
ldy #0
@1:
lda (TEMP+2),y
sta PPU_DATA
inc <TEMP+2
bne @2
inc <TEMP+3
@2:
lda <TEMP
bne @3
dec <TEMP+1
@3:
dec <TEMP
lda <TEMP
ora <TEMP+1
bne @1
rts
;void __fastcall__ music_play(const unsigned char *data);
; Hands the music data pointer (X=lo after swap, Y=hi... per FamiTone's
; X/Y convention) to FamiToneMusicStart.
_music_play:
stx <PTR
tax
ldy <PTR
jmp FamiToneMusicStart
;void __fastcall__ music_stop(void);
_music_stop=FamiToneMusicStop
;void __fastcall__ music_pause(unsigned char pause);
_music_pause=FamiToneMusicPause
;void __fastcall__ sfx_play(unsigned char sound,unsigned char channel);
; Maps the 0..3 channel argument to a FamiTone stream constant.
_sfx_play:
and #$03
tax
lda @sfxPriority,x
tax
jsr popa
jmp FamiToneSfxStart
@sfxPriority:
.byte FT_SFX_CH0,FT_SFX_CH1,FT_SFX_CH2,FT_SFX_CH3
;unsigned char __fastcall__ pad_poll(unsigned char pad);
; Reads the controller three times and takes two matching reads, to
; defeat the DMC-DMA corruption of $4016/$4017 reads.  Also updates
; the per-pad previous-state and trigger (newly pressed) bytes.
_pad_poll:
tay                ; Y = pad number (0/1) - selects $4016/$4017
ldx #0
@padPollPort:
lda #1
sta CTRL_PORT1     ; strobe
lda #0
sta CTRL_PORT1
lda #8
sta <TEMP
@padPollLoop:
lda CTRL_PORT1,y
lsr a
ror <PAD_BUF,x
dec <TEMP
bne @padPollLoop
inx
cpx #3
bne @padPollPort
lda <PAD_BUF
cmp <PAD_BUF+1
beq @done
cmp <PAD_BUF+2
beq @done
lda <PAD_BUF+1     ; first read disagreed twice; trust the second
@done:
sta <PAD_STATE,y
tax
eor <PAD_STATEP,y
and <PAD_STATE ,y
sta <PAD_STATET,y  ; rising edges
txa
sta <PAD_STATEP,y
rts
;unsigned char __fastcall__ pad_trigger(unsigned char pad);
; Polls, then returns only the newly pressed buttons.
_pad_trigger:
pha
jsr _pad_poll
pla
tax
lda <PAD_STATET,x
rts
;unsigned char __fastcall__ pad_state(unsigned char pad);
; Returns the last polled state without re-reading the hardware.
_pad_state:
tax
lda <PAD_STATE,x
rts
;unsigned char __fastcall__ rand8(void);
;Galois random generator, found somewhere
;out: A random number 0..255
; Two independent 8-bit LFSRs (taps $cf and $d7) over RAND_SEED/+1.
rand1:
lda <RAND_SEED
asl a
bcc @1
eor #$cf
@1:
sta <RAND_SEED
rts
rand2:
lda <RAND_SEED+1
asl a
bcc @1
eor #$d7
@1:
sta <RAND_SEED+1
rts
_rand8:
jsr rand1
jsr rand2
adc <RAND_SEED     ; combine both streams
rts
;unsigned int __fastcall__ rand16(void);
; Returns rand1 in X (high byte) and rand2 in A (low byte).
_rand16:
jsr rand1
tax
jsr rand2
rts
;void __fastcall__ set_rand(unsigned char seed);
; Seeds both LFSRs (A = low byte, X = high byte of the cc65 int arg).
_set_rand:
sta <RAND_SEED
stx <RAND_SEED+1
rts
;void __fastcall__ set_vram_update(unsigned char len,unsigned char *buf);
; Registers an addr-hi/addr-lo/data update list processed during NMI.
_set_vram_update:
sta <NAME_UPD_ADR
stx <NAME_UPD_ADR+1
jsr popa
sta <NAME_UPD_LEN
rts
;void __fastcall__ vram_adr(unsigned int adr);
; Sets the VRAM address latch.
_vram_adr:
stx PPU_ADDR
sta PPU_ADDR
rts
;void __fastcall__ vram_put(unsigned char n);
; Writes one byte at the current VRAM address.
_vram_put:
sta PPU_DATA
rts
;void __fastcall__ vram_fill(unsigned char n,unsigned int len);
; Writes len copies of n starting at the current VRAM address.
_vram_fill:
sta <LEN
stx <LEN+1
jsr popa
ldx <LEN+1
beq @2
ldx #0             ; full 256-byte pages first
@1:
sta PPU_DATA
dex
bne @1
dec <LEN+1
bne @1
@2:
ldx <LEN           ; remaining bytes
beq @4
@3:
sta PPU_DATA
dex
bne @3
@4:
rts
;void __fastcall__ vram_inc(unsigned char n);
; Selects VRAM auto-increment: 0 = +1 (across), nonzero = +32 (down).
_vram_inc:
beq @1
lda #$04
@1:
sta <TEMP
lda <PPU_CTRL_VAR
and #$fb
ora <TEMP
sta <PPU_CTRL_VAR
sta PPU_CTRL
rts
;void __fastcall__ memcpy(void *dst,void *src,unsigned int len);
; Forward copy: whole 256-byte pages first (X wraps 0->255 via DEX in
; the inner loop), then the remaining LEN low-byte count.
_memcpy:
sta <LEN
stx <LEN+1
jsr popax
sta <SRC
stx <SRC+1
jsr popax
sta <DST
stx <DST+1
ldx #0
@1:
lda <LEN+1
beq @2
jsr @3             ; copy one full page (X=0 -> 256 iterations)
dec <LEN+1
inc <SRC+1
inc <DST+1
jmp @1
@2:
ldx <LEN
beq @5
@3:
ldy #0
@4:
lda (SRC),y
sta (DST),y
iny
dex
bne @4
@5:
rts
;palette brightness lookup: 9 brightness levels * 64 NES palette entries.
;index = brightness*64 + original color; level 4 is the unchanged palette,
;0 is all black ($0f), 8 is all white ($30). $0d is avoided (replaced by $0f)
;because it is the "blacker than black" forbidden color.
palBrightTable:
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f ;0 black
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f ;1
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f
.byte $00,$01,$02,$03,$04,$05,$06,$07,$08,$09,$0a,$0b,$0c,$0f,$0e,$0f
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f ;2
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f
.byte $00,$01,$02,$03,$04,$05,$06,$07,$08,$09,$0a,$0b,$0c,$0f,$0e,$0f
.byte $10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$1a,$1b,$1c,$1f,$1e,$0f
.byte $0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f,$0f ;3
.byte $00,$01,$02,$03,$04,$05,$06,$07,$08,$09,$0a,$0b,$0c,$0f,$0e,$0f
.byte $10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$1a,$1b,$1c,$1f,$1e,$0f
.byte $20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$2a,$2b,$2c,$2d,$2e,$0f
.byte $00,$01,$02,$03,$04,$05,$06,$07,$08,$09,$0a,$0b,$0c,$0f,$0e,$0f ;4 normal
.byte $10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$1a,$1b,$1c,$1f,$1e,$0f
.byte $20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$2a,$2b,$2c,$2d,$2e,$0f
.byte $30,$31,$32,$33,$34,$35,$36,$37,$38,$39,$3a,$3b,$3c,$3d,$3e,$0f
.byte $10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$1a,$1b,$1c,$1f,$1e,$2d ;5
.byte $20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$2a,$2b,$2c,$2d,$2e,$2d
.byte $30,$31,$32,$33,$34,$35,$36,$37,$38,$39,$3a,$3b,$3c,$3d,$3e,$2d
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$2d
.byte $20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$2a,$2b,$2c,$2d,$2e,$00 ;6
.byte $30,$31,$32,$33,$34,$35,$36,$37,$38,$39,$3a,$3b,$3c,$3d,$3e,$00
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$00
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$00
.byte $30,$31,$32,$33,$34,$35,$36,$37,$38,$39,$3a,$3b,$3c,$3d,$3e,$10 ;7
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$10
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$10
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$10
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30 ;8 white
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30
.byte $30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30,$30
.include "famitone.s"
;FamiTone audio library v1.24
;by Shiru (shiru@mail.ru) 06'11
;The name is suggested by Memblers from NesDev
;Feel free to do anything you want with this code, consider it Public Domain
;NOTE(review): the including file must define FT_BASE_ADR (one RAM page),
;FT_TEMP (zero page), FT_SFX_ENABLE, FT_DPCM_ENABLE, FT_THREAD and, when
;sound effects are on, FT_SFX_STREAMS -- confirm against the build setup
;aliases for APU registers
APU_PL1_VOL = $4000
APU_PL1_SWEEP = $4001
APU_PL1_LO = $4002
APU_PL1_HI = $4003
APU_PL2_VOL = $4004
APU_PL2_SWEEP = $4005
APU_PL2_LO = $4006
APU_PL2_HI = $4007
APU_TRI_LINEAR = $4008
APU_TRI_LO = $400a
APU_TRI_HI = $400b
APU_NOISE_VOL = $400c
APU_NOISE_LO = $400e
APU_NOISE_HI = $400f
APU_DMC_FREQ = $4010
APU_DMC_RAW = $4011
APU_DMC_START = $4012
APU_DMC_LEN = $4013
APU_SND_CHN = $4015
;all the FamiTone variables take 112+15*FT_SFX_STREAMS bytes in a RAM page
FT_FRAME_CNT = FT_BASE_ADR
FT_SONG_SPEED = FT_BASE_ADR+1
FT_INSTRUMENT_L = FT_BASE_ADR+2
FT_INSTRUMENT_H = FT_BASE_ADR+3
FT_PULSE1_PREV = FT_BASE_ADR+4
FT_PULSE2_PREV = FT_BASE_ADR+5
FT_CHANNELS = FT_BASE_ADR+6
FT_CH1_VARS = FT_CHANNELS
FT_CH2_VARS = FT_CHANNELS+9
FT_CH3_VARS = FT_CHANNELS+18
FT_CH4_VARS = FT_CHANNELS+27
FT_CH5_VARS = FT_CHANNELS+36
FT_ENVELOPES = FT_BASE_ADR+51
FT_CH1_ENVS = FT_ENVELOPES ;three envelopes (5*3 bytes) for pulse and triangle
FT_CH2_ENVS = FT_ENVELOPES+15
FT_CH3_ENVS = FT_ENVELOPES+30
FT_CH4_ENVS = FT_ENVELOPES+45 ;only two envelopes (5*2 bytes) for noise
FT_DPCM_TABLE_L = FT_BASE_ADR+106
FT_DPCM_TABLE_H = FT_BASE_ADR+107
FT_DPCM_EFFECT = FT_BASE_ADR+108
FT_SFX_ADR_L = FT_BASE_ADR+109
FT_SFX_ADR_H = FT_BASE_ADR+110
FT_PAL_ADJUST = FT_BASE_ADR+111
;envelope variables offsets, every envelope uses 5 bytes
;struct fields are declared as FT_BASE_ADR+offset and addressed as "field,x"
;with X holding the structure's offset inside the page (see .lobyte() below)
FT_ENV_STRUCT_SIZE = 5
FT_ENV_VALUE = FT_BASE_ADR+0
FT_ENV_REPEAT = FT_BASE_ADR+1
FT_ENV_ADR_L = FT_BASE_ADR+2
FT_ENV_ADR_H = FT_BASE_ADR+3
FT_ENV_PTR = FT_BASE_ADR+4
;channels variables offsets, every channel uses 9 bytes
FT_CHN_STRUCT_SIZE = 9
FT_CHN_REPEAT = FT_BASE_ADR+0
FT_CHN_NOTE = FT_BASE_ADR+1
FT_CHN_INSTRUMENT = FT_BASE_ADR+2
FT_CHN_DUTY = FT_BASE_ADR+3
FT_CHN_PTR_L = FT_BASE_ADR+4
FT_CHN_PTR_H = FT_BASE_ADR+5
FT_CHN_RETURN_L = FT_BASE_ADR+6
FT_CHN_RETURN_H = FT_BASE_ADR+7
FT_CHN_REF_LEN = FT_BASE_ADR+8
;aliases for outputs
FT_CH1_NOTE = FT_CH1_VARS+.lobyte(FT_CHN_NOTE)
FT_CH2_NOTE = FT_CH2_VARS+.lobyte(FT_CHN_NOTE)
FT_CH3_NOTE = FT_CH3_VARS+.lobyte(FT_CHN_NOTE)
FT_CH4_NOTE = FT_CH4_VARS+.lobyte(FT_CHN_NOTE)
FT_CH5_NOTE = FT_CH5_VARS+.lobyte(FT_CHN_NOTE)
FT_CH1_VOLUME = FT_CH1_ENVS+.lobyte(FT_ENV_VALUE)
FT_CH2_VOLUME = FT_CH2_ENVS+.lobyte(FT_ENV_VALUE)
FT_CH3_VOLUME = FT_CH3_ENVS+.lobyte(FT_ENV_VALUE)
FT_CH4_VOLUME = FT_CH4_ENVS+.lobyte(FT_ENV_VALUE)
FT_CH1_NOTE_OFF = FT_CH1_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE
FT_CH2_NOTE_OFF = FT_CH2_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE
FT_CH3_NOTE_OFF = FT_CH3_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE
FT_CH4_NOTE_OFF = FT_CH4_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE
FT_CH1_PITCH_OFF = FT_CH1_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE*2
FT_CH2_PITCH_OFF = FT_CH2_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE*2
FT_CH3_PITCH_OFF = FT_CH3_ENVS+.lobyte(FT_ENV_VALUE)+FT_ENV_STRUCT_SIZE*2
FT_CH1_DUTY = FT_CH1_VARS+.lobyte(FT_CHN_DUTY)
FT_CH2_DUTY = FT_CH2_VARS+.lobyte(FT_CHN_DUTY)
FT_CH3_DUTY = FT_CH3_VARS+.lobyte(FT_CHN_DUTY)
FT_CH4_DUTY = FT_CH4_VARS+.lobyte(FT_CHN_DUTY)
;output buffer, used then sound effects are enabled
FT_OUT_BUF = FT_BASE_ADR+112 ;11 bytes
;aliases for APU registers for music FamiTone
.if(!FT_SFX_ENABLE) ;if sound effects are disabled, just write to APU
FT_MR_PULSE1_V = APU_PL1_VOL
FT_MR_PULSE1_L = APU_PL1_LO
FT_MR_PULSE1_H = APU_PL1_HI
FT_MR_PULSE2_V = APU_PL2_VOL
FT_MR_PULSE2_L = APU_PL2_LO
FT_MR_PULSE2_H = APU_PL2_HI
FT_MR_TRI_V = APU_TRI_LINEAR
FT_MR_TRI_L = APU_TRI_LO
FT_MR_TRI_H = APU_TRI_HI
FT_MR_NOISE_V = APU_NOISE_VOL
FT_MR_NOISE_F = APU_NOISE_LO
.else ;otherwise write to output buffer
FT_MR_PULSE1_V = FT_OUT_BUF
FT_MR_PULSE1_L = FT_OUT_BUF+1
FT_MR_PULSE1_H = FT_OUT_BUF+2
FT_MR_PULSE2_V = FT_OUT_BUF+3
FT_MR_PULSE2_L = FT_OUT_BUF+4
FT_MR_PULSE2_H = FT_OUT_BUF+5
FT_MR_TRI_V = FT_OUT_BUF+6
FT_MR_TRI_L = FT_OUT_BUF+7
FT_MR_TRI_H = FT_OUT_BUF+8
FT_MR_NOISE_V = FT_OUT_BUF+9
FT_MR_NOISE_F = FT_OUT_BUF+10
.endif
;sound effect stream variables, 15 bytes per stream
FT_SFX_BASE_ADR = FT_BASE_ADR+123
FT_SFX_STRUCT_SIZE = 15
FT_SFX_REPEAT = FT_SFX_BASE_ADR
FT_SFX_PTR_L = FT_SFX_BASE_ADR+1
FT_SFX_PTR_H = FT_SFX_BASE_ADR+2
FT_SFX_OFF = FT_SFX_BASE_ADR+3
FT_SFX_BUF = FT_SFX_BASE_ADR+4 ;11 bytes
;aliases for channels to use in user calls
FT_SFX_CH0 = 0
FT_SFX_CH1 = FT_SFX_STRUCT_SIZE
FT_SFX_CH2 = FT_SFX_STRUCT_SIZE*2
FT_SFX_CH3 = FT_SFX_STRUCT_SIZE*3
;reset APU, initialize FamiTone
;in: A 0 for PAL, not 0 for NTSC
;FT_PAL_ADJUST ends up $ff on NTSC (bmi tests skip PAL timing), 0..4 counter on PAL
FamiToneInit:
cmp #0
beq @pal
lda #$ff ;NTSC marker
@pal:
sta FT_PAL_ADJUST
lda #$0f ;enable channels, stop DMC
sta APU_SND_CHN
lda #$81 ;disable triangle length counter
sta APU_TRI_LINEAR
lda #$01 ;load noise length
sta APU_NOISE_HI
lda #$30 ;volumes to 0
sta APU_PL1_VOL
sta APU_PL2_VOL
sta APU_NOISE_VOL
lda #$08 ;no sweep
sta APU_PL1_SWEEP
sta APU_PL2_SWEEP
lda #$ff ;invalidate previous pulse period high bytes
sta FT_PULSE1_PREV
sta FT_PULSE2_PREV
;stop music that currently plays (it is a part of FamiToneInit as well)
FamiToneMusicStop:
ldx #.lobyte(FT_CHANNELS) ;reset all the channels variables
ldy #5 ;five channel structs
@setChannels:
lda #0
sta FT_CHN_PTR_L,x
sta FT_CHN_PTR_H,x
sta FT_CHN_REPEAT,x
sta FT_CHN_REF_LEN,x
sta FT_CHN_INSTRUMENT,x
lda #63 ;63 = "note off" code
sta FT_CHN_NOTE,x
lda #$30
sta FT_CHN_DUTY,x
txa
clc
adc #FT_CHN_STRUCT_SIZE ;advance X to the next channel struct
tax
dey
bne @setChannels
sty FT_CH1_VOLUME ;reset volumes (Y is 0 after the loop)
sty FT_CH2_VOLUME
sty FT_CH3_VOLUME
sty FT_CH4_VOLUME
sty FT_DPCM_EFFECT ;no DPCM effect playing
sty FT_SONG_SPEED ;no music playing
rts
;start playing a music
;in: X,Y address of the module (LSB,MSB)
;module layout: 5 channel pointers, instrument list pointer, then song speed
FamiToneMusicStart:
lda #0
sta FT_SONG_SPEED ;stop music update
stx <FT_TEMP ;FT_TEMP = module pointer for indirect reads
sty <FT_TEMP+1
tay ;A=0
ldx FT_PAL_ADJUST
bmi @noSetAdjust ;$ff means NTSC, leave marker alone
sta FT_PAL_ADJUST ;PAL: restart the 0..4 frame counter
@noSetAdjust:
ldx #.lobyte(FT_CHANNELS)
lda #5 ;five channels to set up
sta FT_TEMP+2
@setChannels:
lda (FT_TEMP),y ;read this channel's data pointer from the module
sta FT_CHN_PTR_L,x
iny
lda (FT_TEMP),y
sta FT_CHN_PTR_H,x
iny
lda #0
sta FT_CHN_REPEAT,x
sta FT_CHN_REF_LEN,x
sta FT_CHN_INSTRUMENT,x
lda #63 ;note off
sta FT_CHN_NOTE,x
lda #$30
sta FT_CHN_DUTY,x
txa
clc
adc #FT_CHN_STRUCT_SIZE
tax
dec <FT_TEMP+2
bne @setChannels
lda (FT_TEMP),y ;instrument list address follows the channel pointers
iny
sta FT_INSTRUMENT_L
lda (FT_TEMP),y
iny
sta FT_INSTRUMENT_H
lda (FT_TEMP),y ;song speed, kept on the stack until envelopes are set
pha
lda #4 ;init envelopes of all four music channels
sta <FT_TEMP+6
lda #.lobyte(FT_ENVELOPES)
@setEnvelopes:
pha ;save current envelope block offset
tax
lda #0 ;instrument 0
jsr setInstrument
pla
adc #FT_ENV_STRUCT_SIZE*3 ;NOTE(review): relies on setInstrument leaving carry clear
dec <FT_TEMP+6
bne @setEnvelopes
pla ;song speed saved earlier
sta FT_SONG_SPEED ;this enables music update
sta FT_FRAME_CNT
rts
;pause and unpause current music
;in: A 0 or not 0 to play or pause
;pause state is bit 7 of FT_SONG_SPEED (tested with bmi in FamiToneUpdate)
FamiToneMusicPause:
tax
lda FT_SONG_SPEED
cpx #0
beq @play
ora #$80 ;set pause bit
bne @set ;always taken (A has bit 7 set)
@play:
and #$7f ;clear pause bit
@set:
sta FT_SONG_SPEED
rts
;update FamiTone state, should be called every TV frame
;advances the song by rows, steps all envelopes, converts note+envelope state
;into APU register values (or into FT_OUT_BUF when sound effects are enabled),
;then mixes/flushes sound effect streams
FamiToneUpdate:
.if(FT_THREAD)
lda FT_TEMP ;save caller's FT_TEMP so the update can run from NMI
pha
lda FT_TEMP+1
pha
.endif
ldx FT_PAL_ADJUST ;for PAL mode count 0..4
bmi @noAdjust ;$ff = NTSC, no adjustment
inx
cpx #5
bne @noSkip
ldx #0
@noSkip:
stx FT_PAL_ADJUST
@noAdjust:
lda FT_SONG_SPEED
beq @noMusic
bmi @noMusic ;music paused
lda FT_FRAME_CNT ;check TV frame counter
beq @row ;if it is 0 or negative, it is a new row
bmi @row
bne @noRow
@row:
lda #.lobyte(FT_CHANNELS) ;start of a row, updating all the channels
sta <FT_TEMP+4
lda #.lobyte(FT_ENVELOPES)
sta <FT_TEMP+5
lda #4 ;process pulse, triangle, and noise channels
sta <FT_TEMP+6
ldy #0
@processChns:
ldx <FT_TEMP+4
jsr channelStep
bcs @noNewNote ;check if there was a new note
ldx <FT_TEMP+4 ;setting up a new note with current instrument
lda FT_CHN_INSTRUMENT,x
ldx <FT_TEMP+5
jsr setInstrument ;returns the instrument's duty byte in A
ldx <FT_TEMP+4
sta FT_CHN_DUTY,x
@noNewNote:
lda <FT_TEMP+4 ;next channel
clc
adc #FT_CHN_STRUCT_SIZE
sta <FT_TEMP+4
lda <FT_TEMP+5 ;next envelopes block
clc
adc #FT_ENV_STRUCT_SIZE*3
sta <FT_TEMP+5
dec <FT_TEMP+6
bne @processChns
.if(FT_DPCM_ENABLE)
ldx #.lobyte(FT_CH5_VARS) ;channel 5 drives DPCM samples
jsr channelStep
bcs @ch5done
cmp #63 ;note off stops the sample
bne @ch5note
jsr FamiToneSampleStop
jmp @ch5done
@ch5note:
lda FT_CH5_NOTE
jsr FamiToneSampleStartM
@ch5done:
.endif
lda FT_FRAME_CNT ;set TV frame counter
clc
adc FT_SONG_SPEED
sta FT_FRAME_CNT
@noRow:
lda #11 ;now process all the envelopes (11 because noise has no pitch)
sta <FT_TEMP+2
ldx #.lobyte(FT_ENVELOPES)
@processEnvs:
txa
pha
jsr envelopeStep
pla
clc
adc #FT_ENV_STRUCT_SIZE
tax
dec <FT_TEMP+2
bne @processEnvs
@noMusic:
lda FT_SONG_SPEED
bpl @noPause
lda #$30 ;paused: silence all music channels
sta FT_MR_PULSE1_V
sta FT_MR_PULSE2_V
sta FT_MR_NOISE_V
lda #$80
sta FT_MR_TRI_V
jmp @noSkipM
@noPause:
;converting output values and sending them to the APU or into output buffer
;per channel: period = noteTable[note + arpeggio offset] + signed pitch offset
lda FT_CH1_NOTE
cmp #63 ;63 = note off
bne @ch1note
lda #0
jmp @ch1cut
@ch1note:
clc
adc FT_CH1_NOTE_OFF ;apply arpeggio envelope
asl a
tax ;X = word index into noteTable
lda FT_CH1_PITCH_OFF
pha
adc noteTable,x
sta FT_MR_PULSE1_L
pla
ora #$7f ;sign-extend the pitch offset for the high byte add
bmi @ch1sign
lda #0
@ch1sign:
adc noteTable+1,x
.if(!FT_SFX_ENABLE)
cmp FT_PULSE1_PREV ;skip the $4003 write if unchanged (avoids phase reset click)
beq @ch1prev
sta FT_PULSE1_PREV
.endif
sta FT_MR_PULSE1_H
@ch1prev:
lda FT_CH1_VOLUME
@ch1cut:
ora FT_CH1_DUTY
sta FT_MR_PULSE1_V
lda FT_CH2_NOTE ;pulse 2, same as pulse 1
cmp #63
bne @ch2note
lda #0
jmp @ch2cut
@ch2note:
clc
adc FT_CH2_NOTE_OFF
asl a
tax
lda FT_CH2_PITCH_OFF
pha
adc noteTable,x
sta FT_MR_PULSE2_L
pla
ora #$7f
bmi @ch2sign
lda #0
@ch2sign:
adc noteTable+1,x
.if(!FT_SFX_ENABLE)
cmp FT_PULSE2_PREV
beq @ch2prev
sta FT_PULSE2_PREV
.endif
sta FT_MR_PULSE2_H
@ch2prev:
lda FT_CH2_VOLUME
@ch2cut:
ora FT_CH2_DUTY
sta FT_MR_PULSE2_V
lda FT_CH3_NOTE ;triangle, no previous-value check needed
cmp #63
bne @ch3note
lda #0
jmp @ch3cut
@ch3note:
clc
adc FT_CH3_NOTE_OFF
asl a
tax
lda FT_CH3_PITCH_OFF
pha
adc noteTable,x
sta FT_MR_TRI_L
pla
ora #$7f
bmi @ch3sign
lda #0
@ch3sign:
adc noteTable+1,x
sta FT_MR_TRI_H
lda FT_CH3_VOLUME
@ch3cut:
ora #$80 ;keep linear counter control bit set
sta FT_MR_TRI_V
lda FT_CH4_NOTE ;noise: 4-bit period, duty bit becomes mode flag
cmp #63
bne @ch4note
lda #0
jmp @ch4cut
@ch4note:
clc
adc FT_CH4_NOTE_OFF
and #$0f
eor #$0f ;invert so higher notes give shorter periods
sta <FT_TEMP
lda FT_CH4_DUTY
asl a
and #$80 ;move duty bit into the noise mode bit
ora <FT_TEMP
sta FT_MR_NOISE_F
lda FT_CH4_VOLUME
@ch4cut:
ora #$f0
sta FT_MR_NOISE_V
dec FT_FRAME_CNT
lda FT_PAL_ADJUST ;for PAL mode decrease row length every fifth frame
bne @noSkipM
dec FT_FRAME_CNT
@noSkipM:
.if(FT_SFX_ENABLE)
ldx #FT_SFX_CH0 ;process all the sound effects streams
lda #FT_SFX_STREAMS
sta <FT_TEMP+3
@updateSfxChannels:
jsr FamiToneSfxUpdate
txa
clc
adc #FT_SFX_STRUCT_SIZE
tax
dec <FT_TEMP+3
bne @updateSfxChannels
lda FT_OUT_BUF ;now send data from output buffer to the APU
sta APU_PL1_VOL
lda FT_OUT_BUF+1
sta APU_PL1_LO
lda FT_OUT_BUF+2
cmp FT_PULSE1_PREV ;skip $4003 write when unchanged
beq @noUpdatePulse1
sta FT_PULSE1_PREV
sta APU_PL1_HI
@noUpdatePulse1:
lda FT_OUT_BUF+3
sta APU_PL2_VOL
lda FT_OUT_BUF+4
sta APU_PL2_LO
lda FT_OUT_BUF+5
cmp FT_PULSE2_PREV
beq @noUpdatePulse2
sta FT_PULSE2_PREV
sta APU_PL2_HI
@noUpdatePulse2:
lda FT_OUT_BUF+6
sta APU_TRI_LINEAR
lda FT_OUT_BUF+7
sta APU_TRI_LO
lda FT_OUT_BUF+8
sta APU_TRI_HI
lda FT_OUT_BUF+9
sta APU_NOISE_VOL
lda FT_OUT_BUF+10
sta APU_NOISE_LO
.endif
.if(FT_THREAD)
pla ;restore caller's FT_TEMP
sta FT_TEMP+1
pla
sta FT_TEMP
.endif
rts
;set envelopes of an instrument
;in: A instrument number 0..31
; X is offset of block of envelopes in the FamiTone's RAM page
;out: Y 0, A = instrument's duty cycle byte; carry is clear on return
setInstrument:
asl a ;A*8, every instrument takes 8 bytes
asl a
asl a
clc
adc FT_INSTRUMENT_L ;get instrument address into FT_TEMP
sta <FT_TEMP
lda #0
adc FT_INSTRUMENT_H
sta <FT_TEMP+1
lda #3 ;three envelopes for pulse and triangle channels
cpx #.lobyte(FT_CH4_ENVS)
bne @noNoise
lda #2 ;only two envelopes for noise, no pitch
@noNoise:
sta <FT_TEMP+2
ldy #0
clc ;carry stays clear through the loop (adc below never overflows)
@loop:
lda (FT_TEMP),y ;get LSB of an envelope
sta FT_ENV_ADR_L,x
iny
lda (FT_TEMP),y ;get MSB
sta FT_ENV_ADR_H,x
iny
lda #0
sta FT_ENV_REPEAT,x ;reset repeat counter
sta FT_ENV_PTR,x ;reset envelope pointer
txa
adc #FT_ENV_STRUCT_SIZE ;advance to the next envelope slot
tax
dec <FT_TEMP+2
bne @loop
ldy #6
lda (FT_TEMP),y ;duty cycle
ldy #0
rts
;process channel
;in: X is offset of channel variables in the FamiTone's RAM page
; Y 0
;out: Carry is reset if the was new note (A then holds the note code)
;stream byte codes: 0..63 note, $80-$bf pause length, $40-$7f instrument,
;$ff reference, $fe end of channel/loop, other $c0+ speed change
channelStep:
lda FT_CHN_REF_LEN,x ;check reference rows counter
beq @noRef ;if it is zero, there is no reference
dec FT_CHN_REF_LEN,x ;decrease rows counter
bne @noRef
lda FT_CHN_RETURN_L,x ;end of reference, return to previous pointer
sta <FT_TEMP
lda FT_CHN_RETURN_H,x
sta <FT_TEMP+1
jmp @noRepeatR
@noRef:
lda FT_CHN_REPEAT,x ;check pause counter
beq @noRepeat
dec FT_CHN_REPEAT,x ;decrease pause counter
sec ;no new note
rts
@noRepeat:
lda FT_CHN_PTR_L,x ;load channel pointer into temp
sta <FT_TEMP
lda FT_CHN_PTR_H,x
sta <FT_TEMP+1
@noRepeatR:
@readByte:
lda (FT_TEMP),y ;read byte of the channel
inc <FT_TEMP ;increase pointer
bne @1
inc <FT_TEMP+1
@1:
cmp #%01000000 ;if bits 7 or 6 are set, it is instrument, tag, or effect
bcs @special ;note: carry is clear here for a plain note
sta FT_CHN_NOTE,x ;remember note code
@return:
lda <FT_TEMP ;store pointer from temp to the variable
sta FT_CHN_PTR_L,x
lda <FT_TEMP+1
sta FT_CHN_PTR_H,x
rts
@special:
cmp #%11000000 ;check if it is special tag
bcs @noInstr
cmp #%10000000 ;check if it is instrument
bcc @noPause
and #$3f
sta FT_CHN_REPEAT,x ;set up pause counter
jmp @return ;no new note, because Carry is set
@noPause:
and #$3f
sta FT_CHN_INSTRUMENT,x ;remember instrument number
jmp @readByte ;and read next byte
@noInstr:
cmp #%11111111 ;check if it is reference
bne @noEof
clc ;remember return address+3
lda <FT_TEMP
adc #3
sta FT_CHN_RETURN_L,x
lda <FT_TEMP+1
adc #0
sta FT_CHN_RETURN_H,x
lda (FT_TEMP),y ;read length of the reference (how many rows)
sta FT_CHN_REF_LEN,x
iny
lda (FT_TEMP),y ;read 16-bit absolute address of the reference
sta <FT_TEMP+2 ;remember in temp
iny
lda (FT_TEMP),y
sta <FT_TEMP+1
lda <FT_TEMP+2
sta <FT_TEMP
ldy #0
jmp @readByte ;and read next byte
@noEof:
cmp #%11111110 ;check if it is end of the channel
bne @effect
lda (FT_TEMP),y ;read two next bytes
sta <FT_TEMP+2
iny
lda (FT_TEMP),y
dey
sta <FT_TEMP+1 ;and set current pointer
lda <FT_TEMP+2
sta <FT_TEMP
jmp @readByte ;and read next byte
@effect:
and #$3f ;speed change
sta FT_SONG_SPEED
jmp @readByte
;process an envelope
;it does not matter which type an envelope is
;in: X offset of the envelope variables in the FamiTone's RAM page
;envelope byte codes: >=128 output value+192, 127 loop, 0..126 repeat count
envelopeStep:
lda FT_ENV_REPEAT,x ;check envelope repeat counter
beq @noRepeat ;if it is zero, process the envelope
dec FT_ENV_REPEAT,x ;otherwise decrement the counter
rts
@noRepeat:
lda FT_ENV_ADR_L,x ;load envelope address into temp
sta <FT_TEMP
lda FT_ENV_ADR_H,x
sta <FT_TEMP+1
ldy FT_ENV_PTR,x ;load envelope pointer
@readByte:
lda (FT_TEMP),y ;read byte of the envelope
iny ;increase pointer
ora #0 ;iny clobbered the flags, restore N from A
bpl @special ;if it is below 127, it is special code
clc ;otherwise it is an output value+192
adc #256-192
sta FT_ENV_VALUE,x
tya
sta FT_ENV_PTR,x ;remember the pointer and return
rts
@special:
cmp #127 ;if it is 127, it is end of an envelope
beq @loop
sta FT_ENV_REPEAT,x ;otherwise it is a value for repeat counter
tya
sta FT_ENV_PTR,x ;remember the value and return
rts
@loop:
lda (FT_TEMP),y ;load loop pointer
tay
jmp @readByte ;and read next byte
.if(FT_DPCM_ENABLE)
;set sample table pointer, only needed if DMC is used
;in: X,Y is address of a table that holds parameters for 12 samples
;each table entry is 4 bytes: start, length, pitch/loop, (1 unused)
FamiToneSampleInit:
stx FT_DPCM_TABLE_L
sty FT_DPCM_TABLE_H
rts
;stop DMC sample if it plays
FamiToneSampleStop:
lda #%00001111 ;all tone channels on, DMC off
sta APU_SND_CHN
rts
;play DMC sample from table, used by music player and could be used externally
;in: A is number of sample, 0..11
FamiToneSampleStartM: ;for music (low priority)
ldx FT_DPCM_EFFECT
beq FamiToneSampleStartS ;no effect sample claimed the DMC, just play
tax ;stash sample number in X
lda APU_SND_CHN
and #16 ;DMC still busy with the effect sample?
beq @noEffect
rts ;yes, music sample loses
@noEffect:
sta FT_DPCM_EFFECT ;A is 0 here, release the effect claim
txa
jmp FamiToneSampleStartS
FamiToneSampleStart: ;for sound effects (high priority)
ldx #1
stx FT_DPCM_EFFECT ;claim the DMC for an effect sample
FamiToneSampleStartS:
asl a ;get address in sample table
asl a
adc FT_DPCM_TABLE_L ;carry is clear after asl of a 0..11 value
sta <FT_TEMP
lda #0
adc FT_DPCM_TABLE_H
sta <FT_TEMP+1
lda #%00001111 ;stop DMC
sta APU_SND_CHN
ldy #0
lda (FT_TEMP),y ;sample offset
sta APU_DMC_START
iny
lda (FT_TEMP),y ;sample length
sta APU_DMC_LEN
iny
lda (FT_TEMP),y ;pitch and loop
sta APU_DMC_FREQ
lda #32 ;reset DAC counter
sta APU_DMC_RAW
lda #%00011111 ;start DMC
sta APU_SND_CHN
rts
.endif
.if(FT_SFX_ENABLE)
;init sound effects player, set pointer to data
;in: X,Y is address of sound effects data
FamiToneSfxInit:
stx FT_SFX_ADR_L ;remember pointer to the data
sty FT_SFX_ADR_H
ldx #FT_SFX_CH0 ;init all the streams
ldy #FT_SFX_STREAMS
@1:
lda #0
sta FT_SFX_REPEAT,x
sta FT_SFX_PTR_H,x ;zero MSB marks the stream inactive
jsr FamiToneSfxClearBuf
txa
clc
adc #FT_SFX_STRUCT_SIZE
tax
dey
bne @1
rts
;play a sound effect
;in: A is a number of the sound effect
; X is offset of sound effect stream, should be FT_SFX_CH0..FT_SFX_CH3
FamiToneSfxStart:
asl a ;get address in effects list
tay
lda #0
sta FT_SFX_PTR_H,x ;this stops the effect
sta FT_SFX_REPEAT,x ;reset repeat counter and pointer offset
sta FT_SFX_OFF,x
jsr FamiToneSfxClearBuf
lda FT_SFX_ADR_L
sta <FT_TEMP
lda FT_SFX_ADR_H
sta <FT_TEMP+1
lda (FT_TEMP),y ;read effect pointer from the table
sta FT_SFX_PTR_L,x ;and remember it
iny
lda (FT_TEMP),y
sta FT_SFX_PTR_H,x ;this enables the effect
rts
;clear output buffer of a sound effect
;in: A is 0
; X is offset of sound effect stream
;triangle slot is zeroed (silent), the three volume slots get $30 (volume 0)
FamiToneSfxClearBuf:
sta FT_SFX_BUF+6,x
lda #$30
sta FT_SFX_BUF,x
sta FT_SFX_BUF+3,x
sta FT_SFX_BUF+9,x
rts
;update one sound effect stream
;in: X is offset of sound effect stream
;effect byte codes: <$10 = register index followed by a value, $ff = end,
;otherwise repeat count +$10; result is mixed into FT_OUT_BUF by loudness
FamiToneSfxUpdate:
lda FT_SFX_REPEAT,x ;check if repeat counter is not zero
beq @noRepeat
dec FT_SFX_REPEAT,x ;decrement and return
jmp @updateBuf ;just mix with output buffer
@noRepeat:
lda FT_SFX_PTR_H,x ;check if MSB of pointer is not zero
bne @sfxActive
rts ;return otherwise, no active effect
@sfxActive:
sta <FT_TEMP+1 ;load effect pointer into temp
lda FT_SFX_PTR_L,x
sta <FT_TEMP
ldy FT_SFX_OFF,x
@readByte:
lda (FT_TEMP),y ;read byte of effect
iny
cmp #$10 ;if it is less than $10, it is register write
bcc @getData
cmp #$ff ;if it is $ff, it is end of the effect
bcs @eof
adc #256-$10 ;otherwise it is number of repeats +$10
sta FT_SFX_REPEAT,x
tya
sta FT_SFX_OFF,x
jmp @updateBuf
@eof:
lda #0
sta FT_SFX_PTR_H,x ;deactivate the stream
jmp @updateBuf
@getData:
stx <FT_TEMP+2 ;it is register write
adc <FT_TEMP+2 ;get offset in the effect output buffer
tax
lda (FT_TEMP),y ;read value
iny
sta FT_SFX_BUF,x ;store into output buffer
ldx <FT_TEMP+2
jmp @readByte ;and read next byte
@updateBuf:
lda FT_OUT_BUF ;now compare effect output buffer with main output buffer
and #$0f ;if volume of pulse 1 of effect is higher than of main buffer
sta <FT_TEMP ;overwrite pulse 1 of main buffer with one from effect buffer
lda FT_SFX_BUF,x
and #$0f
cmp <FT_TEMP
bcc @noPulse1
lda FT_SFX_BUF,x
sta FT_OUT_BUF
lda FT_SFX_BUF+1,x
sta FT_OUT_BUF+1
lda FT_SFX_BUF+2,x
sta FT_OUT_BUF+2
@noPulse1:
lda FT_OUT_BUF+3 ;same for pulse 2
and #$0f
sta <FT_TEMP
lda FT_SFX_BUF+3,x
and #$0f
cmp <FT_TEMP
bcc @noPulse2
lda FT_SFX_BUF+3,x
sta FT_OUT_BUF+3
lda FT_SFX_BUF+4,x
sta FT_OUT_BUF+4
lda FT_SFX_BUF+5,x
sta FT_OUT_BUF+5
@noPulse2:
lda FT_SFX_BUF+6,x ;overwrite triangle of main output buffer if it is active
beq @noTriangle
sta FT_OUT_BUF+6
lda FT_SFX_BUF+7,x
sta FT_OUT_BUF+7
lda FT_SFX_BUF+8,x
sta FT_OUT_BUF+8
@noTriangle:
lda FT_SFX_BUF+9,x ;same as for pulse 1 and 2, but for noise
and #$0f
sta <FT_TEMP
lda FT_OUT_BUF+9
and #$0f
cmp <FT_TEMP
bcs @noNoise
lda FT_SFX_BUF+9,x
sta FT_OUT_BUF+9
lda FT_SFX_BUF+10,x
sta FT_OUT_BUF+10
@noNoise:
rts
.endif
;period lookup, indexed by note*2 (word entries); used by FamiToneUpdate
noteTable: ;NTSC, 11-bit dividers, octaves 1-5
.word $6ad,$64d,$5f2,$59d,$54c,$500,$4b8,$474,$434,$3f7,$3be,$388
.word $356,$326,$2f8,$2ce,$2a5,$27f,$25b,$239,$219,$1fb,$1de,$1c3
.word $1aa,$192,$17b,$166,$152,$13f,$12d,$11c,$10c,$0fd,$0ee,$0e1
.word $0d4,$0c8,$0bd,$0b2,$0a8,$09f,$096,$08d,$085,$07e,$076,$070
.word $069,$063,$05e,$058,$053,$04f,$04a,$046,$042,$03e,$03a,$037
.word 0,0,0,0 ;padding so note 63 (note off) reads zeros
|
;----------------------------------------------------------------------------
;Split screen multidirection scrolling with MMC1 mapper
;Written by Cadaver, 1-2/2000
;Fixed to work on Nintendulator by Cadaver, 1/2004, added NTSC detection
;More stuff maybe to follow..
;
;Use freely, but at your own risk!
;----------------------------------------------------------------------------
;NOTE(review): this part is DASM syntax (processor/org/dc.w), unlike the
;ca65 code above -- confirm how the build assembles the two halves
processor 6502
org $c000
;controller button bits as returned by readcontroller
CTRL_A = 1
CTRL_B = 2
CTRL_SELECT = 4
CTRL_START = 8
CTRL_UP = 16
CTRL_DOWN = 32
CTRL_LEFT = 64
CTRL_RIGHT = 128
SCRSIZEX = 32 ;name table size in characters
SCRSIZEY = 30
;bit flags collected in screenshift to select the scroll-update routine
SCROLL_LEFT = 1
SCROLL_RIGHT = 2
SCROLL_UP = 4
SCROLL_DOWN = 8
SPLITPOINT = $c0 ;scanline of the status panel split (via sprite 0 hit)
PPU0VALUE = $80
;zero page variables
nmicount = $00
mapptrlo = $01 ;pointer to the top-left visible map block
mapptrhi = $02
blockx = $03 ;fine position (0..3 chars) inside the current 4x4 block
blocky = $04
scrollx = $05 ;pixel scroll position
scrolly = $06
scrollsx = $07 ;signed scroll speed, -8..8
scrollsy = $08
rawscrollx = $09 ;character (8-pixel) scroll position
rawscrolly = $0a
screenshift = $0b ;SCROLL_* flags for this frame
srclo = $0c ;generic source pointer
srchi = $0d
destlo = $0e ;generic destination pointer
desthi = $0f
mcharlo = $10 ;VRAM address of the "magic char" marker ($ffff = none)
mcharhi = $11
mcharunder = $12 ;character the marker replaced
temp1 = $13
temp2 = $14
temp3 = $15
temp4 = $16
temp5 = $17
temp6 = $18
temp7 = $19
temp8 = $1a
temp9 = $1b
controldelay = $1c
control = $1d ;last controller read
ntscflag = $1e ;1 on NTSC, 0 on PAL
ntscdelay = $1f ;NTSC frame-skip countdown
;RAM buffers filled by scrolllogic and flushed during the blank
COLORBUF = $680 ;attribute table shadow (64 bytes)
HORIZBUF = $6c0 ;column of new characters for horizontal scroll
btmrowlo = $6de ;7*mapsizex, offset of the map row below the screen
btmrowhi = $6df
VERTBUF = $6e0 ;row of new characters for vertical scroll
SPRBUF = $700 ;OAM shadow page
;reset entry point: init PPU/mapper, load graphics, then run the main loop
start: sei ;Forbid interrupts
ldx #$ff
txs
jsr setupppu
jsr setupmapper
jsr clearsprites
jsr detectntsc
jsr waitvblank
jsr loadchars
jsr loadpalette
jsr setnametable
jsr resetscroll
lda #<mapdata ;point map pointer at the top-left of the map
sta mapptrlo
lda #>mapdata
sta mapptrhi
lda #$00 ;Set initial speed
sta scrollsx
sta scrollsy
sta controldelay
;per-frame loop: draw during/after vblank, then run game logic
mainloop: jsr waitvblank
jsr erasemagicchar
jsr scrollaction
jsr drawmagicchar
jsr setgamescreen
jsr setsprites
lda ntscdelay ;Handle NTSC delay
sec
sbc ntscflag ;counts down only on NTSC (ntscflag=1)
bcs ml_nontscdelay
lda #$05 ;underflow: reload counter...
ml_nontscdelay: sta ntscdelay
bcc ml_skip ;...and skip logic this frame (60Hz -> 50Hz pacing)
jsr readcontroller
jsr steering
jsr scrolllogic
ml_skip: jsr setpanel
mainloop2: jmp mainloop
;reads pad 1; result in control, one CTRL_* bit per button
readcontroller: lda #$01
sta $4016 ;strobe the controller
lda #$00
sta $4016
sta control
ldx #$08 ;shift in 8 button bits
readcloop: lda $4016
ror ;serial bit -> carry
lda control
ror ;carry -> control (MSB first)
sta control
dex
bne readcloop
rts
;adjusts scroll speed from the pad, every 4th call; speeds clamp to -8..8,
;A or B zeroes both speeds
steering: inc controldelay
lda controldelay
and #$03
beq steering2
rts
steering2: lda control
and #CTRL_UP
beq steering3
lda scrollsy
cmp #-8 ;already at max upward speed?
beq steering3
dec scrollsy
steering3: lda control
and #CTRL_DOWN
beq steering4
lda scrollsy
cmp #8
beq steering4
inc scrollsy
steering4: lda control
and #CTRL_LEFT
beq steering5
lda scrollsx
cmp #-8
beq steering5
dec scrollsx
steering5: lda control
and #CTRL_RIGHT
beq steering6
lda scrollsx
cmp #8
beq steering6
inc scrollsx
steering6: lda control
and #CTRL_A+CTRL_B ;either button stops scrolling
beq steering7
lda #$00
sta scrollsx
sta scrollsy
steering7: rts
;clears all scroll state, precomputes 7*mapsizex (offset of the map row just
;below the visible area), clears the attribute shadow, and places sprite 0
;for the status panel split
resetscroll: lda #$00
sta scrollx
sta scrolly
sta scrollsx
sta scrollsy
sta rawscrollx
sta rawscrolly
sta blockx
sta blocky
sta screenshift
ldx #$07 ;Calculate 7 * MAPSIZEX
lda #$00 ;to help scrolling
sta btmrowlo
sta btmrowhi
rscr_loop1: lda btmrowlo
clc
adc mapsizex ;mapsizex is a 16-bit value defined with the map data
sta btmrowlo
lda btmrowhi
adc mapsizex+1
sta btmrowhi
dex
bne rscr_loop1
ldx #$3f
rscr_loop2: lda #$00 ;Clear color buffer
sta COLORBUF,x
dex
bpl rscr_loop2
lda #$ff ;Reset magic char ($ffff = no marker placed)
sta mcharlo
sta mcharhi
lda #SPLITPOINT-1 ;Set sprite0 for screen
sta SPRBUF ;split
lda #$ff ;tile $ff
sta SPRBUF+1
lda #$00 ;attributes
sta SPRBUF+2
lda #$f8 ;X position
sta SPRBUF+3
rts
;restores the character that the "magic char" marker overwrote last frame;
;must run during blanking ($2006/$2007 access)
erasemagicchar: lda mcharhi
bmi emc_noneed ;$ffxx = no marker was drawn
sta $2006
lda mcharlo
sta $2006
lda mcharunder ;put the saved character back
sta $2007
emc_noneed: lda #$ff ;mark as erased
sta mcharhi
sta mcharlo
rts
;draws a marker character ($ff) just outside the visible window (a debug aid
;that shows where new columns appear), saving the character underneath first
drawmagicchar: lda rawscrolly
clc
adc #(SPLITPOINT/8)+2 ;two rows below the split point
cmp #SCRSIZEY
bcc dmc_posok
sbc #SCRSIZEY ;wrap inside the 30-row name table
dmc_posok: asl ;row table has word entries
tax
lda scract_rowtbl+1,x ;VRAM address high byte of that row
sta mcharhi
lda scrollx
and #$07
cmp #$01 ;carry = 1 if there is a fine x offset
lda rawscrollx
adc #SCRSIZEX-2 ;column left of the window (+carry from above)
and #SCRSIZEX-1
ora scract_rowtbl,x ;combine with the row's low byte
sta mcharlo
lda mcharhi ;read the character currently there
sta $2006
lda mcharlo
sta $2006
lda $2007 ;First read is rubbish
lda $2007
sta mcharunder
lda mcharhi ;then write the marker
sta $2006
lda mcharlo
sta $2006
lda #$ff
sta $2007
rts
;----------------------------------------------------------------------------
;SCROLLLOGIC
;
;Updates scrolling position, block & map pointers and draws the new graphics
;to the horizontal and vertical buffers for the SCROLLACTION routine.
;
;Parameters: -
;Returns: -
;Destroys: A,X,Y
;----------------------------------------------------------------------------
scrolllogic: lda #$00
sta screenshift
lda scrollx
lsr ;pixels -> characters
lsr
lsr
sta temp1 ;Temp1 = old raw x scrolling
lda scrolly
lsr
lsr
lsr
sta temp2 ;Temp2 = old raw y scrolling
lda scrollx ;apply horizontal speed
clc
adc scrollsx
sta scrollx
lsr
lsr
lsr
sta rawscrollx ;New raw x scrolling
lda scrollsy ;apply vertical speed; y wraps at $f0 (240 lines)
clc
adc scrolly
cmp #$f0
bcc scrlog_notover
ldx scrollsy
bmi scrlog_overneg
sec
sbc #$f0 ;wrapped downwards past 240
jmp scrlog_notover
scrlog_overneg: sec
sbc #$10 ;wrapped upwards, skip the $f0..$ff gap
scrlog_notover: sta scrolly
lsr
lsr
lsr
sta rawscrolly ;new raw y scrolling
lda temp1 ;Any shifting in X-dir?
cmp rawscrollx
beq scrlog_xshiftok
lda scrollsx
bmi scrlog_xshiftneg
lda #SCROLL_RIGHT
sta screenshift
inc blockx ;step one char right inside the 4-char block
lda blockx
cmp #$04
bne scrlog_xshiftok
lda #$00 ;crossed into the next map block
sta blockx
inc mapptrlo
bne scrlog_xshiftok
inc mapptrhi
jmp scrlog_xshiftok
scrlog_xshiftneg:
lda #SCROLL_LEFT
sta screenshift
dec blockx
bpl scrlog_xshiftok
lda #$03 ;crossed into the previous map block
sta blockx
dec mapptrlo
lda mapptrlo
cmp #$ff
bne scrlog_xshiftok
dec mapptrhi
scrlog_xshiftok:
lda temp2 ;Any shifting in Y-dir?
cmp rawscrolly
beq scrlog_yshiftok
lda scrollsy
bmi scrlog_yshiftneg
lda #SCROLL_DOWN
ora screenshift
sta screenshift
inc blocky
lda blocky
cmp #$04
bne scrlog_yshiftok
lda #$00 ;crossed into the map row below
sta blocky
lda mapptrlo
clc
adc mapsizex
sta mapptrlo
lda mapptrhi
adc mapsizex+1
sta mapptrhi
jmp scrlog_yshiftok
scrlog_yshiftneg:
lda #SCROLL_UP
ora screenshift
sta screenshift
dec blocky
bpl scrlog_yshiftok
lda #$03 ;crossed into the map row above
sta blocky
lda mapptrlo
sec
sbc mapsizex
sta mapptrlo
lda mapptrhi
sbc mapsizex+1
sta mapptrhi
scrlog_yshiftok:lda screenshift ;dispatch on the collected SCROLL_* flags
asl
tax
lda scrlog_jumptbl,x ;indirect jump through a zero page vector
sta temp1
lda scrlog_jumptbl+1,x
sta temp2
jmp (temp1)
;index = screenshift (LEFT=1 RIGHT=2 UP=4 DOWN=8); 3 and 7 are unused fillers
scrlog_jumptbl: dc.w scrlog_shno ;0
dc.w scrlog_shleft ;1
dc.w scrlog_shright ;2
dc.w scrlog_shno ;3
dc.w scrlog_shup ;4
dc.w scrlog_shupleft ;5
dc.w scrlog_shupright ;6
dc.w scrlog_shno ;7
dc.w scrlog_shdown ;8
dc.w scrlog_shdownleft ;9
dc.w scrlog_shdownright ;10
scrlog_shno: rts
scrlog_shleft: jmp scrlog_doleft
scrlog_shright: jmp scrlog_doright
scrlog_shup: jmp scrlog_doup
scrlog_shdown: jmp scrlog_dodown
scrlog_shupleft:jsr scrlog_doleft
jmp scrlog_doup
scrlog_shupright:jsr scrlog_doright
jmp scrlog_doup
scrlog_shdownleft:jsr scrlog_doleft
jmp scrlog_dodown
scrlog_shdownright:jsr scrlog_doright
jmp scrlog_dodown
;builds the new left-edge column of characters into HORIZBUF (and colors it);
;reads one char per map block row, stepping down the map
scrlog_doleft: lda mapptrlo ;Calc. map pointer
sta srclo
lda mapptrhi
sta srchi
lda #SCRSIZEY
sta temp1 ;Chars to do
lda rawscrollx ;Position onscreen where drawing
sta temp6 ;happens (to help coloring)
ldx rawscrolly ;Pointer within screen
lda blocky ;Calc. starting blockindex
asl
asl
adc blockx ;index = blocky*4+blockx inside the 4x4 block
scrlog_doleftnb:sta temp2 ;Index within block
ldy #$00 ;Get block from map
lda (srclo),y
tay
lda blocktbllo,y ;destlo/hi -> the block's 16 characters
sta destlo
lda blocktblhi,y
sta desthi
lda blockdata,y ;Color
sta temp5
ldy temp2
scrlog_doleftloop:
lda (destlo),y
sta HORIZBUF,x
jsr scrlog_xcolor
inx
cpx #SCRSIZEY ;wrap inside the 30-row column
bcc scrlog_doleftno1
ldx #$00
scrlog_doleftno1:
dec temp1
beq scrlog_doleftrdy
tya
clc
adc #$04 ;next row inside the block (4 chars per row)
tay
cmp #$10
bcc scrlog_doleftloop
lda srclo ;block exhausted, step one map row down
clc
adc mapsizex
sta srclo
lda srchi
adc mapsizex+1
sta srchi
lda blockx
jmp scrlog_doleftnb
scrlog_doleftrdy:rts
;builds the new right-edge column of characters into HORIZBUF; like
;scrlog_doleft but reading 7 blocks (+3 chars) to the right of the map pointer
scrlog_doright: lda mapptrlo
clc
adc #$07 ;7 blocks = 28 chars; the remaining 3+1 come from blockx
sta srclo
lda mapptrhi
adc #$00
sta srchi
lda rawscrollx ;column just right of the visible window
clc
adc #SCRSIZEX-1
and #SCRSIZEX-1
sta temp6
lda #SCRSIZEY
sta temp1 ;Chars to do
lda blockx
clc
adc #$03 ;char column inside the block, may spill to the next block
cmp #$04
bcc scrlog_dorightno2
and #$03
pha
inc srclo ;spilled: advance one more map block
bne scrlog_dorightno3
inc srchi
scrlog_dorightno3:pla
scrlog_dorightno2:sta temp3
ldx rawscrolly ;Pointer within screen
lda blocky
asl
asl
clc
adc temp3 ;index = blocky*4 + adjusted column
scrlog_dorightnb:sta temp2 ;Pointer within block
ldy #$00 ;Get block from map
lda (srclo),y
tay
lda blocktbllo,y
sta destlo
lda blocktblhi,y
sta desthi
lda blockdata,y ;Color
sta temp5
ldy temp2
scrlog_dorightloop:
lda (destlo),y
sta HORIZBUF,x
jsr scrlog_xcolor
inx
cpx #SCRSIZEY ;wrap inside the 30-row column
bcc scrlog_dorightno1
ldx #$00
scrlog_dorightno1:
dec temp1
beq scrlog_dorightrdy
tya
clc
adc #$04 ;next row inside the block
tay
cmp #$10
bcc scrlog_dorightloop
lda srclo ;block exhausted, step one map row down
clc
adc mapsizex
sta srclo
lda srchi
adc mapsizex+1
sta srchi
lda temp3
jmp scrlog_dorightnb
scrlog_dorightrdy:rts
;builds the new top-edge row of characters into VERTBUF (and colors it);
;reads chars left to right, stepping one map block right every 4 chars
scrlog_doup: lda mapptrlo
sta srclo
lda mapptrhi
sta srchi
lda #SCRSIZEX
sta temp1 ;Chars to do
lda rawscrolly
sta temp6
ldx rawscrollx ;Pointer within screen
lda blocky
asl
asl
sta temp3 ;row base index inside a block (blocky*4)
adc blockx
scrlog_doupnb: sta temp2 ;Pointer within block
ldy #$00 ;Get block from map
lda (srclo),y
tay
lda blocktbllo,y
sta destlo
lda blocktblhi,y
sta desthi
lda blockdata,y ;Color
sta temp5
ldy temp2
scrlog_douploop:lda (destlo),y
sta VERTBUF,x
jsr scrlog_ycolor
inx
txa
and #SCRSIZEX-1 ;wrap inside the 32-char row
tax
dec temp1
beq scrlog_douprdy
iny
tya
and #$03 ;4 chars per block row
bne scrlog_douploop
inc srclo ;next map block to the right
bne scrlog_doupno2
inc srchi
scrlog_doupno2: lda temp3
jmp scrlog_doupnb
scrlog_douprdy: rts
;builds the new bottom-edge row of characters into VERTBUF; like scrlog_doup
;but starting 7 map rows down (btmrow offset) plus the blocky spill
scrlog_dodown: lda mapptrlo
clc
adc btmrowlo ;btmrow = 7*mapsizex, precomputed in resetscroll
sta srclo
lda mapptrhi
adc btmrowhi
sta srchi
lda #SCRSIZEX
sta temp1 ;Chars to do
lda rawscrolly ;row just below the visible window
clc
adc #SCRSIZEY-1
cmp #SCRSIZEY
bcc scrlog_dodownok1
sbc #SCRSIZEY
scrlog_dodownok1:
sta temp6
ldx rawscrollx ;Pointer within screen
lda blocky
clc
adc #$01 ;row inside the block, may spill to the map row below
cmp #$04
bcc scrlog_dodownno3
and #$03
pha
lda srclo ;spilled: advance one more map row
clc
adc mapsizex
sta srclo
lda srchi
adc mapsizex+1
sta srchi
pla
scrlog_dodownno3:
asl
asl
sta temp3 ;row base index inside a block
adc blockx
scrlog_dodownnb:sta temp2 ;Pointer within block
ldy #$00 ;Get block from map
lda (srclo),y
tay
lda blocktbllo,y
sta destlo
lda blocktblhi,y
sta desthi
lda blockdata,y ;Color
sta temp5
ldy temp2
scrlog_dodownloop:lda (destlo),y
sta VERTBUF,x
jsr scrlog_ycolor
inx
txa
and #SCRSIZEX-1 ;wrap inside the 32-char row
tax
dec temp1
beq scrlog_dodownrdy
iny
tya
and #$03 ;4 chars per block row
bne scrlog_dodownloop
inc srclo ;next map block to the right
bne scrlog_dodownno2
inc srchi
scrlog_dodownno2:lda temp3
jmp scrlog_dodownnb
scrlog_dodownrdy:rts
;----------------------------------------------------------------------------
;SCRLOG_XCOLOR
;SCRLOG_YCOLOR
;
;Subroutines to color the blocks (not so simple :-))
;Each COLORBUF byte packs four 2x2-char palette fields (2 bits each);
;these routines merge the block's color bits into the right field.
;
;Parameters: Temp5 = Block color byte
; Temp6 = Position (X for xcolor, Y for ycolor)
; X = Position in the other axis
; Y = Position within the block
;Returns: -
;Destroys: A
;----------------------------------------------------------------------------
scrlog_xcolor: txa
and #$01 ;only act on even rows (attributes cover 2x2 chars)
beq scrlog_xcolorok
rts
scrlog_xcolorok:tya
and #$02 ;Right part of block?
bne scrlog_xcolor0
lda temp5
jmp scrlog_xcolor1
scrlog_xcolor0: lda temp5
lsr
lsr
scrlog_xcolor1: cpy #$08 ;Lower part of block?
bcc scrlog_xcolor2
lsr
lsr
lsr
lsr
scrlog_xcolor2: and #$03
sta temp7 ;Color now in temp7
txa ;attribute byte index = (row/4)*8 + column/4
asl
and #$f8
sta temp8 ;Position, where to draw
lda temp6
lsr
lsr
clc
adc temp8
sta temp8 ;Byteposition is ready in temp8
sty temp9 ;Store Y
txa ;Check Y-fineposition
and #$02
bne scrlog_xcolor5 ;Lower part
scrlog_xcolor3: lda temp6 ;Check X-fineposition
and #$02
bne scrlog_xcolor4 ;Right part
ldy temp8 ;top-left field (bits 0-1)
lda COLORBUF,y
and #%11111100
ora temp7
sta COLORBUF,y
ldy temp9 ;Get Y back
rts
scrlog_xcolor4: ldy temp7 ;top-right field (bits 2-3)
lda scrlog_mul4tbl,y
sta temp7
ldy temp8
lda COLORBUF,y
and #%11110011
ora temp7
sta COLORBUF,y
ldy temp9
rts
scrlog_xcolor5: lda temp6
and #$02
bne scrlog_xcolor6
ldy temp7 ;bottom-left field (bits 4-5)
lda scrlog_mul16tbl,y
sta temp7
ldy temp8
lda COLORBUF,y
and #%11001111
ora temp7
sta COLORBUF,y
ldy temp9
rts
scrlog_xcolor6: ldy temp7 ;bottom-right field (bits 6-7)
lda scrlog_mul64tbl,y
sta temp7
ldy temp8
lda COLORBUF,y
and #%00111111
ora temp7
sta COLORBUF,y
ldy temp9
rts
;Same as scrlog_xcolor but for chars emitted into VERTBUF:
;here temp6 is the Y position and X is the screen X position.
scrlog_ycolor: txa
and #$01 ;Only even positions carry color
beq scrlog_ycolorok
rts
scrlog_ycolorok:tya
and #$02 ;Right part of block?
bne scrlog_ycolor0
lda temp5
jmp scrlog_ycolor1
scrlog_ycolor0: lda temp5
lsr ;Right half: color in bits 2-3
lsr
scrlog_ycolor1: cpy #$08 ;Lower part of block?
bcc scrlog_ycolor2
lsr ;Lower half: shift down 4 more
lsr
lsr
lsr
scrlog_ycolor2: and #$03
sta temp7 ;Color now in temp7
lda temp6 ;temp8 = (Y*2 & $F8) + X/4
asl
and #$f8
sta temp8 ;Position, where to draw
txa
lsr
lsr
clc
adc temp8
sta temp8 ;Byteposition is ready in temp8
sty temp9 ;Store Y
lda temp6 ;Check Y-fineposition
and #$02
bne scrlog_ycolor5 ;Lower part
scrlog_ycolor3: txa ;Check X-fineposition
and #$02
bne scrlog_ycolor4 ;Right part
ldy temp8 ;Upper-left quadrant: attr bits 0-1
lda COLORBUF,y
and #%11111100
ora temp7
sta COLORBUF,y
ldy temp9 ;Get Y back
rts
scrlog_ycolor4: ldy temp7 ;Upper-right quadrant: attr bits 2-3
lda scrlog_mul4tbl,y
sta temp7
ldy temp8
lda COLORBUF,y
and #%11110011
ora temp7
sta COLORBUF,y
ldy temp9
rts
scrlog_ycolor5: txa
and #$02
bne scrlog_ycolor6
ldy temp7 ;Lower-left quadrant: attr bits 4-5
lda scrlog_mul16tbl,y
sta temp7
ldy temp8
lda COLORBUF,y
and #%11001111
ora temp7
sta COLORBUF,y
ldy temp9
rts
scrlog_ycolor6: ldy temp7 ;Lower-right quadrant: attr bits 6-7
lda scrlog_mul64tbl,y
sta temp7
ldy temp8
lda COLORBUF,y
and #%00111111
ora temp7
sta COLORBUF,y
ldy temp9
rts
;Lookup tables: color << 2, << 4, << 6, for merging the 2-bit
;color into the right field of an attribute byte.
scrlog_mul4tbl: dc.b 0,4,8,12
scrlog_mul16tbl:dc.b 0,16,32,48
scrlog_mul64tbl:dc.b 0,64,128,192
;=================================================================
;SCROLLACTION
;
;Blits the horizontal & vertical buffers to PPU memory and also updates
;attribute table. To be called during vblank. Also, RESETSCROLL or SCROLLLOGIC
;must be called before calling this.
;
;Parameters: -
;Returns: -
;Destroys: A,X,Y
;=================================================================
;Dispatch on screenshift: low bits encode left(1)/right(2) and
;up(4)/down(8) edges; combined values pick the diagonal handlers.
scrollaction: lda $2002 ;Reset the PPU address latch
lda screenshift
asl ;Index = screenshift * 2 (word table)
tax
lda scract_jumptbl,x ;temp1/2 = handler address
sta temp1
lda scract_jumptbl+1,x
sta temp2
jmp (temp1)
scract_jumptbl: dc.w scract_done ;0
dc.w scract_doleft ;1
dc.w scract_doright ;2
dc.w scract_done ;3
dc.w scract_doup ;4
dc.w scract_doupleft ;5
dc.w scract_doupright ;6
dc.w scract_done ;7
dc.w scract_dodown ;8
dc.w scract_dodownleft ;9
dc.w scract_dodownright ;10
;Diagonal cases: horizontal edge first, then the vertical edge.
scract_doupleft:jsr scract_doleft
jmp scract_doup
scract_doupright:jsr scract_doright
jmp scract_doup
scract_dodownleft:jsr scract_doleft
jmp scract_dodown
scract_dodownright:jsr scract_doright
jmp scract_dodown
;Writes the newly exposed left-edge column (from HORIZBUF) and its
;8 attribute bytes from COLORBUF.
scract_doleft: lda #$20 ;VRAM address = $2000 + column
sta $2006
lda rawscrollx
and #$1f
sta $2006
jsr scract_horizshift ;Blit HORIZBUF down the column
lda rawscrollx ;X = attribute column (char / 4)
lsr
lsr
tax
scract_doleftattr:
;Unrolled: one attribute byte per 8-row strip, at $23C0 + N + col.
N SET 0
REPEAT 8
lda #$23
sta $2006
txa
ora #$c0+N
sta $2006
lda COLORBUF+N,x
sta $2007
N SET N+8
REPEND
scract_done: rts
;Right edge: same, with column = rawscrollx+SCRSIZEX-1 (mod 32).
scract_doright: lda #$20
sta $2006
lda rawscrollx
clc
adc #SCRSIZEX-1
and #$1f
sta $2006
jsr scract_horizshift
lda rawscrollx ;Attribute column of the right edge
clc
adc #$1f
and #$1f
lsr
lsr
tax
jmp scract_doleftattr
;Writes the newly exposed top row (from VERTBUF) and its 8
;attribute bytes from COLORBUF.
scract_doup: lda rawscrolly ;Row VRAM address from the row table
asl
tax
lda scract_rowtbl+1,x
sta $2006
lda scract_rowtbl,x
sta $2006
jsr scract_vertshift ;Blit VERTBUF across the row
lda #$23 ;Attribute row at $23C0 + (row/4)*8
sta $2006
lda rawscrolly
asl
and #$f8
tax
ora #$c0
sta $2006
scract_doupattr:
;Unrolled copy of 8 consecutive attribute bytes.
N SET 0
REPEAT 8
lda COLORBUF+N,x
sta $2007
N SET N+1
REPEND
rts
;Bottom row: same, with row = rawscrolly+SCRSIZEY-1 (mod SCRSIZEY).
scract_dodown: lda rawscrolly
clc
adc #SCRSIZEY-1
cmp #SCRSIZEY
bcc scract_dodownok1
sbc #SCRSIZEY
scract_dodownok1:
sta temp1
asl
tax
lda scract_rowtbl+1,x
sta $2006
lda scract_rowtbl,x
sta $2006
jsr scract_vertshift
lda #$23
sta $2006
lda temp1
asl
and #$f8
tax
ora #$c0
sta $2006
jmp scract_doupattr
;Blits HORIZBUF as one screen column, using the PPU's +32
;(vertical) address increment, then restores the +1 increment.
scract_horizshift:
lda #PPU0VALUE+4 ;Vertical increment
sta $2000
N SET 0
REPEAT SCRSIZEY
lda HORIZBUF+N
sta $2007
N SET N+1
REPEND
lda #PPU0VALUE ;Normal increment
sta $2000
rts
;Blits VERTBUF as one screen row (normal +1 increment).
scract_vertshift:
N SET 0
REPEAT SCRSIZEX
lda VERTBUF+N
sta $2007
N SET N+1
REPEND
rts
;VRAM start address of each nametable row ($2000 + row*32).
scract_rowtbl:
N SET $2000
REPEAT SCRSIZEY
dc.w N
N SET N+32
REPEND
;Programs scroll registers and PPU state for the scrolling game
;area, and clears screenshift for the next frame.
setgamescreen: lda #$00
sta $2006
sta $2006
lda scrollx ;Compensate 8px left margin
sec
sbc #$08
sta $2005
lda scrolly ;Y scroll shifted down 16px, wrapped
clc ;to the 240px nametable height
adc #$10
cmp #SCRSIZEY*8
bcc setgame_ok
sbc #SCRSIZEY*8
setgame_ok: sta $2005
lda #$1c ;Turn on onescreen mirror
jsr write8000
lda #$18 ;BG & sprites on, clipping
sta $2001
lda #$00 ;Assume no shifting on next fr.
sta screenshift
rts
;Waits for the sprite-0 hit, then switches the display to the
;status panel (second nametable). Bails out if a vblank arrives
;before the hit (something went wrong).
setpanel: bit $2002 ;Wait if sprite hit still on
bvs setpanel
ldx nmicount
setpanel_wait: cpx nmicount ;Check if vblank occurs before
bne setpanel_toolong ;spritehit (something went
bit $2002 ;wrong)
bvc setpanel_wait
lda #$00 ;Blank screen
sta $2001
lda #$1e ;Turn off onescreen mirror
jsr write8000
lda #$00
ldy #$04 ;Set scrolling & display pos.
sta $2005
sta $2005
sty $2006
sta $2006
lda #$0a
sta $2001 ;Just BG on, no sprites, no clip
setpanel_toolong:
rts
;Kicks off OAM DMA from the sprite buffer page.
setsprites: lda #>SPRBUF ;Start sprite DMA
sta $4014
rts
;Moves all 64 sprites offscreen (Y=$F0) in the sprite buffer.
clearsprites: ldx #$00
clrspr_loop: lda #$f0 ;Y below the visible area
sta SPRBUF,x
lda #$00
sta SPRBUF+1,x
lda #$00
sta SPRBUF+2,x
lda #$f8
sta SPRBUF+3,x
inx
inx
inx
inx
bne clrspr_loop
rts
;Wipes both nametables (8 pages of VRAM from $2000) and writes the
;status-panel text (PAL or NTSC variant) to the second nametable.
setnametable: lda #$1e ;Turn off onescreen mirror
jsr write8000
lda #PPU0VALUE ;Normal increment
sta $2000
lda #$20 ;Address $2000
sta $2006
lda #$00
sta $2006
ldx #$00
ldy #$00
setntbl_loop1: lda #$00 ;Wipe both nametables
sta $2007
inx
bne setntbl_loop1
iny
cpy #$08 ;8 * 256 = 2 KB of VRAM
bcc setntbl_loop1
ldx #$00
lda #$24 ;Address $2400
sta $2006
lda #$00
sta $2006
lda ntscflag ;Pick text by video standard
bne setntbl_loop3
setntbl_loop2: lda paneltext,x ;Now write text to the
and #$3f ;second nametable
sta $2007
inx
cpx #6*32 ;6 rows of 32 chars
bcc setntbl_loop2
rts
setntbl_loop3: lda paneltext2,x ;Now write text to the
and #$3f ;second nametable
sta $2007
inx
cpx #6*32
bcc setntbl_loop3
rts
;Copies the 32-byte palette to PPU palette RAM at $3F00.
loadpalette: lda #$3f
sta $2006
lda #$00
sta $2006
ldx #$00
loadpalette2: lda palette,x
sta $2007
inx
cpx #$20
bne loadpalette2
rts
;Copies 16 pages (4 KB) of pattern data into CHR RAM at $0000.
loadchars: lda #$00
sta $2006
sta $2006
lda #<chardata
sta srclo
lda #>chardata
sta srchi
ldy #$00
ldx #$10 ;16 * 256 = 4 KB
loadchars2: lda (srclo),y
sta $2007
iny
bne loadchars2
inc srchi
dex
bne loadchars2
rts
;Measures frame length by counting loop iterations between two
;NMIs; a long frame clears ntscflag (PAL). The $12 threshold is
;presumably tuned to this exact loop's cycle count -- do not
;change the loop without re-deriving it.
detectntsc: lda #$01
sta ntscflag ;Assume NTSC
sta ntscdelay
jsr waitvblank
lda #$00
sta temp1 ;16-bit iteration counter
sta temp2
lda nmicount
dntsc_loop: cmp nmicount ;Spin until the next NMI
bne dntsc_over
inc temp1
bne dntsc_loop
inc temp2
bne dntsc_loop
dntsc_over: asl temp1 ;A = high 8 bits of counter*2
lda temp2
rol
cmp #$12 ;Longer frame -> PAL
bcc dntsc_nopal
dec ntscflag
dntsc_nopal: rts
;Blanks the screen but keeps NMIs enabled.
setupppu: lda #PPU0VALUE ;Blank screen, leave NMI's on
sta $2000
lda #$00
sta $2001
rts
;Writes A serially to the MMC1 register at $8000: a $80 write
;resets the shift register, then 5 data bits go in LSB first.
setupmapper: lda #$1e
write8000: pha ;Write to MMC 1 mapper
lda #$80 ;First reset
sta $8000 ;Then 5 bits of data
pla
sta $8000
lsr
sta $8000
lsr
sta $8000
lsr
sta $8000
lsr
sta $8000
rts
;Waits for the next NMI, then blanks the screen and turns off the
;onescreen mirror (tail-calls write8000).
waitvblank: lda nmicount
waitvblank2: cmp nmicount
beq waitvblank2
lda #$00 ;Blank screen, with clipping
sta $2001
lda $2002
lda #$1e ;Turn off onescreen mirror
jmp write8000
;NMI only counts frames; it falls through into the IRQ's rti.
nmi: inc nmicount
irq: rti
paneltext: dc.b " "
dc.b $1d,"MULTIDIRECTIONAL SCROLLING TEST"
dc.b $1e,"RUNNING IN PAL, 50HZ REFRESH "
dc.b $1f
ds.b 30,$3c
dc.b $3e
dc.b " "
dc.b " "
dc.b " "
paneltext2: dc.b " "
dc.b $1d,"MULTIDIRECTIONAL SCROLLING TEST"
dc.b $1e,"RUNNING IN NTSC, 60HZ REFRESH "
dc.b $1f
ds.b 30,$3c
dc.b $3e
dc.b " "
dc.b " "
dc.b " "
;Pointer tables to each block's 16-byte char definition.
;NOTE(review): blocks appear to start 512 bytes into blockdata;
;verify against the scrtest.blk layout.
blocktbllo:
N SET blockdata+512
REPEAT 128
dc.b N ;low byte of block N's data address
N SET N+16
REPEND
blocktblhi:
N SET blockdata+512
REPEAT 128
dc.b N/256 ;high byte
N SET N+16
REPEND
palette: incbin scrtest.pal
incbin scrtest.pal
blockdata: incbin scrtest.blk
map: incbin scrtest.map
mapsizex = map+0 ;map header: width word,
mapsizey = map+2 ;height word, then the data
mapdata = map+4
chardata: incbin scrtest.chr
org $fffa ;6502 vectors: NMI, RESET, IRQ
dc.w nmi
dc.w start
dc.w irq
|
xem/nes | 2,338 | nes-test-roms/mmc3_test_2/source/1-clocking.s | .include "test_mmc3.inc"
;MMC3 IRQ test: basic counter/IRQ/A12 clocking behavior.
;begin_counter_test arms the counter with X as the reload value;
;clock_counter toggles A12 once; should_be_set/clear check the
;IRQ flag (all defined in test_mmc3.inc).
main:
jsr begin_mmc3_tests
set_test 2,"Counter/IRQ/A12 clocking isn't working at all"
ldx #10
jsr begin_counter_test
jsr clock_counter ; counter = 10
jsr clock_counter
jsr should_be_clear ; counter shouldn't be zero yet
set_test 3,"Should decrement when A12 is toggled via PPUADDR"
ldx #2
jsr begin_counter_test
ldx #9 ; counter = 2
jsr clock_counter_x ; clock 9 times
jsr should_be_set ; should have hit zero at least once by now
set_test 4,"Writing to $C000 shouldn't cause reload"
ldx #2
jsr begin_counter_test
jsr clock_counter ; counter = 2
lda #100
jsr set_reload
ldx #8
jsr clock_counter_x ; should reach 0 before reloading with 100
jsr should_be_set ; and thus IRQ flag should be set by now
set_test 5,"Writing to $C001 shouldn't cause immediate reload"
ldx #1
jsr begin_counter_test
lda #1
jsr set_reload ; shouldn't affect counter
jsr clear_counter ; request reload on next clock, not immediately
lda #4
jsr set_reload ; reload = 4, which will get used next
jsr clock_counter ; counter = 4
jsr clock_counter ; 3
jsr should_be_clear
set_test 6,"Should reload (no decrement) on first clock after clear"
ldx #2
jsr begin_counter_test
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr should_be_clear
set_test 7,"Clear should clear counter immediately"
ldx #2
jsr begin_counter_test
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr clear_counter ; clear counter
jsr clock_counter ; counter isn't 1 anymore, so IRQ shouldn't be set here
jsr should_be_clear
set_test 8,"IRQ should be set when counter is decremented to 0"
ldx #1
jsr begin_counter_test
jsr clock_counter ; 1
jsr clock_counter ; 0
jsr should_be_set
set_test 9,"IRQ should never be set when disabled"
ldx #1
jsr begin_counter_test
jsr disable_irq
ldx #10
jsr clock_counter_x
jsr should_be_clear
set_test 10,"Should reload when clocked when counter is 0"
ldx #1
jsr begin_counter_test
jsr clock_counter ; 1
lda #10
jsr set_reload
jsr clock_counter ; 0
lda #2
jsr set_reload
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr clear_irq
jsr clock_counter ; 0
jsr should_be_set
jmp tests_passed
|
xem/nes | 1,997 | nes-test-roms/mmc3_test_2/source/2-details.s | ; Tests MMC3 IRQ counter details
.include "test_mmc3.inc"
;MMC3 IRQ test: counter detail behavior (255 reload, running while
;disabled/flagged, reload-to-nonzero, $C001 clear, and the number
;of A12 clocks in one rendered PPU frame).
main:
jsr begin_mmc3_tests
set_test 2,"Counter isn't working when reloaded with 255"
ldx #255
jsr begin_counter_test
ldx #255
jsr clock_counter_x ; first clock loads with 255
jsr should_be_clear
jsr clock_counter
jsr should_be_set
set_test 3,"Counter should run even when IRQ is disabled"
ldx #2
jsr begin_counter_test
jsr disable_irq
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr clock_counter ; 0
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr enable_irq
jsr should_be_clear
jsr clock_counter ; 0
jsr should_be_set
set_test 4,"Counter should run even after IRQ flag has been set"
ldx #2
jsr begin_counter_test
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr clock_counter ; 0
jsr clock_counter ; 2
jsr clear_irq
jsr clock_counter ; 1
jsr should_be_clear
jsr clock_counter ; 0
jsr should_be_set
set_test 5,"IRQ should not be set when counter reloads with non-zero"
ldx #1
jsr begin_counter_test
jsr clock_counter ; 1
jsr clock_counter ; 0
jsr clear_irq
jsr clock_counter ; 1
jsr should_be_clear
set_test 6,"IRQ should not be set when counter is cleared via $C001"
ldx #2
jsr begin_counter_test
jsr clock_counter ; 2
jsr clock_counter ; 1
jsr clear_counter
jsr should_be_clear
set_test 7,"IRQ should be set when non-zero and reloading to 0 after clear"
ldx #3
jsr begin_counter_test
jsr clock_counter ; 3
jsr clock_counter ; 2
lda #0
jsr set_reload
jsr clear_counter
jsr clock_counter ; 0
jsr should_be_set
jsr clear_oam
set_test 8,"Counter should be clocked 241 times in PPU frame"
ldx #241
jsr begin_counter_test
jsr wait_vbl
setb PPUSCROLL,0
sta PPUSCROLL
setb PPUCTRL,$08 ; sprites use tiles at $1xxx
setb PPUMASK,$18 ; enable bg and sprites
delay 29800
setb PPUMASK,$00 ; disable rendering
jsr should_be_clear
jsr clock_counter
jsr should_be_set
jmp tests_passed
|
xem/nes | 1,565 | nes-test-roms/mmc3_test_2/source/6-MMC3_alt.s | ; Tests alternate MMC3 behavior. Some MMC3 chips also have this behavior,
; though their markings appear identical to those that have normal
; MMC3 behavior. My copy of Crystalis in particular behaves this way,
; but not my copy of Super Mario Bros. 3, even though both have a chip
; marked MMC3B.
.include "test_mmc3.inc"
;MMC3 IRQ test: "alternate" (MMC3A/some-MMC3B) reload-to-zero
;behavior, where the IRQ on a 0-reload depends on whether the
;reload was forced by a $C001 clear.
main:
jsr begin_mmc3_tests
set_test 2,"IRQ shouldn't be set when reloading to 0 due to counter naturally reaching 0 previously"
ldx #2
jsr begin_counter_test
jsr clock_counter ; reload with 2
jsr clock_counter ; decrement to 1
jsr clock_counter ; decrement to 0
jsr should_be_set
lda #0
jsr set_reload
jsr clock_counter ; reload with 0
jsr should_be_clear
jsr clock_counter ; reload with 0
jsr should_be_clear
jsr clock_counter ; reload with 0
jsr should_be_clear
jsr clock_counter ; reload with 0
jsr should_be_clear
set_test 3,"IRQ should be set when reloading due to clear, even if counter was already 0"
ldx #2
jsr begin_counter_test
jsr clock_counter ; reload with 2
jsr clock_counter ; decrement to 1
jsr clock_counter ; decrement to 0
jsr should_be_set
lda #0
jsr set_reload
jsr clock_counter ; reload with 0
jsr should_be_clear
jsr clock_counter ; reload with 0
jsr should_be_clear
lda #2
jsr set_reload
jsr clear_counter ; this sets internal flag that is examined on next clock
lda #0
jsr set_reload
jsr clock_counter ; reload with 0, AND set IRQ flag, unlike before
jsr should_be_set
jmp tests_passed
|
xem/nes | 2,470 | nes-test-roms/mmc3_test_2/source/4-scanline_timing.s | ; Tests MMC3 IRQ timing to PPU clock accuracy. Tests both modes,
; $2000=$08 and $2000=$10.
;
; Timing tested is between $2002 reads of VBL flag first set,
; and IRQ occurring.
.include "test_mmc3.inc"
.include "sync_vbl.s"
;Macro: one timing measurement. Syncs to an even VBL, sets the
;PPU pattern-select mode, arms the counter with `count`, then
;delays so the expected IRQ lands n PPU clocks (n/3 CPU cycles)
;into the frame before sampling the flag in end_.
.macro test mode, count, n
.local n_
n_ = (n) + 1
setb PPUMASK,0
jsr sync_vbl_even
delay_ppu_even (n_ .MOD 3) + 9
setb PPUCTRL,mode
lda #count
jsr begin_
delay n_/3 - 3 - 43
jsr end_
.endmacro
;Enables rendering and arms the MMC3: A -> reload, then clear,
;disable and re-enable IRQ (r_* are register mirrors from the
;include; presumably $C000/$C001/$E000/$E001).
begin_:
pha
setb PPUMASK,$18
pla
sta r_set_reload
sta r_clear_counter
sta r_disable_irq
sta r_enable_irq
rts
;Opens a 2-instruction IRQ window around "inc irq_flag". The IRQ
;handler modifies irq_flag, so the final value encodes whether the
;IRQ hit before ($22) or after ($21) the inc -- callers compare
;against those. $11 means no IRQ at all.
end_:
setb irq_flag,$10
cli
nop
nop
inc irq_flag
delay 1000
sei
nop
lda irq_flag
cmp #$11
beq @no_irq
rts
@no_irq:
set_test 14,"IRQ never occurred"
jmp test_failed
;MMC3 IRQ test: scanline timing to PPU-clock accuracy, for both
;$2000=$08 (BG at $0xxx, sprites at $1xxx) and $2000=$10 modes.
;Each pair of cases brackets the expected IRQ time by one clock.
main:
jsr begin_mmc3_tests
jsr clear_oam
scanline_0_08 = 6976
scanline_1_08 = scanline_0_08
set_test 2,"Scanline 0 IRQ should occur later when $2000=$08"
test $08, 0, scanline_0_08 - 1
cmp #$22
jne test_failed
set_test 3,"Scanline 0 IRQ should occur sooner when $2000=$08"
test $08, 0, scanline_0_08
cmp #$21
jne test_failed
set_test 4,"Scanline 1 IRQ should occur later when $2000=$08"
test $08, 1, scanline_1_08 + 341 - 1
cmp #$22
jne test_failed
set_test 5,"Scanline 1 IRQ should occur sooner when $2000=$08"
test $08, 1, scanline_1_08 + 341
cmp #$21
jne test_failed
set_test 6,"Scanline 239 IRQ should occur later when $2000=$08"
test $08, 239, scanline_1_08 + 239*341 - 1
cmp #$22
jne test_failed
set_test 7,"Scanline 239 IRQ should occur sooner when $2000=$08"
test $08, 239, scanline_1_08 + 239*341
cmp #$21
jne test_failed
scanline_0_10 = 6976 - 256
scanline_1_10 = scanline_0_10 - 21
set_test 8,"Scanline 0 IRQ should occur later when $2000=$10"
test $10, 0, scanline_0_10 - 1
cmp #$22
jne test_failed
set_test 9,"Scanline 0 IRQ should occur sooner when $2000=$10"
test $10, 0, scanline_0_10
cmp #$21
jne test_failed
set_test 10,"Scanline 1 IRQ should occur later when $2000=$10"
test $10, 1, scanline_1_10 + 341 - 1
cmp #$22
jne test_failed
set_test 11,"Scanline 1 IRQ should occur sooner when $2000=$10"
test $10, 1, scanline_1_10 + 341
cmp #$21
jne test_failed
set_test 12,"Scanline 239 IRQ should occur later when $2000=$10"
test $10, 239, scanline_1_10 + 239*341 - 1
cmp #$22
jne test_failed
set_test 13,"Scanline 239 IRQ should occur sooner when $2000=$10"
test $10, 239, scanline_1_10 + 239*341
cmp #$21
jne test_failed
jmp tests_passed
|
xem/nes | 1,649 | nes-test-roms/mmc3_test_2/source/3-A12_clocking.s | ; Tests MMC3 IRQ clocking via bit 12 of VRAM address
.include "test_mmc3.inc"
;MMC3 IRQ test: the counter must be clocked only by 0-to-1
;transitions of VRAM address bit 12, whether caused by PPUADDR
;writes or PPUDATA accesses.
main:
jsr begin_mmc3_tests
setb PPUCTRL,0 ; disable PPU, sprites and bg use $0xxx patterns
sta PPUMASK
set_test 2,"Shouldn't be clocked when A12 doesn't change"
ldx #1
jsr begin_counter_test
lda #$00 ; transition everything but A12
ldx #$ef
ldy #$ff
sta PPUADDR
sta PPUADDR
stx PPUADDR
sty PPUADDR
sta PPUADDR
sta PPUADDR
stx PPUADDR
sty PPUADDR
sta PPUADDR
sta PPUADDR
jsr should_be_clear
set_test 3,"Shouldn't be clocked when A12 changes to 0"
ldx #1
jsr begin_counter_test
jsr clock_counter ; avoid pathological behavior
setb PPUADDR,$10
sta PPUADDR
jsr clear_counter
jsr clear_irq
ldx #$00
ldy #$10
stx PPUADDR
stx PPUADDR
sty PPUADDR ; counter = 1
stx PPUADDR
stx PPUADDR ; second 1 to 0 transition
stx PPUADDR
jsr should_be_clear
set_test 4,"Should be clocked when A12 changes to 1 via PPUADDR write"
ldx #1
jsr begin_counter_test
jsr clock_counter
setb PPUADDR,$00 ; transition A12 from 0 to 1
sta PPUADDR
setb PPUADDR,$10
sta PPUADDR
jsr should_be_set
set_test 5,"Should be clocked when A12 changes to 1 via PPUDATA read"
ldx #1
jsr begin_counter_test
jsr clock_counter
setb PPUADDR,$0f ; vaddr = $0fff
setb PPUADDR,$ff
jsr should_be_clear
bit PPUDATA ; read increments vaddr to $1000 -> A12 rises
jsr should_be_set
set_test 6,"Should be clocked when A12 changes to 1 via PPUDATA write"
ldx #1
jsr begin_counter_test
jsr clock_counter
setb PPUADDR,$0f ; vaddr = $0fff
setb PPUADDR,$ff
jsr should_be_clear
sta PPUDATA ; write increments vaddr to $1000 -> A12 rises
jsr should_be_set
jmp tests_passed
|
xem/nes | 1,852 | nes-test-roms/mmc3_test_2/source/common/testing.s | ; Utilities for writing test ROMs
; In NVRAM so these can be used before initializing runtime,
; then runtime initialized without clearing them
nv_res test_code ; code of current test
nv_res test_name,2 ; address of name of current test, or 0 of none
; Sets current test code and optional name. Also resets
; checksum. The name string, if given, is placed in RODATA.
; Preserved: A, X, Y
.macro set_test code,name
pha
lda #code
jsr set_test_
.ifblank name
setb test_name+1,0
.else
.local Addr
setw test_name,Addr
seg_data "RODATA",{Addr: .byte name,0}
.endif
pla
.endmacro
set_test_:
sta test_code
jmp reset_crc
; Initializes testing module
init_testing = init_crc
; Reports that all tests passed (exit code 0)
tests_passed:
jsr print_filename
print_str newline,"Passed"
lda #0
jmp exit
; Reports "Done" if set_test has never been used,
; "Passed" if set_test 0 was last used, or
; failure if set_test n was last used.
tests_done:
ldx test_code
jeq tests_passed
inx ; $FF (never set) wraps to 0 -> "Done"
bne test_failed
jsr print_filename
print_str newline,"Done"
lda #0
jmp exit
; Reports that the current test failed. Prints code and
; name last set with set_test, or just "Failed" if none
; have been set yet. Exits with the test code.
test_failed:
ldx test_code
; Treat $FF as 1, in case it wasn't ever set
inx
bne :+
inx
stx test_code
:
; If code >= 2, print name
cpx #2-1 ; -1 due to inx above
blt :+
lda test_name+1
beq :+ ; no name recorded
jsr print_newline
sta addr+1
lda test_name
sta addr
jsr print_str_addr
jsr print_newline
:
jsr print_filename
; End program
lda test_code
jmp exit
; If checksum doesn't match expected, reports failed test.
; Clears checksum afterwards.
; Preserved: A, X, Y
.macro check_crc expected
jsr_with_addr check_crc_,{.dword expected}
.endmacro
check_crc_:
pha
jsr is_crc_
bne :+ ; mismatch -> report
jsr reset_crc
pla
rts
: jsr print_newline
jsr print_crc
jmp test_failed
|
xem/nes | 2,841 | nes-test-roms/mmc3_test_2/source/common/print.s | ; Prints values in various ways to output,
; including numbers and strings.
newline = 10
zp_byte print_temp_
; Prints indicated register to console as two hex
; chars and space
; Preserved: A, X, Y, flags
print_a:
php
pha
print_reg_: ; common tail: A already holds the value
jsr print_hex
lda #' '
jsr print_char_
pla
plp
rts
print_x:
php
pha
txa
jmp print_reg_
print_y:
php
pha
tya
jmp print_reg_
print_p: ; prints the flags byte
php
pha
php
pla
jmp print_reg_
print_s: ; prints SP adjusted for this routine's own pushes
php
pha
txa
tsx
inx
inx
inx
inx
jsr print_x
tax
pla
plp
rts
; Prints A as two hex characters, NO space after
; Preserved: A, X, Y
print_hex:
jsr update_crc
pha
lsr a
lsr a
lsr a
lsr a
jsr print_nibble_
pla
pha
and #$0F
jsr print_nibble_
pla
rts
print_nibble_: ; A = 0-15, converted to '0'-'9'/'A'-'F'
cmp #10
blt @digit
adc #6;+1 since carry is set
@digit: adc #'0'
jmp print_char_
; Prints low 4 bits of A as single hex character
; Preserved: A, X, Y
print_nibble:
pha
and #$0F
jsr update_crc
jsr print_nibble_
pla
rts
; Prints character and updates checksum UNLESS
; it's a newline.
; Preserved: A, X, Y
print_char:
cmp #newline
beq :+
jsr update_crc
: pha
jsr print_char_
pla
rts
; Prints space. Does NOT update checksum.
; Preserved: A, X, Y
print_space:
pha
lda #' '
jsr print_char_
pla
rts
; Advances to next line. Does NOT update checksum.
; Preserved: A, X, Y
print_newline:
pha
lda #newline
jsr print_char_
pla
rts
; Prints string
; Preserved: A, X, Y
; Prints an inline string (stored right after the jsr).
; Preserved: A, X, Y
.macro print_str str,str2
jsr print_str_
.byte str
.ifnblank str2
.byte str2
.endif
.byte 0
.endmacro
print_str_: ; pops return addr to find the inline string,
sta print_temp_ ; prints it, then jumps past the terminator
pla
sta addr
pla
sta addr+1
jsr inc_addr
jsr print_str_addr
lda print_temp_
jmp (addr)
; Prints string at addr and leaves addr pointing to
; byte AFTER zero terminator.
; Preserved: A, X, Y
print_str_addr:
pha
tya
pha
ldy #0
beq :+ ; always taken
@loop: jsr print_char
jsr inc_addr
: lda (addr),y
bne @loop
pla
tay
pla
; FALL THROUGH
; Increments 16-bit value in addr.
; Preserved: A, X, Y
inc_addr:
inc addr
beq :+
rts
: inc addr+1
rts
; Prints A as 1-3 digit decimal value, NO space after.
; Leading zeroes are suppressed.
; Preserved: A, X, Y
print_dec:
pha
sta print_temp_
jsr update_crc
txa
pha
lda print_temp_
; Hundreds
cmp #10
blt @ones ; single digit: skip straight to ones
cmp #100
blt @tens
ldx #'0'-1 ; repeated subtraction gives the digit
: inx
sbc #100
bge :-
adc #100
jsr @digit
; Tens
@tens: sec
ldx #'0'-1
: inx
sbc #10
bge :-
adc #10
jsr @digit
; Ones
@ones: ora #'0'
jsr print_char
pla
tax
pla
rts
; Print a single digit
@digit: pha
txa
jsr print_char
pla
rts
; Prints one of two characters based on condition.
; SEC; print_cc bcs,'C','-' prints 'C'.
; Preserved: A, X, Y, flags
.macro print_cc cond,yes,no
; Avoids labels since they're not local
; to macros in ca65.
php
pha
cond *+6
lda #no
bne *+4
lda #yes
jsr print_char
pla
plp
.endmacro
|
xem/nes | 3,089 | nes-test-roms/mmc3_test_2/source/common/shell_misc.s | ; Reports internal error and exits program
internal_error: ; unrecoverable shell error; exit code 255
print_str newline,"Internal error"
lda #255
jmp exit
.import __NVRAM_LOAD__, __NVRAM_SIZE__
; Fills [Begin,End) with the value in A.
.macro fill_ram_ Begin, End
; Simpler to count from negative size up to 0,
; and adjust address downward to compensate
; for initial low byte in Y index
.local Neg_size
Neg_size = (Begin) - (End)
ldxy #(Begin) - <Neg_size
sty addr
stx addr+1
ldxy #Neg_size
: sta (addr),y
iny
bne :-
inc addr+1
inx
bne :-
.endmacro
; Clears 0 through ($100+S), $200 through __NVRAM_LOAD__-1, and
; __NVRAM_LOAD__+__NVRAM_SIZE__ through $7FF
; (i.e. all RAM except the NVRAM window and the in-use stack).
clear_ram:
lda #0
bss_begin = $200
fill_ram_ bss_begin,__NVRAM_LOAD__
fill_ram_ __NVRAM_LOAD__+__NVRAM_SIZE__,$800
; Zero-page
tax
: sta 0,x
inx
bne :-
; Stack below S
tsx
inx
: dex
sta $100,x
bne :-
rts
nv_res unused_nv_var ; to avoid size=0
; Clears nvram
clear_nvram:
lda #0
fill_ram_ __NVRAM_LOAD__,__NVRAM_LOAD__+__NVRAM_SIZE__
rts
; Prints filename and newline, if available, otherwise nothing.
; The name is compiled in only when FILENAME_KNOWN is defined.
; Preserved: A, X, Y
print_filename:
.ifdef FILENAME_KNOWN
pha
jsr print_newline
setw addr,filename
jsr print_str_addr
jsr print_newline
pla
.endif
rts
.pushseg
.segment "RODATA"
; Filename terminated with zero byte.
filename:
.ifdef FILENAME_KNOWN
.incbin "ram:nes_temp"
.endif
.byte 0
.popseg
;**** ROM-specific ****
.ifndef BUILD_NSF
.include "ppu.s"
avoid_silent_nsf: ; no-op stubs in ROM builds
play_byte:
rts
; Disables interrupts and loops forever
.ifndef CUSTOM_FOREVER
forever:
sei
lda #0
sta PPUCTRL
: beq :- ; Z set by lda #0 -> endless loop
.res $10,$EA ; room for code to run loader
.endif
; Default NMI
.ifndef CUSTOM_NMI
zp_byte nmi_count
zp_byte flags_from_nmi
zp_byte pclo_from_nmi
nmi: ; Record flags and PC low byte from stack
pla
sta flags_from_nmi
pla
sta pclo_from_nmi
pha ; re-push so rti still works
lda flags_from_nmi
pha
inc nmi_count
rti
; Waits for NMI. Must be using NMI handler that increments
; nmi_count, with NMI enabled.
; Preserved: X, Y
wait_nmi:
lda nmi_count
: cmp nmi_count
beq :-
rts
.endif
; Default IRQ
.ifndef CUSTOM_IRQ
zp_byte flags_from_irq
zp_byte pclo_from_irq
zp_byte irq_count
irq: ; Record flags and PC low byte from stack
pla
sta flags_from_irq
pla
sta pclo_from_irq
pha
lda flags_from_irq
pha
inc irq_count
bit SNDCHN ; clear frame IRQ flag
rti
.endif
.endif
; Reports A in binary as high and low tones, with
; leading low tone for reference. Omits leading
; zeroes. Doesn't hang if no APU is present.
; Preserved: A, X, Y
play_hex:
pha
; Make low reference beep
clc
jsr @beep
; Remove high zero bits
sec
: rol a
bcc :-
; Play remaining bits
beq @zero
: jsr @beep
asl a
bne :-
@zero:
delay_msec 300
pla
rts
; Plays low/high beep based on carry
; Preserved: A, X, Y
@beep:
pha
; Set up square
lda #1
sta SNDCHN
sta $4001
sta $4003
adc #$FE ; period=$100 if carry, $1FF if none
sta $4002
; Fade volume
lda #$0F
: ora #$30
sta $4000
delay_msec 8
sec
sbc #$31
bpl :-
; Silence
setb SNDCHN,0
delay_msec 160
pla
rts
|
xem/nes | 1,483 | nes-test-roms/mmc3_test_2/source/common/build_rom.s | ; Builds program as iNES ROM
; Default is 32K PRG and 8K CHR ROM, NROM (0)
.if 0 ; Options to set before .include "shell.inc":
CHR_RAM=1 ; Use CHR-RAM instead of CHR-ROM
CART_WRAM=1 ; Use mapper that supports 8K WRAM in cart
CUSTOM_MAPPER=n ; Specify mapper number
.endif
.ifndef CUSTOM_MAPPER
.ifdef CART_WRAM
CUSTOM_MAPPER = 2 ; UNROM
.else
CUSTOM_MAPPER = 0 ; NROM
.endif
.endif
;;;; iNES header (16 bytes)
.ifndef CUSTOM_HEADER
.segment "HEADER"
.byte $4E,$45,$53,26 ; "NES" EOF
.ifdef CHR_RAM
.byte 2,0 ; 32K PRG, CHR RAM
.else
.byte 2,1 ; 32K PRG, 8K CHR
.endif
.byte CUSTOM_MAPPER*$10+$01 ; vertical mirroring
.endif
.ifndef CUSTOM_VECTORS
.segment "VECTORS"
.word -1,-1,-1, nmi, reset, irq
.endif
;;;; CHR-RAM/ROM: ASCII font placed either in PRG (to be copied
;;;; to CHR RAM at init) or directly in CHR ROM.
.ifdef CHR_RAM
.define CHARS "CHARS_PRG"
.segment CHARS
ascii_chr:
.segment "CHARS_PRG_ASCII"
.align $200
.incbin "ascii.chr"
ascii_chr_end:
.else
.define CHARS "CHARS"
.segment "CHARS_ASCII"
.align $200
.incbin "ascii.chr"
.res $1800
.endif
.segment CHARS
.res $10,0
;;;; Shell
.ifndef NEED_CONSOLE
NEED_CONSOLE=1
.endif
.segment "CODE"
.res $4000
.include "shell.s"
std_reset: ; blank the PPU, then enter the shell
lda #0
sta PPUCTRL
sta PPUMASK
jmp run_shell
init_runtime:
.ifdef CHR_RAM
load_chr_ram
.endif
rts
post_exit: ; store result, beep it out, halt
jsr set_final_result
jsr play_hex
jmp forever
; This helps devcart recover after running test.
; It is never executed by test ROM.
.segment "LOADER"
.incbin "devcart.bin"
.code
.align 256
|
xem/nes | 5,404 | nes-test-roms/mmc3_test_2/source/common/console.s | ; Scrolling text console with word wrapping, 30x29 characters.
;
; * Defers PPU initialization until first flush/ newline.
; * Works even if PPU doesn't support scrolling.
; * Keeps border around edge of screen for TV overscan.
; * Requires vertical or single-screen mirroring.
; * Requires ASCII font in CHR.
.ifndef CONSOLE_COLOR
CONSOLE_COLOR = $30 ; white
.endif
console_screen_width = 32 ; if lower than 32, left-justifies
; Number of characters of margin on left and right, to avoid
; text getting cut off by common TVs. OK if either/both are 0.
console_left_margin = 1
console_right_margin = 1
console_width = console_screen_width - console_left_margin - console_right_margin
zp_byte console_pos ; 0 to console_width
zp_byte console_scroll ; pixel scroll; -1 = not yet initialized
zp_byte console_temp
bss_res console_buf,console_width
; Initializes console. PPU setup itself is deferred until the
; first newline/flush (see console_wait_vbl_).
console_init:
; Flag that console hasn't been initialized
setb console_scroll,-1
setb console_pos,0
rts
; Hides console by disabling PPU rendering and blacking out
; first four entries of palette.
; Preserved: A, X, Y
; Hides console: rendering off, first palette entries blacked out.
; Preserved: A, X, Y
console_hide:
pha
jsr console_wait_vbl_
setb PPUMASK,0
lda #$0F ; black
jsr console_load_palette_
pla
rts
; Shows console display
; Preserved: A, X, Y
console_show:
pha
lda #CONSOLE_COLOR
jsr console_show_custom_color_
pla
rts
; Prints char A to console. Will not appear until
; a newline or flush occurs.
; Preserved: A, X, Y
console_print:
cmp #10 ; newline char?
beq console_newline
sty console_temp
ldy console_pos
cpy #console_width ; line full -> word-wrap path
beq console_full_
sta console_buf,y
iny
sty console_pos
ldy console_temp
rts
; Displays current line and starts new one
; Preserved: A, X, Y
console_newline:
pha
jsr console_wait_vbl_
jsr console_flush_
jsr console_scroll_up_
setb console_pos,0
pla
rts
; Displays current line's contents without scrolling.
; Preserved: A, X, Y
console_flush:
pha
jsr console_wait_vbl_
jsr console_flush_
jsr console_apply_scroll_
pla
rts
;**** Internal routines ****
; Handles a print to an already-full line: either treats a space
; as a newline, or word-wraps the line and retries the print.
console_full_:
ldy console_temp
; Line is full
; If space, treat as newline
cmp #' '
beq console_newline
; Wrap current line at appropriate point
pha
tya
pha
jsr console_wrap_
pla
tay
pla
jmp console_print
; Inserts newline into buffer at appropriate position, leaving
; next line ready in buffer
; Preserved: X, console_temp
console_wrap_:
; Find beginning of last word
ldy #console_width
lda #' '
: dey
bmi console_newline ; no space found: hard-break whole line
cmp console_buf,y
bne :-
; y = 0 to console_width-1
; Flush through current word and put remaining
; in buffer for next line
jsr console_wait_vbl_
; Time to last PPU write: 207 + 32*(26 + 10)
lda console_scroll
jsr console_set_ppuaddr_
stx console_pos ; save X
ldx #0
; Print everything before last word
: lda console_buf,x
sta PPUDATA
inx
dey
bpl :-
; x = 1 to console_width
; Move last word to beginning of buffer, and
; print spaces for rest of line
ldy #0
beq :++
: lda #' '
sta PPUDATA
lda console_buf,x
inx
sta console_buf,y
iny
: cpx #console_width
bne :--
ldx console_pos ; restore X
; Append new text after that
sty console_pos
; FALL THROUGH
; Scrolls up 8 pixels and clears one line BELOW new line
; Preserved: X, console_temp
console_scroll_up_:
; Scroll up 8 pixels
lda console_scroll
jsr console_add_8_to_scroll_
sta console_scroll
; Clear line AFTER that on screen
jsr console_add_8_to_scroll_
jsr console_set_ppuaddr_
ldy #console_width
lda #' '
: sta PPUDATA
dey
bne :-
; FALL THROUGH
; Applies current scrolling position to PPU
; (fine Y scroll offset by 16 so the newest line sits at bottom)
; Preserved: X, Y, console_temp
console_apply_scroll_:
lda #0
sta PPUADDR
sta PPUADDR
sta PPUSCROLL
lda console_scroll
jsr console_add_8_to_scroll_
jsr console_add_8_to_scroll_
sta PPUSCROLL
rts
; Sets PPU address for row
; In: A = scroll position (pixels)
; PPU addr = $2000 + (A/8)*32 + left margin
; Preserved: X, Y
console_set_ppuaddr_:
sta console_temp
lda #$08
asl console_temp
rol a
asl console_temp
rol a
sta PPUADDR
lda console_temp
ora #console_left_margin
sta PPUADDR
rts
; A = (A + 8) % 240
; Preserved: X, Y
console_add_8_to_scroll_:
cmp #240-8
bcc :+
adc #16-1;+1 for set carry
: adc #8
rts
; Enables rendering with the given text color in A.
console_show_custom_color_:
pha
jsr console_wait_vbl_
setb PPUMASK,PPUMASK_BG0
pla
jsr console_load_palette_
jmp console_apply_scroll_
; Writes black background + 3 copies of A to palette $3F00-$3F03.
console_load_palette_:
pha
setb PPUADDR,$3F
setb PPUADDR,$00
setb PPUDATA,$0F ; black
pla
sta PPUDATA
sta PPUDATA
sta PPUDATA
rts
; Initializes PPU if necessary, then waits for VBL
; Preserved: A, X, Y, console_temp
console_wait_vbl_:
lda console_scroll
cmp #-1
bne @already_initialized
; Deferred initialization of PPU until first use of console
; In case PPU doesn't support scrolling, start a
; couple of lines down
setb console_scroll,16
jsr console_hide
tya
pha
; Fill nametable with spaces
setb PPUADDR,$20
setb PPUADDR,$00
ldy #240
lda #' '
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dey
bne :-
; Clear attributes
lda #0
ldy #$40
: sta PPUDATA
dey
bne :-
pla
tay
jsr console_show
@already_initialized:
jmp wait_vbl_optional
; Flushes current line
; Preserved: X, Y
console_flush_:
lda console_scroll
jsr console_set_ppuaddr_
sty console_temp
; Copy line
ldy #0
beq :++
: lda console_buf,y
sta PPUDATA
iny
: cpy console_pos
bne :--
ldy console_temp
rts
;----- file: nes-test-roms/mmc3_test_2/source/common/text_out.s -----
; Text output as expanding zero-terminated string at text_out_base
; The final exit result byte is written here
final_result = $6000
; Text output is written here as an expanding
; zero-terminated string
text_out_base = $6004
bss_res text_out_temp
zp_res text_out_addr,2
; Sets up the $6000 result area: result byte, empty string, and the
; $DE,$B0,$61 signature at $6001-$6003 that tells an emulator the
; area holds valid test output.
init_text_out:
ldx #0
; Put valid data first
setb text_out_base,0        ; empty string (just terminator)
lda #$80                    ; $80 = "test still running"
jsr set_final_result
; Now fill in signature that tells emulator there's
; useful data there
setb text_out_base-3,$DE
setb text_out_base-2,$B0
setb text_out_base-1,$61
ldx #>text_out_base
stx text_out_addr+1
setb text_out_addr,<text_out_base
rts
; Sets final result byte in memory
; In: A = result code
set_final_result:
sta final_result
rts
; Writes character to text output
; In: A=Character to write
; Preserved: A, X, Y
write_text_out:
sty text_out_temp
; Write new terminator FIRST, then new char before it,
; in case emulator looks at string in middle of this routine.
ldy #1
pha
lda #0
sta (text_out_addr),y       ; terminator at addr+1
dey
pla
sta (text_out_addr),y       ; character at addr
inc text_out_addr           ; 16-bit increment of write pointer
bne :+
inc text_out_addr+1
:
ldy text_out_temp
rts
;----- file: nes-test-roms/mmc3_test_2/source/common/ppu.s -----
; PPU utilities
; Bit 7 set if PPU timed out / VBL flag stuck during init_wait_vbl
bss_res ppu_not_present
; Sets PPUADDR to w
; Preserved: X, Y
.macro set_ppuaddr w
bit PPUSTATUS       ; reset PPU address latch
setb PPUADDR,>w
setb PPUADDR,<w
.endmacro
; Delays by no more than n scanlines (341 PPU clocks = 113.67 CPU clocks each)
.macro delay_scanlines n
.if CLOCK_RATE <> 1789773
.error "Currently only supports NTSC"
.endif
delay ((n)*341)/3
.endmacro
; Waits for VBL then disables PPU rendering.
; Preserved: A, X, Y
disable_rendering:
pha
jsr wait_vbl_optional
setb PPUMASK,0
pla
rts
; Fills first nametable with $00
; Preserved: Y
clear_nametable:
ldx #$20
bne clear_nametable_        ; always taken (X nonzero)
clear_nametable2:
ldx #$24
clear_nametable_:
lda #0
jsr fill_screen_            ; writes the 960 tile bytes
; Clear the 64 attribute bytes that follow the tiles
; NOTE(review): original comment said "pattern table"; the writes
; continue right after the 960 tiles, i.e. the attribute table.
ldx #64
: sta PPUDATA
dex
bne :-
rts
; Fills screen with tile A
; Preserved: A, Y
fill_screen:
ldx #$20
bne fill_screen_            ; always taken
; Same as fill_screen, but fills other nametable
fill_screen2:
ldx #$24
fill_screen_:
stx PPUADDR                 ; PPUADDR = $2000 or $2400
ldx #$00
stx PPUADDR
ldx #240                    ; 240 * 4 writes = 960 tiles
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dex
bne :-
rts
; Fills palette with $0F (black)
; Preserved: Y
clear_palette:
set_ppuaddr $3F00
ldx #$20                    ; all 32 palette entries
lda #$0F
: sta PPUDATA
dex
bne :-
; FALL THROUGH into clear_oam
; Fills OAM with $FF
; Preserved: Y
clear_oam:
lda #$FF
; Fills OAM with A
; Preserved: A, Y
fill_oam:
ldx #0
stx SPRADDR
: sta SPRDATA
dex
bne :-                      ; 256 writes, covers all of OAM
rts
; Initializes wait_vbl_optional. Must be called before
; using it.
.align 32
init_wait_vbl:
; Wait for VBL flag to be set, or ~60000
; clocks (2 frames) to pass
ldy #24                     ; outer loop counter (~25*256 inner iterations)
ldx #1
bit PPUSTATUS               ; clear any stale VBL flag
: bit PPUSTATUS
bmi @set
dex
bne :-
dey
bpl :-                      ; on timeout Y wraps to $FF (bit 7 set)
@set:
; Be sure flag didn't stay set (in case
; PPUSTATUS always has high bit set)
tya
ora PPUSTATUS               ; bit 7 set if timed out OR flag stuck
sta ppu_not_present
rts
; Same as wait_vbl, but returns immediately if PPU
; isn't working or doesn't support VBL flag
; Preserved: A, X, Y
.align 16
wait_vbl_optional:
bit ppu_not_present
bmi :++                     ; skip wait if PPU flagged unusable
; FALL THROUGH
; Clears VBL flag then waits for it to be set.
; Preserved: A, X, Y
wait_vbl:
bit PPUSTATUS
: bit PPUSTATUS
bpl :-
: rts
; Checks whether the measured frame length matches Len CPU clocks.
; Falls through to @ok (defined by caller) when region matches.
.macro check_ppu_region_ Len
; Delays since VBL began
jsr wait_vbl_optional ; 10 average
delay Len - 18 - 200
lda PPUSTATUS ; 4
bmi @ok ; 2
delay 200
; Next VBL should roughly begin here if it's the
; one we are detecting
delay 200
lda PPUSTATUS ; 2
bpl @ok
.endmacro
; Prints a warning if ROM is run on the wrong PPU region
; (NTSC frame = 29780.67 clocks, PAL frame = 33247.5 clocks)
check_ppu_region:
.ifndef REGION_FREE
.ifdef PAL_ONLY
check_ppu_region_ 29781
print_str {newline,"Note: This test is meant for PAL NES only.",newline,newline}
.endif
.ifdef NTSC_ONLY
check_ppu_region_ 33248
print_str {newline,"Note: This test is meant for NTSC NES only.",newline,newline}
.endif
.endif
@ok: rts
; Loads ASCII font into CHR RAM and fills rest with $FF
; Clobbers: A, X, Y, addr
.macro load_chr_ram
bit PPUSTATUS
setb PPUADDR,0
setb PPUADDR,0
; Copy ascii_chr to 0
setb addr,<ascii_chr
ldx #>ascii_chr
ldy #0
@page:
stx addr+1
: lda (addr),y
sta PPUDATA
iny
bne :-
inx
cpx #>ascii_chr_end
bne @page
; Fill rest of the 8K CHR space ($20 pages) with $FF
lda #$FF
: sta PPUDATA
iny
bne :-
inx
cpx #$20
bne :-
.endmacro
;----- file: nes-test-roms/mmc3_test_2/source/common/shell.s -----
; Shell that sets up testing framework and calls main
; Detect inclusion loops (otherwise ca65 goes crazy)
.ifdef SHELL_INCLUDED
.error "shell.s included twice"
.end
.endif
SHELL_INCLUDED = 1
; Temporary variables that ANY routine might modify, so
; only use them between routine calls.
; The < operator forces zero-page addressing.
temp = <$A
temp2 = <$B
temp3 = <$C
addr = <$E
ptr = addr
; Move code from $C000 to $E200, to accommodate my devcarts
.segment "CODE"
.res $2200
; Put shell code after user code, so user code is in more
; consistent environment
.segment "CODE2"
; Any user code which runs off end might end up here,
; so catch that mistake.
nop ; in case there was three-byte opcode before this
nop
jmp internal_error
;**** Common routines ****
.include "macros.inc"
.include "neshw.inc"
.include "delay.s"
.include "print.s"
.include "crc.s"
.include "testing.s"
;**** Shell core ****
.ifndef CUSTOM_RESET
; Default reset vector: disable IRQs then enter standard reset
reset:
sei
jmp std_reset
.endif
; Sets up hardware then runs main
run_shell:
init_cpu_regs
jsr init_shell
set_test $FF                ; $FF = no test code set yet
jmp run_main
; Initializes shell without affecting current set_test values
init_shell:
jsr clear_ram
jsr init_wait_vbl ; waits for VBL once here,
jsr wait_vbl_optional ; so only need to wait once more
jsr init_text_out
jsr init_testing
jsr init_runtime
jsr console_init
rts
; Runs main in consistent PPU/APU environment, then exits
; with code 0
run_main:
jsr pre_main
jsr main
lda #0
jmp exit
; Sets up environment for main to run in
pre_main:
.ifndef BUILD_NSF
jsr disable_rendering
setb PPUCTRL,0
jsr clear_palette
jsr clear_nametable
jsr clear_nametable2
jsr clear_oam
.endif
; Clear APU registers $4000-$4013 and $4015
lda #0
sta $4015
ldx #$13
: sta $4000,x
dex
bpl :-
; CPU registers
lda #$34                    ; flags image: IRQs disabled, B/unused set
pha
lda #0
tax
tay
jsr wait_vbl_optional
plp                         ; pop the $34 pushed above into P
sta SNDMODE                 ; $4017 = 0 (A is still 0 here)
rts
.ifndef CUSTOM_EXIT
exit:
.endif
; Reports result and ends program
; In: A = result code (0 = passed)
std_exit:
sta temp
init_cpu_regs
setb SNDCHN,0               ; silence APU
lda temp
jsr report_result
pha
jsr check_ppu_region
pla
jmp post_exit
; Reports final result code in A, as text and as beeps
report_result:
jsr :+                      ; print the message part below...
jmp play_byte               ; ...then play result audibly and return
: jsr print_newline
jsr console_show
; 0: ""
cmp #1
bge :+
rts
:
; 1: "Failed"
bne :+
print_str {"Failed",newline}
rts
; n: "Failed #n"
: print_str "Failed #"
jsr print_dec
jsr print_newline
rts
;**** Other routines ****
.include "shell_misc.s"
.ifdef NEED_CONSOLE
.include "console.s"
.else
; Stubs so code doesn't have to care whether
; console exists
console_init:
console_show:
console_hide:
console_print:
console_flush:
rts
.endif
.ifndef CUSTOM_PRINT
.include "text_out.s"
; Routes each printed character to SRAM text output and console
print_char_:
jsr write_text_out
jmp console_print
stop_capture:
rts
.endif
;----- file: nes-test-roms/mmc3_test_2/source/common/sync_vbl.s -----
; Synchronizes EXACTLY to VBL, to accuracy of 1/3 CPU clock
; (1/2 CPU clock if PPU is enabled). Reading PPUSTATUS
; 29768 clocks or later after return will have bit 7 set.
; Reading PPUSTATUS immediately will have bit 7 clear.
; Preserved: A, X, Y
; Time: 120-330 msec
.align 128
sync_vbl:
pha
; Disable interrupts
sei
lda #0
sta PPUCTRL                 ; disable NMI so it can't disturb timing
; Coarse synchronize
bit PPUSTATUS
: bit PPUSTATUS
bpl :-
; Delay so that VBL is sure to occur slightly after
; critical window in loop below.
delay 29760 ; +1 works, +2 fails
jmp @first
; VBL occurs every 29780.67 (rendering disabled)
; or 29780.5 (rendering enabled) CPU clocks. Loop
; takes 29781 CPU clocks. Thus, time of VBL will
; be effectively 1/3 or 1/2 CPU clock earlier in
; loop each time. At some point, it will fall just
; before second PPUSTATUS read below.
: delay 29781-4-4-3
@first: bit PPUSTATUS ; clear flag (not really necessary)
bit PPUSTATUS ; see if just set
bpl :-
pla
rts
; Same as sync_vbl, but additionally ensures that next frame
; will skip PPU clock at end of VBL if rendering is enabled.
; Preserved: A, X, Y
sync_vbl_odd:
pha
; Rendering must be disabled
jsr wait_vbl
lda #0
sta PPUMASK
jsr sync_vbl
jsr @render_frame           ; one frame with BG enabled
; See whether frame was short
; If not, frames totaled 59561+1/3 CPU clocks
delay 29781-17-1
lda PPUSTATUS
bmi :+
jmp @end
:
; If frame was short, first frame was
; one clock shorter. Wait another frame to
; toggle even/odd flag. Rendering enabled
; for frame so total of all three frames
; is 89341+1/3 CPU clocks, which has the
; same fraction as the other case, thus
; ensuring the same CPU-PPU synchronization.
jsr @render_frame
@end: ; Establish same timing as sync_vbl
delay 29781-7
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
pla
rts
; Enables BG rendering for exactly one frame, then disables it
@render_frame:
lda #PPUMASK_BG0
sta PPUMASK
delay 29781-6-6-6-6+1
lda #0
sta PPUMASK
rts
; Same as sync_vbl_odd, but next frame will NOT skip PPU clock
; Preserved: A, X, Y
sync_vbl_even:
jsr sync_vbl_odd
delay 341*262*3 / 3 - 10; get to even frame without affecting sync
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
rts
; Same as sync_vbl_even, but also writes A to SPRDMA without
; affecting timing (in particular, SPRDMA's optional extra clock
; is dealt with).
; Preserved: A, X, Y
sync_vbl_even_dma:
jsr sync_vbl_odd
delay 341*262 - 534
sta SPRDMA
bit PPUSTATUS
; Delay extra clock if necessary. Unaffected by code
; alignment since it branches to next instr.
bpl :+
:
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
rts
; Same as sync_vbl, but then delays A additional PPU clocks.
; In: A = extra PPU clocks
; Preserved: X, Y
.align 32
sync_vbl_delay:
jsr sync_vbl
; VBL occurs every 29780.67 clocks, therefore
; each iteration of the loop is like delaying
; 1/3 CPU clock (1 PPU clock).
: delay 29781-7
clc
adc #-1
bcs :-
delay 29781*2-10
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
rts
; Effectively delays n PPU clocks, while maintaing
; even/odd frame (i.e. never delays an odd number of
; frames). PPU rendering must be off.
; Preserved: A, X, Y
.macro delay_ppu_even n
.if (n) < 8
.error "time out of range"
.endif
.if (n) .MOD 3 = 1
.if (n) > 4
delay (n)/3-1
.endif
delay 29781*4               ; whole frames preserve the PPU-clock phase
.elseif (n) .MOD 3 = 2
delay (n)/3
delay 29781*2
.else
delay (n)/3
.endif
.endmacro
;----- file: nes-test-roms/mmc3_test_2/source/common/delay.s -----
; Delays in CPU clocks, milliseconds, etc. All routines are re-entrant
; (no global data). No routines touch X or Y during execution.
; Code generated by macros is relocatable; it contains no JMPs to itself.
zp_byte delay_temp_ ; only written to (used as a 3-cycle no-op target)
; Delays n clocks, from 2 to 16777215
; Preserved: A, X, Y, flags
.macro delay n
.if (n) < 0 .or (n) = 1 .or (n) > 16777215
.error "Delay out of range"
.endif
delay_ (n)
.endmacro
; Delays n milliseconds (1/1000 second)
; n can range from 0 to 1100.
; Preserved: A, X, Y, flags
.macro delay_msec n
.if (n) < 0 .or (n) > 1100
.error "time out of range"
.endif
delay ((n)*CLOCK_RATE+500)/1000
.endmacro
; Delays n microseconds (1/1000000 second).
; n can range from 0 to 100000.
; Preserved: A, X, Y, flags
.macro delay_usec n
.if (n) < 0 .or (n) > 100000
.error "time out of range"
.endif
delay ((n)*((CLOCK_RATE+50)/100)+5000)/10000
.endmacro
.align 64
; Delays A clocks + overhead
; In: A = 0 to 255
; Preserved: X, Y
; Time: A+25 clocks (including JSR)
: sbc #7 ; carry set by CMP
delay_a_25_clocks:
cmp #7
bcs :- ; do multiples of 7
; A is now 0-6; burn exactly A more clocks via flag tricks:
lsr a ; bit 0
bcs :+ ; extra 2 clocks if bit 0 was set... (carry path below)
: ; A=clocks/2, either 0,1,2,3
beq @zero ; 0: 5
lsr a
beq :+ ; 1: 7
bcc :+ ; 2: 9
@zero: bne :+ ; 3: 11
: rts ; (thanks to dclxvi for the algorithm)
; Delays A*256 clocks + overhead
; Preserved: X, Y
; Time: A*256+16 clocks (including JSR)
delay_256a_16_clocks:
cmp #0
bne :+
rts
delay_256a_11_clocks_:
: pha
lda #256-19-22              ; 256 minus this iteration's fixed overhead
jsr delay_a_25_clocks
pla
clc
adc #-1                     ; decrement without affecting other flags' use
bne :-
rts
; Delays A*65536 clocks + overhead
; Preserved: X, Y
; Time: A*65536+16 clocks (including JSR)
delay_65536a_16_clocks:
cmp #0
bne :+
rts
delay_65536a_11_clocks_:
: pha
lda #256-19-22-13
jsr delay_a_25_clocks
lda #255
jsr delay_256a_11_clocks_
pla
clc
adc #-1
bne :-
rts
; Longest delay that delay_short_ handles inline
max_short_delay = 41
; delay_short_ macro jumps into these
; A sled of NOPs; jsr'ing partway in burns 2 clocks per NOP + 12 for jsr/rts
.res (max_short_delay-12)/2,$EA ; NOP
delay_unrolled_:
rts
; Emits inline code that delays exactly n clocks (0, 2..41)
; using instruction combinations with known cycle counts.
.macro delay_short_ n
.if n < 0 .or n = 1 .or n > max_short_delay
.error "Internal delay error"
.endif
.if n = 0
; nothing
.elseif n = 2
nop
.elseif n = 3
sta <delay_temp_            ; 3-cycle write to scratch byte
.elseif n = 4
nop
nop
.elseif n = 5
sta <delay_temp_
nop
.elseif n = 6
nop
nop
nop
.elseif n = 7
php
plp
.elseif n = 8
nop
nop
nop
nop
.elseif n = 9
php
plp
nop
.elseif n = 10
sta <delay_temp_
php
plp
.elseif n = 11
php
plp
nop
nop
.elseif n = 13
php
plp
nop
nop
nop
.elseif n & 1
sta <delay_temp_
jsr delay_unrolled_-((n-15)/2)
.else
jsr delay_unrolled_-((n-12)/2)
.endif
.endmacro
; Emits delay code that may clobber A and flags (caller saves if needed)
.macro delay_nosave_ n
; 65536+17 = maximum delay using delay_256a_11_clocks_
; 255+27 = maximum delay using delay_a_25_clocks
; 27 = minimum delay using delay_a_25_clocks
.if n > 65536+17
lda #^(n - 15)
jsr delay_65536a_11_clocks_
; +2 ensures remaining clocks is never 1
delay_nosave_ (((n - 15) & $FFFF) + 2)
.elseif n > 255+27
lda #>(n - 15)
jsr delay_256a_11_clocks_
; +2 ensures remaining clocks is never 1
delay_nosave_ (<(n - 15) + 2)
.elseif n >= 27
lda #<(n - 27)
jsr delay_a_25_clocks
.else
delay_short_ n
.endif
.endmacro
; Top-level delay: preserves A and flags via php/pha when the
; delay is too long for the inline delay_short_ forms.
.macro delay_ n
.if n > max_short_delay
php
pha
delay_nosave_ (n - 14)      ; 14 = overhead of php/pha/pla/plp
pla
plp
.else
delay_short_ n
.endif
.endmacro
;----- file: nes-test-roms/mmc3_test_2/source/common/crc.s -----
; CRC-32 checksum calculation
; Running CRC-32, stored complemented, little-endian (checksum+0 = LSB)
zp_res checksum,4
zp_byte checksum_temp
zp_byte checksum_off_       ; nesting counter; negative = CRC updates disabled
; Turns CRC updating on/off. Allows nesting.
; Preserved: A, X, Y
crc_off:
dec checksum_off_
rts
crc_on: inc checksum_off_
beq :+
jpl internal_error ; catch unbalanced crc calls
: rts
; Initializes checksum module. Might initialize tables
; in the future.
init_crc:
jmp reset_crc
; Clears checksum and turns it on
; Preserved: X, Y
reset_crc:
lda #0
sta checksum_off_
lda #$FF                    ; CRC-32 initial value $FFFFFFFF
sta checksum
sta checksum + 1
sta checksum + 2
sta checksum + 3
rts
; Updates checksum with byte in A (unless disabled via crc_off)
; Bit-at-a-time, reflected polynomial $EDB88320 (standard CRC-32)
; Preserved: A, X, Y
; Time: 357 clocks average
update_crc:
bit checksum_off_
bmi update_crc_off
update_crc_:
pha
stx checksum_temp
eor checksum
ldx #8
@bit: lsr checksum+3        ; shift 32-bit CRC right one bit
ror checksum+2
ror checksum+1
ror a
bcc :+
sta checksum
lda checksum+3              ; XOR in $EDB88320 when bit shifted out was 1
eor #$ED
sta checksum+3
lda checksum+2
eor #$B8
sta checksum+2
lda checksum+1
eor #$83
sta checksum+1
lda checksum
eor #$20
: dex
bne @bit
sta checksum
ldx checksum_temp
pla
update_crc_off:
rts
; Prints checksum as 8-character hex value (complemented, MSB first)
print_crc:
jsr crc_off                 ; don't feed the printed hex back into the CRC
; Print complement
ldx #3
: lda checksum,x
eor #$FF
jsr print_hex
dex
bpl :-
jmp crc_on
; EQ if checksum matches CRC
; Out: A=0 and EQ if match, A>0 and NE if different
; Preserved: X, Y
.macro is_crc crc
jsr_with_addr is_crc_,{.dword crc}
.endmacro
is_crc_:
tya
pha
; Compare with complemented checksum:
; (ptr),y + checksum + 1 = 0 (mod 256) iff (ptr),y == ~checksum byte
ldy #3
: lda (ptr),y
sec
adc checksum,y
bne @wrong
dey
bpl :-
pla
tay
lda #0
rts
@wrong:
pla
tay
lda #1
rts
;----- file: nes-test-roms/ppu_vbl_nmi/source/10-even_odd_timing.s -----
; Tests timing of skipped clock every other frame
; when BG is enabled.
;
; Output: 08 08 09 07
.include "shell.inc"
.include "sync_vbl.s"
adjust = 2359               ; CPU clocks into frame where $2001 changes from X to Y
.align 256
; Runs one measurement.
; In: X = PPUMASK value for most of VBL, Y = value for part of frame
; Out: X = loops (frames-ish) until VBL flag seen; printed via print_x
test: jsr disable_rendering
jsr sync_vbl_delay          ; A (set by caller) = extra PPU-clock offset
delay 13
; $2001=X for most of VBL, Y for part of frame, then 0
stx $2001
delay adjust-4-4
sty $2001
delay 20000
lda #0
sta $2001
delay 29781-adjust-4-20000-6
; Two frames with BG off
delay 29781
delay 29781-1
; Third frame same as first. Since clock is skipped every
; other frame, only one of these two will have the skipped
; clock, so its effect on later frame timing won't be a
; problem.
stx $2001
delay adjust-4
sty $2001
delay 20000
lda #0
sta $2001
delay 29781-adjust-4-20000-6
; Find number of PPU clocks until VBL
delay 29781-3-22
ldx #0
: delay 29781-2-4-3
inx
bit PPUSTATUS
bpl :-
jsr print_x
rts
; Each sub-test: A = PPU-clock offset, X/Y = PPUMASK values,
; expected X result checked afterwards.
main: jsr console_hide
set_test 2,"Clock is skipped too soon, relative to enabling BG"
lda #4
ldx #0
ldy #8
jsr test
cpx #8
jne test_failed
set_test 3,"Clock is skipped too late, relative to enabling BG"
lda #5
ldx #0
ldy #8
jsr test
cpx #8
jne test_failed
set_test 4,"Clock is skipped too soon, relative to disabling BG"
lda #4
ldx #8
ldy #0
jsr test
cpx #9
jne test_failed
set_test 5,"Clock is skipped too late, relative to disabling BG"
lda #5
ldx #8
ldy #0
jsr test
cpx #7
jne test_failed
jmp tests_passed
;----- file: nes-test-roms/ppu_vbl_nmi/source/04-nmi_control.s -----
; Tests immediate NMI behavior when enabling while VBL flag is already set
CUSTOM_NMI=1
.include "shell.inc"
zp_byte nmi_count
; Minimal NMI handler: just counts occurrences
nmi: inc nmi_count
rti
; Waits until NMI is about to occur (just before VBL), with NMI
; disabled and nmi_count cleared
begin: lda #0
sta $2000
jsr wait_vbl
delay 29600                 ; almost a full frame; next VBL is imminent
lda #0
sta nmi_count
rts
; Enables NMI, waits, then reads NMI count
; Out: A = nmi_count, Z set accordingly
end: lda #$80
sta $2000
delay 200
lda nmi_count
rts
main: set_test 2,"Shouldn't occur when disabled"
jsr begin
delay 200
lda nmi_count
jne test_failed
set_test 3,"Should occur when enabled and VBL begins"
jsr begin
jsr end
jeq test_failed
set_test 4,"$2000 should be mirrored every 8 bytes"
jsr begin
lda #$80
sta $2FF8                   ; mirror of $2000
delay 200
lda nmi_count
jeq test_failed
set_test 5,"Should occur immediately if enabled while VBL flag is set"
jsr begin
delay 200 ; VBL flag set during this time
jsr end ; NMI is enabled here, and should occur immediately
cmp #1
jne test_failed
set_test 6,"Shouldn't occur if enabled while VBL flag is clear"
jsr begin
delay 200
lda $2002 ; clear VBL flag
jsr end
jne test_failed
set_test 7,"Shouldn't occur again if writing $80 when already enabled"
jsr begin
lda #$80
sta $2000
delay 200 ; NMI occurs here
jsr end ; writes $80 again, shouldn't occur again
cmp #1 ; only 1 NMI should have occurred
jne test_failed
set_test 8,"Shouldn't occur again if writing $80 when already enabled 2"
jsr begin
delay 200 ; VBL flag set during this time
lda #$80 ; enable NMI, which should result in immediate NMI
sta $2000
jsr end ; writes $80 again, shouldn't occur again
cmp #1 ; only 1 NMI should have occurred
jne test_failed
set_test 9,"Should occur again if enabling after disabled"
jsr begin
lda #$80
sta $2000
delay 200 ; NMI occurs here
lda #$00 ; disable NMI
sta $2000
jsr end ; NMI is enabled again, and should occur immediately
cmp #2 ; 2 NMIs should have occurred
jne test_failed
set_test 10,"Should occur again if enabling after disabled 2"
jsr begin
delay 200 ; VBL flag set during this time
lda #$80 ; enable NMI, which should result in immediate NMI
sta $2000
lda #$00 ; disable NMI
sta $2000
jsr end ; NMI is enabled again, and should occur immediately
cmp #2 ; 2 NMIs should have occurred
jne test_failed
set_test 11,"Immediate occurence should be after NEXT instruction"
jsr begin
delay 200 ; VBL flag set during this time
ldx #0
lda #$80 ; enable NMI, which should result in immediate NMI
sta $2000 ; after NEXT instruction
stx nmi_count ; clear nmi_count
; NMI should occur here
lda nmi_count
jeq test_failed
jmp tests_passed
;----- file: nes-test-roms/ppu_vbl_nmi/source/09-even_odd_frames.s -----
; Tests clock skipped on every other PPU frame when BG rendering
; is enabled.
;
; Tries pattern of BG enabled/disabled during a sequence of
; 5 frames, then finds how many clocks were skipped. Prints
; number skipped clocks to help find problems.
;
; Correct output: 00 01 01 02
.include "shell.inc"
.include "sync_vbl.s"
; In: A = 5-frame BG enable pattern, pre-shifted (bit 3 = first frame)
; Out: X = number of PPU clocks skipped (printed)
test: pha
lda #0
sta $2001
jsr sync_vbl
delay 29755
pla
; Enable/disable rendering for each frame
; based on corresponding bit in A.
ldx #5
: pha
and #$08                    ; isolate PPUMASK_BG0 bit for this frame
sta $2001
delay 29781-3-2-4-4-2-2-3
pla
lsr a                       ; next frame's bit into position
dex
bne :-
; Find number of PPU clocks skipped:
; count frame-length loops until VBL flag is seen
lda #0
sta $2001
ldx #6
: delay 29781-2-4-3
dex
bit PPUSTATUS
bpl :-
jsr print_x
rts
; Runs one pattern and verifies expected skip count
; (shadows the routine name; ca65 allows a macro and label to share it)
.macro test pattern,time,code,text
set_test code,text
lda #8*pattern              ; *8 puts pattern bits at the BG-enable bit
jsr test
cpx #time
jne test_failed
.endmacro
main: test %00000,0,2,"Pattern ----- should not skip any clocks"
test %00011,1,3,"Pattern ---BB should skip 1 clock"
test %01001,1,4,"Pattern -B--B (even, odd) should skip 1 clock"
test %11011,2,5,"Pattern BB-BB (two pairs) should skip 2 clocks"
jmp tests_passed
;----- file: nes-test-roms/ppu_vbl_nmi/source/common/testing.s -----
; Utilities for writing test ROMs
; In NVRAM so these can be used before initializing runtime,
; then runtime initialized without clearing them
nv_res test_code ; code of current test
nv_res test_name,2 ; address of name of current test, or 0 of none
; Sets current test code and optional name. Also resets
; checksum.
; Preserved: A, X, Y
.macro set_test code,name
pha
lda #code
jsr set_test_
.ifblank name
setb test_name+1,0          ; high byte 0 flags "no name"
.else
.local Addr
setw test_name,Addr
seg_data RODATA,{Addr: .byte name,0}
.endif
pla
.endmacro
set_test_:
sta test_code
jmp reset_crc
; Initializes testing module
init_testing = init_crc
; Reports that all tests passed
tests_passed:
jsr print_filename
print_str newline,"Passed"
lda #0
jmp exit
; Reports "Done" if set_test has never been used,
; "Passed" if set_test 0 was last used, or
; failure if set_test n was last used.
tests_done:
ldx test_code
jeq tests_passed
inx                         ; $FF (never set) wraps to 0 -> "Done"
bne test_failed
jsr print_filename
print_str newline,"Done"
lda #0
jmp exit
; Reports that the current test failed. Prints code and
; name last set with set_test, or just "Failed" if none
; have been set yet. Exits with test_code as result.
test_failed:
ldx test_code
; Treat $FF as 1, in case it wasn't ever set
inx
bne :+
inx
stx test_code
:
; If code >= 2, print name
cpx #2-1 ; -1 due to inx above
blt :+
lda test_name+1
beq :+                      ; skip if no name was set
jsr print_newline
sta addr+1
lda test_name
sta addr
jsr print_str_addr
jsr print_newline
:
jsr print_filename
; End program
lda test_code
jmp exit
; If checksum doesn't match expected, reports failed test.
; Clears checksum afterwards.
; Preserved: A, X, Y
.macro check_crc expected
jsr_with_addr check_crc_,{.dword expected}
.endmacro
check_crc_:
pha
jsr is_crc_
bne :+
jsr reset_crc
pla
rts
: jsr print_newline
jsr print_crc               ; show actual CRC to help update tests
jmp test_failed
;----- file: nes-test-roms/ppu_vbl_nmi/source/common/print.s -----
; Prints values in various ways to output,
; including numbers and strings.
newline = 10
zp_byte print_temp_
; Prints indicated register to console as two hex
; chars and space
; Preserved: A, X, Y, flags
print_a:
php
pha
print_reg_:
jsr print_hex
lda #' '
jsr print_char_
pla
plp
rts
print_x:
php
pha
txa
jmp print_reg_
print_y:
php
pha
tya
jmp print_reg_
print_p:
php
pha
php                         ; capture flags a second time to print them
pla
jmp print_reg_
print_s:
php
pha
txa
tsx
inx                         ; +4 compensates for return address (2),
inx                         ; pushed flags, and pushed A
inx
inx
jsr print_x
tax
pla
plp
rts
; Prints A as two hex characters, NO space after
; Also feeds the byte into the running CRC.
; Preserved: A, X, Y
print_hex:
jsr update_crc
pha
lsr a
lsr a
lsr a
lsr a
jsr print_nibble_
pla
pha
and #$0F
jsr print_nibble_
pla
rts
; Converts low nibble of A to ASCII hex digit and prints it
print_nibble_:
cmp #10
blt @digit
adc #6;+1 since carry is set  ; 10..15 -> 'A'..'F'
@digit: adc #'0'
jmp print_char_
; Prints low 4 bits of A as single hex character
; Preserved: A, X, Y
print_nibble:
pha
and #$0F
jsr update_crc
jsr print_nibble_
pla
rts
; Prints character and updates checksum UNLESS
; it's a newline.
; Preserved: A, X, Y
print_char:
cmp #newline
beq :+
jsr update_crc
: pha
jsr print_char_
pla
rts
; Prints space. Does NOT update checksum.
; Preserved: A, X, Y
print_space:
pha
lda #' '
jsr print_char_
pla
rts
; Advances to next line. Does NOT update checksum.
; Preserved: A, X, Y
print_newline:
pha
lda #newline
jsr print_char_
pla
rts
; Prints string
; Preserved: A, X, Y
.macro print_str str,str2
jsr print_str_              ; string data follows the JSR inline
.byte str
.ifnblank str2
.byte str2
.endif
.byte 0
.endmacro
; Prints the zero-terminated string following the JSR, then
; resumes execution just past it (return address is repurposed
; as the string pointer).
print_str_:
sta print_temp_
pla
sta addr
pla
sta addr+1
jsr inc_addr                ; JSR pushes return address - 1
jsr print_str_addr
lda print_temp_
jmp (addr)                  ; addr now points past the terminator
; Prints string at addr and leaves addr pointing to
; byte AFTER zero terminator.
; Preserved: A, X, Y
print_str_addr:
pha
tya
pha
ldy #0
beq :+ ; always taken
@loop: jsr print_char
jsr inc_addr
: lda (addr),y
bne @loop
pla
tay
pla
; FALL THROUGH
; Increments 16-bit value in addr.
; Preserved: A, X, Y
inc_addr:
inc addr
bne :+
inc addr+1
: rts
; Prints A as 1-3 digit decimal.
; In: A = MSB
; Preserved: A, X, Y
print_dec:
sta print_temp_
pha
txa
pha
tya
pha
ldy print_temp_
lda #0
sta print_temp_             ; high byte 0: treat as 8-bit value
tya
jmp :+
; Prints 16-bit AY as 1-5 digit decimal (A = high byte, Y = low byte).
; Preserved: A, X, Y
print_ay_dec:
jsr update_crc
sta print_temp_
pha
txa
pha
tya
pha
: jsr update_crc
; Strip leading zeroes by finding the largest power of 10 <= value
ldx #6
: dex
cmp @lsb-1,x
lda print_temp_
sbc @msb-1,x
tya
bcc :-
bcs @non_zero
; Print remaining digits via repeated subtraction per power of 10
@more: ; Commit subtraction
iny
sta print_temp_
pla
; Subtract
@digit: sbc @lsb,x
pha
lda print_temp_
sbc @msb,x
bcs @more
; Print digit and undo subtraction
tya
jsr print_char_
pla
clc
adc @lsb,x
@non_zero:
sec
ldy #'0'
dex
bne @digit
ora #'0'                    ; last digit (ones place)
jsr print_char_
pla
tay
pla
tax
pla
rts
; Powers of ten, split into low and high bytes
@lsb: .byte 0,<10,<100,<1000,<10000
@msb: .byte 0,>10,>100,>1000,>10000
; Prints one of two characters based on condition.
; SEC; print_cc bcs,'C','-' prints 'C'.
; Preserved: A, X, Y, flags
.macro print_cc cond,yes,no
; Avoids labels since they're not local
; to macros in ca65.
php
pha
cond *+6                    ; branch over the "no" load (2-byte lda + 2-byte bne)
lda #no
bne *+4
lda #yes
jsr print_char
pla
plp
.endmacro
;----- file: nes-test-roms/ppu_vbl_nmi/source/common/shell_misc.s -----
; Reports internal error and exits program
; Prints "internal error" with the approximate PC (pulled from the
; stack return address) and exits with code 255.
internal_error:
assert_failed:
pla
tay
pla
init_cpu_regs
print_str newline,"internal error, PC="
jsr print_hex
jsr print_y
lda #255
jmp exit
.import __NVRAM_LOAD__, __NVRAM_SIZE__
; Fills [Begin, End) with A. Counts a negative size up to zero,
; with the base address adjusted down to compensate.
.macro fill_ram_ Begin, End
; Simpler to count from negative size up to 0,
; and adjust address downward to compensate
; for initial low byte in Y index
.local Neg_size
Neg_size = (Begin) - (End)
ldxy #(Begin) - <Neg_size
sty addr
stx addr+1
ldxy #Neg_size
: sta (addr),y
iny
bne :-
inc addr+1
inx
bne :-
.endmacro
; Clears 0 through ($100+S), $200 through __NVRAM_LOAD__-1, and
; __NVRAM_LOAD__+__NVRAM_SIZE__ through $7FF
; (i.e. all RAM except the NVRAM window and the in-use stack)
clear_ram:
lda #0
bss_begin = $200
fill_ram_ bss_begin,__NVRAM_LOAD__
fill_ram_ __NVRAM_LOAD__+__NVRAM_SIZE__,$800
; Zero-page
tax
: sta 0,x
inx
bne :-
; Stack below S
tsx
inx
: dex
sta $100,x
bne :-
rts
nv_res unused_nv_var ; to avoid size=0
; Clears nvram
clear_nvram:
lda #0
fill_ram_ __NVRAM_LOAD__,__NVRAM_LOAD__+__NVRAM_SIZE__
rts
; Prints filename and newline, if available, otherwise nothing.
; Preserved: A, X, Y
print_filename:
.ifdef FILENAME_KNOWN
; avoid cluttering output with filename on devcart
.ifndef BUILD_DEVCART
pha
jsr print_newline
setw addr,filename
jsr print_str_addr
jsr print_newline
pla
.endif
.endif
rts
.pushseg
.segment "RODATA"
; TODO: use approach from SNES, where length doesn't affect data
; Filename terminated with zero byte.
filename:
.ifdef FILENAME_KNOWN
.incbin "ram:nes_temp"
.endif
.byte 0
.popseg
;**** ROM-specific ****
.ifndef BUILD_NSF
.include "ppu.s"
avoid_silent_nsf:
play_byte:
rts
; Disables interrupts and loops forever. When running on
; devcart, this is patched to re-run loader.
.ifndef CUSTOM_FOREVER
forever:
sei
lda #0
sta PPUCTRL
: beq :-                    ; Z stays set; infinite loop
.res $10,$EA ; room for code to run loader
.endif
; Default NMI
.ifndef CUSTOM_NMI
zp_byte nmi_count
zp_byte flags_from_nmi
zp_byte pclo_from_nmi
zp_byte nmi_temp
; Records interrupted flags and PC low byte (from the stack frame),
; increments nmi_count, and returns with registers intact.
nmi: ; Record flags and PC low byte from stack
sta nmi_temp
pla
sta flags_from_nmi
pla
sta pclo_from_nmi
pha                         ; restore stack frame for RTI
lda flags_from_nmi
pha
lda nmi_temp
inc nmi_count
rti
; Waits for NMI. Must be using NMI handler that increments
; nmi_count, with NMI enabled.
; Preserved: X, Y
wait_nmi:
lda nmi_count
: cmp nmi_count
beq :-
rts
.endif
; Default IRQ
.ifndef CUSTOM_IRQ
zp_byte flags_from_irq
zp_byte pclo_from_irq
zp_byte irq_count
; Same recording scheme as the default NMI handler
irq: ; Record flags and PC low byte from stack
pla
sta flags_from_irq
pla
sta pclo_from_irq
pha
lda flags_from_irq
pha
inc irq_count
bit SNDCHN ; clear frame IRQ flag
rti
.endif
.endif
; Reports A in binary as high and low tones, with
; leading low tone for reference. Omits leading
; zeroes. Doesn't hang if no APU is present.
; Preserved: A, X, Y
play_hex:
pha
; Make low reference beep
clc
jsr @beep
; Remove high zero bits (sentinel 1 rotated in; loop until a 1 pops out)
sec
: rol a
bcc :-
; Play remaining bits
beq @zero
: jsr @beep
asl a
bne :-
@zero:
delay_msec_approx 300
pla
rts
; Plays low/high beep based on carry (set = high tone)
; Preserved: A, X, Y
@beep:
pha
; Set up square
lda #1
sta SNDCHN
sta $4001
sta $4003
adc #$FE ; period=$100 if carry, $1FF if none
sta $4002
; Fade volume
lda #$0F
: ora #$30                  ; constant-volume mode, halt length counter
sta $4000
delay_msec_approx 8
sec
sbc #$31
bpl :-
; Silence
setb SNDCHN,0
delay_msec_approx 160
pla
rts
;----- file: nes-test-roms/ppu_vbl_nmi/source/common/build_rom.s -----
; Builds program as iNES ROM
; Default is 32K PRG and 8K CHR ROM, NROM (0)
.if 0 ; Options to set before .include "shell.inc":
CHR_RAM=1 ; Use CHR-RAM instead of CHR-ROM
CART_WRAM=1 ; Use mapper that supports 8K WRAM in cart
CUSTOM_MAPPER=n ; Specify mapper number
.endif
.ifndef CUSTOM_MAPPER
.ifdef CART_WRAM
CUSTOM_MAPPER = 2 ; UNROM
.else
CUSTOM_MAPPER = 0 ; NROM
.endif
.endif
;;;; iNES header
.ifndef CUSTOM_HEADER
.segment "HEADER"
.byte $4E,$45,$53,26 ; "NES" EOF
.ifdef CHR_RAM
.byte 2,0 ; 32K PRG, CHR RAM
.else
.byte 2,1 ; 32K PRG, 8K CHR
.endif
.byte CUSTOM_MAPPER*$10+$01 ; vertical mirroring
.endif
.ifndef CUSTOM_VECTORS
.segment "VECTORS"
; Three unused vectors then NMI/reset/IRQ at $FFFA-$FFFF
.word -1,-1,-1, nmi, reset, irq
.endif
;;;; CHR-RAM/ROM
.ifdef CHR_RAM
.define CHARS "CHARS_PRG"   ; font stored in PRG, copied to CHR RAM at init
.segment CHARS
ascii_chr:
.segment "CHARS_PRG_ASCII"
.align $200
.incbin "ascii.chr"
ascii_chr_end:
.else
.define CHARS "CHARS"
.segment "CHARS_ASCII"
.align $200
.incbin "ascii.chr"
.res $1800
.endif
.segment CHARS
.res $10,0
;;;; Shell
.ifndef NEED_CONSOLE
NEED_CONSOLE=1
.endif
.ifndef LARGER_ROM_HACK
.segment "CODE"
.res $4000
.endif
.include "shell.s"
; Standard reset entry: quiet the PPU then run the shell
std_reset:
lda #0
sta PPUCTRL
sta PPUMASK
jmp run_shell
init_runtime:
.ifdef CHR_RAM
load_chr_ram
.endif
rts
; Final exit path: record result byte, play it audibly, hang
post_exit:
jsr set_final_result
jsr play_hex
jmp forever
; Standard NES bootloader to help with devcart.
; It is never executed by test ROM.
.segment "LOADER"
.incbin "bootloader.bin"
.code
.align 256
;----- file: nes-test-roms/ppu_vbl_nmi/source/common/console.s -----
; Scrolling text console with word wrapping, 30x29 characters.
;
; * Defers PPU initialization until first flush/ newline.
; * Works even if PPU doesn't support scrolling.
; * Keeps border around edge of screen for TV overscan.
; * Requires vertical or single-screen mirroring.
; * Requires ASCII font in CHR.
.ifndef CONSOLE_COLOR
CONSOLE_COLOR = $30 ; white
.endif
console_screen_width = 32 ; if lower than 32, left-justifies
; Number of characters of margin on left and right, to avoid
; text getting cut off by common TVs. OK if either/both are 0.
console_left_margin = 1
console_right_margin = 1
console_width = console_screen_width - console_left_margin - console_right_margin
zp_byte console_pos ; 0 to console_width
zp_byte console_scroll      ; pixel scroll of current line; -1 = uninitialized
zp_byte console_temp
bss_res console_buf,console_width
; Initializes console
console_init:
; Flag that console hasn't been initialized
setb console_scroll,-1
setb console_pos,0
rts
; Hides console by disabling PPU rendering and blacking out
; first four entries of palette.
; Preserved: A, X, Y
console_hide:
pha
jsr console_wait_vbl_
setb PPUMASK,0
lda #$0F                    ; black
jsr console_load_palette_
pla
rts
; Shows console display
; Preserved: A, X, Y
console_show:
pha
lda #CONSOLE_COLOR
jsr console_show_custom_color_
pla
rts
; Prints char A to console. Will not appear until
; a newline or flush occurs.
; Preserved: A, X, Y
console_print:
cmp #10
beq console_newline
sty console_temp
ldy console_pos
cpy #console_width
beq console_full_           ; buffer full: word-wrap first
sta console_buf,y
iny
sty console_pos
ldy console_temp
rts
; Displays current line and starts new one
; Preserved: A, X, Y
console_newline:
pha
jsr console_wait_vbl_
jsr console_flush_
jsr console_scroll_up_
setb console_pos,0
pla
rts
; Displays current line's contents without scrolling.
; Preserved: A, X, Y
console_flush:
pha
jsr console_wait_vbl_
jsr console_flush_
jsr console_apply_scroll_
pla
rts
;**** Internal routines ****
; Handles printing to a full line buffer: space becomes a newline,
; anything else forces a word wrap then retries the print.
console_full_:
ldy console_temp
; Line is full
; If space, treat as newline
cmp #' '
beq console_newline
; Wrap current line at appropriate point
pha
tya
pha
jsr console_wrap_
pla
tay
pla
jmp console_print
; Inserts newline into buffer at appropriate position, leaving
; next line ready in buffer
; Preserved: X, console_temp
console_wrap_:
; Find beginning of last word (last space in buffer)
ldy #console_width
lda #' '
: dey
bmi console_newline         ; no space at all: hard-wrap whole line
cmp console_buf,y
bne :-
; y = 0 to console_width-1
; Flush through current word and put remaining
; in buffer for next line
jsr console_wait_vbl_
; Time to last PPU write: 207 + 32*(26 + 10)
lda console_scroll
jsr console_set_ppuaddr_
stx console_pos ; save X
ldx #0
; Print everything before last word
: lda console_buf,x
sta PPUDATA
inx
dey
bpl :-
; x = 1 to console_width
; Move last word to beginning of buffer, and
; print spaces for rest of line
ldy #0
beq :++
: lda #' '
sta PPUDATA
lda console_buf,x
inx
sta console_buf,y
iny
: cpx #console_width
bne :--
ldx console_pos ; restore X
; Append new text after that
sty console_pos
; FALL THROUGH
; Scrolls up 8 pixels and clears one line BELOW new line
; Preserved: X, console_temp
console_scroll_up_:
; Scroll up 8 pixels
lda console_scroll
jsr console_add_8_to_scroll_
sta console_scroll
; Clear line AFTER that on screen
jsr console_add_8_to_scroll_
jsr console_set_ppuaddr_
ldy #console_width
lda #' '
: sta PPUDATA
dey
bne :-
; FALL THROUGH
; Applies current scrolling position to PPU
; Preserved: X, Y, console_temp
console_apply_scroll_:
lda #0
sta PPUADDR                 ; reset PPU address latch/address
sta PPUADDR
sta PPUSCROLL               ; X scroll = 0
lda console_scroll
jsr console_add_8_to_scroll_
jsr console_add_8_to_scroll_
sta PPUSCROLL               ; Y scroll = scroll+16
rts
; Sets PPU address for row
; In: A = scroll position (multiple of 8)
; Out: PPUADDR = $2000 + (A/8)*32 + console_left_margin
; Preserved: X, Y
console_set_ppuaddr_:
sta console_temp
lda #$08                    ; becomes the $20 of $2000 after two rols
asl console_temp
rol a
asl console_temp
rol a
sta PPUADDR
lda console_temp
ora #console_left_margin
sta PPUADDR
rts
; A = (A + 8) % 240
; (nametable is 240 pixels tall)
; Preserved: X, Y
console_add_8_to_scroll_:
cmp #240-8
bcc :+
adc #16-1;+1 for set carry  ; extra 16 (=256-240) makes +8 wrap past 240
: adc #8
rts
; Shows console using the palette color in A
console_show_custom_color_:
pha
jsr console_wait_vbl_
setb PPUMASK,PPUMASK_BG0
pla
jsr console_load_palette_
jmp console_apply_scroll_
; Writes black backdrop then A to BG palette entries $3F01-$3F03
console_load_palette_:
pha
setb PPUADDR,$3F
setb PPUADDR,$00
setb PPUDATA,$0F ; black
pla
sta PPUDATA
sta PPUDATA
sta PPUDATA
rts
; Initializes PPU if necessary, then waits for VBL
; Preserved: A, X, Y, console_temp
console_wait_vbl_:
lda console_scroll
cmp #-1                     ; -1 = console not yet initialized
bne @already_initialized
; Deferred initialization of PPU until first use of console
; In case PPU doesn't support scrolling, start a
; couple of lines down
setb console_scroll,16
jsr console_hide
tya
pha
; Fill nametable with spaces (4*240 = 960 tiles)
setb PPUADDR,$20
setb PPUADDR,$00
ldy #240
lda #' '
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dey
bne :-
; Clear attributes (64 bytes)
lda #0
ldy #$40
: sta PPUDATA
dey
bne :-
pla
tay
jsr console_show
@already_initialized:
jmp wait_vbl_optional
; Flushes current line to the nametable row for console_scroll
; Preserved: X, Y
console_flush_:
lda console_scroll
jsr console_set_ppuaddr_
sty console_temp
; Copy line (console_pos characters)
ldy #0
beq :++                     ; enter at the test (handles empty line)
: lda console_buf,y
sta PPUDATA
iny
: cpy console_pos
bne :--
ldy console_temp
rts
|
xem/nes | 1,096 | nes-test-roms/ppu_vbl_nmi/source/common/text_out.s | ; Text output as expanding zero-terminated string at text_out_base
; The final exit result byte is written here
final_result = $6000
; Text output is written here as an expanding
; zero-terminated string
text_out_base = $6004
bss_res text_out_temp
zp_res text_out_addr,2
; Initializes text output: empty string, result $80 ("running"),
; and the $DE $B0 $61 signature that tells emulators the data is valid.
init_text_out:
ldx #0
; Put valid data first
; NOTE(review): this ldx #0 appears unused (X is reloaded below);
; presumably leftover — confirm setb macro doesn't rely on X.
setb text_out_base,0
lda #$80
jsr set_final_result
; Now fill in signature that tells emulator there's
; useful data there
setb text_out_base-3,$DE
setb text_out_base-2,$B0
setb text_out_base-1,$61
; text_out_addr = pointer to next character to write
ldx #>text_out_base
stx text_out_addr+1
setb text_out_addr,<text_out_base
rts
; Sets final result byte in memory
; In: A = result code
set_final_result:
sta final_result
rts
; Writes character to text output
; In: A=Character to write
; Preserved: A, X, Y
write_text_out:
sty text_out_temp
; Write new terminator FIRST, then new char before it,
; in case emulator looks at string in middle of this routine.
ldy #1
pha
lda #0
sta (text_out_addr),y
dey
pla
sta (text_out_addr),y
; Advance 16-bit write pointer.
inc text_out_addr
bne :+
inc text_out_addr+1
:
ldy text_out_temp
rts
|
xem/nes | 3,060 | nes-test-roms/ppu_vbl_nmi/source/common/ppu.s | ; PPU utilities
bss_res ppu_not_present
; Sets PPUADDR to w
; Preserved: X, Y
; Reading PPUSTATUS first resets the PPUADDR write latch.
.macro set_ppuaddr w
bit PPUSTATUS
setb PPUADDR,>w
setb PPUADDR,<w
.endmacro
; Delays by no more than n scanlines
; (341 PPU clocks per scanline, 3 PPU clocks per CPU clock on NTSC)
.macro delay_scanlines n
.if CLOCK_RATE <> 1789773
.error "Currently only supports NTSC"
.endif
delay ((n)*341)/3
.endmacro
; Waits for VBL then disables PPU rendering.
; Preserved: A, X, Y
disable_rendering:
pha
jsr wait_vbl_optional
setb PPUMASK,0
pla
rts
; Fills first nametable with $00
; Preserved: Y
clear_nametable:
ldx #$20
bne clear_nametable_
clear_nametable2:
ldx #$24
clear_nametable_:
lda #0
jsr fill_screen_
; Clear attribute table (the 64 bytes following the 960-byte
; nametable; PPUADDR already points there after fill_screen_)
ldx #64
: sta PPUDATA
dex
bne :-
rts
; Fills screen with tile A
; Preserved: A, Y
fill_screen:
ldx #$20
bne fill_screen_
; Same as fill_screen, but fills other nametable
fill_screen2:
ldx #$24
fill_screen_:
; Sets PPUADDR to X:$00, then writes 240*4 = 960 bytes of A.
stx PPUADDR
ldx #$00
stx PPUADDR
ldx #240
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dex
bne :-
rts
; Fills palette with $0F
; Preserved: Y
; Note: falls through into clear_oam.
clear_palette:
set_ppuaddr $3F00
ldx #$20
lda #$0F
: sta PPUDATA
dex
bne :-
; Fills OAM with $FF
; Preserved: Y
clear_oam:
lda #$FF
; Fills OAM with A
; Preserved: A, Y
fill_oam:
; 256 writes starting at OAM address 0 (dex wraps 0 -> $FF).
ldx #0
stx SPRADDR
: sta SPRDATA
dex
bne :-
rts
; Initializes wait_vbl_optional. Must be called before
; using it.
.align 32
init_wait_vbl:
; Wait for VBL flag to be set, or ~60000
; clocks (2 frames) to pass
ldy #24
ldx #1
bit PPUSTATUS
: bit PPUSTATUS
bmi @set
dex
bne :-
dey
bpl :-
@set:
; Be sure flag didn't stay set (in case
; PPUSTATUS always has high bit set)
; On timeout Y underflowed to $FF, so bit 7 of ppu_not_present gets set;
; a stuck PPUSTATUS bit 7 also sets it via the ORA.
tya
ora PPUSTATUS
sta ppu_not_present
rts
; Same as wait_vbl, but returns immediately if PPU
; isn't working or doesn't support VBL flag
; Preserved: A, X, Y
.align 16
wait_vbl_optional:
bit ppu_not_present
bmi :++
; FALL THROUGH
; Clears VBL flag then waits for it to be set.
; Preserved: A, X, Y
wait_vbl:
bit PPUSTATUS
: bit PPUSTATUS
bpl :-
: rts
; Emits timing check: delays almost one frame of length Len, then
; verifies the VBL flag behaves as expected for that frame length.
.macro check_ppu_region_ Len
; Delays since VBL began
jsr wait_vbl_optional ; 10 average
delay Len - 18 - 200
lda PPUSTATUS ; 4
bmi @ok ; 2
delay 200
; Next VBL should roughly begin here if it's the
; one we are detecting
delay 200
lda PPUSTATUS ; 2
bpl @ok
.endmacro
; Prints a warning if the PPU frame length doesn't match the
; region (NTSC/PAL) this test was built for.
check_ppu_region:
.ifndef REGION_FREE
.ifdef PAL_ONLY
check_ppu_region_ 29781
print_str {newline,"Note: This test is meant for PAL NES only.",newline,newline}
.endif
.ifdef NTSC_ONLY
check_ppu_region_ 33248
print_str {newline,"Note: This test is meant for NTSC NES only.",newline,newline}
.endif
.endif
@ok: rts
; Loads ASCII font into CHR RAM and fills rest with $FF
.macro load_chr_ram
bit PPUSTATUS
setb PPUADDR,0
setb PPUADDR,0
; Copy ascii_chr to 0
setb addr,<ascii_chr
ldx #>ascii_chr
ldy #0
@page:
stx addr+1
: lda (addr),y
sta PPUDATA
iny
bne :-
inx
cpx #>ascii_chr_end
bne @page
; Fill rest
; Pads remaining CHR RAM up to $2000 with $FF.
lda #$FF
: sta PPUDATA
iny
bne :-
inx
cpx #$20
bne :-
.endmacro
|
xem/nes | 2,728 | nes-test-roms/ppu_vbl_nmi/source/common/shell.s | ; Shell that sets up testing framework and calls main
; Detect inclusion loops (otherwise ca65 goes crazy)
.ifdef SHELL_INCLUDED
.error "shell.s included twice"
.end
.endif
SHELL_INCLUDED = 1
; Temporary variables that ANY routine might modify, so
; only use them between routine calls.
; ('<' forces zero-page addressing for these fixed slots.)
temp = <$A
temp2 = <$B
temp3 = <$C
addr = <$E
ptr = addr
; Move code from $C000 to $E200, to accommodate my devcarts
.ifndef LARGER_ROM_HACK
.segment "CODE"
.res $2200
.endif
; Put shell code after user code, so user code is in more
; consistent environment
.segment "CODE2"
; Any user code which runs off end might end up here,
; so catch that mistake.
nop ; in case there was three-byte opcode before this
nop
jmp internal_error
;**** Common routines ****
.include "macros.inc"
.include "neshw.inc"
.include "delay.s"
.include "print.s"
.include "crc.s"
.include "testing.s"
;**** Shell core ****
.ifndef CUSTOM_RESET
reset:
sei
jmp std_reset
.endif
; Sets up hardware then runs main
run_shell:
init_cpu_regs
jsr init_shell
set_test $FF
jmp run_main
; Initializes shell without affecting current set_test values
init_shell:
jsr clear_ram
jsr init_wait_vbl ; waits for VBL once here,
jsr wait_vbl_optional ; so only need to wait once more
jsr init_text_out
jsr init_testing
jsr init_runtime
jsr console_init
rts
; Runs main in consistent PPU/APU environment, then exits
; with code 0
run_main:
jsr pre_main
jsr main
lda #0
jmp exit
; Sets up environment for main to run in
pre_main:
.ifndef BUILD_NSF
jsr disable_rendering
setb PPUCTRL,0
jsr clear_palette
jsr clear_nametable
jsr clear_nametable2
jsr clear_oam
.endif
; Clear APU registers
lda #0
sta $4015
ldx #$13
: sta $4000,x
dex
bpl :-
; CPU registers
; P = $34 (I set, D clear), A = X = Y = 0, via the pha/plp below.
lda #$34
pha
lda #0
tax
tay
jsr wait_vbl_optional
plp
sta SNDMODE
rts
.ifndef CUSTOM_EXIT
exit:
.endif
; Reports result and ends program
; In: A = result code (0 = passed)
std_exit:
sta temp
init_cpu_regs
setb SNDCHN,0
lda temp
jsr report_result
pha
jsr check_ppu_region
pla
jmp post_exit
; Reports final result code in A
report_result:
; Inner jsr prints the text, then the code is beeped out.
jsr :+
jmp play_byte
: jsr print_newline
jsr console_show
; 0: ""
cmp #1
bge :+
rts
:
; 1: "Failed"
bne :+
print_str {"Failed",newline}
rts
; n: "Failed #n"
: print_str "Failed #"
jsr print_dec
jsr print_newline
rts
;**** Other routines ****
.include "shell_misc.s"
.ifdef NEED_CONSOLE
.include "console.s"
.else
; Stubs so code doesn't have to care whether
; console exists
console_init:
console_show:
console_hide:
console_print:
console_flush:
rts
.endif
.ifndef CUSTOM_PRINT
.include "text_out.s"
; Sends each character to both the memory text buffer and the console.
print_char_:
jsr write_text_out
jmp console_print
stop_capture:
rts
.endif
|
xem/nes | 3,713 | nes-test-roms/ppu_vbl_nmi/source/common/sync_vbl.s | ; Synchronizes EXACTLY to VBL, to accuracy of 1/3 CPU clock
; (1/2 CPU clock if PPU is enabled). Reading PPUSTATUS
; 29768 clocks or later after return will have bit 7 set.
; Reading PPUSTATUS immediately will have bit 7 clear.
; Preserved: A, X, Y
; Time: 120-330 msec
; NOTE: every delay below is cycle-exact; do not alter instruction
; order or counts.
.align 128
sync_vbl:
pha
; Disable interrupts
sei
lda #0
sta PPUCTRL
; Coarse synchronize
bit $2002
: bit $2002
bpl :-
delay 29771
; Divide possible cases into two groups, and optimize
; for each, halving time this routine takes.
bit $2002
bmi :+
delay 4 ; max=4, lower=slower
: delay 24 ; max=24, lower=slower
; Synchronize precisely to VBL. VBL occurs every 29780.67
; CPU clocks. Loop takes 27 clocks. Every 1103 iterations,
; the second LDA $2002 will read exactly 29781 clocks
; after a previous read. Thus, the loop will effectively
; read $2002 one PPU clock later each frame. It starts out
; with VBL beginning sometime after this read, so that
; eventually VBL will begin just before the $2002 read,
; and thus leave CPU exactly synchronized to VBL.
: delay 27 - 11
bit $2002
bit $2002
bpl :-
pla
rts
; Same as sync_vbl, but additionally ensures that next frame
; will skip PPU clock at end of VBL if rendering is enabled.
; Preserved: A, X, Y
sync_vbl_odd:
pha
; Rendering must be disabled
jsr wait_vbl
lda #0
sta PPUMASK
jsr sync_vbl
jsr @render_frame
; See whether frame was short
; If not, frames totaled 59561+1/3 CPU clocks
delay 29781-17-1
lda PPUSTATUS
bmi :+
jmp @end
:
; If frame was short, first frame was
; one clock shorter. Wait another frame to
; toggle even/odd flag. Rendering enabled
; for frame so total of all three frames
; is 89341+1/3 CPU clocks, which has the
; same fraction as the other case, thus
; ensuring the same CPU-PPU synchronization.
jsr @render_frame
@end: ; Establish same timing as sync_vbl
delay 29781-7
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
pla
rts
; Renders one frame with BG enabled, then disables rendering again.
@render_frame:
lda #PPUMASK_BG0
sta PPUMASK
delay 29781-6-6-6-6+1
lda #0
sta PPUMASK
rts
; Same as sync_vbl_odd, but next frame will NOT skip PPU clock
; Preserved: A, X, Y
sync_vbl_even:
jsr sync_vbl_odd
delay 341*262*3 / 3 - 10; get to even frame without affecting sync
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
rts
; Same as sync_vbl_even, but also writes A to SPRDMA without
; affecting timing (in particular, SPRDMA's optional extra clock
; is dealt with).
; Preserved: A, X, Y
sync_vbl_even_dma:
jsr sync_vbl_odd
delay 341*262 - 534
sta SPRDMA
bit PPUSTATUS
; Delay extra clock if ncessary. Unaffected by code
; alignment since it branches to next instr.
; (Taken branch to the next instruction costs one extra clock.)
bpl :+
:
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
rts
; Same as sync_vbl, but then delays A additional PPU clocks.
; Preserved: X, Y
.align 32
sync_vbl_delay:
jsr sync_vbl
; VBL occurs every 29780.67 clocks, therefore
; each iteration of the loop is like delaying
; 1/3 CPU clock (1 PPU clock).
: delay 29781-7
clc
adc #-1
bcs :-
delay 29781*2-10
; Be sure VBL flag is clear for this frame, as the
; other sync routines do.
bit PPUSTATUS
rts
; Effectively delays n PPU clocks, while maintaing
; even/odd frame (i.e. never delays an odd number of
; frames). PPU rendering must be off.
; Preserved: A, X, Y
.macro delay_ppu_even n
.if (n) < 8
.error "time out of range"
.endif
.if (n) .MOD 3 = 1
.if (n) > 4
delay (n)/3-1
.endif
delay 29781*4
.elseif (n) .MOD 3 = 2
delay (n)/3
delay 29781*2
.else
delay (n)/3
.endif
.endmacro
|
xem/nes | 3,734 | nes-test-roms/ppu_vbl_nmi/source/common/delay.s | ; Delays in CPU clocks, milliseconds, etc. All routines are re-entrant
; (no global data). No routines touch X or Y during execution.
; Code generated by macros is relocatable; it contains no JMPs to itself.
; NOTE: all clock counts here are exact; any code change alters timing.
zp_res delay_temp_ ; only written to
; Delays n clocks, from 2 to 16777215
; Preserved: A, X, Y, flags
.macro delay n
.if (n) < 0 .or (n) = 1 .or (n) > 16777215
.error "Delay out of range"
.endif
delay_ (n)
.endmacro
; Delays n milliseconds (1/1000 second)
; n can range from 0 to 1100.
; Preserved: A, X, Y, flags
.macro delay_msec n
.if (n) < 0 .or (n) > 1100
.error "time out of range"
.endif
delay ((n)*CLOCK_RATE+500)/1000
.endmacro
; Delays n microseconds (1/1000000 second).
; n can range from 0 to 100000.
; Preserved: A, X, Y, flags
.macro delay_usec n
.if (n) < 0 .or (n) > 100000
.error "time out of range"
.endif
delay ((n)*((CLOCK_RATE+50)/100)+5000)/10000
.endmacro
; Delays approximately n milliseconds (1/1000 second),
; without caring whether it's NTSC or PAL.
; n can range from 0 to 1100.
; Preserved: A, X, Y, flags
.macro delay_msec_approx n
.if (n) < 0 .or (n) > 1100
.error "time out of range"
.endif
delay ((n)*1726190+500)/1000
.endmacro
.align 64
; Delays A clocks + overhead
; Preserved: X, Y
; Time: A+25 clocks (including JSR)
: sbc #7 ; carry set by CMP
delay_a_25_clocks:
cmp #7
bcs :- ; do multiples of 7
; Remaining 0-6 clocks dispatched via carefully balanced branches.
lsr a ; bit 0
bcs :+
: ; A=clocks/2, either 0,1,2,3
beq @zero ; 0: 5
lsr a
beq :+ ; 1: 7
bcc :+ ; 2: 9
@zero: bne :+ ; 3: 11
: rts ; (thanks to dclxvi for the algorithm)
; Delays A*256 clocks + overhead
; Preserved: X, Y
; Time: A*256+16 clocks (including JSR)
delay_256a_16_clocks:
cmp #0
bne :+
rts
delay_256a_11_clocks_:
: pha
lda #256-19-22
jsr delay_a_25_clocks
pla
clc
adc #-1
bne :-
rts
; Delays A*65536 clocks + overhead
; Preserved: X, Y
; Time: A*65536+16 clocks (including JSR)
delay_65536a_16_clocks:
cmp #0
bne :+
rts
delay_65536a_11_clocks_:
: pha
lda #256-19-22-13
jsr delay_a_25_clocks
lda #255
jsr delay_256a_11_clocks_
pla
clc
adc #-1
bne :-
rts
max_short_delay = 41
; delay_short_ macro jumps into these
; A sled of NOPs ending in RTS; jsr'ing n NOPs deep burns 12+2n clocks.
.res (max_short_delay-12)/2,$EA ; NOP
delay_unrolled_:
rts
; Emits an exact 0-41 clock delay without touching A, X, Y, or flags
; (delay_temp_ is a write-only scratch byte).
.macro delay_short_ n
.if n < 0 .or n = 1 .or n > max_short_delay
.error "Internal delay error"
.endif
.if n = 0
; nothing
.elseif n = 2
nop
.elseif n = 3
sta <delay_temp_
.elseif n = 4
nop
nop
.elseif n = 5
sta <delay_temp_
nop
.elseif n = 6
nop
nop
nop
.elseif n = 7
php
plp
.elseif n = 8
nop
nop
nop
nop
.elseif n = 9
php
plp
nop
.elseif n = 10
sta <delay_temp_
php
plp
.elseif n = 11
php
plp
nop
nop
.elseif n = 13
php
plp
nop
nop
nop
.elseif n & 1
sta <delay_temp_
jsr delay_unrolled_-((n-15)/2)
.else
jsr delay_unrolled_-((n-12)/2)
.endif
.endmacro
; Emits delay of n clocks assuming A and flags may be clobbered.
.macro delay_nosave_ n
; 65536+17 = maximum delay using delay_256a_11_clocks_
; 255+27 = maximum delay using delay_a_25_clocks
; 27 = minimum delay using delay_a_25_clocks
.if n > 65536+17
lda #^(n - 15)
jsr delay_65536a_11_clocks_
; +2 ensures remaining clocks is never 1
delay_nosave_ (((n - 15) & $FFFF) + 2)
.elseif n > 255+27
lda #>(n - 15)
jsr delay_256a_11_clocks_
; +2 ensures remaining clocks is never 1
delay_nosave_ (<(n - 15) + 2)
.elseif n >= 27
lda #<(n - 27)
jsr delay_a_25_clocks
.else
delay_short_ n
.endif
.endmacro
; Emits delay of n clocks preserving A and flags (php/pha around
; the worker costs 14 clocks, accounted for in the argument).
.macro delay_ n
.if n > max_short_delay
php
pha
delay_nosave_ (n - 14)
pla
plp
.else
delay_short_ n
.endif
.endmacro
|
xem/nes | 1,600 | nes-test-roms/ppu_vbl_nmi/source/common/crc.s | ; CRC-32 checksum calculation
zp_res checksum,4 ; Current CRC-32; no need to invert
zp_byte checksum_temp
zp_byte checksum_off_
; Turns CRC updating on/off. Allows nesting.
; Preserved: A, X, Y
; checksum_off_ < 0 means disabled; crc_on/crc_off nest by inc/dec.
crc_off:
dec checksum_off_
rts
crc_on: inc checksum_off_
beq :+
jpl internal_error ; catch unbalanced crc calls
: rts
; Initializes checksum module. Might initialize tables
; in the future.
init_crc:
jmp reset_crc
; Clears checksum and turns it on
; Preserved: X, Y
reset_crc:
lda #0
sta checksum_off_
sta checksum
sta checksum + 1
sta checksum + 2
sta checksum + 3
rts
; Updates checksum with byte in A (unless disabled via crc_off)
; Preserved: A, X, Y
; Time: 360 clocks average
update_crc:
bit checksum_off_
bmi update_crc_off
update_crc_:
; Bitwise CRC-32 (reflected polynomial $EDB88320), 8 bits per call.
pha
stx checksum_temp
eor checksum
ldx #8
sec
@bit: ror checksum+3
ror checksum+2
ror checksum+1
ror a
bcs :+
sta checksum
lda checksum+3
eor #$ED
sta checksum+3
lda checksum+2
eor #$B8
sta checksum+2
lda checksum+1
eor #$83
sta checksum+1
lda checksum
eor #$20
sec
: dex
bne @bit
sta checksum
ldx checksum_temp
pla
update_crc_off:
rts
; Prints checksum as 8-character hex value
; (CRC updating disabled around the print so it doesn't feed itself)
print_crc:
jsr crc_off
ldx #3
: lda checksum,x
jsr print_hex
dex
bpl :-
jmp crc_on
; EQ if checksum matches CRC
; Out: A=0 and EQ if match, A>0 and NE if different
; Preserved: X, Y
.macro is_crc crc
jsr_with_addr is_crc_,{.dword crc}
.endmacro
; Compares the 4 checksum bytes against the dword at (ptr).
is_crc_:
tya
pha
ldy #3
: lda (ptr),y
cmp checksum,y
bne @wrong
dey
bpl :-
pla
tay
lda #0
rts
@wrong:
pla
tay
lda #1
rts
|
xem/nes | 1,350 | nes-test-roms/apu_mixer/source/noise.s | ; Verifies noise DAC and non-linear mixing
;
; Makes tone by running noise at maximum frequency to get
; soft noise, then toggling its volume between 0 and some
; other value. Cancels this to silence with inverse wave
; generated using DMC DAC.
CUSTOM_TEXT = 1
.include "vol_shell.inc"
text: .byte "2. Should fade noise in,",newline
.byte "and out, without any tone.",0
test_main:
jsr test_atten
jsr test_vols
rts
; Tests each noise volume
test_vols:
loop_n_times test,16
rts
; One iteration: A = index 0-15.
; X = $30|(15-index) (noise constant-volume reg value),
; Y = matching DMC DAC level from vols table.
test:
tay
eor #$3F ; x = noise volume
tax
lda vols,y ; y = DMC DAC value
tay
setb $4015,$08 ; enable
setb $400E,0 ; min period
setb $400F,0 ; start
; Toggle noise volume / DAC at 896-clock half-periods, 700 times.
; Delay constants are cycle-exact.
setw temp,700
: lda #0
stx $400C
sta $4011
delay 896-10
lda #$30
sta $400C
sty $4011
delay 896-10-21
dec_tempw
bne :-
rts
; DMC DAC levels that cancel each noise volume (non-linear mixer)
vols:
.byte 13,12,11,10,9,9,8,7,6,5,4,4,3,2,1,0
.align 256
; Tests volume 15 over range of DMC DAC, starting
; at high end where it's most attenuated.
test_atten:
setb $4015,$08 ; enable
setb $400C,$3F ; max volume
setb $400E,0 ; min period
setb $400F,0 ; start
wait = 60
ldx #127
ldy #127-13
setb temp,wait
extra = 14-1
@1: delay extra
@2: lda #$30
sta $400C
stx $4011
delay 896-10
lda #$3F
sta $400C
sty $4011
delay 896-10-8-extra
dec temp
bne @1
setb temp,wait
dex
dey
bpl @2
rts
|
xem/nes | 2,631 | nes-test-roms/apu_mixer/source/square.s | ; Verifies square DACs and non-linear mixing
;
; Plays two square waves in all totals from 0 to 31.
; Cancels this to silence with inverse wave generated
; using DMC DAC.
.include "vol_shell.inc"
test_main:
setb $4001,$7F ; disable sweep
setb $4005,$7F
setb $4002,0 ; period = 0
setb $4003,0
setb $4006,0
setb $4007,0
delay 5000 ; allow period to settle in
setb $4015,$03
ldx #$6F ; period = 896*2
ldy #$00
stx $4002
stx $4006
sty $4003
sty $4007
delay 175
extra = 27-1
; X indexes the dmc/sq1/sq2 tables; dmc value 0 terminates.
; All delays below are cycle-exact against the square period.
ldx #-1
ldy #0
jmp @first
@1: delay extra
@2: lda dmc,x
sta $4011
delay 896-4-2
lda #127
sta $4011
delay 896-4-5-extra-4
dey
bne @1
@first: inx
lda sq1,x ; update square volumes
sta $4000
lda sq2,x
sta $4004
ldy #80
lda dmc,x
bne @2
rts
; DMC DAC values that cancel each combined square volume
.align 256
dmc:
.byte $7F,$7B,$77,$77,$74,$74,$70,$70,$70,$6D,$6D,$6D,$6A,$6A,$6A,$6A
.byte $67,$67,$67,$67,$64,$64,$64,$64,$64,$61,$61,$61,$61,$61,$5E,$5E
.byte $5E,$5E,$5E,$5E,$5C,$5C,$5C,$5C,$5C,$5C,$59,$59,$59,$59,$59,$59
.byte $59,$57,$57,$57,$57,$57,$57,$57,$54,$54,$54,$54,$54,$54,$54,$54
.byte $52,$52,$52,$52,$52,$52,$52,$52,$50,$50,$50,$50,$50,$50,$50,$50
.byte $4E,$4E,$4E,$4E,$4E,$4E,$4E,$4C,$4C,$4C,$4C,$4C,$4C,$4C,$4A,$4A
.byte $4A,$4A,$4A,$4A,$48,$48,$48,$48,$48,$48,$46,$46,$46,$46,$46,$44
.byte $44,$44,$44,$44,$42,$42,$42,$42,$41,$41,$41,$41,$3F,$3F,$3F,$3E
.byte $3E,$3E,$3C,$3C,$3B,$3B,$39,$38,$00
; Square 1 volume register values ($B0 | vol: constant volume, halt)
.align 256
sq1:
.byte $B0,$B0,$B0,$B1,$B0,$B1,$B0,$B1,$B2,$B0,$B1,$B2,$B0,$B1,$B2,$B3
.byte $B0,$B1,$B2,$B3,$B0,$B1,$B2,$B3,$B4,$B0,$B1,$B2,$B3,$B4,$B0,$B1
.byte $B2,$B3,$B4,$B5,$B0,$B1,$B2,$B3,$B4,$B5,$B0,$B1,$B2,$B3,$B4,$B5
.byte $B6,$B0,$B1,$B2,$B3,$B4,$B5,$B6,$B0,$B1,$B2,$B3,$B4,$B5,$B6,$B7
.byte $B0,$B1,$B2,$B3,$B4,$B5,$B6,$B7,$B1,$B2,$B3,$B4,$B5,$B6,$B7,$B8
.byte $B2,$B3,$B4,$B5,$B6,$B7,$B8,$B3,$B4,$B5,$B6,$B7,$B8,$B9,$B4,$B5
.byte $B6,$B7,$B8,$B9,$B5,$B6,$B7,$B8,$B9,$BA,$B6,$B7,$B8,$B9,$BA,$B7
.byte $B8,$B9,$BA,$BB,$B8,$B9,$BA,$BB,$B9,$BA,$BB,$BC,$BA,$BB,$BC,$BB
.byte $BC,$BD,$BC,$BD,$BD,$BE,$BE,$BF
; Square 2 volume register values, paired with sq1 entries
.align 256
sq2:
.byte $B0,$B1,$B2,$B1,$B3,$B2,$B4,$B3,$B2,$B5,$B4,$B3,$B6,$B5,$B4,$B3
.byte $B7,$B6,$B5,$B4,$B8,$B7,$B6,$B5,$B4,$B9,$B8,$B7,$B6,$B5,$BA,$B9
.byte $B8,$B7,$B6,$B5,$BB,$BA,$B9,$B8,$B7,$B6,$BC,$BB,$BA,$B9,$B8,$B7
.byte $B6,$BD,$BC,$BB,$BA,$B9,$B8,$B7,$BE,$BD,$BC,$BB,$BA,$B9,$B8,$B7
.byte $BF,$BE,$BD,$BC,$BB,$BA,$B9,$B8,$BF,$BE,$BD,$BC,$BB,$BA,$B9,$B8
.byte $BF,$BE,$BD,$BC,$BB,$BA,$B9,$BF,$BE,$BD,$BC,$BB,$BA,$B9,$BF,$BE
.byte $BD,$BC,$BB,$BA,$BF,$BE,$BD,$BC,$BB,$BA,$BF,$BE,$BD,$BC,$BB,$BF
.byte $BE,$BD,$BC,$BB,$BF,$BE,$BD,$BC,$BF,$BE,$BD,$BC,$BF,$BE,$BD,$BF
.byte $BE,$BD,$BF,$BE,$BF,$BE,$BF,$BF
|
xem/nes | 1,852 | nes-test-roms/apu_mixer/source/common/testing.s | ; Utilities for writing test ROMs
; In NVRAM so these can be used before initializing runtime,
; then runtime initialized without clearing them
nv_res test_code ; code of current test
nv_res test_name,2 ; address of name of current test, or 0 of none
; Sets current test code and optional name. Also resets
; checksum.
; Preserved: A, X, Y
.macro set_test code,name
pha
lda #code
jsr set_test_
.ifblank name
setb test_name+1,0
.else
.local Addr
setw test_name,Addr
seg_data "RODATA",{Addr: .byte name,0}
.endif
pla
.endmacro
set_test_:
sta test_code
jmp reset_crc
; Initializes testing module
init_testing = init_crc
; Reports that all tests passed
tests_passed:
jsr print_filename
print_str newline,"Passed"
lda #0
jmp exit
; Reports "Done" if set_test has never been used,
; "Passed" if set_test 0 was last used, or
; failure if set_test n was last used.
tests_done:
ldx test_code
jeq tests_passed
; test_code == $FF means set_test was never called -> "Done"
inx
bne test_failed
jsr print_filename
print_str newline,"Done"
lda #0
jmp exit
; Reports that the current test failed. Prints code and
; name last set with set_test, or just "Failed" if none
; have been set yet.
test_failed:
ldx test_code
; Treat $FF as 1, in case it wasn't ever set
inx
bne :+
inx
stx test_code
:
; If code >= 2, print name
cpx #2-1 ; -1 due to inx above
blt :+
lda test_name+1
beq :+
jsr print_newline
sta addr+1
lda test_name
sta addr
jsr print_str_addr
jsr print_newline
:
jsr print_filename
; End program
lda test_code
jmp exit
; If checksum doesn't match expected, reports failed test.
; Clears checksum afterwards.
; Preserved: A, X, Y
.macro check_crc expected
jsr_with_addr check_crc_,{.dword expected}
.endmacro
check_crc_:
pha
jsr is_crc_
bne :+
jsr reset_crc
pla
rts
; Mismatch: print actual CRC, then fail the current test.
: jsr print_newline
jsr print_crc
jmp test_failed
|
xem/nes | 2,841 | nes-test-roms/apu_mixer/source/common/print.s | ; Prints values in various ways to output,
; including numbers and strings.
newline = 10
zp_byte print_temp_
; Prints indicated register to console as two hex
; chars and space
; Preserved: A, X, Y, flags
print_a:
php
pha
print_reg_:
jsr print_hex
lda #' '
jsr print_char_
pla
plp
rts
print_x:
php
pha
txa
jmp print_reg_
print_y:
php
pha
tya
jmp print_reg_
print_p:
php
pha
php
pla
jmp print_reg_
; Prints stack pointer as it was before the call
; (+4 compensates for return address and the php/pha above).
print_s:
php
pha
txa
tsx
inx
inx
inx
inx
jsr print_x
tax
pla
plp
rts
; Prints A as two hex characters, NO space after
; Preserved: A, X, Y
; Also folds the byte into the running CRC.
print_hex:
jsr update_crc
pha
lsr a
lsr a
lsr a
lsr a
jsr print_nibble_
pla
pha
and #$0F
jsr print_nibble_
pla
rts
; Converts low nibble of A to ASCII hex digit and prints it.
print_nibble_:
cmp #10
blt @digit
adc #6;+1 since carry is set
@digit: adc #'0'
jmp print_char_
; Prints low 4 bits of A as single hex character
; Preserved: A, X, Y
print_nibble:
pha
and #$0F
jsr update_crc
jsr print_nibble_
pla
rts
; Prints character and updates checksum UNLESS
; it's a newline.
; Preserved: A, X, Y
print_char:
cmp #newline
beq :+
jsr update_crc
: pha
jsr print_char_
pla
rts
; Prints space. Does NOT update checksum.
; Preserved: A, X, Y
print_space:
pha
lda #' '
jsr print_char_
pla
rts
; Advances to next line. Does NOT update checksum.
; Preserved: A, X, Y
print_newline:
pha
lda #newline
jsr print_char_
pla
rts
; Prints string
; Preserved: A, X, Y
; String bytes are placed inline after the jsr; print_str_ resumes
; execution past the terminator.
.macro print_str str,str2
jsr print_str_
.byte str
.ifnblank str2
.byte str2
.endif
.byte 0
.endmacro
; Pops return address, prints the inline zero-terminated string that
; follows it, then jumps to the byte after the terminator.
print_str_:
sta print_temp_
pla
sta addr
pla
sta addr+1
jsr inc_addr
jsr print_str_addr
lda print_temp_
jmp (addr)
; Prints string at addr and leaves addr pointing to
; byte AFTER zero terminator.
; Preserved: A, X, Y
print_str_addr:
pha
tya
pha
ldy #0
beq :+ ; always taken
@loop: jsr print_char
jsr inc_addr
: lda (addr),y
bne @loop
pla
tay
pla
; FALL THROUGH
; Increments 16-bit value in addr.
; Preserved: A, X, Y
inc_addr:
inc addr
beq :+
rts
: inc addr+1
rts
; Prints A as 1-3 digit decimal value, NO space after.
; Preserved: A, X, Y
print_dec:
pha
sta print_temp_
jsr update_crc
txa
pha
lda print_temp_
; Hundreds
cmp #10
blt @ones
cmp #100
blt @tens
ldx #'0'-1
: inx
sbc #100
bge :-
adc #100
jsr @digit
; Tens
@tens: sec
ldx #'0'-1
: inx
sbc #10
bge :-
adc #10
jsr @digit
; Ones
@ones: ora #'0'
jsr print_char
pla
tax
pla
rts
; Print a single digit
@digit: pha
txa
jsr print_char
pla
rts
; Prints one of two characters based on condition.
; SEC; print_cc bcs,'C','-' prints 'C'.
; Preserved: A, X, Y, flags
.macro print_cc cond,yes,no
; Avoids labels since they're not local
; to macros in ca65.
php
pha
cond *+6
lda #no
bne *+4
lda #yes
jsr print_char
pla
plp
.endmacro
|
xem/nes | 3,089 | nes-test-roms/apu_mixer/source/common/shell_misc.s | ; Reports internal error and exits program
; Reports internal shell error and exits with code 255.
internal_error:
print_str newline,"Internal error"
lda #255
jmp exit
.import __NVRAM_LOAD__, __NVRAM_SIZE__
; Fills [Begin, End) with A. Clobbers X, Y, addr.
.macro fill_ram_ Begin, End
; Simpler to count from negative size up to 0,
; and adjust address downward to compensate
; for initial low byte in Y index
.local Neg_size
Neg_size = (Begin) - (End)
ldxy #(Begin) - <Neg_size
sty addr
stx addr+1
ldxy #Neg_size
: sta (addr),y
iny
bne :-
inc addr+1
inx
bne :-
.endmacro
; Clears 0 through ($100+S), $200 through __NVRAM_LOAD__-1, and
; __NVRAM_LOAD__+__NVRAM_SIZE__ through $7FF
; (i.e. all RAM except the NVRAM window and the in-use stack)
clear_ram:
lda #0
bss_begin = $200
fill_ram_ bss_begin,__NVRAM_LOAD__
fill_ram_ __NVRAM_LOAD__+__NVRAM_SIZE__,$800
; Zero-page
tax
: sta 0,x
inx
bne :-
; Stack below S
tsx
inx
: dex
sta $100,x
bne :-
rts
nv_res unused_nv_var ; to avoid size=0
; Clears nvram
clear_nvram:
lda #0
fill_ram_ __NVRAM_LOAD__,__NVRAM_LOAD__+__NVRAM_SIZE__
rts
; Prints filename and newline, if available, otherwise nothing.
; Preserved: A, X, Y
print_filename:
.ifdef FILENAME_KNOWN
pha
jsr print_newline
setw addr,filename
jsr print_str_addr
jsr print_newline
pla
.endif
rts
.pushseg
.segment "RODATA"
; Filename terminated with zero byte.
filename:
.ifdef FILENAME_KNOWN
.incbin "ram:nes_temp"
.endif
.byte 0
.popseg
;**** ROM-specific ****
.ifndef BUILD_NSF
.include "ppu.s"
; NSF-build stubs; no-ops in ROM builds.
avoid_silent_nsf:
play_byte:
rts
; Disables interrupts and loops forever
.ifndef CUSTOM_FOREVER
forever:
sei
lda #0
sta PPUCTRL
: beq :-
.res $10,$EA ; room for code to run loader
.endif
; Default NMI
.ifndef CUSTOM_NMI
zp_byte nmi_count
zp_byte flags_from_nmi
zp_byte pclo_from_nmi
nmi: ; Record flags and PC low byte from stack
pla
sta flags_from_nmi
pla
sta pclo_from_nmi
pha
lda flags_from_nmi
pha
inc nmi_count
rti
; Waits for NMI. Must be using NMI handler that increments
; nmi_count, with NMI enabled.
; Preserved: X, Y
wait_nmi:
lda nmi_count
: cmp nmi_count
beq :-
rts
.endif
; Default IRQ
.ifndef CUSTOM_IRQ
zp_byte flags_from_irq
zp_byte pclo_from_irq
zp_byte irq_count
irq: ; Record flags and PC low byte from stack
pla
sta flags_from_irq
pla
sta pclo_from_irq
pha
lda flags_from_irq
pha
inc irq_count
bit SNDCHN ; clear frame IRQ flag
rti
.endif
.endif
; Reports A in binary as high and low tones, with
; leading low tone for reference. Omits leading
; zeroes. Doesn't hang if no APU is present.
; Preserved: A, X, Y
play_hex:
pha
; Make low reference beep
clc
jsr @beep
; Remove high zero bits
; Rotate until the sentinel 1 (from sec) reaches carry past
; any leading zeroes.
sec
: rol a
bcc :-
; Play remaining bits
beq @zero
: jsr @beep
asl a
bne :-
@zero:
delay_msec 300
pla
rts
; Plays low/high beep based on carry
; Preserved: A, X, Y
@beep:
pha
; Set up square
lda #1
sta SNDCHN
sta $4001
sta $4003
adc #$FE ; period=$100 if carry, $1FF if none
sta $4002
; Fade volume
lda #$0F
: ora #$30
sta $4000
delay_msec 8
sec
sbc #$31
bpl :-
; Silence
setb SNDCHN,0
delay_msec 160
pla
rts
|
xem/nes | 1,483 | nes-test-roms/apu_mixer/source/common/build_rom.s | ; Builds program as iNES ROM
; Default is 32K PRG and 8K CHR ROM, NROM (0)
.if 0 ; Options to set before .include "shell.inc":
CHR_RAM=1 ; Use CHR-RAM instead of CHR-ROM
CART_WRAM=1 ; Use mapper that supports 8K WRAM in cart
CUSTOM_MAPPER=n ; Specify mapper number
.endif
.ifndef CUSTOM_MAPPER
.ifdef CART_WRAM
CUSTOM_MAPPER = 2 ; UNROM
.else
CUSTOM_MAPPER = 0 ; NROM
.endif
.endif
;;;; iNES header
.ifndef CUSTOM_HEADER
.segment "HEADER"
.byte $4E,$45,$53,26 ; "NES" EOF
.ifdef CHR_RAM
.byte 2,0 ; 32K PRG, CHR RAM
.else
.byte 2,1 ; 32K PRG, 8K CHR
.endif
.byte CUSTOM_MAPPER*$10+$01 ; vertical mirroring
.endif
.ifndef CUSTOM_VECTORS
.segment "VECTORS"
.word -1,-1,-1, nmi, reset, irq
.endif
;;;; CHR-RAM/ROM
.ifdef CHR_RAM
.define CHARS "CHARS_PRG"
.segment CHARS
ascii_chr:
.segment "CHARS_PRG_ASCII"
.align $200
.incbin "ascii.chr"
ascii_chr_end:
.else
.define CHARS "CHARS"
.segment "CHARS_ASCII"
.align $200
.incbin "ascii.chr"
.res $1800
.endif
.segment CHARS
.res $10,0
;;;; Shell
.ifndef NEED_CONSOLE
NEED_CONSOLE=1
.endif
.segment "CODE"
.res $4000
.include "shell.s"
std_reset:
lda #0
sta PPUCTRL
sta PPUMASK
jmp run_shell
init_runtime:
.ifdef CHR_RAM
load_chr_ram
.endif
rts
post_exit:
jsr set_final_result
jsr play_hex
jmp forever
; This helps devcart recover after running test.
; It is never executed by test ROM.
.segment "LOADER"
.incbin "devcart.bin"
.code
.align 256
|
xem/nes | 5,404 | nes-test-roms/apu_mixer/source/common/console.s | ; Scrolling text console with word wrapping, 30x29 characters.
;
; * Defers PPU initialization until first flush/ newline.
; * Works even if PPU doesn't support scrolling.
; * Keeps border around edge of screen for TV overscan.
; * Requires vertical or single-screen mirroring.
; * Requires ASCII font in CHR.
.ifndef CONSOLE_COLOR
CONSOLE_COLOR = $30 ; white
.endif
console_screen_width = 32 ; if lower than 32, left-justifies
; Number of characters of margin on left and right, to avoid
; text getting cut off by common TVs. OK if either/both are 0.
console_left_margin = 1
console_right_margin = 1
console_width = console_screen_width - console_left_margin - console_right_margin
zp_byte console_pos ; 0 to console_width
zp_byte console_scroll
zp_byte console_temp
bss_res console_buf,console_width
; Initializes console
console_init:
; Flag that console hasn't been initialized
setb console_scroll,-1
setb console_pos,0
rts
; Hides console by disabling PPU rendering and blacking out
; first four entries of palette.
; Preserved: A, X, Y
console_hide:
pha
jsr console_wait_vbl_
setb PPUMASK,0
lda #$0F
jsr console_load_palette_
pla
rts
; Shows console display
; Preserved: A, X, Y
console_show:
pha
lda #CONSOLE_COLOR
jsr console_show_custom_color_
pla
rts
; Prints char A to console. Will not appear until
; a newline or flush occurs.
; Preserved: A, X, Y
console_print:
cmp #10
beq console_newline
sty console_temp
ldy console_pos
cpy #console_width
beq console_full_
sta console_buf,y
iny
sty console_pos
ldy console_temp
rts
; Displays current line and starts new one
; Preserved: A, X, Y
console_newline:
pha
jsr console_wait_vbl_
jsr console_flush_
jsr console_scroll_up_
setb console_pos,0
pla
rts
; Displays current line's contents without scrolling.
; Preserved: A, X, Y
console_flush:
pha
jsr console_wait_vbl_
jsr console_flush_
jsr console_apply_scroll_
pla
rts
;**** Internal routines ****
console_full_:
ldy console_temp
; Line is full
; If space, treat as newline
cmp #' '
beq console_newline
; Wrap current line at appropriate point
pha
tya
pha
jsr console_wrap_
pla
tay
pla
jmp console_print
; Inserts newline into buffer at appropriate position, leaving
; next line ready in buffer
; Preserved: X, console_temp
console_wrap_:
; Find beginning of last word
ldy #console_width
lda #' '
: dey
bmi console_newline
cmp console_buf,y
bne :-
; y = 0 to console_width-1
; Flush through current word and put remaining
; in buffer for next line
jsr console_wait_vbl_
; Time to last PPU write: 207 + 32*(26 + 10)
lda console_scroll
jsr console_set_ppuaddr_
stx console_pos ; save X
ldx #0
; Print everything before last word
: lda console_buf,x
sta PPUDATA
inx
dey
bpl :-
; x = 1 to console_width
; Move last word to beginning of buffer, and
; print spaces for rest of line
ldy #0
beq :++
: lda #' '
sta PPUDATA
lda console_buf,x
inx
sta console_buf,y
iny
: cpx #console_width
bne :--
ldx console_pos ; restore X
; Append new text after that
sty console_pos
; FALL THROUGH
; Scrolls up 8 pixels and clears one line BELOW new line
; Preserved: X, console_temp
console_scroll_up_:
; Scroll up 8 pixels
lda console_scroll
jsr console_add_8_to_scroll_
sta console_scroll
; Clear line AFTER that on screen
jsr console_add_8_to_scroll_
jsr console_set_ppuaddr_
ldy #console_width
lda #' '
: sta PPUDATA
dey
bne :-
; FALL THROUGH
; Applies current scrolling position to PPU
; Preserved: X, Y, console_temp
console_apply_scroll_:
lda #0
sta PPUADDR
sta PPUADDR
sta PPUSCROLL
lda console_scroll
jsr console_add_8_to_scroll_
jsr console_add_8_to_scroll_
sta PPUSCROLL
rts
; Sets PPU address for row
; In: A = scroll position
; Preserved: X, Y
console_set_ppuaddr_:
sta console_temp
lda #$08
asl console_temp
rol a
asl console_temp
rol a
sta PPUADDR
lda console_temp
ora #console_left_margin
sta PPUADDR
rts
; A = (A + 8) % 240
; Preserved: X, Y
console_add_8_to_scroll_:
cmp #240-8
bcc :+
adc #16-1;+1 for set carry
: adc #8
rts
console_show_custom_color_:
pha
jsr console_wait_vbl_
setb PPUMASK,PPUMASK_BG0
pla
jsr console_load_palette_
jmp console_apply_scroll_
console_load_palette_:
pha
setb PPUADDR,$3F
setb PPUADDR,$00
setb PPUDATA,$0F ; black
pla
sta PPUDATA
sta PPUDATA
sta PPUDATA
rts
; Initializes PPU if necessary, then waits for VBL
; Preserved: A, X, Y, console_temp
console_wait_vbl_:
lda console_scroll
cmp #-1 ; console_scroll == $FF means "not yet initialized"
bne @already_initialized
; Deferred initialization of PPU until first use of console
; In case PPU doesn't support scrolling, start a
; couple of lines down
setb console_scroll,16
jsr console_hide
tya
pha ; preserve Y across the fill loops below
; Fill nametable with spaces
setb PPUADDR,$20
setb PPUADDR,$00
ldy #240 ; 240 iterations x 4 writes = 960 nametable bytes
lda #' '
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dey
bne :-
; Clear attributes (64 bytes at $23C0, reached by fall-through
; of the VRAM address from the loop above)
lda #0
ldy #$40
: sta PPUDATA
dey
bne :-
pla
tay
jsr console_show
@already_initialized:
jmp wait_vbl_optional ; tail call; returns to our caller
; Flushes current line: copies console_buf[0..console_pos) to the
; row of the nametable given by console_scroll.
; Preserved: X, Y
console_flush_:
lda console_scroll
jsr console_set_ppuaddr_ ; point VRAM at current row
sty console_temp ; save Y (restored at exit)
; Copy line
ldy #0
beq :++ ; enter loop at the end-of-line test
: lda console_buf,y
sta PPUDATA
iny
: cpy console_pos ; console_pos = number of chars buffered
bne :--
ldy console_temp
rts
|
xem/nes | 1,096 | nes-test-roms/apu_mixer/source/common/text_out.s | ; Text output as expanding zero-terminated string at text_out_base
; The final exit result byte is written here
; ($6000 is cartridge work RAM; emulators watch this address)
final_result = $6000
; Text output is written here as an expanding
; zero-terminated string
text_out_base = $6004
bss_res text_out_temp ; scratch byte used to preserve Y
zp_res text_out_addr,2 ; pointer to next write position in the string
; Initializes the text-output area: empty string, result byte $80
; ("running"), and the $DE $B0 $61 signature that tells emulators
; the memory region holds valid test data.
init_text_out:
ldx #0
; Put valid data first
setb text_out_base,0 ; empty (zero-terminated) string
lda #$80 ; $80 = test still running
jsr set_final_result
; Now fill in signature that tells emulator there's
; useful data there
setb text_out_base-3,$DE
setb text_out_base-2,$B0
setb text_out_base-1,$61
ldx #>text_out_base
stx text_out_addr+1
setb text_out_addr,<text_out_base
rts
; Sets final result byte in memory
; In: A = result code
set_final_result:
sta final_result
rts
; Writes character to text output
; In: A=Character to write
; Preserved: A, X, Y
write_text_out:
sty text_out_temp
; Write new terminator FIRST, then new char before it,
; in case emulator looks at string in middle of this routine.
ldy #1
pha
lda #0
sta (text_out_addr),y ; terminator at addr+1
dey
pla
sta (text_out_addr),y ; character at addr
inc text_out_addr ; 16-bit increment of write pointer
bne :+
inc text_out_addr+1
:
ldy text_out_temp
rts
|
xem/nes | 3,060 | nes-test-roms/apu_mixer/source/common/ppu.s | ; PPU utilities
bss_res ppu_not_present ; bit 7 set if PPU/VBL flag was not detected
; Sets PPUADDR to w
; Preserved: X, Y
.macro set_ppuaddr w
bit PPUSTATUS ; reset the PPUADDR high/low write latch
setb PPUADDR,>w
setb PPUADDR,<w
.endmacro
; Delays by no more than n scanlines
; (one NTSC scanline = 341 PPU dots = 341/3 CPU clocks)
.macro delay_scanlines n
.if CLOCK_RATE <> 1789773
.error "Currently only supports NTSC"
.endif
delay ((n)*341)/3
.endmacro
; Waits for VBL then disables PPU rendering.
; (Waiting first avoids turning rendering off mid-frame.)
; Preserved: A, X, Y
disable_rendering:
pha
jsr wait_vbl_optional
setb PPUMASK,0
pla
rts
; Fills first nametable with $00
; Preserved: Y
clear_nametable:
ldx #$20 ; nametable 0 at $2000
bne clear_nametable_ ; always taken (X nonzero)
clear_nametable2:
ldx #$24 ; nametable 1 at $2400
clear_nametable_:
lda #0
jsr fill_screen_ ; writes the 960 tile bytes
; Clear attribute table (the VRAM address now points at $x3C0,
; the 64-byte attribute area right after the tiles)
ldx #64
: sta PPUDATA
dex
bne :-
rts
; Fills screen with tile A
; Preserved: A, Y
fill_screen:
ldx #$20 ; nametable 0
bne fill_screen_ ; always taken (X nonzero)
; Same as fill_screen, but fills other nametable
fill_screen2:
ldx #$24
fill_screen_:
stx PPUADDR ; VRAM address = X:00
ldx #$00
stx PPUADDR
ldx #240 ; 240 iterations x 4 writes = 960 tiles
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dex
bne :-
rts
; Fills palette with $0F (black); falls through into clear_oam
; Preserved: Y
clear_palette:
set_ppuaddr $3F00
ldx #$20 ; all 32 palette entries
lda #$0F
: sta PPUDATA
dex
bne :-
; Fills OAM with $FF (moves all sprites off screen)
; Preserved: Y
clear_oam:
lda #$FF
; Fills OAM with A
; Preserved: A, Y
fill_oam:
ldx #0
stx SPRADDR
: sta SPRDATA ; X wraps 0,255,...,1 -> 256 writes total
dex
bne :-
rts
; Initializes wait_vbl_optional. Must be called before
; using it. Detects whether the PPU's VBL flag works and records
; the verdict in ppu_not_present (bit 7 set = not usable).
.align 32
init_wait_vbl:
; Wait for VBL flag to be set, or ~60000
; clocks (2 frames) to pass
ldy #24
ldx #1
bit PPUSTATUS ; clear any stale VBL flag
: bit PPUSTATUS
bmi @set
dex
bne :-
dey
bpl :- ; on timeout Y wraps to $FF (bit 7 set)
@set:
; Be sure flag didn't stay set (in case
; PPUSTATUS always has high bit set)
tya ; Y < $80 if flag was seen in time
ora PPUSTATUS ; second read: flag should have cleared
sta ppu_not_present
rts
; Same as wait_vbl, but returns immediately if PPU
; isn't working or doesn't support VBL flag
; Preserved: A, X, Y
.align 16
wait_vbl_optional:
bit ppu_not_present ; bit 7 set -> skip waiting entirely
bmi :++
; FALL THROUGH
; Clears VBL flag then waits for it to be set.
; Preserved: A, X, Y
wait_vbl:
bit PPUSTATUS ; read once to clear a stale flag
: bit PPUSTATUS
bpl :- ; spin until bit 7 (VBL) sets
: rts
; Checks whether the VBL flag period matches a frame of Len clocks;
; branches to @ok when the observed region matches.
.macro check_ppu_region_ Len
; Delays since VBL began
jsr wait_vbl_optional ; 10 average
delay Len - 18 - 200
lda PPUSTATUS ; 4
bmi @ok ; 2
delay 200
; Next VBL should roughly begin here if it's the
; one we are detecting
delay 200
lda PPUSTATUS ; 2
bpl @ok
.endmacro
; Prints a warning if the ROM was built for the other TV system
; than the one it's running on (unless REGION_FREE is defined).
; NTSC frame = 29781 clocks, PAL frame = 33248 (in its own clock).
check_ppu_region:
.ifndef REGION_FREE
.ifdef PAL_ONLY
check_ppu_region_ 29781
print_str {newline,"Note: This test is meant for PAL NES only.",newline,newline}
.endif
.ifdef NTSC_ONLY
check_ppu_region_ 33248
print_str {newline,"Note: This test is meant for NTSC NES only.",newline,newline}
.endif
.endif
@ok: rts
; Loads ASCII font into CHR RAM and fills rest with $FF
; Clobbers A, X, Y, addr.
.macro load_chr_ram
bit PPUSTATUS ; reset PPUADDR latch
setb PPUADDR,0
setb PPUADDR,0
; Copy ascii_chr to 0
setb addr,<ascii_chr
ldx #>ascii_chr
ldy #0
@page:
stx addr+1 ; copy one 256-byte page per outer iteration
: lda (addr),y
sta PPUDATA
iny
bne :-
inx
cpx #>ascii_chr_end
bne @page
; Fill rest (up to the full 8K of CHR at $2000)
lda #$FF
: sta PPUDATA
iny
bne :-
inx
cpx #$20
bne :-
.endmacro
|
xem/nes | 2,698 | nes-test-roms/apu_mixer/source/common/shell.s | ; Shell that sets up testing framework and calls main
; Detect inclusion loops (otherwise ca65 goes crazy)
.ifdef SHELL_INCLUDED
.error "shell.s included twice"
.end
.endif
SHELL_INCLUDED = 1
; Temporary variables that ANY routine might modify, so
; only use them between routine calls.
temp = <$A
temp2 = <$B
temp3 = <$C
addr = <$E ; 2-byte pointer
ptr = addr
; Move code from $C000 to $E200, to accommodate my devcarts
.segment "CODE"
.res $2200
; Put shell code after user code, so user code is in more
; consistent environment
.segment "CODE2"
; Any user code which runs off end might end up here,
; so catch that mistake.
nop ; in case there was three-byte opcode before this
nop
jmp internal_error
;**** Common routines ****
.include "macros.inc"
.include "neshw.inc"
.include "delay.s"
.include "print.s"
.include "crc.s"
.include "testing.s"
;**** Shell core ****
; Default reset vector target (tests may supply CUSTOM_RESET).
.ifndef CUSTOM_RESET
reset:
sei
jmp std_reset
.endif
; Sets up hardware then runs main
run_shell:
init_cpu_regs
jsr init_shell
set_test $FF ; $FF = "no test set yet"
jmp run_main
; Initializes shell without affecting current set_test values
init_shell:
jsr clear_ram
jsr init_wait_vbl ; waits for VBL once here,
jsr wait_vbl_optional ; so only need to wait once more
jsr init_text_out
jsr init_testing
jsr init_runtime
jsr console_init
rts
; Runs main in consistent PPU/APU environment, then exits
; with code 0
run_main:
jsr pre_main
jsr main
lda #0 ; 0 = passed
jmp exit
; Sets up environment for main to run in: blank screen, silent APU,
; and known CPU register/flag values.
pre_main:
.ifndef BUILD_NSF
jsr disable_rendering
setb PPUCTRL,0
jsr clear_palette
jsr clear_nametable
jsr clear_nametable2
jsr clear_oam
.endif
; Clear APU registers ($4000-$4013 and $4015)
lda #0
sta $4015
ldx #$13
: sta $4000,x
dex
bpl :-
; CPU registers: A=X=Y=0, P=$34 (I flag set, pushed now,
; pulled via plp below after the vbl wait)
lda #$34
pha
lda #0
tax
tay
jsr wait_vbl_optional
plp
sta SNDMODE ; A is still 0: 4-step frame sequencer, IRQ enabled
rts
; Default exit target (tests may supply CUSTOM_EXIT).
.ifndef CUSTOM_EXIT
exit:
.endif
; Reports result and ends program
; In: A = result code (0 = passed)
std_exit:
sta temp
init_cpu_regs
setb SNDCHN,0 ; silence all APU channels
lda temp
jsr report_result
pha
jsr check_ppu_region ; warn if wrong TV system
pla
jmp post_exit
; Reports final result code in A: prints a message for nonzero
; codes, then beeps the code via play_byte.
report_result:
jsr :+ ; print first...
jmp play_byte ; ...then beep the code audibly
: jsr print_newline
jsr console_show
; 0: ""
cmp #1
bge :+
rts
:
; 1: "Failed"
bne :+
print_str {"Failed",newline}
rts
; n: "Failed #n"
: print_str "Failed #"
jsr print_dec
jsr print_newline
rts
;**** Other routines ****
.include "shell_misc.s"
.ifdef NEED_CONSOLE
.include "console.s"
.else
; Stubs so code doesn't have to care whether
; console exists (all console entry points share one rts)
console_init:
console_show:
console_hide:
console_print:
console_flush:
rts
.endif
.ifndef CUSTOM_PRINT
.include "text_out.s"
; Routes each printed character to both the memory text buffer
; and the on-screen console.
print_char_:
jsr write_text_out
jmp console_print
stop_capture:
rts
.endif
|
xem/nes | 3,437 | nes-test-roms/apu_mixer/source/common/delay.s | ; Delays in CPU clocks, milliseconds, etc. All routines are re-entrant
; (no global data). No routines touch X or Y during execution.
; Code generated by macros is relocatable; it contains no JMPs to itself.
zp_byte delay_temp_ ; only written to (dummy store used for timing)
; Delays n clocks, from 2 to 16777215
; Preserved: A, X, Y, flags
.macro delay n
.if (n) < 0 .or (n) = 1 .or (n) > 16777215
.error "Delay out of range"
.endif
delay_ (n)
.endmacro
; Delays n milliseconds (1/1000 second)
; n can range from 0 to 1100.
; Preserved: A, X, Y, flags
.macro delay_msec n
.if (n) < 0 .or (n) > 1100
.error "time out of range"
.endif
delay ((n)*CLOCK_RATE+500)/1000
.endmacro
; Delays n microseconds (1/1000000 second).
; n can range from 0 to 100000.
; Preserved: A, X, Y, flags
.macro delay_usec n
.if (n) < 0 .or (n) > 100000
.error "time out of range"
.endif
delay ((n)*((CLOCK_RATE+50)/100)+5000)/10000
.endmacro
.align 64
; Delays A clocks + overhead
; Preserved: X, Y
; Time: A+25 clocks (including JSR)
; Cycle-exact: reduces A mod 7 by the loop, then burns the 0-6
; remaining clocks via carefully balanced branches. Do not touch.
: sbc #7 ; carry set by CMP
delay_a_25_clocks:
cmp #7
bcs :- ; do multiples of 7
lsr a ; bit 0
bcs :+
: ; A=clocks/2, either 0,1,2,3
beq @zero ; 0: 5
lsr a
beq :+ ; 1: 7
bcc :+ ; 2: 9
@zero: bne :+ ; 3: 11
: rts ; (thanks to dclxvi for the algorithm)
; Delays A*256 clocks + overhead
; Preserved: X, Y
; Time: A*256+16 clocks (including JSR)
delay_256a_16_clocks:
cmp #0 ; A=0: just the call overhead
bne :+
rts
; Internal entry: same loop without the zero check (11 clocks to here)
delay_256a_11_clocks_:
: pha
lda #256-19-22 ; 256 minus per-iteration overhead
jsr delay_a_25_clocks
pla
clc
adc #-1 ; decrement A without affecting the delay count
bne :-
rts
; Delays A*65536 clocks + overhead
; Preserved: X, Y
; Time: A*65536+16 clocks (including JSR)
delay_65536a_16_clocks:
cmp #0 ; A=0: just the call overhead
bne :+
rts
; Internal entry used by the delay_ macros (11 clocks to here)
delay_65536a_11_clocks_:
: pha
lda #256-19-22-13 ; remainder after the 255*256 delay below
jsr delay_a_25_clocks
lda #255
jsr delay_256a_11_clocks_
pla
clc
adc #-1 ; decrement outer count without disturbing timing
bne :-
rts
max_short_delay = 41
; delay_short_ macro jumps into these
; (a sled of NOPs ending in RTS; jsr'ing N bytes before the RTS
; burns 12 + 2*N clocks)
.res (max_short_delay-12)/2,$EA ; NOP
delay_unrolled_:
rts
; Emits an exact n-clock delay (0 or 2..max_short_delay) using
; minimal instruction sequences; longer counts jsr into the NOP
; sled before delay_unrolled_. sta <delay_temp_ is a 3-clock
; dummy store. Preserves A, X, Y; clobbers flags only via php/plp
; pairs that restore them.
.macro delay_short_ n
.if n < 0 .or n = 1 .or n > max_short_delay
.error "Internal delay error"
.endif
.if n = 0
; nothing
.elseif n = 2
nop
.elseif n = 3
sta <delay_temp_
.elseif n = 4
nop
nop
.elseif n = 5
sta <delay_temp_
nop
.elseif n = 6
nop
nop
nop
.elseif n = 7
php
plp
.elseif n = 8
nop
nop
nop
nop
.elseif n = 9
php
plp
nop
.elseif n = 10
sta <delay_temp_
php
plp
.elseif n = 11
php
plp
nop
nop
.elseif n = 13
php
plp
nop
nop
nop
.elseif n & 1
sta <delay_temp_
jsr delay_unrolled_-((n-15)/2)
.else
jsr delay_unrolled_-((n-12)/2)
.endif
.endmacro
; Emits an n-clock delay without saving A or flags; picks the
; largest-granularity helper and recurses on the remainder.
.macro delay_nosave_ n
; 65536+17 = maximum delay using delay_256a_11_clocks_
; 255+27 = maximum delay using delay_a_25_clocks
; 27 = minimum delay using delay_a_25_clocks
.if n > 65536+17
lda #^(n - 15)
jsr delay_65536a_11_clocks_
; +2 ensures remaining clocks is never 1
delay_nosave_ (((n - 15) & $FFFF) + 2)
.elseif n > 255+27
lda #>(n - 15)
jsr delay_256a_11_clocks_
; +2 ensures remaining clocks is never 1
delay_nosave_ (<(n - 15) + 2)
.elseif n >= 27
lda #<(n - 27)
jsr delay_a_25_clocks
.else
delay_short_ n
.endif
.endmacro
; Top-level delay: wraps delay_nosave_ in php/pha .. pla/plp
; (14 clocks, accounted for) so A and flags are preserved.
.macro delay_ n
.if n > max_short_delay
php
pha
delay_nosave_ (n - 14)
pla
plp
.else
delay_short_ n
.endif
.endmacro
|
xem/nes | 1,632 | nes-test-roms/apu_mixer/source/common/crc.s | ; CRC-32 checksum calculation
zp_res checksum,4 ; CRC-32 state, stored complemented
zp_byte checksum_temp ; saves X inside update_crc_
zp_byte checksum_off_ ; <0 = CRC updating disabled (nestable)
; Turns CRC updating on/off. Allows nesting.
; Preserved: A, X, Y
crc_off:
dec checksum_off_
rts
crc_on: inc checksum_off_
beq :+ ; must return exactly to 0
jpl internal_error ; catch unbalanced crc calls
: rts
; Initializes checksum module. Might initialize tables
; in the future.
init_crc:
jmp reset_crc
; Clears checksum and turns it on
; (CRC-32 initial state is all-ones)
; Preserved: X, Y
reset_crc:
lda #0
sta checksum_off_
lda #$FF
sta checksum
sta checksum + 1
sta checksum + 2
sta checksum + 3
rts
; Updates checksum with byte in A (unless disabled via crc_off)
; Bit-reflected CRC-32 with polynomial $EDB88320, one bit per
; loop iteration (the $ED/$B8/$83/$20 XOR constants below).
; Preserved: A, X, Y
; Time: 357 clocks average
update_crc:
bit checksum_off_
bmi update_crc_off ; disabled: do nothing
update_crc_:
pha
stx checksum_temp
eor checksum ; fold new byte into low byte of state
ldx #8 ; process 8 bits
@bit: lsr checksum+3 ; 32-bit right shift; low byte kept in A
ror checksum+2
ror checksum+1
ror a
bcc :+ ; carry = bit shifted out; XOR poly if set
sta checksum
lda checksum+3
eor #$ED
sta checksum+3
lda checksum+2
eor #$B8
sta checksum+2
lda checksum+1
eor #$83
sta checksum+1
lda checksum
eor #$20
: dex
bne @bit
sta checksum
ldx checksum_temp
pla
update_crc_off:
rts
; Prints checksum as 8-character hex value
; (state is stored complemented, so each byte is EOR #$FF'd;
; CRC updating is suspended so printing doesn't alter the CRC)
print_crc:
jsr crc_off
; Print complement
ldx #3 ; most significant byte first
: lda checksum,x
eor #$FF
jsr print_hex
dex
bpl :-
jmp crc_on
; EQ if checksum matches CRC
; Out: A=0 and EQ if match, A>0 and NE if different
; Preserved: X, Y
.macro is_crc crc
jsr_with_addr is_crc_,{.dword crc}
.endmacro
is_crc_:
tya
pha
; Compare with complemented checksum: expected + ~crc + 1
; is $100 (i.e. 0 with carry out) per byte on a match, and the
; carry chains across all four bytes.
ldy #3
: lda (ptr),y
sec
adc checksum,y
bne @wrong
dey
bpl :-
pla
tay
lda #0
rts
@wrong:
pla
tay
lda #1
rts
|
xem/nes | 1,903 | nes-test-roms/vaus-test/src/ppuclear.s | ;
; NES PPU common functions
; Copyright 2011 Damian Yerrick
;
; Copying and distribution of this file, with or without
; modification, are permitted in any medium without royalty provided
; the copyright notice and this notice are preserved in all source
; code copies. This file is offered as-is, without any warranty.
;
.include "nes.h"
.export ppu_clear_nt, ppu_clear_oam, ppu_screen_on
.import OAM
;;
; Clears a nametable to a given tile number and attribute value.
; (Turn off rendering in PPUMASK and set the VRAM address increment
; to 1 in PPUCTRL first.)
; @param A tile number
; @param X base address of nametable ($20, $24, $28, or $2C)
; @param Y attribute value ($00, $55, $AA, or $FF)
.proc ppu_clear_nt
  ; Set base PPU address to XX00
  stx PPUADDR
  ldx #$00
  stx PPUADDR
  ; Clear the 960 spaces of the main part of the nametable,
  ; using a 4 times unrolled loop
  ldx #960/4
  loop1:
    .repeat 4
      sta PPUDATA
    .endrepeat
    dex
    bne loop1
  ; Clear the 64 entries of the attribute table
  ; (VRAM address has advanced to $x 3C0 by now)
  ldx #64
  loop2:
    sty PPUDATA
    dex
    bne loop2
  rts
.endproc
;;
; Moves all sprites starting at address X (e.g, $04, $08, ..., $FC)
; below the visible area.
; X is 0 at the end.
.proc ppu_clear_oam
  ; First round the address down to a multiple of 4 so that it won't
  ; freeze should the address get corrupted.
  txa
  and #%11111100
  tax
  lda #$FF  ; Any Y value from $EF through $FF will work
  loop:
    sta OAM,x      ; write only the Y coordinate of each entry
    inx
    inx
    inx
    inx
    bne loop
  rts
.endproc
;;
; Sets the scroll position and turns PPU rendering on.
; @param A value for PPUCTRL ($2000) including scroll position
;         MSBs; see nes.h
; @param X horizontal scroll position (0-255)
; @param Y vertical scroll position (0-239)
; @param C if true, sprites will be visible
.proc ppu_screen_on
  stx PPUSCROLL
  sty PPUSCROLL
  sta PPUCTRL
  lda #BG_ON
  bcc :+
    lda #BG_ON|OBJ_ON
  :
  sta PPUMASK
  rts
.endproc
|
xem/nes | 16,718 | nes-test-roms/vaus-test/src/main.s | ;
; Simple sprite demo for NES (with paddle support)
; Copyright 2013 Damian Yerrick
;
; Copying and distribution of this file, with or without
; modification, are permitted in any medium without royalty provided
; the copyright notice and this notice are preserved in all source
; code copies. This file is offered as-is, without any warranty.
;
.include "nes.h"
.include "ram.h"
.import ppu_clear_nt, ppu_clear_oam, ppu_screen_on, read_all_pads
.importzp cur_keys_d0, cur_keys_d1, cur_keys_d3, cur_keys_d4
.segment "ZEROPAGE"
nmis: .res 1 ; incremented each NMI; used to detect vblank
oam_used: .res 1 ; starts at 0; next free byte in OAM shadow
; Game variables
player_xlo: .res 1 ; horizontal position is xhi + xlo/256 px
player_xhi: .res 1
player_dxlo: .res 1 ; speed in pixels per 256 s
player_yhi: .res 1
player_facing: .res 1 ; OAM attribute byte; bit 6 = h-flip
player_frame: .res 1 ; walk animation frame, 0-5
player_frame_sub: .res 1 ; fractional part of animation frame
control_type: .res 1 ; one of the CONTROL_* values below
paddle_min: .res 1 ; lowest paddle reading observed
paddle_max: .res 1 ; highest paddle reading observed
indicator_x: .res 1 ; current (inverted) paddle reading
target_x: .res 1 ; paddle position mapped to screen target
CONTROL_STANDARD = 0
CONTROL_4P_FC = 1
CONTROL_ARKANOID_FC = 2
CONTROL_ARKANOID_NES = 3
.segment "INESHDR"
.byt "NES",$1A ; magic signature
.byt 1 ; PRG ROM size in 16384 byte units
.byt 1 ; CHR ROM size in 8192 byte units
.byt $00 ; mirroring type and mapper number lower nibble
.byt $00 ; mapper number upper nibble
.segment "VECTORS"
.addr nmi, reset, irq
.segment "CODE"
;;
; This NMI handler is good enough for a simple "has NMI occurred?"
; vblank-detect loop. But sometimes there are things that you always
; want to happen every frame, even if the game logic takes far longer
; than usual. These might include music or a scroll split. In these
; cases, you'll need to put more logic into the NMI handler.
.proc nmi
  inc nmis
  rti
.endproc
; A null IRQ handler that just does RTI is useful to add breakpoints
; that survive a recompile. Set your debugging emulator to trap on
; reads of $FFFE, and then you can BRK $00 whenever you need to add
; a breakpoint.
;
; But sometimes you'll want a non-null IRQ handler.
; On NROM, the IRQ handler is mostly used for the DMC IRQ, which was
; designed for gapless playback of sampled sounds but can also be
; (ab)used as a crude timer for a scroll split (e.g. status bar).
.proc irq
  rti
.endproc
;
; Reset handler: brings the console to a known state, initializes
; game variables, then runs the per-frame main loop forever.
.proc reset
  ; The very first thing to do when powering on is to put all sources
  ; of interrupts into a known state.
  sei           ; Disable interrupts
  ldx #$00
  stx PPUCTRL   ; Disable NMI and set VRAM increment to 32
  stx PPUMASK   ; Disable rendering
  stx $4010     ; Disable DMC IRQ
  dex           ; Subtracting 1 from $00 gives $FF, which is a
  txs           ; quick way to set the stack pointer to $01FF
  bit PPUSTATUS ; Acknowledge stray vblank NMI across reset
  bit SNDCHN    ; Acknowledge DMC IRQ
  lda #$40
  sta P2        ; Disable APU Frame IRQ
  lda #$0F
  sta SNDCHN    ; Disable DMC playback, initialize other channels
vwait1:
  bit PPUSTATUS ; It takes one full frame for the PPU to become
  bpl vwait1    ; stable.  Wait for the first frame's vblank.
  ; We have about 29700 cycles to burn until the second frame's
  ; vblank.  Use this time to get most of the rest of the chipset
  ; into a known state.
  ; Most versions of the 6502 support a mode where ADC and SBC work
  ; with binary-coded decimal.  Some 6502-based platforms, such as
  ; Atari 2600, use this for scorekeeping.  The second-source 6502 in
  ; the NES ignores the mode setting because its decimal circuit is
  ; dummied out to save on patent royalties, and games either use
  ; software BCD routines or convert numbers to decimal every time
  ; they are displayed.  But some post-patent famiclones have a
  ; working decimal mode, so turn it off for best compatibility.
  cld
  ; Clear OAM and the zero page here.
  ldx #0
  jsr ppu_clear_oam  ; clear out OAM from X to end and set X to 0
  ; There are "holy wars" (perennial disagreements) on nesdev over
  ; whether it's appropriate to zero out RAM in the init code.  Some
  ; anti-zeroing people say it hides programming errors with reading
  ; uninitialized memory, and memory will need to be initialized
  ; again anyway at the start of each level.  Others in favor of
  ; clearing say that a lot more variables need set to 0 than to any
  ; other value, and a clear loop like this saves code size.  Still
  ; others point to the C language, whose specification requires that
  ; uninitialized variables be set to 0 before main() begins.
  txa
clear_zp:
  sta $00,x
  inx
  bne clear_zp
  ; Other things to do here (not shown):
  ; Set up PRG RAM
  ; Copy initial high scores, bankswitching trampolines, etc. to RAM
  ; Set up initial CHR banks
  ; Set up your sound engine
vwait2:
  bit PPUSTATUS  ; After the second vblank, we know the PPU has
  bpl vwait2     ; fully stabilized.
  ; There are two ways to wait for vertical blanking: spinning on
  ; bit 7 of PPUSTATUS (as seen above) and waiting for the NMI
  ; handler to run.  Before the PPU has stabilized, you want to use
  ; the PPUSTATUS method because NMI might not be reliable.  But
  ; afterward, you want to use the NMI method because if you read
  ; PPUSTATUS at the exact moment that the bit turns on, it'll flip
  ; from off to on to off faster than the CPU can see.
  ; Now the PPU has stabilized, we're still in vblank.  Copy the
  ; palette right now because if you load a palette during forced
  ; blank (not vblank), it'll be visible as a rainbow streak.
  jsr load_main_palette
  ; While in forced blank we have full access to VRAM.
  ; Load the nametable (background map).
  jsr draw_bg
  ; Set up game variables, as if it were the start of a new level.
  lda #0
  sta player_xlo
  sta player_dxlo
  sta player_facing
  sta player_frame
  sta control_type
  sta paddle_max
  lda #48
  sta player_xhi
  lda #192
  sta player_yhi
  sta paddle_min
forever:
  ; Game logic
  jsr read_all_pads
  jsr fixup_fc_control
  jsr move_player
  ; The first entry in OAM (indices 0-3) is "sprite 0".  In games
  ; with a scrolling playfield and a still status bar, it's used to
  ; help split the screen.  This demo doesn't use scrolling, but
  ; yours might, so I'm marking the first entry used anyway.
  ldx #4
  stx oam_used
  ; adds to oam_used
  jsr draw_player_sprite
  jsr draw_indicator_sprite
  ldx oam_used
  jsr ppu_clear_oam
  ; Good; we have the full screen ready.  Wait for a vertical blank
  ; and set the scroll registers to display it.
  lda nmis
vw3:
  cmp nmis
  beq vw3
  ; Copy the display list from main RAM to the PPU
  lda #0
  sta OAMADDR
  lda #>OAM
  sta OAM_DMA
  ; Turn the screen on
  ldx #0
  ldy #0
  lda #VBLANK_NMI|BG_0000|OBJ_1000
  sec       ; carry set: sprites visible
  jsr ppu_screen_on
  jmp forever
; And that's all there is to it.
.endproc
; Copies the 32-byte initial_palette into PPU palette RAM.
; Must be called during vblank or forced blank.
.proc load_main_palette
  ; seek to the start of palette memory ($3F00-$3F1F)
  ldx #$3F
  stx PPUADDR
  ldx #$00
  stx PPUADDR
copypalloop:
  lda initial_palette,x
  sta PPUDATA
  inx
  cpx #32
  bcc copypalloop
  rts
.endproc
.segment "RODATA"
; 16 background + 16 sprite palette entries
initial_palette:
  .byt $22,$18,$28,$38,$0F,$06,$16,$26,$0F,$08,$19,$2A,$0F,$02,$12,$22
  .byt $22,$08,$15,$27,$0F,$06,$16,$26,$0F,$0A,$1A,$2A,$0F,$02,$12,$22
.segment "CODE"
; Draws the static background: clear nametable, floor row,
; below-floor fill, and two stacks of blocks on the sides.
; Must run during forced blank.
.proc draw_bg
  ; Start by clearing the first nametable
  ldx #$20
  lda #$00
  ldy #$AA
  jsr ppu_clear_nt
  ; Draw a floor (one row of tile $0B at row 24, VRAM $2300)
  lda #$23
  sta PPUADDR
  lda #$00
  sta PPUADDR
  lda #$0B
  ldx #32
floorloop1:
  sta PPUDATA
  dex
  bne floorloop1
  ; Draw areas buried under the floor as solid color
  ; (I learned this style from "Pinobee" for GBA. We drink Ritalin.)
  lda #$01
  ldx #5*32
floorloop2:
  sta PPUDATA
  dex
  bne floorloop2
  ; Draw blocks on the sides, in vertical columns
  ; (VRAM_DOWN: each PPUDATA write advances the address by 32,
  ; i.e. one row, so a loop paints a column)
  lda #VBLANK_NMI|VRAM_DOWN
  sta PPUCTRL
  ; At position (2, 20) (VRAM $2282) and (28, 20) (VRAM $229C),
  ; draw two columns of two blocks each, each block being 4 tiles:
  ; 0C 0D
  ; 0E 0F
  ldx #2
colloop:
  lda #$22
  sta PPUADDR
  txa
  ora #$80
  sta PPUADDR
  ; Draw $0C $0E $0C $0E or $0D $0F $0D $0F depending on column
  and #$01
  ora #$0C
  ldy #4
tileloop:
  sta PPUDATA
  eor #$02
  dey
  bne tileloop
  ; Columns 2, 3, 28, and 29 only
  inx
  cpx #4  ; Skip columns 4 through 27
  bne not4
  ldx #28
not4:
  cpx #30
  bcc colloop
  ; The attribute table elements corresponding to these stacks are
  ; (0, 5) (VRAM $23E8) and (7, 5) (VRAM $23EF). Set them to 0.
  ldx #$23
  lda #$E8
  ldy #$00
  stx PPUADDR
  sta PPUADDR
  sty PPUDATA
  lda #$EF
  stx PPUADDR
  sta PPUADDR
  sty PPUDATA
  rts
.endproc
;;
; Autodetects control type and translates to NES.
; Dispatches through control_type_handlers via the push/push/rts
; ("RTS trick") below, so each handler address is stored minus 1.
.proc fixup_fc_control
  ; First try to determine what controller is connected.
  ; A or Start pressed on controller 1:
  ; Disable translation
  ; A or Start pressed on FC controller 3:
  ; Treat FC controller 3 as controller 1
  ; A pressed on FC paddle:
  ; Use FC paddle (port 2 d1)
  ; A pressed on NES paddle in port 2:
  ; Use NES paddle (port 2 d4)
  ldx #0
  lda cur_keys_d0
  cmp #KEY_A
  beq set_control_type_x
  cmp #KEY_START
  beq set_control_type_x
  inx
  lda cur_keys_d1
  cmp #KEY_A
  beq set_control_type_x
  cmp #KEY_START
  beq set_control_type_x
  inx
  cmp #$FF          ; A still holds cur_keys_d1: $FF = FC paddle button
  beq set_control_type_x
  inx
  lda cur_keys_d3+1
  cmp #$FF          ; $FF = NES paddle button on port 2
  bne no_set_control_type
set_control_type_x:
  stx control_type
no_set_control_type:
  lda control_type
  asl a
  tax
  lda control_type_handlers+1,x   ; push handler-1, then rts jumps to it
  pha
  lda control_type_handlers+0,x
  pha
  rts
control_type_fc4p:
  ; Merge FC controller 3 into controller 1
  lda cur_keys_d1
  ora cur_keys_d0
  sta cur_keys_d0
  ; fall through
control_type_null:
  ; Optional: construct new_keys
  rts
control_type_fcvaus:
  lda cur_keys_d1     ; A = button byte, X = position byte
  ldx cur_keys_d1+1
  jmp handle_vaus
control_type_nesvaus:
  lda cur_keys_d3+1
  ldx cur_keys_d4+1
handle_vaus:
  ; Map paddle button to the A button
  and #KEY_A
  ora cur_keys_d0
  sta cur_keys_d0
  txa
  eor #$FF  ; change to increasing right
  sta indicator_x
  ; Find target relative to center of observed range
  ; (paddle_min/paddle_max auto-calibrate as the knob is turned)
  cmp paddle_min
  bcs :+
    sta paddle_min
  :
  cmp paddle_max
  bcc :+
    sta paddle_max
    clc
  :
  lda paddle_min
  adc paddle_max
  ror a             ; A = midpoint of observed range
  eor #$7F
  adc indicator_x
  sta target_x
  SLOP = 4
  TARGET_WID = 7
  PLAYER_WID = 16
  ; Now move toward the target by synthesizing Left/Right presses
  sec
  sbc #<(SLOP + (TARGET_WID - PLAYER_WID) / 2)
  sec
  sbc player_xhi
  bcs not1
    lda #KEY_LEFT
    bne have_extra_keypress
  not1:
  cmp #SLOP * 2 + 1
  bcc not2
    lda #KEY_RIGHT
  have_extra_keypress:
    ora cur_keys_d0
    sta cur_keys_d0
  not2:
  rts
.pushseg
.segment "RODATA"
control_type_handlers:
  .addr control_type_null-1, control_type_fc4p-1
  .addr control_type_fcvaus-1, control_type_nesvaus-1
.popseg
.endproc
; constants used by move_player
; PAL frames are about 20% longer than NTSC frames.  So if you make
; dual NTSC and PAL versions, or you auto-adapt to the TV system,
; you'll want PAL velocity values to be 1.2 times the corresponding
; NTSC values, and PAL accelerations should be 1.44 times NTSC.
WALK_SPD = 85    ; speed limit in 1/256 px/frame
WALK_ACCEL = 4   ; movement acceleration in 1/256 px/frame^2
WALK_BRAKE = 8   ; stopping acceleration in 1/256 px/frame^2
; Reads the synthesized Left/Right keys, updates the player's
; velocity (with acceleration, braking, and a speed cap), applies
; it to the 16-bit X position, clamps against the side walls, and
; advances the walk animation.
; Fix: the left-brake path hard-coded the literal 8 (`adc #8-1`)
; where the right-brake path uses the WALK_BRAKE constant; it now
; uses `adc #WALK_BRAKE-1`, which assembles to identical bytes but
; stays correct if WALK_BRAKE is ever retuned.
.proc move_player
  ; Acceleration to right: Do it only if the player is holding right
  ; on the Control Pad and has a nonnegative velocity.
  lda cur_keys_d0
  and #KEY_RIGHT
  beq notRight
  lda player_dxlo
  bmi notRight
  ; Right is pressed.  Add to velocity, but don't allow velocity
  ; to be greater than the maximum.
  clc
  adc #WALK_ACCEL
  cmp #WALK_SPD
  bcc :+
    lda #WALK_SPD
  :
  sta player_dxlo
  lda player_facing  ; Set the facing direction to not flipped
  and #<~$40
  sta player_facing
  jmp doneRight
  ; Right is not pressed.  Brake if headed right.
notRight:
  lda player_dxlo
  bmi doneRight
  cmp #WALK_BRAKE
  bcs notRightStop
  lda #WALK_BRAKE+1  ; add 1 to compensate for the carry being clear
notRightStop:
  sbc #WALK_BRAKE
  sta player_dxlo
doneRight:
  ; Acceleration to left: Do it only if the player is holding left
  ; on the Control Pad and has a nonpositive velocity.
  lda cur_keys_d0
  and #KEY_LEFT
  beq notLeft
  lda player_dxlo
  beq :+
  bpl notLeft
  :
  ; Left is pressed.  Add to velocity.
  lda player_dxlo
  sec
  sbc #WALK_ACCEL
  cmp #256-WALK_SPD
  bcs :+
    lda #256-WALK_SPD
  :
  sta player_dxlo
  lda player_facing  ; Set the facing direction to flipped
  ora #$40
  sta player_facing
  jmp doneLeft
  ; Left is not pressed.  Brake if headed left.
notLeft:
  lda player_dxlo
  bpl doneLeft
  cmp #256-WALK_BRAKE
  bcc notLeftStop
  lda #256-WALK_BRAKE
notLeftStop:
  adc #WALK_BRAKE-1  ; -1 compensates when the carry is set (clamped path)
  sta player_dxlo
doneLeft:
  ; In a real game, you'd respond to A, B, Up, Down, etc. here.
  ; Move the player by adding the velocity to the 16-bit X position.
  lda player_dxlo
  bpl player_dxlo_pos
    ; if velocity is negative, subtract 1 from high byte to sign extend
    dec player_xhi
  player_dxlo_pos:
  clc
  adc player_xlo
  sta player_xlo
  lda #0             ; add high byte
  adc player_xhi
  sta player_xhi
  ; Test for collision with side walls
  cmp #28
  bcs notHitLeft
    lda #28
    sta player_xhi
    lda #0
    sta player_dxlo
    beq doneWallCollision
  notHitLeft:
  cmp #212
  bcc notHitRight
    lda #211
    sta player_xhi
    lda #0
    sta player_dxlo
  notHitRight:
doneWallCollision:
  ; Animate the player
  ; If stopped, freeze the animation on frame 0 or 1
  lda player_dxlo
  bne notStop1
  lda #$80
  sta player_frame_sub
  lda player_frame
  cmp #2
  bcc have_player_frame
  lda #0
  beq have_player_frame
notStop1:
  ; Take absolute value of velocity (negate it if it's negative)
  bpl player_animate_noneg
  eor #$FF
  clc
  adc #1
player_animate_noneg:
  lsr a  ; Multiply abs(velocity) by 5/16
  lsr a
  sta 0
  lsr a
  lsr a
  adc 0
  ; And 16-bit add it to player_frame, mod $600
  adc player_frame_sub
  sta player_frame_sub
  lda player_frame
  adc #0
  cmp #6
  bcc have_player_frame
  lda #0
have_player_frame:
  sta player_frame
  rts
.endproc
;;
; Draws the player's character to the display list as six sprites.
; In the template, we don't need to handle half-offscreen actors,
; but a scrolling game will need to "clip" sprites (skip drawing the
; parts that are offscreen).
.proc draw_player_sprite
  ; zero-page scratch variables
  draw_y = 0
  cur_tile = 1
  x_add = 2         ; +8 when not flipped; -8 when flipped
  draw_x = 3
  rows_left = 4
  row_first_tile = 5
  draw_x_left = 7
  lda #3            ; sprite is 2 tiles wide x 3 rows tall
  sta rows_left
  ; In platform games, the Y position is often understood as the
  ; bottom of a character because that makes certain things related
  ; to platform collision easier to reason about.  Here, the
  ; character is 24 pixels tall, and player_yhi is the bottom.
  ; On the NES, sprites are drawn one scanline lower than the Y
  ; coordinate in the OAM entry (e.g. the top row of pixels of a
  ; sprite with Y=8 is on scanline 9).  But in a platformer, it's
  ; also common practice to overlap the bottom row of a sprite's
  ; pixels with the top pixel of the background platform that they
  ; walk on to suggest depth in the background.
  lda player_yhi
  sec
  sbc #24
  sta draw_y
  ; set up increment amounts based on flip value
  ; A: distance to move the pen (8 or -8)
  ; X: relative X position of first OAM entry
  lda player_xhi
  ldx #8
  bit player_facing   ; bit 6 ($40) = horizontal flip
  bvc not_flipped
  clc
  adc #8              ; flipped: start at the right tile, step left
  ldx #(256-8)
not_flipped:
  sta draw_x_left
  stx x_add
  ; the six frames start at $10, $12, ..., $1A
  lda player_frame
  asl a
  ora #$10
  sta row_first_tile
  ldx oam_used
rowloop:
  ldy #2              ; Y: remaining width on this row in 8px units
  lda row_first_tile
  sta cur_tile
  lda draw_x_left
  sta draw_x
tileloop:
  ; draw an 8x8 pixel chunk of the character using one entry in the
  ; display list (OAM order: Y, tile, attributes, X)
  lda draw_y
  sta OAM,x
  lda cur_tile
  inc cur_tile
  sta OAM+1,x
  lda player_facing
  sta OAM+2,x
  lda draw_x
  sta OAM+3,x
  clc
  adc x_add
  sta draw_x
  ; move to the next entry of the display list
  inx
  inx
  inx
  inx
  dey
  bne tileloop
  ; move to the next row, which is 8 scanlines down and on the next
  ; row of tiles in the pattern table
  lda draw_y
  clc
  adc #8
  sta draw_y
  lda row_first_tile
  clc
  adc #16
  sta row_first_tile
  dec rows_left
  bne rowloop
  stx oam_used
  rts
.endproc
; When a paddle controller is active (control_type >= 2), draws four
; indicator sprites: paddle_min, paddle_max, current reading, and
; the computed target position, using indicator_tmpl as a template
; whose X coordinates are patched in afterward.
.proc draw_indicator_sprite
  lda control_type
  cmp #2
  bcc skip           ; standard pads: nothing to show
  ldy #15
  lda #15-1  ; carry is set here (from the cmp above)
  adc oam_used
  tax
copyloop:
  lda indicator_tmpl,y
  sta OAM,x
  dex
  dey
  bpl copyloop
  inx                ; X back to first copied entry
  lda paddle_min     ; patch the X byte of each of the 4 entries
  sta OAM+3,x
  lda paddle_max
  sta OAM+7,x
  lda indicator_x
  sta OAM+11,x
  lda target_x
  sta OAM+15,x
  txa
  clc
  adc #16
  sta oam_used
skip:
  rts
.pushseg
.segment "RODATA"
; 4 OAM entries: Y, tile, attributes, X (X filled in at runtime)
indicator_tmpl:
  .byte 127, $04, $00, 0
  .byte 127, $04, $40, 0
  .byte 127, $05, $00, 0
  .byte 160, $05, $00, 0
.popseg
.endproc
|
xem/nes | 1,708 | nes-test-roms/vaus-test/src/vauspads.s | ;
; NES controller reading code (for Arkanoid controller)
; Copyright 2013 Damian Yerrick
;
; Copying and distribution of this file, with or without
; modification, are permitted in any medium without royalty provided
; the copyright notice and this notice are preserved in all source
; code copies. This file is offered as-is, without any warranty.
;
.exportzp cur_keys_d0, cur_keys_d1, cur_keys_d3, cur_keys_d4
.export read_all_pads
.segment "ZEROPAGE"
; One pair of bytes per controller-port data line (index 0 = $4016,
; index 1 = $4017); see the mapping notes at the end of this file.
cur_keys_d0: .res 2
cur_keys_d1: .res 2
cur_keys_d3: .res 2
cur_keys_d4: .res 2
.segment "CODE"
; Reads 8 bits from every data line (D0, D1, D3, D4) of both
; controller ports, so standard pads, the Arkanoid paddle, and the
; Zapper can all be decoded from the cur_keys_* bytes.
.proc read_all_pads
  ; $4016 output pulse: copy sample to shift register and
  ; start taking new sample from potentiometer
  lda #1
  sta $4016
  sta cur_keys_d4   ; seed with 1: when it shifts out into carry
  lsr a             ; after 8 bits, the outer loop ends
  sta $4016
bitloop:
  ldx #1            ; port 1 ($4017) first, then port 0 ($4016)
portloop:
  lda $4016,x
  lsr a             ; D0 -> carry -> cur_keys_d0
  rol cur_keys_d0,x
  lsr a             ; D1
  rol cur_keys_d1,x
  lsr a             ; skip D2
  lsr a             ; D3
  rol cur_keys_d3,x
  lsr a             ; D4
  rol cur_keys_d4,x
  dex
  bpl portloop
  bcc bitloop       ; carry set = sentinel bit emerged: 8 bits done
  rts
.endproc
; Famicom, no expansion controllers
; NES, standard controllers in 7-pin ports
; cur_keys_d0+0 = controller 1
; cur_keys_d0+1 = controller 2
; Famicom, standard controllers in 15-pin port
; cur_keys_d1+0 = controller 3
; cur_keys_d1+1 = controller 4
; Famicom, Arkanoid controller in 15-pin port
; cur_keys_d1+0 = $FF if pressed, $00 if not
; cur_keys_d1+1 = Position (increases counterclockwise)
; NES, Arkanoid controller in 7-pin port (usually port 2)
; cur_keys_d3+x = $FF if pressed, $00 if not
; cur_keys_d4+x = Position (increases counterclockwise)
; Famicom, Zapper in 15-pin port (always port 2)
; NES, Zapper in 7-pin port (usually port 2)
; cur_keys_d3+x = $FF if dark, $00 if light or unplugged
; cur_keys_d4+x = $FF if pressed, $00 if not
|
xem/nes | 1,034 | nes-test-roms/read_joy3/source/count_errors.s | ; Repeatedly calls read_joy while DMC is playing
; and prints X when the DMC conflicted with one
; of the internal reads. Note that a conflict
; doesn't affect the result of read_joy, since it
; compensates when this occurs.
dmc_rate = 15 ; 0 to 15
.include "shell.inc"
.include "read_joy.inc"
iter = 1000
zp_byte conflicts
; Starts looping DMC playback, performs `iter` controller reads,
; counts how many saw a DMC/controller bus conflict, and prints
; the tally.
main:
; Start DMC (looping max-length sample at the chosen rate)
lda #$40+dmc_rate
sta $4010
lda #$FF
sta $4012
sta $4013
lda #0
sta $4015
lda #$10
sta $4015
; Repeatedly read controller (16-bit countdown in Y:X)
ldy #>iter
ldx #<iter
: txa
pha ; test clobbers X, so save the counter
jsr test
pla
tax
dex
bne :-
dey
bne :-
; Print result
print_str {newline,"Conflicts: "}
lda conflicts
jsr print_dec
print_str "/1000"
; NOTE(review): main falls through into test: here, performing one
; extra read whose rts then ends main — looks intentional but
; confirm there is no missing rts before the label.
test: jsr read_joy
pha
pha
; Recover second controller read
; from below stack pointer
tsx
dex
txs
pla
; Compare all four reads
cmp <temp3
bne :+
cmp <temp2
bne :+
cmp <temp
:
; Print and count whether they matched
print_cc bne,'X','-'
beq :+
inc conflicts
:
; Be sure read worked correctly
pla
pla
jne test_failed
rts
|
xem/nes | 2,223 | nes-test-roms/read_joy3/source/common/testing.s | ; Utilities for writing test ROMs
zp_res test_code,1 ; code of current test
zp_res test_name,2 ; address of name of current test, or 0 of none
; Reports that all tests passed (exit code 0)
tests_passed:
.if !BUILD_MULTI
jsr print_filename
print_str "Passed"
.endif
lda #0
jmp exit
; Reports that the current test failed. Prints code and
; name last set with set_test, or just "Failed" if none
; have been set yet. Exits with the test code.
test_failed:
lda test_code
; Treat 0 as 1, in case it wasn't ever set
bne :+
lda #1
sta test_code
:
; If code >= 2, print name (codes 0/1 have no descriptive name)
cmp #2
blt :+
lda test_name+1
beq :+ ; high byte 0 = no name was set
jsr print_newline
sta addr+1
lda test_name
sta addr
jsr print_str_addr
jsr print_newline
:
.if !BUILD_MULTI
jsr print_filename
.endif
; End program
lda test_code
jmp exit
; Sets current test code and optional name. Also resets
; checksum. The name string is placed in the STRINGS segment
; and only its address is stored.
; Preserved: A, X, Y
.macro set_test code,name
pha
lda #code
jsr set_test_
.local Addr
lda #<Addr
sta <test_name
lda #>Addr
sta <test_name+1
seg_data "STRINGS",{Addr: .byte name,0}
pla
.endmacro
set_test_:
sta test_code
jmp reset_crc
; If checksum doesn't match expected, reports failed test.
; Passing 0 just prints checksum.
; Preserved: A, X, Y
.macro check_crc expected
.if expected
jsr_with_addr check_crc_,{.dword expected}
.else
; print checksum if 0 is passed
jsr print_newline
jsr print_crc
jsr print_newline
.endif
.endmacro
check_crc_:
pha
tya
pha
; Compare with complemented checksum: expected + ~crc + 1 = 0
; (with carry chaining) per byte on a match
ldy #3
: lda (addr),y
sec
adc checksum,y
bne @wrong
dey
bpl :-
pla
tay
pla
rts
@wrong: jsr print_newline
jsr print_crc
jsr print_newline
jmp test_failed
; Reports value of A via low/high beeps on APU square 1, MSB first:
; one low reference beep, then one beep per bit after the leading
; zeroes (carry clear = low pitch = 0 bit, carry set = high = 1 bit).
; Useful when no video output is available.
; Preserved: A, X, Y
beep_bits:
pha
; Make reference low beep
clc
jsr @beep
; End marker (a 1 bit rotated in below; guarantees termination)
sec
; Remove high zero bits; if the marker rotates all the way out,
; A was zero and there is nothing to play
: rol a
beq @zero
bcc :-
; Play remaining bits: carry holds the current bit; the marker bit
; shifts out on the final asl, ending the loop after the last bit
: php
jsr @beep
plp
asl a
bne :-
@zero: pla
rts
; Plays one beep; carry selects pitch (clear = low, set = high)
@beep: pha
; Set LSB of pitch based on carry: $FF (long period, low pitch)
; or $00 (short period, high pitch); $4002 = square 1 timer low
lda #0
adc #$FF
sta $4002
; Set up square
lda #1
sta SNDCHN ; enable square 1
sta $4003 ; timer high / restart note
sta $4001 ; sweep unit setting
; Fade volume from 15 down, ~8 ms per step
lda #15
: pha
eor #$30 ; merge volume with duty/constant-volume bits for $4000
sta $4000
delay_msec 8
pla
clc
adc #-1 ; decrement the saved (un-eor'd) counter
bne :-
; Silence (A = 0 here disables all channels)
sta SNDCHN
delay_msec 120
pla
rts
|
xem/nes | 2,304 | nes-test-roms/read_joy3/source/common/print.s | ; Prints values in various ways to output, including numbers and strings.
newline = 10 ; ASCII line feed; print_char treats it specially
; Prints indicated register to console as two hex chars and space
; Preserved: A, X, Y, P
print_a:
php
pha
; Shared tail: print A as hex + space, then restore A and flags
print_reg_:
jsr print_hex
lda #' '
jsr print_char_
pla
plp
rts
; Prints X (same contract as print_a)
print_x:
php
pha
txa
jmp print_reg_
; Prints Y
print_y:
php
pha
tya
jmp print_reg_
; Prints the processor status flags
print_p:
php
pha
php
pla
jmp print_reg_
; Prints the caller's stack pointer
print_s:
php
pha
txa ; preserve X in A (caller's A is already on the stack)
tsx
inx ; +4 undoes jsr return address (2) + php (1) + pha (1)
inx
inx
inx
jsr print_x
tax ; restore X
pla
plp
rts
; Prints A as two hex characters, NO space after.
; Also folds the raw byte into the running CRC. A is clobbered.
; Preserved: X, Y
print_hex:
jsr update_crc
; Print high nibble
pha
lsr a
lsr a
lsr a
lsr a
jsr @nibble
pla
; Print low nibble (falls through @nibble, which tail-jumps out)
and #$0F
@nibble:
cmp #10
blt @digit
adc #6;+1 since carry is set: 10..15 -> 'A'..'F'
@digit: adc #'0'
jmp print_char_
; Prints character and updates checksum UNLESS it's a newline.
; Preserved: X, Y
print_char:
cmp #newline
beq :+
jsr update_crc
: jmp print_char_
; Prints space. Does NOT update checksum.
; Preserved: A, X, Y
print_space:
pha
lda #' '
jsr print_char_
pla
rts
; Advances to next line. Does NOT update checksum.
; Preserved: A, X, Y
print_newline:
pha
lda #newline
jsr print_char_
pla
rts
; Prints string (emitted inline after the call by jsr_with_addr)
; Preserved: A, X, Y
.macro print_str str
jsr_with_addr print_str_addr,{.byte str,0}
.endmacro
; Prints string at addr and leaves addr pointing to
; byte AFTER zero terminator.
; Preserved: A, X, Y
print_str_addr:
pha
tya
pha
ldy #0
beq :+ ; always taken (enter at the loop's termination test)
@loop: jsr print_char
jsr inc_addr
: lda (addr),y
bne @loop
pla
tay
pla
; FALL THROUGH (one extra increment skips the terminator)
; Increments 16-bit value in addr.
; Preserved: A, X, Y
inc_addr:
inc addr
beq :+ ; low byte wrapped: carry into high byte
rts
: inc addr+1
rts
; Prints A as 1-3 digit decimal value, NO space after.
; Preserved: Y (A and X are clobbered)
; Fix: the previous version skipped the tens digit whenever a
; hundreds digit had been printed but the remainder was < 10
; (e.g. 105 printed as "15"). Now the tens digit is always emitted
; for any value >= 10, matching the corrected algorithm used by the
; other test suites.
print_dec:
; 0-9: single digit, no leading work
cmp #10
blt @ones
; Hundreds digit, if any
cmp #100
blt @tens
ldx #'0'-1
: inx
sbc #100 ; carry is set on entry from cmp/bge
bge :-
adc #100 ; undo the final subtract (carry clear here)
jsr @digit
; Tens digit (always printed once we got here, even if zero)
@tens: sec
ldx #'0'-1
: inx
sbc #10
bge :-
adc #10 ; undo the final subtract
jsr @digit
; Ones
@ones: ora #'0'
jmp print_char
; Print a single digit character held in X
@digit: pha
txa
jsr print_char
pla
rts
; Prints one of two characters based on condition.
; SEC; print_cc bcs,'C','-' prints 'C'.
; Uses self-relative branch targets because plain labels inside a
; ca65 macro would collide across expansions.
; Preserved: A, X, Y, flags
.macro print_cc cond,yes,no
php
pha
cond *+6 ; branch over "lda #no / bne" (2 + 2 + 2 bytes)
lda #no
bne *+4 ; skip "lda #yes" (taken as long as no != 0)
lda #yes
jsr print_char
pla
plp
.endmacro
|
xem/nes | 3,161 | nes-test-roms/read_joy3/source/common/console.s | ; Scrolling text console with line wrapping, 30x30 characters.
; Buffers lines for speed. Will work even if PPU doesn't
; support scrolling (until text reaches bottom).
; ** ASCII font must already be in CHR, and mirroring
; must be vertical or single-screen.
; Number of characters of margin on left and right, to avoid
; text getting cut off by common TVs (overscan)
console_margin = 1
console_buf_size = 32 ; one full nametable row
console_width = console_buf_size - (console_margin*2)
zp_byte console_pos ; chars left on current line (counts down)
zp_byte console_scroll ; current vertical scroll, in pixels
zp_byte console_temp ; scratch (preserves X / address bytes)
bss_res console_buf,console_buf_size ; line currently being built
; Waits for beginning of VBL
; Preserved: A, X, Y
console_wait_vbl:
bit PPUSTATUS ; clear any latched vblank flag first
: bit PPUSTATUS
bpl :- ; spin until bit 7 (vblank) sets
rts
; Initializes console: black/white palette, nametable cleared to
; spaces, attributes cleared, scroll primed one text row down.
console_init:
jsr console_hide
lda #0
sta PPUCTRL
; Load palette at VRAM $3F00
lda #$3F
sta PPUADDR
lda #0
sta PPUADDR
lda #$0F ; black background
sta PPUDATA
lda #$30 ; white text
sta PPUDATA
sta PPUDATA
sta PPUDATA
; Fill nametable with spaces; $20 doubles as the high byte of
; the nametable address ($2000) and the space character
lda #$20
sta PPUADDR
ldx #0
stx PPUADDR
ldx #240 ; 240 * 4 = 960 tiles
: sta PPUDATA
sta PPUDATA
sta PPUDATA
sta PPUDATA
dex
bne :-
; Clear attributes ($40 bytes)
lda #0
ldx #$40
: sta PPUDATA
dex
bne :-
; In case PPU doesn't support scrolling, start a
; couple of lines down
lda #8
sta console_scroll
jsr console_scroll_up_
jmp console_show
; Shows console display (enables background rendering)
; Preserved: X, Y
console_show:
pha
jsr console_wait_vbl
lda #PPUMASK_BG0
sta PPUMASK
jmp console_apply_scroll_ ; performs the matching pla and rts
; Hides console display and makes screen black
; Preserved: X, Y (A is clobbered)
console_hide:
jsr console_wait_vbl
lda #0
sta PPUMASK
rts
; Prints char A to console. Will not appear until
; a newline or flush occurs.
; Preserved: A, X, Y
console_print:
cmp #10
beq console_newline ; newline: flush and start a new line
; Write to buffer; console_pos counts DOWN, and console_flush_
; copies the buffer highest-index-first, so text still appears
; left to right on screen
stx console_temp
ldx console_pos
sta console_buf+console_margin,x
ldx console_temp
; Update pos and print newline if buf full
dec console_pos
bmi console_newline ; reached end of line
rts
; Displays current line and starts new one
; Preserved: A, X, Y
console_newline:
pha
jsr console_wait_vbl
jsr console_flush_
jsr console_scroll_up_
jsr console_flush_ ; also draw the freshly-cleared new line
jmp console_apply_scroll_ ; performs the matching pla and rts
; Returns A = (console_scroll + 8) mod 240 (nametable is 240px tall)
console_get_scroll_:
; A = (console_scroll+8)%240
lda console_scroll
cmp #240-8
bcc :+
adc #16-1;+1 for set carry -- hops over the 240..255 gap
: adc #8
rts
; Advances the scroll one text row and resets the line buffer
console_scroll_up_:
; Scroll up 8 pixels
jsr console_get_scroll_
sta console_scroll
stx console_temp
; Start new clear line
lda #' '
ldx #console_buf_size-1
: sta console_buf,x
dex
bpl :-
ldx #console_width-1
stx console_pos
ldx console_temp
rts
; Displays current line's contents without scrolling.
; Preserved: A, X, Y
console_flush:
pha
jsr console_wait_vbl
jsr console_flush_
; FALL THROUGH
; Resets the VRAM address and re-applies scroll.
; Pulls the A pushed by its callers, then returns.
console_apply_scroll_:
lda #0
sta PPUADDR
sta PPUADDR
sta PPUSCROLL ; X scroll = 0
jsr console_get_scroll_
sta PPUSCROLL ; Y scroll
pla
rts
; Copies the line buffer to the nametable row containing
; console_scroll: VRAM address = $2000 + scroll*4
; (= $2000 + (scroll/8)*32, since scroll is a multiple of 8).
; Clobbers A; preserves X, Y.
console_flush_:
; Address line in nametable
lda console_scroll
sta console_temp
lda #$08
asl console_temp ; compute scroll*4 into A:console_temp,
rol a ; seeding the high byte with $08 (<<2 = $20, i.e. $2000)
asl console_temp
rol a
sta PPUADDR
lda console_temp
sta PPUADDR
; Copy line, highest index first (buffer is stored mirrored)
stx console_temp
ldx #console_buf_size-1
: lda console_buf,x
sta PPUDATA
dex
bpl :-
ldx console_temp
rts
|
xem/nes | 2,505 | nes-test-roms/read_joy3/source/common/delay.s | ; Delays in clocks and milliseconds. All routines re-entrant
; (no global data).
; Delays n milliseconds (1/1000 second)
; n can range from 0 to 1100.
; Preserved: X, Y
.macro delay_msec n
.if (n) < 0 .or (n) > 1100
.error "time out of range"
.endif
delay ((n)*CLOCK_RATE+500)/1000 ; +500 rounds to the nearest clock
.endmacro
; Delays n microseconds (1/1000000 second).
; n can range from 0 to 100000.
; Preserved: X, Y
.macro delay_usec n
.if (n) < 0 .or (n) > 100000
.error "time out of range"
.endif
delay ((n)*((CLOCK_RATE+50)/100)+5000)/10000 ; prescaled to avoid overflow
.endmacro
; Delays n clocks, from 2 to 16777215
; Preserved: X, Y
.macro delay n
.if (n) < 0 .or (n) = 1 .or (n) > 16777215
.error "Delay out of range"
.endif
.if (n) < 14 .and (n) <> 12
delay_inline (n) ; tiny delays: straight-line filler instructions
.elseif (n) < 27
delay_unrolled (n) ; short delays: call into the NOP slide
.elseif <(n) = 0
delay_256 (n) ; low byte zero: pure 256/65536-clock loops
.else
lda #<((n)-27) ; lda (2) + delay_a_25_clocks (A+25) = n mod 256
jsr delay_a_25_clocks
delay_256 ((n)-27) ; remaining whole multiples of 256 clocks
.endif
.endmacro
; Delays A+25 clocks (including JSR)
; Preserved: X, Y
; Cycle-exact: the .align keeps the branches from crossing a page,
; so every taken branch costs exactly 3 cycles.
.align 64
: sbc #7 ; carry set by CMP
delay_a_25_clocks:
cmp #7
bcs :- ; do multiples of 7
lsr a ; bit 0
bcs :+ ; 2 or 3 cycles depending on bit 0
: ; A=clocks/2, either 0,1,2,3
beq @zero ; 0: 5
lsr a
beq :+ ; 1: 7
bcc :+ ; 2: 9
@zero: bne :+ ; 3: 11 (never taken -- 2-cycle filler, Z is set here)
: rts ; (thanks to dclxvi for the algorithm)
; Delays A*256+16 clocks (including JSR)
; Preserved: X, Y
delay_256a_16_clocks:
cmp #0
bne :+ ; A=0 path: jsr(6)+cmp(2)+bne(2)+rts(6) = 16
rts
delay_256a_clocks_:
pha
lda #256-19-22-16 ; first pass absorbs entry/exit overhead
bne @first ; always branches
: pha
lda #256-19-22 ; steady-state per-iteration overhead
@first: jsr delay_a_25_clocks
pla
clc
adc #-1 ; decrement outer count
bne :-
rts
; Delays A*65536+16 clocks (including JSR)
; Preserved: X, Y
delay_65536a_16_clocks:
cmp #0
bne :+
rts
delay_65536a_clocks_:
pha
lda #256-19-22-16
bne @first ; always branches
: pha
lda #256-19-22
@first: jsr delay_a_25_clocks
lda #255
jsr delay_256a_clocks_ ; burn 255*256 clocks per outer iteration
pla
clc
adc #-1
bne :-
rts
; Emits straight-line code delaying exactly n clocks, built from
; pha/pla pairs (7), a zero-page read (3), and nops (2 each).
; Clobbers only A and flags on the odd-cycle path.
.macro delay_inline n
.if n = 7 .or n >= 9
pha
pla
delay_inline (n-7)
.elseif n >= 3 .and n & 1
lda <0 ; 3-cycle zero-page read to fix up odd counts
delay_inline (n-3)
.elseif n >= 2
nop
delay_inline (n-2)
.elseif n > 0
.error "delay_short internal error"
.endif
.endmacro
; Delays n clocks by jsr'ing into the NOP slide below: jsr+rts is
; 12 cycles, each NOP entered adds 2; odd n adds a 3-cycle zp read.
.macro delay_unrolled n
.if n & 1
lda <0
jsr delay_unrolled_-((n-15)/2) ; 3 + 12 = 15 base
.else
jsr delay_unrolled_-((n-12)/2) ; 12 base
.endif
.endmacro
.res 7,$EA ; NOP slide: up to 7 NOPs falling into the rts
delay_unrolled_:
rts
; Emits delays for the multiple-of-256 portion of n (bits 8-23)
.macro delay_256 n
.if >n
lda #>n
jsr delay_256a_clocks_
.endif
.if ^n
lda #^n
jsr delay_65536a_clocks_
.endif
.endmacro
|
xem/nes | 1,276 | nes-test-roms/read_joy3/source/common/crc.s | ; CRC-32 checksum calculation
zp_res checksum,4 ; running CRC-32, stored complemented, LSB first
zp_byte checksum_temp ; scratch for X inside update_crc
zp_byte checksum_off_ ; 0 = updating enabled; negative = disabled
; Turns CRC updating on/off. Allows nesting.
; Preserved: A, X, Y
crc_off:
dec checksum_off_
rts
crc_on: inc checksum_off_
beq :+ ; back to exactly zero: balanced
jpl internal_error ; catch unbalanced crc calls (went positive)
: rts
; Initializes checksum module. Might initialize tables
; in the future.
init_crc:
; FALL THROUGH
; Clears checksum and turns it on
; Preserved: X, Y
reset_crc:
lda #0
sta checksum_off_
lda #$FF ; CRC-32 initial value (all ones)
sta checksum
sta checksum + 1
sta checksum + 2
sta checksum + 3
rts
; Updates checksum with byte in A (unless disabled via crc_off)
; Preserved: A, X, Y
; Time: 357 clocks average
; Bitwise reflected CRC-32 (polynomial $EDB88320). During the loop
; A holds the low byte of the checksum.
update_crc:
bit checksum_off_
bmi update_crc_off ; disabled (checksum_off_ < 0): no-op
update_crc_:
pha
stx checksum_temp
eor checksum
ldx #8
@bit: lsr checksum+3 ; 32-bit right shift; low bit drops into carry
ror checksum+2
ror checksum+1
ror a
bcc :+
sta checksum ; spill low byte while xoring the upper bytes
lda checksum+3
eor #$ED ; xor with $EDB88320, high byte down
sta checksum+3
lda checksum+2
eor #$B8
sta checksum+2
lda checksum+1
eor #$83
sta checksum+1
lda checksum
eor #$20 ; low byte back in A, xored
: dex
bne @bit
sta checksum
ldx checksum_temp
pla
update_crc_off:
rts
; Prints checksum as 8-character hex value, most significant byte
; first, complemented (the standard CRC-32 final XOR). Updating is
; suspended so printing doesn't perturb the checksum itself.
print_crc:
jsr crc_off
; Print complement
ldx #3
: lda checksum,x
eor #$FF
jsr print_hex
dex
bpl :-
jmp crc_on
|
xem/nes | 1,575 | nes-test-roms/oam_stress/source/oam_stress.s | ; Randomly reads and writes to OAM, ensuring that
; all reads match predicted, and that final content
; also matches. Refreshes OAM periodically while
; running to avoid DRAM fade.
.include "shell.inc"
.include "crc_fast.s"
refresh_period = 150 ; random ops between OAM refreshes (fights DRAM decay)
zp_byte counter ; ops remaining until the next refresh
bss_res mirror,256 ; CPU-side copy of expected OAM contents
; Entry point: fills OAM with a known pattern, hammers it with
; random writes and read-verifies (do_rand), then compares the
; final OAM contents against the mirror and prints a mismatch map.
main:
lda #1
jsr init_random
jsr wait_vbl ; rendering stays off, so OAM is safe to access
setb PPUCTRL,0
setb PPUMASK,0
; Fill with initial data ($FF everywhere; the mirror gets the
; value as it will READ back -- see masks)
ldx #0
stx SPRADDR
: lda #$FF
sta SPRDATA
and masks,x
sta mirror,x
inx
bne :-
; Do hundreds of runs of random operations,
; with refresh of OAM between each
loop_n_times do_rand,255
jsr reset_crc
; Also entered directly from do_rand on a read mismatch
print_final:
; Verify final data: mirror[x] becomes 0 where OAM matched
ldx #0
: stx SPRADDR
lda SPRDATA
eor mirror,x
sta mirror,x
dex
bne :- ; first pass is x=0, then 255..1: all 256 bytes
; Print log: '-' = match, '*' = mismatch, 16 per row
ldx #0
@loop: lda mirror,x
print_cc beq,'-','*'
txa
and #$0F
cmp #$0F
bne :+
jsr print_newline
:
inx
bne @loop
check_crc $22FBFEC7
jmp tests_passed
; One batch of random OAM operations. First refreshes all 256 OAM
; bytes (read-back write) to counter DRAM decay, then performs
; refresh_period random bursts of writes or read-verifies.
; Jumps to print_final (never returns here) on a read mismatch.
do_rand:
; Refresh data: reads don't advance OAMADDR, writes do, so each
; iteration rewrites one byte and moves to the next
ldx #0
stx SPRADDR
: lda SPRDATA
sta SPRDATA
inx
bne :-
; Randomly set addr, write, and read,
; verifying reads against mirror
setb counter,refresh_period
@loop: jsr next_random
and #$0F ; start address restricted to 0-15 -- TODO confirm intent
tax
stx SPRADDR
jsr next_random
and #$87 ; bit 7 selects read vs write; low bits = burst length
tay
bmi @read
; Write burst of Y+1 random bytes (writes auto-advance OAMADDR;
; inx keeps the mirror index in step)
: jsr next_random
sta SPRDATA
and masks,x ; mirror what the byte will READ back as
sta mirror,x
inx
dey
bpl :-
jmp @next
; Read burst: repeatedly reads the SAME OAM byte (reads don't
; advance OAMADDR) and verifies it against the mirror each time
@read:
: lda SPRDATA
cmp mirror,x
bne print_final ; mismatch: report immediately
iny
bmi :-
@next: dec counter
bne @loop
rts
; Per-sprite read-back masks: byte 2 of each 4-byte sprite entry
; (attributes) has unimplemented bits 2-4 that read back as 0,
; hence $E3 there and $FF elsewhere
masks:
.repeat $100/4
.byte $FF,$FF,$E3,$FF
.endrepeat
; Seeds the random number generator (CRC-fast based) with A
init_random:
pha
jsr init_crc_fast
pla
jsr update_crc_fast
rts
; Returns the next pseudo-random byte in A by feeding a constant
; into the fast CRC (update_crc_fast's return convention is
; defined elsewhere -- presumably the new low CRC byte in A)
next_random:
lda #$55
jmp update_crc_fast
|
xem/nes | 1,856 | nes-test-roms/oam_stress/source/common/testing.s | ; Utilities for writing test ROMs
; In NVRAM so these can be used before initializing runtime,
; then runtime initialized without clearing them
nv_res test_code ; code of current test ($FF = never set)
nv_res test_name,2 ; address of name of current test, or 0 if none
; Sets current test code and optional name. Also resets
; checksum. The name string is emitted into "RODATA".
; Preserved: A, X, Y
.macro set_test code,name
pha
lda #code
jsr set_test_
.ifblank name
setb test_name+1,0 ; no name: zero high byte as the "unset" flag
.else
.local Addr
setw test_name,Addr
seg_data "RODATA",{Addr: .byte name,0}
.endif
pla
.endmacro
; Stores the test code and clears the CRC (tail call)
set_test_:
sta test_code
jmp reset_crc
; Initializes testing module
init_testing:
jmp init_crc
; Reports that all tests passed, then exits with code 0
tests_passed:
jsr print_filename
print_str newline,"Passed"
lda #0
jmp exit
; Reports "Done" if set_test has never been used,
; "Passed" if set_test 0 was last used, or
; failure if set_test n was last used.
tests_done:
ldx test_code
jeq tests_passed ; code 0: passed
inx
bne test_failed ; any code other than $FF (never set): failed
jsr print_filename
print_str newline,"Done"
lda #0
jmp exit
; Reports that the current test failed. Prints code and
; name last set with set_test, or just "Failed" if none
; have been set yet. Exits with the test code.
test_failed:
ldx test_code
; Treat $FF as 1, in case it wasn't ever set
inx
bne :+
inx
stx test_code
:
; If code >= 2, print name
; NOTE(review): with X = code+1 this compare is true for every
; code >= 0, so the effective gate is test_name+1 below being
; nonzero; cpx #3-1 would match the comment -- confirm intent.
cpx #2-1 ; -1 due to inx above
blt :+
lda test_name+1
beq :+ ; high byte 0: no name was set
jsr print_newline
sta addr+1 ; print_newline preserves A (still the high byte)
lda test_name
sta addr
jsr print_str_addr
jsr print_newline
:
jsr print_filename
; End program
lda test_code
jmp exit
; If checksum doesn't match expected, reports failed test.
; Clears checksum afterwards.
; Preserved: A, X, Y
.macro check_crc expected
jsr_with_addr check_crc_,{.dword expected}
.endmacro
; Compares the CRC against the inline dword at (addr); is_crc_
; (defined elsewhere) sets Z on a match
check_crc_:
pha
jsr is_crc_
bne :+ ; mismatch: report
jsr reset_crc
pla
rts
: jsr print_newline
jsr print_crc
jmp test_failed
|
xem/nes | 2,583 | nes-test-roms/oam_stress/source/common/print.s | ; Prints values in various ways to output,
; including numbers and strings.
newline = 10 ; ASCII line feed; print_char treats it specially
zp_byte print_temp_ ; scratch used by print_str_ (stack is busy there)
; Prints indicated register to console as two hex
; chars and space
; Preserved: A, X, Y, flags
print_a:
php
pha
; Shared tail: print A as hex + space, then restore A and flags
print_reg_:
jsr print_hex
lda #' '
jsr print_char_
pla
plp
rts
; Prints X (same contract as print_a)
print_x:
php
pha
txa
jmp print_reg_
; Prints Y
print_y:
php
pha
tya
jmp print_reg_
; Prints the processor status flags
print_p:
php
pha
php
pla
jmp print_reg_
; Prints the caller's stack pointer
print_s:
php
pha
txa ; preserve X in A (caller's A is already on the stack)
tsx
inx ; +4 undoes jsr return address (2) + php (1) + pha (1)
inx
inx
inx
jsr print_x
tax ; restore X
pla
plp
rts
; Prints A as two hex characters, NO space after
; Also folds the raw byte into the running CRC.
; Preserved: A, X, Y
print_hex:
jsr update_crc
pha
lsr a
lsr a
lsr a
lsr a
jsr @nibble ; high nibble
pla
pha
and #$0F
jsr @nibble ; low nibble
pla
rts
; Converts 0-15 in A to an ASCII hex digit and prints it
@nibble:
cmp #10
blt @digit
adc #6;+1 since carry is set: 10..15 -> 'A'..'F'
@digit: adc #'0'
jmp print_char_
; Prints character and updates checksum UNLESS
; it's a newline.
; Preserved: A, X, Y
print_char:
cmp #newline
beq :+
jsr update_crc
: pha
jsr print_char_
pla
rts
; Prints space. Does NOT update checksum.
; Preserved: A, X, Y
print_space:
pha
lda #' '
jsr print_char_
pla
rts
; Advances to next line. Does NOT update checksum.
; Preserved: A, X, Y
print_newline:
pha
lda #newline
jsr print_char_
pla
rts
; Prints one or two strings placed inline after the call
; Preserved: A, X, Y
.macro print_str str,str2
jsr print_str_
.byte str
.ifnblank str2
.byte str2
.endif
.byte 0
.endmacro
; Helper for print_str: the pushed return address points into the
; inline string data. Pull it into addr, print the string, then
; resume execution just past the terminator via an indirect jmp.
print_str_:
sta print_temp_ ; preserve A without the stack (return addr on top)
pla
sta addr
pla
sta addr+1
jsr inc_addr ; +1: jsr pushes the address of its own last byte
jsr print_str_addr ; leaves addr just past the terminator
lda print_temp_
jmp (addr) ; resume after the string data
; Prints string at addr and leaves addr pointing to
; byte AFTER zero terminator.
; Preserved: A, X, Y
print_str_addr:
pha
tya
pha
ldy #0
beq :+ ; always taken (enter at the loop's termination test)
@loop: jsr print_char
jsr inc_addr
: lda (addr),y
bne @loop
pla
tay
pla
; FALL THROUGH (one extra increment skips the terminator)
; Increments 16-bit value in addr.
; Preserved: A, X, Y
inc_addr:
inc addr
beq :+ ; low byte wrapped: carry into high byte
rts
: inc addr+1
rts
; Prints A as 1-3 digit decimal value, NO space after.
; Preserved: Y (A and X are clobbered)
print_dec:
; Hundreds
cmp #10
blt @ones ; 0-9: single digit
cmp #100
blt @tens
ldx #'0'-1
: inx
sbc #100 ; carry set on entry from cmp/bge
bge :-
adc #100 ; undo the final subtract (carry clear here)
jsr @digit
; Tens (always printed for values >= 10, even a zero tens digit)
@tens: sec
ldx #'0'-1
: inx
sbc #10
bge :-
adc #10 ; undo the final subtract
jsr @digit
; Ones
@ones: ora #'0'
jmp print_char
; Print a single digit character held in X
@digit: pha
txa
jsr print_char
pla
rts
; Prints one of two characters based on condition.
; SEC; print_cc bcs,'C','-' prints 'C'.
; Preserved: A, X, Y, flags
.macro print_cc cond,yes,no
; Avoids labels since they're not local
; to macros in ca65.
php
pha
cond *+6 ; branch over "lda #no / bne" (2 + 2 + 2 bytes)
lda #no
bne *+4 ; skip "lda #yes" (taken as long as no != 0)
lda #yes
jsr print_char
pla
plp
.endmacro
|
xem/nes | 1,745 | nes-test-roms/oam_stress/source/common/build_multi.s | ; Builds program as bank for multi-test MMC1 ROM
; Define these so that error will occur if already defined
; by program
CUSTOM_MAPPER = 1 ; MMC1
CHR_RAM = 1
.ifndef BANK_COUNT
BANK_COUNT = 16 ; number of 16K test banks in the multi-ROM
.endif
; CPU vectors. Assuming the segment ends at $FFFF, hardware RESET
; ($FFFC) enters multi_reset, and each bank's own entry point sits
; at $FFF8, reached via jmp ($FFF8) after bank switching.
.segment "VECTORS"
.word -1, -1
.word reset
.word nmi, multi_reset, irq
;;;; CHR-RAM
.define CHARS "CHARS_PRG"
.segment CHARS
ascii_chr:
.segment "CHARS_PRG_ASCII"
.align $200
.incbin "ascii.chr" ; font data, copied into CHR-RAM at init
ascii_chr_end:
;;;; Shell
NEED_CONSOLE = 1
.include "shell.s"
; Per-bank reset path, invoked via the bank's $FFF8 vector
std_reset:
lda #0
sta PPUCTRL
sta PPUMASK
jmp run_shell
; One-time runtime init hook called by the shell
init_runtime:
load_ascii_chr ; copy the ASCII font into CHR-RAM
rts
nv_res cur_bank ; currently running bank number (survives reset)
; Serially writes the value in A to an MMC1 register: one bit per
; write, LSB first, stopping when only the $40 sentinel (>> 5 = 2)
; remains. If bit 7 of the initial value is set (e.g. #$4A << 1),
; the first write also resets the MMC1 shift register.
.macro write_mmc1 Addr
: sta Addr
lsr a
cmp #$40 >> 5
bne :-
.endmacro
; Copies and executes following code (256 bytes max)
; from RAM at $700, so the PRG bank can be switched out
; from under it without crashing
.macro exec_in_ram
ldx #0
: lda @source,x
sta @dest,x
inx
bne :-
jmp @dest
@source = *
.org $700 ; following code assembles for (and runs at) $700
@dest:
.endmacro
; Hardware reset entry for the multi-test ROM: configures MMC1,
; selects bank 0, and enters it through its $FFF8 vector.
; Runs from RAM because the banking switches PRG out underneath.
multi_reset:
exec_in_ram
; 16K PRG, upper bank switchable, and reset shift reg first
lda #$4A << 1
write_mmc1 $8000
; Select first bank and execute
lda #$40 ; bank 0 plus the serial-write sentinel bit
write_mmc1 $E000
setb cur_bank,0
jmp ($FFF8) ; bank-local reset vector
.reloc ; leave the .org $700 mode set by exec_in_ram
; Called by the shell when a bank's test exits; A = exit code.
; On failure, reports which test number failed and halts.
; On success, advances to the next bank, or reports overall
; success once all BANK_COUNT banks have run.
post_exit:
cmp #0
beq @passed
; Print which test number failed
print_str newline,"While running test "
ldx cur_bank
inx ; display 1-based test number
txa
jsr print_dec
print_str {" of ",.sprintf("%d", BANK_COUNT),newline,newline,newline}
setb final_result,1
: jmp :- ; halt
@passed:
exec_in_ram
; 16K PRG, upper bank switchable, and reset shift reg first
lda #$4A << 1
write_mmc1 $8000
; Next bank
inc cur_bank
lda cur_bank
cmp #BANK_COUNT
bne @run_bank
jsr console_init ; clear screen
print_str {"All ",.sprintf("%d",BANK_COUNT)," tests passed",newline,newline,newline}
setb final_result,0
: jmp :- ; halt
@run_bank:
ora #$40 ; bank number plus the serial-write sentinel bit
write_mmc1 $E000
jmp ($FFF8) ; enter the new bank via its reset vector
.reloc ; leave the .org $700 mode set by exec_in_ram
|
xem/nes | 1,480 | nes-test-roms/oam_stress/source/common/build_rom.s | ; Builds program as iNES ROM
; Default is 16K PRG and 8K CHR ROM, NROM (0)
.if 0 ; Options to set before .include "shell.inc":
CHR_RAM=1 ; Use CHR-RAM instead of CHR-ROM
CART_WRAM=1 ; Use mapper that supports 8K WRAM in cart
CUSTOM_MAPPER=n ; Specify mapper number
.endif
.ifndef CUSTOM_MAPPER
.ifdef CART_WRAM
CUSTOM_MAPPER = 2 ; UNROM
.else
CUSTOM_MAPPER = 0 ; NROM
.endif
.endif
;;;; iNES header
.ifndef CUSTOM_HEADER
.segment "HEADER"
.byte $4E,$45,$53,26 ; "NES" EOF
.ifdef CHR_RAM
.byte 2,0 ; 32K PRG, CHR RAM
.else
.byte 2,1 ; 32K PRG, 8K CHR
.endif
.byte CUSTOM_MAPPER*$10+$01 ; vertical mirroring
.endif
.ifndef CUSTOM_VECTORS
.segment "VECTORS"
.word -1,-1,-1, nmi, reset, irq
.endif
;;;; CHR-RAM/ROM: the ASCII font either goes into PRG (copied to
;;;; CHR-RAM at init) or is baked directly into the 8K CHR-ROM
.ifdef CHR_RAM
.define CHARS "CHARS_PRG"
.segment CHARS
ascii_chr:
.segment "CHARS_PRG_ASCII"
.align $200
.incbin "ascii.chr"
ascii_chr_end:
.else
.define CHARS "CHARS"
.segment "CHARS_ASCII"
.align $200
.incbin "ascii.chr"
.res $1800 ; pad to the full 8K CHR bank (assumes a $800-byte font)
.endif
.segment CHARS
.res $10,0 ; small zero-filled reservation in CHARS -- purpose unclear
;;;; Shell
.ifndef NEED_CONSOLE
NEED_CONSOLE=1
.endif
; Move code to $C000
.segment "DMC"
.res $4000 ; fills the lower 16K so code assembles at $C000
.include "shell.s"
; Reset path entered from the RESET vector
std_reset:
lda #0
sta PPUCTRL
sta PPUMASK
jmp run_shell
; One-time runtime init hook called by the shell
init_runtime:
.ifdef CHR_RAM
load_ascii_chr ; copy the ASCII font into CHR-RAM
.endif
rts
; Called when the test exits: record the result, then hang
post_exit:
jsr set_final_result
jmp forever
; This helps devcart recover after running test.
; It is never executed by test ROM.
.segment "LOADER"
.incbin "devcart.bin"
.code
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.