# model/tests/riscv-tests/isa/rv64ui/sw.S
# See LICENSE for license details.
#*****************************************************************************
# sw.S
#-----------------------------------------------------------------------------
#
# Test sw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_ST_OP( 2, lw, sw, 0x0000000000aa00aa, 0, tdat );
TEST_ST_OP( 3, lw, sw, 0xffffffffaa00aa00, 4, tdat );
TEST_ST_OP( 4, lw, sw, 0x000000000aa00aa0, 8, tdat );
TEST_ST_OP( 5, lw, sw, 0xffffffffa00aa00a, 12, tdat );
# Test with negative offset
TEST_ST_OP( 6, lw, sw, 0x0000000000aa00aa, -12, tdat8 );
TEST_ST_OP( 7, lw, sw, 0xffffffffaa00aa00, -8, tdat8 );
TEST_ST_OP( 8, lw, sw, 0x000000000aa00aa0, -4, tdat8 );
TEST_ST_OP( 9, lw, sw, 0xffffffffa00aa00a, 0, tdat8 );
# Test with a negative base
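# x4 = tdat9 - 32, so sw's effective address (-32 + 32) is tdat9 itself, read back via x1.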
TEST_CASE( 10, x5, 0x12345678, \
la x1, tdat9; \
li x2, 0x12345678; \
addi x4, x1, -32; \
sw x2, 32(x4); \
lw x5, 0(x1); \
)
# Test with unaligned base
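# x1 = tdat9 - 3, so sw's effective address (-3 + 7) is tdat9 + 4 = tdat10, read back below.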
TEST_CASE( 11, x5, 0x58213098, \
la x1, tdat9; \
li x2, 0x58213098; \
addi x1, x1, -3; \
sw x2, 7(x1); \
la x4, tdat10; \
lw x5, 0(x4); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_ST_SRC12_BYPASS( 12, 0, 0, lw, sw, 0xffffffffaabbccdd, 0, tdat );
TEST_ST_SRC12_BYPASS( 13, 0, 1, lw, sw, 0xffffffffdaabbccd, 4, tdat );
TEST_ST_SRC12_BYPASS( 14, 0, 2, lw, sw, 0xffffffffddaabbcc, 8, tdat );
TEST_ST_SRC12_BYPASS( 15, 1, 0, lw, sw, 0xffffffffcddaabbc, 12, tdat );
TEST_ST_SRC12_BYPASS( 16, 1, 1, lw, sw, 0xffffffffccddaabb, 16, tdat );
TEST_ST_SRC12_BYPASS( 17, 2, 0, lw, sw, 0xffffffffbccddaab, 20, tdat );
TEST_ST_SRC21_BYPASS( 18, 0, 0, lw, sw, 0x00112233, 0, tdat );
TEST_ST_SRC21_BYPASS( 19, 0, 1, lw, sw, 0x30011223, 4, tdat );
TEST_ST_SRC21_BYPASS( 20, 0, 2, lw, sw, 0x33001122, 8, tdat );
TEST_ST_SRC21_BYPASS( 21, 1, 0, lw, sw, 0x23300112, 12, tdat );
TEST_ST_SRC21_BYPASS( 22, 1, 1, lw, sw, 0x22330011, 16, tdat );
TEST_ST_SRC21_BYPASS( 23, 2, 0, lw, sw, 0x12233001, 20, tdat );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .word 0xdeadbeef
tdat2: .word 0xdeadbeef
tdat3: .word 0xdeadbeef
tdat4: .word 0xdeadbeef
tdat5: .word 0xdeadbeef
tdat6: .word 0xdeadbeef
tdat7: .word 0xdeadbeef
tdat8: .word 0xdeadbeef
tdat9: .word 0xdeadbeef
tdat10: .word 0xdeadbeef
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/addw.S
# See LICENSE for license details.
#*****************************************************************************
# addw.S
#-----------------------------------------------------------------------------
#
# Test addw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
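# addw adds the low 32 bits and sign-extends the 32-bit sum to 64 bits; e.g. test 7
# wraps 0x80000000 + 0xffff8000 to 0x7fff8000, whose clear sign bit zeroes the upper half.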
TEST_RR_OP( 2, addw, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, addw, 0x00000002, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, addw, 0x0000000a, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, addw, 0xffffffffffff8000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, addw, 0xffffffff80000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, addw, 0x000000007fff8000, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 8, addw, 0x0000000000007fff, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 9, addw, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 10, addw, 0xffffffff80007ffe, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 11, addw, 0xffffffff80007fff, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 12, addw, 0x000000007fff7fff, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 13, addw, 0xffffffffffffffff, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 14, addw, 0x0000000000000000, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 15, addw, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff );
TEST_RR_OP( 16, addw, 0xffffffff80000000, 0x0000000000000001, 0x000000007fffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 17, addw, 24, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 18, addw, 25, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 19, addw, 26, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 20, 0, addw, 24, 13, 11 );
TEST_RR_DEST_BYPASS( 21, 1, addw, 25, 14, 11 );
TEST_RR_DEST_BYPASS( 22, 2, addw, 26, 15, 11 );
TEST_RR_SRC12_BYPASS( 23, 0, 0, addw, 24, 13, 11 );
TEST_RR_SRC12_BYPASS( 24, 0, 1, addw, 25, 14, 11 );
TEST_RR_SRC12_BYPASS( 25, 0, 2, addw, 26, 15, 11 );
TEST_RR_SRC12_BYPASS( 26, 1, 0, addw, 24, 13, 11 );
TEST_RR_SRC12_BYPASS( 27, 1, 1, addw, 25, 14, 11 );
TEST_RR_SRC12_BYPASS( 28, 2, 0, addw, 26, 15, 11 );
TEST_RR_SRC21_BYPASS( 29, 0, 0, addw, 24, 13, 11 );
TEST_RR_SRC21_BYPASS( 30, 0, 1, addw, 25, 14, 11 );
TEST_RR_SRC21_BYPASS( 31, 0, 2, addw, 26, 15, 11 );
TEST_RR_SRC21_BYPASS( 32, 1, 0, addw, 24, 13, 11 );
TEST_RR_SRC21_BYPASS( 33, 1, 1, addw, 25, 14, 11 );
TEST_RR_SRC21_BYPASS( 34, 2, 0, addw, 26, 15, 11 );
TEST_RR_ZEROSRC1( 35, addw, 15, 15 );
TEST_RR_ZEROSRC2( 36, addw, 32, 32 );
TEST_RR_ZEROSRC12( 37, addw, 0 );
TEST_RR_ZERODEST( 38, addw, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/sra.S
# See LICENSE for license details.
#*****************************************************************************
# sra.S
#-----------------------------------------------------------------------------
#
# Test sra instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, sra, 0xffffffff80000000, 0xffffffff80000000, 0 );
TEST_RR_OP( 3, sra, 0xffffffffc0000000, 0xffffffff80000000, 1 );
TEST_RR_OP( 4, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_OP( 5, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_OP( 6, sra, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_RR_OP( 7, sra, 0x000000007fffffff, 0x000000007fffffff, 0 );
TEST_RR_OP( 8, sra, 0x000000003fffffff, 0x000000007fffffff, 1 );
TEST_RR_OP( 9, sra, 0x0000000000ffffff, 0x000000007fffffff, 7 );
TEST_RR_OP( 10, sra, 0x000000000001ffff, 0x000000007fffffff, 14 );
TEST_RR_OP( 11, sra, 0x0000000000000000, 0x000000007fffffff, 31 );
TEST_RR_OP( 12, sra, 0xffffffff81818181, 0xffffffff81818181, 0 );
TEST_RR_OP( 13, sra, 0xffffffffc0c0c0c0, 0xffffffff81818181, 1 );
TEST_RR_OP( 14, sra, 0xffffffffff030303, 0xffffffff81818181, 7 );
TEST_RR_OP( 15, sra, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_RR_OP( 16, sra, 0xffffffffffffffff, 0xffffffff81818181, 31 );
# Verify that shifts only use bottom six (rv64) or five (rv32) bits
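# e.g. 0xffffffffffffffc0 & 0x3f == 0, so test 17 expects the source value unshifted.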
TEST_RR_OP( 17, sra, 0xffffffff81818181, 0xffffffff81818181, 0xffffffffffffffc0 );
TEST_RR_OP( 18, sra, 0xffffffffc0c0c0c0, 0xffffffff81818181, 0xffffffffffffffc1 );
TEST_RR_OP( 19, sra, 0xffffffffff030303, 0xffffffff81818181, 0xffffffffffffffc7 );
TEST_RR_OP( 20, sra, 0xfffffffffffe0606, 0xffffffff81818181, 0xffffffffffffffce );
TEST_RR_OP( 21, sra, 0xffffffffffffffff, 0xffffffff81818181, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 22, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC2_EQ_DEST( 23, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC12_EQ_DEST( 24, sra, 0, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 25, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_DEST_BYPASS( 26, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_DEST_BYPASS( 27, 2, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC12_BYPASS( 28, 0, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC12_BYPASS( 29, 0, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC12_BYPASS( 30, 0, 2, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC12_BYPASS( 31, 1, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC12_BYPASS( 32, 1, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC12_BYPASS( 33, 2, 0, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC21_BYPASS( 34, 0, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC21_BYPASS( 35, 0, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC21_BYPASS( 36, 0, 2, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC21_BYPASS( 37, 1, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC21_BYPASS( 38, 1, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC21_BYPASS( 39, 2, 0, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_ZEROSRC1( 40, sra, 0, 15 );
TEST_RR_ZEROSRC2( 41, sra, 32, 32 );
TEST_RR_ZEROSRC12( 42, sra, 0 );
TEST_RR_ZERODEST( 43, sra, 1024, 2048 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/sraiw.S
# See LICENSE for license details.
#*****************************************************************************
# sraiw.S
#-----------------------------------------------------------------------------
#
# Test sraiw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, sraiw, 0xffffffff80000000, 0xffffffff80000000, 0 );
TEST_IMM_OP( 3, sraiw, 0xffffffffc0000000, 0xffffffff80000000, 1 );
TEST_IMM_OP( 4, sraiw, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_OP( 5, sraiw, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_OP( 6, sraiw, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_OP( 7, sraiw, 0x000000007fffffff, 0x000000007fffffff, 0 );
TEST_IMM_OP( 8, sraiw, 0x000000003fffffff, 0x000000007fffffff, 1 );
TEST_IMM_OP( 9, sraiw, 0x0000000000ffffff, 0x000000007fffffff, 7 );
TEST_IMM_OP( 10, sraiw, 0x000000000001ffff, 0x000000007fffffff, 14 );
TEST_IMM_OP( 11, sraiw, 0x0000000000000000, 0x000000007fffffff, 31 );
TEST_IMM_OP( 12, sraiw, 0xffffffff81818181, 0xffffffff81818181, 0 );
TEST_IMM_OP( 13, sraiw, 0xffffffffc0c0c0c0, 0xffffffff81818181, 1 );
TEST_IMM_OP( 14, sraiw, 0xffffffffff030303, 0xffffffff81818181, 7 );
TEST_IMM_OP( 15, sraiw, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_IMM_OP( 16, sraiw, 0xffffffffffffffff, 0xffffffff81818181, 31 );
# Verify that shifts ignore the top 32 bits (using true 64-bit values)
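# e.g. test 47: only the low word 0x92345678 is shifted, and the 32-bit result
# 0xf9234567 is sign-extended to 0xfffffffff9234567.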
TEST_IMM_OP( 44, sraiw, 0x0000000012345678, 0xffffffff12345678, 0 );
TEST_IMM_OP( 45, sraiw, 0x0000000001234567, 0xffffffff12345678, 4 );
TEST_IMM_OP( 46, sraiw, 0xffffffff92345678, 0x0000000092345678, 0 );
TEST_IMM_OP( 47, sraiw, 0xfffffffff9234567, 0x0000000092345678, 4 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, sraiw, 0xffffffffff000000, 0xffffffff80000000, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, sraiw, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, sraiw, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, sraiw, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, sraiw, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, sraiw, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, sraiw, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_ZEROSRC1( 24, sraiw, 0, 31 );
TEST_IMM_ZERODEST( 25, sraiw, 31, 28 );
TEST_IMM_OP( 26, sraiw, 0x0000000000000000, 0x00e0000000000000, 28 );
TEST_IMM_OP( 27, sraiw, 0xffffffffff000000, 0x00000000f0000000, 4 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/or.S
# See LICENSE for license details.
#*****************************************************************************
# or.S
#-----------------------------------------------------------------------------
#
# Test or instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_RR_OP( 2, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_OP( 3, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_OP( 4, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_OP( 5, or, 0xf0fff0ff, 0xf00ff00f, 0xf0f0f0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 6, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC2_EQ_DEST( 7, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_EQ_DEST( 8, or, 0xff00ff00, 0xff00ff00 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 9, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_DEST_BYPASS( 10, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_DEST_BYPASS( 11, 2, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 12, 0, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 13, 0, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 14, 0, 2, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 15, 1, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 16, 1, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 17, 2, 0, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 18, 0, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 19, 0, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 20, 0, 2, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 21, 1, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 22, 1, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 23, 2, 0, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_ZEROSRC1( 24, or, 0xff00ff00, 0xff00ff00 );
TEST_RR_ZEROSRC2( 25, or, 0x00ff00ff, 0x00ff00ff );
TEST_RR_ZEROSRC12( 26, or, 0 );
TEST_RR_ZERODEST( 27, or, 0x11111111, 0x22222222 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/srai.S
# See LICENSE for license details.
#*****************************************************************************
# srai.S
#-----------------------------------------------------------------------------
#
# Test srai instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, srai, 0xffffff8000000000, 0xffffff8000000000, 0 );
TEST_IMM_OP( 3, srai, 0xffffffffc0000000, 0xffffffff80000000, 1 );
TEST_IMM_OP( 4, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_OP( 5, srai, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_OP( 6, srai, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_OP( 7, srai, 0x000000007fffffff, 0x000000007fffffff, 0 );
TEST_IMM_OP( 8, srai, 0x000000003fffffff, 0x000000007fffffff, 1 );
TEST_IMM_OP( 9, srai, 0x0000000000ffffff, 0x000000007fffffff, 7 );
TEST_IMM_OP( 10, srai, 0x000000000001ffff, 0x000000007fffffff, 14 );
TEST_IMM_OP( 11, srai, 0x0000000000000000, 0x000000007fffffff, 31 );
TEST_IMM_OP( 12, srai, 0xffffffff81818181, 0xffffffff81818181, 0 );
TEST_IMM_OP( 13, srai, 0xffffffffc0c0c0c0, 0xffffffff81818181, 1 );
TEST_IMM_OP( 14, srai, 0xffffffffff030303, 0xffffffff81818181, 7 );
TEST_IMM_OP( 15, srai, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_IMM_OP( 16, srai, 0xffffffffffffffff, 0xffffffff81818181, 31 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, srai, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, srai, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, srai, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, srai, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_ZEROSRC1( 24, srai, 0, 4 );
TEST_IMM_ZERODEST( 25, srai, 33, 10 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/bne.S
# See LICENSE for license details.
#*****************************************************************************
# bne.S
#-----------------------------------------------------------------------------
#
# Test bne instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, bne, 0, 1 );
TEST_BR2_OP_TAKEN( 3, bne, 1, 0 );
TEST_BR2_OP_TAKEN( 4, bne, -1, 1 );
TEST_BR2_OP_TAKEN( 5, bne, 1, -1 );
TEST_BR2_OP_NOTTAKEN( 6, bne, 0, 0 );
TEST_BR2_OP_NOTTAKEN( 7, bne, 1, 1 );
TEST_BR2_OP_NOTTAKEN( 8, bne, -1, -1 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 9, 0, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 15, 0, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, bne, 0, 0 );
#-------------------------------------------------------------
# Test that delay-slot instructions are neither executed nor bypassed
#-------------------------------------------------------------
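# RISC-V has no branch delay slots: the taken branch must skip all four addi's,
# leaving x1 = 1 + 2 = 3.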
TEST_CASE( 21, x1, 3, \
li x1, 1; \
bne x1, x0, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/sltiu.S
# See LICENSE for license details.
#*****************************************************************************
# sltiu.S
#-----------------------------------------------------------------------------
#
# Test sltiu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
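# The 12-bit immediate is sign-extended before the unsigned compare, so 0x800
# becomes 0xfffffffffffff800 and test 6 (0 < 0x800) yields 1.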
TEST_IMM_OP( 2, sltiu, 0, 0x0000000000000000, 0x000 );
TEST_IMM_OP( 3, sltiu, 0, 0x0000000000000001, 0x001 );
TEST_IMM_OP( 4, sltiu, 1, 0x0000000000000003, 0x007 );
TEST_IMM_OP( 5, sltiu, 0, 0x0000000000000007, 0x003 );
TEST_IMM_OP( 6, sltiu, 1, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 7, sltiu, 0, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 8, sltiu, 1, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 9, sltiu, 1, 0x0000000000000000, 0x7ff );
TEST_IMM_OP( 10, sltiu, 0, 0x000000007fffffff, 0x000 );
TEST_IMM_OP( 11, sltiu, 0, 0x000000007fffffff, 0x7ff );
TEST_IMM_OP( 12, sltiu, 0, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 13, sltiu, 1, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 14, sltiu, 1, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 15, sltiu, 0, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 16, sltiu, 0, 0xffffffffffffffff, 0xfff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, sltiu, 1, 11, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, sltiu, 0, 15, 10 );
TEST_IMM_DEST_BYPASS( 19, 1, sltiu, 1, 10, 16 );
TEST_IMM_DEST_BYPASS( 20, 2, sltiu, 0, 16, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, sltiu, 1, 11, 15 );
TEST_IMM_SRC1_BYPASS( 22, 1, sltiu, 0, 17, 8 );
TEST_IMM_SRC1_BYPASS( 23, 2, sltiu, 1, 12, 14 );
TEST_IMM_ZEROSRC1( 24, sltiu, 1, 0xfff );
TEST_IMM_ZERODEST( 25, sltiu, 0x00ff00ff, 0xfff );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/jal.S
# See LICENSE for license details.
#*****************************************************************************
# jal.S
#-----------------------------------------------------------------------------
#
# Test jal instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Test 2: Basic test
#-------------------------------------------------------------
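# jal must write the address of the following instruction (linkaddr_2) into x4.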
test_2:
li TESTNUM, 2
li ra, 0
jal x4, target_2
linkaddr_2:
nop
nop
j fail
target_2:
la x2, linkaddr_2
bne x2, x4, fail
#-------------------------------------------------------------
# Test that delay-slot instructions are neither executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 3, ra, 3, \
li ra, 1; \
jal x0, 1f; \
addi ra, ra, 1; \
addi ra, ra, 1; \
addi ra, ra, 1; \
addi ra, ra, 1; \
1: addi ra, ra, 1; \
addi ra, ra, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/jalr.S
# See LICENSE for license details.
#*****************************************************************************
# jalr.S
#-----------------------------------------------------------------------------
#
# Test jalr instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Test 2: Basic test
#-------------------------------------------------------------
test_2:
li TESTNUM, 2
li t0, 0
la t1, target_2
jalr t0, t1, 0
linkaddr_2:
j fail
target_2:
la t1, linkaddr_2
bne t0, t1, fail
#-------------------------------------------------------------
# Test 3: Basic test 2, rs = rd
#-------------------------------------------------------------
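# rs1 is read before rd is written, so reusing t0 for both still reaches target_3
# with the correct link address.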
test_3:
li TESTNUM, 3
la t0, target_3
jalr t0, t0, 0
linkaddr_3:
j fail
target_3:
la t1, linkaddr_3
bne t0, t1, fail
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_JALR_SRC1_BYPASS( 4, 0, jalr );
TEST_JALR_SRC1_BYPASS( 5, 1, jalr );
TEST_JALR_SRC1_BYPASS( 6, 2, jalr );
#-------------------------------------------------------------
# Test that delay-slot instructions are neither executed nor bypassed
#-------------------------------------------------------------
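# .option norvc keeps every instruction 4 bytes, so 1f-4 is the addi just before
# label 1; exactly three increments execute and t0 = 1 + 3 = 4.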
.option push
.align 2
.option norvc
TEST_CASE( 7, t0, 4, \
li t0, 1; \
la t1, 1f; \
jr t1, -4; \
addi t0, t0, 1; \
addi t0, t0, 1; \
addi t0, t0, 1; \
addi t0, t0, 1; \
1: addi t0, t0, 1; \
addi t0, t0, 1; \
)
.option pop
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/srli.S
# See LICENSE for license details.
#*****************************************************************************
# srli.S
#-----------------------------------------------------------------------------
#
# Test srli instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
#define TEST_SRLI(n, v, a) \
TEST_IMM_OP(n, srli, ((v) & ((1 << (__riscv_xlen-1) << 1) - 1)) >> (a), v, a)
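# The mask (1 << (xlen-1) << 1) - 1 evaluates to xlen one-bits; shifting twice keeps
# each shift count below the full word width. The macro thus has the assembler compute
# the expected logical right shift of v.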
TEST_SRLI( 2, 0xffffffff80000000, 0 );
TEST_SRLI( 3, 0xffffffff80000000, 1 );
TEST_SRLI( 4, 0xffffffff80000000, 7 );
TEST_SRLI( 5, 0xffffffff80000000, 14 );
TEST_SRLI( 6, 0xffffffff80000001, 31 );
TEST_SRLI( 7, 0xffffffffffffffff, 0 );
TEST_SRLI( 8, 0xffffffffffffffff, 1 );
TEST_SRLI( 9, 0xffffffffffffffff, 7 );
TEST_SRLI( 10, 0xffffffffffffffff, 14 );
TEST_SRLI( 11, 0xffffffffffffffff, 31 );
TEST_SRLI( 12, 0x0000000021212121, 0 );
TEST_SRLI( 13, 0x0000000021212121, 1 );
TEST_SRLI( 14, 0x0000000021212121, 7 );
TEST_SRLI( 15, 0x0000000021212121, 14 );
TEST_SRLI( 16, 0x0000000021212121, 31 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, srli, 0x01000000, 0x80000000, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, srli, 0x01000000, 0x80000000, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, srli, 0x00020000, 0x80000000, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, srli, 0x00000001, 0x80000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, srli, 0x01000000, 0x80000000, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, srli, 0x00020000, 0x80000000, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, srli, 0x00000001, 0x80000001, 31 );
TEST_IMM_ZEROSRC1( 24, srli, 0, 4 );
TEST_IMM_ZERODEST( 25, srli, 33, 10 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/slli.S
# See LICENSE for license details.
#*****************************************************************************
# slli.S
#-----------------------------------------------------------------------------
#
# Test slli instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, slli, 0x0000000000000001, 0x0000000000000001, 0 );
TEST_IMM_OP( 3, slli, 0x0000000000000002, 0x0000000000000001, 1 );
TEST_IMM_OP( 4, slli, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_IMM_OP( 5, slli, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_IMM_OP( 6, slli, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_IMM_OP( 7, slli, 0xffffffffffffffff, 0xffffffffffffffff, 0 );
TEST_IMM_OP( 8, slli, 0xfffffffffffffffe, 0xffffffffffffffff, 1 );
TEST_IMM_OP( 9, slli, 0xffffffffffffff80, 0xffffffffffffffff, 7 );
TEST_IMM_OP( 10, slli, 0xffffffffffffc000, 0xffffffffffffffff, 14 );
TEST_IMM_OP( 11, slli, 0xffffffff80000000, 0xffffffffffffffff, 31 );
TEST_IMM_OP( 12, slli, 0x0000000021212121, 0x0000000021212121, 0 );
TEST_IMM_OP( 13, slli, 0x0000000042424242, 0x0000000021212121, 1 );
TEST_IMM_OP( 14, slli, 0x0000001090909080, 0x0000000021212121, 7 );
TEST_IMM_OP( 15, slli, 0x0000084848484000, 0x0000000021212121, 14 );
TEST_IMM_OP( 16, slli, 0x1090909080000000, 0x0000000021212121, 31 );
#if __riscv_xlen == 64
TEST_IMM_OP( 50, slli, 0x8000000000000000, 0x0000000000000001, 63 );
TEST_IMM_OP( 51, slli, 0xffffff8000000000, 0xffffffffffffffff, 39 );
TEST_IMM_OP( 52, slli, 0x0909080000000000, 0x0000000021212121, 43 );
#endif
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, slli, 0x00000080, 0x00000001, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, slli, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, slli, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, slli, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, slli, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, slli, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, slli, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_IMM_ZEROSRC1( 24, slli, 0, 31 );
TEST_IMM_ZERODEST( 25, slli, 33, 20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/lh.S
# See LICENSE for license details.
#*****************************************************************************
# lh.S
#-----------------------------------------------------------------------------
#
# Test lh instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lh, 0x00000000000000ff, 0, tdat );
TEST_LD_OP( 3, lh, 0xffffffffffffff00, 2, tdat );
TEST_LD_OP( 4, lh, 0x0000000000000ff0, 4, tdat );
TEST_LD_OP( 5, lh, 0xfffffffffffff00f, 6, tdat );
# Test with negative offset
TEST_LD_OP( 6, lh, 0x00000000000000ff, -6, tdat4 );
TEST_LD_OP( 7, lh, 0xffffffffffffff00, -4, tdat4 );
TEST_LD_OP( 8, lh, 0x0000000000000ff0, -2, tdat4 );
TEST_LD_OP( 9, lh, 0xfffffffffffff00f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x00000000000000ff, \
la x1, tdat; \
addi x1, x1, -32; \
lh x5, 32(x1); \
)
# Test with unaligned base
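# x1 = tdat - 5, so lh's effective address (-5 + 7) is tdat + 2 = tdat2 (0xff00, sign-extended).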
TEST_CASE( 11, x5, 0xffffffffffffff00, \
la x1, tdat; \
addi x1, x1, -5; \
lh x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lh, 0x0000000000000ff0, 2, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lh, 0xfffffffffffff00f, 2, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lh, 0xffffffffffffff00, 2, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lh, 0x0000000000000ff0, 2, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lh, 0xfffffffffffff00f, 2, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lh, 0xffffffffffffff00, 2, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
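# The younger li must win the write-back race: x2 ends as 2, not the loaded value.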
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lh x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lh x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .half 0x00ff
tdat2: .half 0xff00
tdat3: .half 0x0ff0
tdat4: .half 0xf00f
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/add.S
# See LICENSE for license details.
#*****************************************************************************
# add.S
#-----------------------------------------------------------------------------
#
# Test add instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, add, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, add, 0x00000002, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, add, 0x0000000a, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, add, 0xffffffffffff8000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, add, 0xffffffff80000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, add, 0xffffffff7fff8000, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 8, add, 0x0000000000007fff, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 9, add, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 10, add, 0x0000000080007ffe, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 11, add, 0xffffffff80007fff, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 12, add, 0x000000007fff7fff, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 13, add, 0xffffffffffffffff, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 14, add, 0x0000000000000000, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 15, add, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff );
TEST_RR_OP( 16, add, 0x0000000080000000, 0x0000000000000001, 0x000000007fffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 17, add, 24, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 18, add, 25, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 19, add, 26, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 20, 0, add, 24, 13, 11 );
TEST_RR_DEST_BYPASS( 21, 1, add, 25, 14, 11 );
TEST_RR_DEST_BYPASS( 22, 2, add, 26, 15, 11 );
TEST_RR_SRC12_BYPASS( 23, 0, 0, add, 24, 13, 11 );
TEST_RR_SRC12_BYPASS( 24, 0, 1, add, 25, 14, 11 );
TEST_RR_SRC12_BYPASS( 25, 0, 2, add, 26, 15, 11 );
TEST_RR_SRC12_BYPASS( 26, 1, 0, add, 24, 13, 11 );
TEST_RR_SRC12_BYPASS( 27, 1, 1, add, 25, 14, 11 );
TEST_RR_SRC12_BYPASS( 28, 2, 0, add, 26, 15, 11 );
TEST_RR_SRC21_BYPASS( 29, 0, 0, add, 24, 13, 11 );
TEST_RR_SRC21_BYPASS( 30, 0, 1, add, 25, 14, 11 );
TEST_RR_SRC21_BYPASS( 31, 0, 2, add, 26, 15, 11 );
TEST_RR_SRC21_BYPASS( 32, 1, 0, add, 24, 13, 11 );
TEST_RR_SRC21_BYPASS( 33, 1, 1, add, 25, 14, 11 );
TEST_RR_SRC21_BYPASS( 34, 2, 0, add, 26, 15, 11 );
TEST_RR_ZEROSRC1( 35, add, 15, 15 );
TEST_RR_ZEROSRC2( 36, add, 32, 32 );
TEST_RR_ZEROSRC12( 37, add, 0 );
TEST_RR_ZERODEST( 38, add, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/lb.S
# See LICENSE for license details.
#*****************************************************************************
# lb.S
#-----------------------------------------------------------------------------
#
# Test lb instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lb, 0xffffffffffffffff, 0, tdat );
TEST_LD_OP( 3, lb, 0x0000000000000000, 1, tdat );
TEST_LD_OP( 4, lb, 0xfffffffffffffff0, 2, tdat );
TEST_LD_OP( 5, lb, 0x000000000000000f, 3, tdat );
# Test with negative offset
TEST_LD_OP( 6, lb, 0xffffffffffffffff, -3, tdat4 );
TEST_LD_OP( 7, lb, 0x0000000000000000, -2, tdat4 );
TEST_LD_OP( 8, lb, 0xfffffffffffffff0, -1, tdat4 );
TEST_LD_OP( 9, lb, 0x000000000000000f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0xffffffffffffffff, \
la x1, tdat; \
addi x1, x1, -32; \
lb x5, 32(x1); \
)
# Test with unaligned base
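# x1 = tdat - 6, so lb's effective address (-6 + 7) is tdat + 1 = tdat2 (0x00).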
TEST_CASE( 11, x5, 0x0000000000000000, \
la x1, tdat; \
addi x1, x1, -6; \
lb x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lb, 0xfffffffffffffff0, 1, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lb, 0x000000000000000f, 1, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lb, 0x0000000000000000, 1, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lb, 0xfffffffffffffff0, 1, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lb, 0x000000000000000f, 1, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lb, 0x0000000000000000, 1, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lb x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lb x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .byte 0xff
tdat2: .byte 0x00
tdat3: .byte 0xf0
tdat4: .byte 0x0f
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/xori.S
# See LICENSE for license details.
#*****************************************************************************
# xori.S
#-----------------------------------------------------------------------------
#
# Test xori instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, xori, 0xffffffffff00f00f, 0x0000000000ff0f00, 0xf0f );
TEST_IMM_OP( 3, xori, 0x000000000ff00f00, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_OP( 4, xori, 0x0000000000ff0ff0, 0x0000000000ff08ff, 0x70f );
TEST_IMM_OP( 5, xori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 6, xori, 0xffffffffff00f00f, 0xffffffffff00f700, 0x70f );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 7, 0, xori, 0x000000000ff00f00, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_DEST_BYPASS( 8, 1, xori, 0x0000000000ff0ff0, 0x0000000000ff08ff, 0x70f );
TEST_IMM_DEST_BYPASS( 9, 2, xori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 10, 0, xori, 0x000000000ff00f00, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 11, 1, xori, 0x0000000000ff0ff0, 0x0000000000ff0fff, 0x00f );
TEST_IMM_SRC1_BYPASS( 12, 2, xori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
TEST_IMM_ZEROSRC1( 13, xori, 0x0f0, 0x0f0 );
TEST_IMM_ZERODEST( 14, xori, 0x00ff00ff, 0x70f );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/slt.S
# See LICENSE for license details.
#*****************************************************************************
# slt.S
#-----------------------------------------------------------------------------
#
# Test slt instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, slt, 0, 0x0000000000000000, 0x0000000000000000 );
TEST_RR_OP( 3, slt, 0, 0x0000000000000001, 0x0000000000000001 );
TEST_RR_OP( 4, slt, 1, 0x0000000000000003, 0x0000000000000007 );
TEST_RR_OP( 5, slt, 0, 0x0000000000000007, 0x0000000000000003 );
TEST_RR_OP( 6, slt, 0, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 7, slt, 1, 0xffffffff80000000, 0x0000000000000000 );
TEST_RR_OP( 8, slt, 1, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 9, slt, 1, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 10, slt, 0, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 11, slt, 0, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 12, slt, 1, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 13, slt, 0, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 14, slt, 0, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 15, slt, 1, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 16, slt, 0, 0xffffffffffffffff, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 17, slt, 0, 14, 13 );
TEST_RR_SRC2_EQ_DEST( 18, slt, 1, 11, 13 );
TEST_RR_SRC12_EQ_DEST( 19, slt, 0, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 20, 0, slt, 1, 11, 13 );
TEST_RR_DEST_BYPASS( 21, 1, slt, 0, 14, 13 );
TEST_RR_DEST_BYPASS( 22, 2, slt, 1, 12, 13 );
TEST_RR_SRC12_BYPASS( 23, 0, 0, slt, 0, 14, 13 );
TEST_RR_SRC12_BYPASS( 24, 0, 1, slt, 1, 11, 13 );
TEST_RR_SRC12_BYPASS( 25, 0, 2, slt, 0, 15, 13 );
TEST_RR_SRC12_BYPASS( 26, 1, 0, slt, 1, 10, 13 );
TEST_RR_SRC12_BYPASS( 27, 1, 1, slt, 0, 16, 13 );
TEST_RR_SRC12_BYPASS( 28, 2, 0, slt, 1, 9, 13 );
TEST_RR_SRC21_BYPASS( 29, 0, 0, slt, 0, 17, 13 );
TEST_RR_SRC21_BYPASS( 30, 0, 1, slt, 1, 8, 13 );
TEST_RR_SRC21_BYPASS( 31, 0, 2, slt, 0, 18, 13 );
TEST_RR_SRC21_BYPASS( 32, 1, 0, slt, 1, 7, 13 );
TEST_RR_SRC21_BYPASS( 33, 1, 1, slt, 0, 19, 13 );
TEST_RR_SRC21_BYPASS( 34, 2, 0, slt, 1, 6, 13 );
TEST_RR_ZEROSRC1( 35, slt, 0, -1 );
TEST_RR_ZEROSRC2( 36, slt, 1, -1 );
TEST_RR_ZEROSRC12( 37, slt, 0 );
TEST_RR_ZERODEST( 38, slt, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/bge.S
# See LICENSE for license details.
#*****************************************************************************
# bge.S
#-----------------------------------------------------------------------------
#
# Test bge instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, bge, 0, 0 );
TEST_BR2_OP_TAKEN( 3, bge, 1, 1 );
TEST_BR2_OP_TAKEN( 4, bge, -1, -1 );
TEST_BR2_OP_TAKEN( 5, bge, 1, 0 );
TEST_BR2_OP_TAKEN( 6, bge, 1, -1 );
TEST_BR2_OP_TAKEN( 7, bge, -1, -2 );
TEST_BR2_OP_NOTTAKEN( 8, bge, 0, 1 );
TEST_BR2_OP_NOTTAKEN( 9, bge, -1, 1 );
TEST_BR2_OP_NOTTAKEN( 10, bge, -2, -1 );
TEST_BR2_OP_NOTTAKEN( 11, bge, -2, 1 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 12, 0, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 13, 0, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 14, 0, 2, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 15, 1, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 16, 1, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 17, 2, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 18, 0, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 19, 0, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 20, 0, 2, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 21, 1, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 22, 1, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 23, 2, 0, bge, -1, 0 );
#-------------------------------------------------------------
# Test that delay-slot instructions are neither executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 24, x1, 3, \
li x1, 1; \
bge x1, x0, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/blt.S
# See LICENSE for license details.
#*****************************************************************************
# blt.S
#-----------------------------------------------------------------------------
#
# Test blt instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, blt, 0, 1 );
TEST_BR2_OP_TAKEN( 3, blt, -1, 1 );
TEST_BR2_OP_TAKEN( 4, blt, -2, -1 );
TEST_BR2_OP_NOTTAKEN( 5, blt, 1, 0 );
TEST_BR2_OP_NOTTAKEN( 6, blt, 1, -1 );
TEST_BR2_OP_NOTTAKEN( 7, blt, -1, -2 );
TEST_BR2_OP_NOTTAKEN( 8, blt, 1, -2 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 9, 0, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 15, 0, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, blt, 0, -1 );
#-------------------------------------------------------------
# Test that delay-slot instructions are neither executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 21, x1, 3, \
li x1, 1; \
blt x0, x1, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/addiw.S
# See LICENSE for license details.
#*****************************************************************************
# addiw.S
#-----------------------------------------------------------------------------
#
# Test addiw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
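# addiw sign-extends the 12-bit immediate, adds in 32 bits, and sign-extends the
# 32-bit sum; e.g. test 16 wraps 0x7fffffff + 1 to 0xffffffff80000000.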
TEST_IMM_OP( 2, addiw, 0x00000000, 0x00000000, 0x000 );
TEST_IMM_OP( 3, addiw, 0x00000002, 0x00000001, 0x001 );
TEST_IMM_OP( 4, addiw, 0x0000000a, 0x00000003, 0x007 );
TEST_IMM_OP( 5, addiw, 0xfffffffffffff800, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 6, addiw, 0xffffffff80000000, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 7, addiw, 0x000000007ffff800, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 8, addiw, 0x00000000000007ff, 0x00000000, 0x7ff );
TEST_IMM_OP( 9, addiw, 0x000000007fffffff, 0x7fffffff, 0x000 );
TEST_IMM_OP( 10, addiw, 0xffffffff800007fe, 0x7fffffff, 0x7ff );
TEST_IMM_OP( 11, addiw, 0xffffffff800007ff, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 12, addiw, 0x000000007ffff7ff, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 13, addiw, 0xffffffffffffffff, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 14, addiw, 0x0000000000000000, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 15, addiw, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfff );
TEST_IMM_OP( 16, addiw, 0xffffffff80000000, 0x7fffffff, 0x001 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, addiw, 24, 13, 11 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, addiw, 24, 13, 11 );
TEST_IMM_DEST_BYPASS( 19, 1, addiw, 23, 13, 10 );
TEST_IMM_DEST_BYPASS( 20, 2, addiw, 22, 13, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, addiw, 24, 13, 11 );
TEST_IMM_SRC1_BYPASS( 22, 1, addiw, 23, 13, 10 );
TEST_IMM_SRC1_BYPASS( 23, 2, addiw, 22, 13, 9 );
TEST_IMM_ZEROSRC1( 24, addiw, 32, 32 );
TEST_IMM_ZERODEST( 25, addiw, 33, 50 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64ui/srliw.S
# See LICENSE for license details.
#*****************************************************************************
# srliw.S
#-----------------------------------------------------------------------------
#
# Test srliw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, srliw, 0xffffffff80000000, 0xffffffff80000000, 0 );
TEST_IMM_OP( 3, srliw, 0x0000000040000000, 0xffffffff80000000, 1 );
TEST_IMM_OP( 4, srliw, 0x0000000001000000, 0xffffffff80000000, 7 );
TEST_IMM_OP( 5, srliw, 0x0000000000020000, 0xffffffff80000000, 14 );
TEST_IMM_OP( 6, srliw, 0x0000000000000001, 0xffffffff80000001, 31 );
TEST_IMM_OP( 7, srliw, 0xffffffffffffffff, 0xffffffffffffffff, 0 );
TEST_IMM_OP( 8, srliw, 0x000000007fffffff, 0xffffffffffffffff, 1 );
TEST_IMM_OP( 9, srliw, 0x0000000001ffffff, 0xffffffffffffffff, 7 );
TEST_IMM_OP( 10, srliw, 0x000000000003ffff, 0xffffffffffffffff, 14 );
TEST_IMM_OP( 11, srliw, 0x0000000000000001, 0xffffffffffffffff, 31 );
TEST_IMM_OP( 12, srliw, 0x0000000021212121, 0x0000000021212121, 0 );
TEST_IMM_OP( 13, srliw, 0x0000000010909090, 0x0000000021212121, 1 );
TEST_IMM_OP( 14, srliw, 0x0000000000424242, 0x0000000021212121, 7 );
TEST_IMM_OP( 15, srliw, 0x0000000000008484, 0x0000000021212121, 14 );
TEST_IMM_OP( 16, srliw, 0x0000000000000000, 0x0000000021212121, 31 );
# Verify that shifts ignore the top 32 bits (using true 64-bit values)
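# e.g. test 46: even this logical shift sign-extends its 32-bit result, so a
# zero-amount shift of 0x92345678 yields 0xffffffff92345678.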
TEST_IMM_OP( 44, srliw, 0x0000000012345678, 0xffffffff12345678, 0 );
TEST_IMM_OP( 45, srliw, 0x0000000001234567, 0xffffffff12345678, 4 );
TEST_IMM_OP( 46, srliw, 0xffffffff92345678, 0x0000000092345678, 0 );
TEST_IMM_OP( 47, srliw, 0x0000000009234567, 0x0000000092345678, 4 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, srliw, 0x0000000001000000, 0xffffffff80000000, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, srliw, 0x0000000001000000, 0xffffffff80000000, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, srliw, 0x0000000000020000, 0xffffffff80000000, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, srliw, 0x0000000000000001, 0xffffffff80000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, srliw, 0x0000000001000000, 0xffffffff80000000, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, srliw, 0x0000000000020000, 0xffffffff80000000, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, srliw, 0x0000000000000001, 0xffffffff80000001, 31 );
TEST_IMM_ZEROSRC1( 24, srliw, 0, 31 );
TEST_IMM_ZERODEST( 25, srliw, 31, 28 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv32ud/ldst.S
# See LICENSE for license details.
#*****************************************************************************
# ldst.S
#-----------------------------------------------------------------------------
#
# This test verifies that flw, fld, fsw, and fsd work properly.
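# Note that flw NaN-boxes its result (bits 63:32 are set to all ones), which
# test 6 observes by storing the register back with fsd.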
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32UF
RVTEST_CODE_BEGIN
la s0, tdat
TEST_CASE_D32(2, a0, a1, 0x40000000bf800000, fld f2, 0(s0); fsd f2, 16(s0); lw a0, 16(s0); lw a1, 20(s0))
TEST_CASE_D32(3, a0, a1, 0x40000000bf800000, fld f2, 0(s0); fsw f2, 16(s0); lw a0, 16(s0); lw a1, 20(s0))
TEST_CASE_D32(4, a0, a1, 0x40000000bf800000, flw f2, 0(s0); fsw f2, 16(s0); lw a0, 16(s0); lw a1, 20(s0))
TEST_CASE_D32(5, a0, a1, 0xc080000040400000, fld f2, 8(s0); fsd f2, 16(s0); lw a0, 16(s0); lw a1, 20(s0))
TEST_CASE_D32(6, a0, a1, 0xffffffff40400000, flw f2, 8(s0); fsd f2, 16(s0); lw a0, 16(s0); lw a1, 20(s0))
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
.word 0xbf800000
.word 0x40000000
.word 0x40400000
.word 0xc0800000
.word 0xdeadbeef
.word 0xcafebabe
.word 0xabad1dea
.word 0x1337d00d
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64um/mulhu.S
# See LICENSE for license details.
#*****************************************************************************
# mulhu.S
#-----------------------------------------------------------------------------
#
# Test mulhu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulhu, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulhu, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulhu, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulhu, 0x0000000000000000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, mulhu, 0x0000000000000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, mulhu, 0xffffffff7fff8000, 0xffffffff80000000, 0xffffffffffff8000 );
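# Tests 30 and 31 swap the operands to check that the unsigned high product is commutative.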
TEST_RR_OP(30, mulhu, 0x000000000001fefe, 0xaaaaaaaaaaaaaaab, 0x000000000002fe7d );
TEST_RR_OP(31, mulhu, 0x000000000001fefe, 0x000000000002fe7d, 0xaaaaaaaaaaaaaaab );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
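# (a << 32) * (b << 32) = (a * b) << 64, so mulhu of these operands returns a * b exactly.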
TEST_RR_SRC1_EQ_DEST( 8, mulhu, 143, 13<<32, 11<<32 );
TEST_RR_SRC2_EQ_DEST( 9, mulhu, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_EQ_DEST( 10, mulhu, 169, 13<<32 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulhu, 143, 13<<32, 11<<32 );
TEST_RR_DEST_BYPASS( 12, 1, mulhu, 154, 14<<32, 11<<32 );
TEST_RR_DEST_BYPASS( 13, 2, mulhu, 165, 15<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulhu, 143, 13<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulhu, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulhu, 165, 15<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulhu, 143, 13<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulhu, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulhu, 165, 15<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulhu, 143, 13<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulhu, 154, 14<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulhu, 165, 15<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulhu, 143, 13<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulhu, 154, 14<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulhu, 165, 15<<32, 11<<32 );
TEST_RR_ZEROSRC1( 26, mulhu, 0, 31<<32 );
TEST_RR_ZEROSRC2( 27, mulhu, 0, 32<<32 );
TEST_RR_ZEROSRC12( 28, mulhu, 0 );
TEST_RR_ZERODEST( 29, mulhu, 33<<32, 34<<32 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END

# model/tests/riscv-tests/isa/rv64um/mulh.S
# See LICENSE for license details.
#*****************************************************************************
# mulh.S
#-----------------------------------------------------------------------------
#
# Test mulh instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulh, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulh, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulh, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulh, 0x0000000000000000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, mulh, 0x0000000000000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, mulh, 0x0000000000000000, 0xffffffff80000000, 0xffffffffffff8000 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
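# The operands below are positive, so as with mulhu, (a << 32) * (b << 32) leaves
# a * b in the upper 64 bits.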
TEST_RR_SRC1_EQ_DEST( 8, mulh, 143, 13<<32, 11<<32 );
TEST_RR_SRC2_EQ_DEST( 9, mulh, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_EQ_DEST( 10, mulh, 169, 13<<32 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulh, 143, 13<<32, 11<<32 );
TEST_RR_DEST_BYPASS( 12, 1, mulh, 154, 14<<32, 11<<32 );
TEST_RR_DEST_BYPASS( 13, 2, mulh, 165, 15<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulh, 143, 13<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulh, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulh, 165, 15<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulh, 143, 13<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulh, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulh, 165, 15<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulh, 143, 13<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulh, 154, 14<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulh, 165, 15<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulh, 143, 13<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulh, 154, 14<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulh, 165, 15<<32, 11<<32 );
TEST_RR_ZEROSRC1( 26, mulh, 0, 31<<32 );
TEST_RR_ZEROSRC2( 27, mulh, 0, 32<<32 );
TEST_RR_ZEROSRC12( 28, mulh, 0 );
TEST_RR_ZERODEST( 29, mulh, 33<<32, 34<<32 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,420 | model/tests/riscv-tests/isa/rv64um/mulw.S | # See LICENSE for license details.
#*****************************************************************************
# mulw.S
#-----------------------------------------------------------------------------
#
# Test mulw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulw, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulw, 0x00000001, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulw, 0x00000015, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulw, 0x0000000000000000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, mulw, 0x0000000000000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, mulw, 0x0000000000000000, 0xffffffff80000000, 0xffffffffffff8000 );
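# (mulw multiplies the low 32 bits of the operands and sign-extends the
# low 32 bits of the product: in test 7, (-2^31) * (-2^15) = 2^46, which
# is 0 mod 2^32.)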
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulw, 143, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 9, mulw, 154, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 10, mulw, 169, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulw, 143, 13, 11 );
TEST_RR_DEST_BYPASS( 12, 1, mulw, 154, 14, 11 );
TEST_RR_DEST_BYPASS( 13, 2, mulw, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulw, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulw, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulw, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulw, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulw, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulw, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulw, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulw, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulw, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulw, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulw, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulw, 165, 15, 11 );
TEST_RR_ZEROSRC1( 26, mulw, 0, 31 );
TEST_RR_ZEROSRC2( 27, mulw, 0, 32 );
TEST_RR_ZEROSRC12( 28, mulw, 0 );
TEST_RR_ZERODEST( 29, mulw, 33, 34 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,636 | model/tests/riscv-tests/isa/rv64um/mulhsu.S | # See LICENSE for license details.
#*****************************************************************************
# mulhsu.S
#-----------------------------------------------------------------------------
#
# Test mulhsu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulhsu, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulhsu, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulhsu, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulhsu, 0x0000000000000000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, mulhsu, 0x0000000000000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, mulhsu, 0xffffffff80000000, 0xffffffff80000000, 0xffffffffffff8000 );
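# (mulhsu treats rs1 as signed and rs2 as unsigned: in test 7,
# (-2^31) * (2^64 - 2^15) = -(2^95 - 2^46), whose upper 64 bits are
# 0xffffffff80000000, i.e. -2^31.)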
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulhsu, 143, 13<<32, 11<<32 );
TEST_RR_SRC2_EQ_DEST( 9, mulhsu, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_EQ_DEST( 10, mulhsu, 169, 13<<32 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulhsu, 143, 13<<32, 11<<32 );
TEST_RR_DEST_BYPASS( 12, 1, mulhsu, 154, 14<<32, 11<<32 );
TEST_RR_DEST_BYPASS( 13, 2, mulhsu, 165, 15<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulhsu, 143, 13<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulhsu, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulhsu, 165, 15<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulhsu, 143, 13<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulhsu, 154, 14<<32, 11<<32 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulhsu, 165, 15<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulhsu, 143, 13<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulhsu, 154, 14<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulhsu, 165, 15<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulhsu, 143, 13<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulhsu, 154, 14<<32, 11<<32 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulhsu, 165, 15<<32, 11<<32 );
TEST_RR_ZEROSRC1( 26, mulhsu, 0, 31<<32 );
TEST_RR_ZEROSRC2( 27, mulhsu, 0, 32<<32 );
TEST_RR_ZEROSRC12( 28, mulhsu, 0 );
TEST_RR_ZERODEST( 29, mulhsu, 33<<32, 34<<32 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,732 | model/tests/riscv-tests/isa/rv64um/mul.S | # See LICENSE for license details.
#*****************************************************************************
# mul.S
#-----------------------------------------------------------------------------
#
# Test mul instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP(32, mul, 0x0000000000001200, 0x0000000000007e00, 0x6db6db6db6db6db7 );
TEST_RR_OP(33, mul, 0x0000000000001240, 0x0000000000007fc0, 0x6db6db6db6db6db7 );
TEST_RR_OP( 2, mul, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mul, 0x00000001, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mul, 0x00000015, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mul, 0x0000000000000000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, mul, 0x0000000000000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, mul, 0x0000400000000000, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP(30, mul, 0x000000000000ff7f, 0xaaaaaaaaaaaaaaab, 0x000000000002fe7d );
TEST_RR_OP(31, mul, 0x000000000000ff7f, 0x000000000002fe7d, 0xaaaaaaaaaaaaaaab );
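# (Tests 32/33 use 0x6db6db6db6db6db7, the multiplicative inverse of 7
# mod 2^64: 0x7e00 = 7 * 0x1200, so only 0x1200 survives in the low half.
# Tests 30/31 are the low-half companions of the mulhu cases: the full
# product is 0x1fefe * 2^64 + 0xff7f.)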
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mul, 143, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 9, mul, 154, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 10, mul, 169, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mul, 143, 13, 11 );
TEST_RR_DEST_BYPASS( 12, 1, mul, 154, 14, 11 );
TEST_RR_DEST_BYPASS( 13, 2, mul, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mul, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mul, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mul, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mul, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mul, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mul, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mul, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mul, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mul, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mul, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mul, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mul, 165, 15, 11 );
TEST_RR_ZEROSRC1( 26, mul, 0, 31 );
TEST_RR_ZEROSRC2( 27, mul, 0, 32 );
TEST_RR_ZEROSRC12( 28, mul, 0 );
TEST_RR_ZERODEST( 29, mul, 33, 34 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,872 | model/tests/riscv-tests/isa/rv64uzfh/fmin.S | # See LICENSE for license details.
#*****************************************************************************
# fmin.S
#-----------------------------------------------------------------------------
#
# Test f{min|max}.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP2_H( 2, fmin.h, 0, 1.0, 2.5, 1.0 );
TEST_FP_OP2_H( 3, fmin.h, 0, -1235.1, -1235.1, 1.1 );
TEST_FP_OP2_H( 4, fmin.h, 0, -1235.1, 1.1, -1235.1 );
TEST_FP_OP2_H( 5, fmin.h, 0, -1235.1, NaN, -1235.1 );
TEST_FP_OP2_H( 6, fmin.h, 0, 0.00000001, 3.14159265, 0.00000001 );
TEST_FP_OP2_H( 7, fmin.h, 0, -2.0, -1.0, -2.0 );
TEST_FP_OP2_H(12, fmax.h, 0, 2.5, 2.5, 1.0 );
TEST_FP_OP2_H(13, fmax.h, 0, 1.1, -1235.1, 1.1 );
TEST_FP_OP2_H(14, fmax.h, 0, 1.1, 1.1, -1235.1 );
TEST_FP_OP2_H(15, fmax.h, 0, -1235.1, NaN, -1235.1 );
TEST_FP_OP2_H(16, fmax.h, 0, 3.14159265, 3.14159265, 0.00000001 );
TEST_FP_OP2_H(17, fmax.h, 0, -1.0, -1.0, -2.0 );
# FMAX(sNaN, x) = x
TEST_FP_OP2_H(20, fmax.h, 0x10, 1.0, sNaNh, 1.0);
# FMAX(qNaN, qNaN) = canonical NaN
TEST_FP_OP2_H(21, fmax.h, 0x00, qNaNh, NaN, NaN);
# -0.0 < +0.0
TEST_FP_OP2_H(30, fmin.h, 0, -0.0, -0.0, 0.0 );
TEST_FP_OP2_H(31, fmin.h, 0, -0.0, 0.0, -0.0 );
TEST_FP_OP2_H(32, fmax.h, 0, 0.0, -0.0, 0.0 );
TEST_FP_OP2_H(33, fmax.h, 0, 0.0, 0.0, -0.0 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,629 | model/tests/riscv-tests/isa/rv64uzfh/fmadd.S | # See LICENSE for license details.
#*****************************************************************************
# fmadd.S
#-----------------------------------------------------------------------------
#
# Test f[n]m{add|sub}.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP3_H( 2, fmadd.h, 0, 3.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_H( 3, fmadd.h, 1, 13.2, -1.0, -12.1, 1.1 );
TEST_FP_OP3_H( 4, fmadd.h, 0, -12.0, 2.0, -5.0, -2.0 );
TEST_FP_OP3_H( 5, fnmadd.h, 0, -3.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_H( 6, fnmadd.h, 1, -13.2, -1.0, -12.1, 1.1 );
TEST_FP_OP3_H( 7, fnmadd.h, 0, 12.0, 2.0, -5.0, -2.0 );
TEST_FP_OP3_H( 8, fmsub.h, 0, 1.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_H( 9, fmsub.h, 1, 11, -1.0, -12.1, 1.1 );
TEST_FP_OP3_H(10, fmsub.h, 0, -8.0, 2.0, -5.0, -2.0 );
TEST_FP_OP3_H(11, fnmsub.h, 0, -1.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_H(12, fnmsub.h, 1, -11, -1.0, -12.1, 1.1 );
TEST_FP_OP3_H(13, fnmsub.h, 0, 8.0, 2.0, -5.0, -2.0 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,496 | model/tests/riscv-tests/isa/rv64uzfh/move.S | # See LICENSE for license details.
#*****************************************************************************
# move.S
#-----------------------------------------------------------------------------
#
# This test verifies that the fmv.h.x, fmv.x.h, and fsgnj[x|n].h instructions
# and the fcsr work properly.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
TEST_CASE(2, a1, 1, csrwi fcsr, 1; li a0, 0x1234; fssr a1, a0)
TEST_CASE(3, a0, 0x34, frsr a0)
TEST_CASE(4, a0, 0x14, frflags a0)
TEST_CASE(5, a0, 0x01, csrrwi a0, frm, 2)
TEST_CASE(6, a0, 0x54, frsr a0)
TEST_CASE(7, a0, 0x14, csrrci a0, fflags, 4)
TEST_CASE(8, a0, 0x50, frsr a0)
#define TEST_FSGNJS(n, insn, new_sign, rs1_sign, rs2_sign) \
TEST_CASE(n, a0, 0x1234 | (-(new_sign) << 15), \
li a1, ((rs1_sign) << 15) | 0x1234; \
li a2, -(rs2_sign); \
fmv.h.x f1, a1; \
fmv.h.x f2, a2; \
insn f0, f1, f2; \
fmv.x.h a0, f0)
TEST_FSGNJS(10, fsgnj.h, 0, 0, 0)
TEST_FSGNJS(11, fsgnj.h, 1, 0, 1)
TEST_FSGNJS(12, fsgnj.h, 0, 1, 0)
TEST_FSGNJS(13, fsgnj.h, 1, 1, 1)
TEST_FSGNJS(20, fsgnjn.h, 1, 0, 0)
TEST_FSGNJS(21, fsgnjn.h, 0, 0, 1)
TEST_FSGNJS(22, fsgnjn.h, 1, 1, 0)
TEST_FSGNJS(23, fsgnjn.h, 0, 1, 1)
TEST_FSGNJS(30, fsgnjx.h, 0, 0, 0)
TEST_FSGNJS(31, fsgnjx.h, 1, 0, 1)
TEST_FSGNJS(32, fsgnjx.h, 1, 1, 0)
TEST_FSGNJS(33, fsgnjx.h, 0, 1, 1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,055 | model/tests/riscv-tests/isa/rv64uzfh/fdiv.S | # See LICENSE for license details.
#*****************************************************************************
# fdiv.S
#-----------------------------------------------------------------------------
#
# Test f{div|sqrt}.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP2_H(2, fdiv.h, 1, 1.1557273520668288, 3.14159265, 2.71828182 );
TEST_FP_OP2_H(3, fdiv.h, 1,-0.9991093838555584, -1234, 1235.1 );
TEST_FP_OP2_H(4, fdiv.h, 0, 3.14159265, 3.14159265, 1.0 );
TEST_FP_OP1_H(5, fsqrt.h, 1, 1.7724538498928541, 3.14159265 );
TEST_FP_OP1_H(6, fsqrt.h, 0, 100, 10000 );
TEST_FP_OP1_H_DWORD_RESULT(7, fsqrt.h, 0x10, 0x00007e00, -1.0 );
TEST_FP_OP1_H(8, fsqrt.h, 1, 13.076696, 171.0);
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,122 | model/tests/riscv-tests/isa/rv64uzfh/recoding.S | # See LICENSE for license details.
#*****************************************************************************
# recoding.S
#-----------------------------------------------------------------------------
#
# Test corner cases of John Hauser's microarchitectural recoding scheme.
# There are twice as many recoded values as IEEE-754 values; some of these
# extras are redundant (e.g. Inf) and others are illegal (subnormals with
# too many bits set).
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
# Make sure infinities with different mantissas compare as equal.
flw f0, minf, a0
flw f1, three, a0
fmul.s f1, f1, f0
TEST_CASE( 2, a0, 1, feq.s a0, f0, f1)
TEST_CASE( 3, a0, 1, fle.s a0, f0, f1)
TEST_CASE( 4, a0, 0, flt.s a0, f0, f1)
# Likewise, but for zeroes.
fcvt.s.w f0, x0
li a0, 1
fcvt.s.w f1, a0
fmul.s f1, f1, f0
TEST_CASE(5, a0, 1, feq.s a0, f0, f1)
TEST_CASE(6, a0, 1, fle.s a0, f0, f1)
TEST_CASE(7, a0, 0, flt.s a0, f0, f1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
minf: .float -Inf
three: .float 3.0
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,111 | model/tests/riscv-tests/isa/rv64uzfh/fclass.S | # See LICENSE for license details.
#*****************************************************************************
# fclass.S
#-----------------------------------------------------------------------------
#
# Test fclass.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
#define TEST_FCLASS_H(testnum, correct, input) \
TEST_CASE(testnum, a0, correct, li a0, input; fmv.h.x fa0, a0; \
fclass.h a0, fa0)
TEST_FCLASS_H( 2, 1 << 0, 0xfc00 )
TEST_FCLASS_H( 3, 1 << 1, 0xbc00 )
TEST_FCLASS_H( 4, 1 << 2, 0x83ff )
TEST_FCLASS_H( 5, 1 << 3, 0x8000 )
TEST_FCLASS_H( 6, 1 << 4, 0x0000 )
TEST_FCLASS_H( 7, 1 << 5, 0x03ff )
TEST_FCLASS_H( 8, 1 << 6, 0x3c00 )
TEST_FCLASS_H( 9, 1 << 7, 0x7c00 )
TEST_FCLASS_H(10, 1 << 8, 0x7c01 )
TEST_FCLASS_H(11, 1 << 9, 0x7e00 )
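# (the inputs above encode, in order: -inf, -1.0, a negative subnormal,
# -0.0, +0.0, a positive subnormal, +1.0, +inf, an sNaN, and a qNaN,
# matching fclass result bits 0 through 9)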
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,229 | model/tests/riscv-tests/isa/rv64uzfh/fcvt.S | # See LICENSE for license details.
#*****************************************************************************
# fcvt.S
#-----------------------------------------------------------------------------
#
# Test fcvt.h.{wu|w|lu|l}, fcvt.h.{s|d}, and fcvt.{s|d}.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_INT_FP_OP_H( 2, fcvt.h.w, 2.0, 2);
TEST_INT_FP_OP_H( 3, fcvt.h.w, -2.0, -2);
TEST_INT_FP_OP_H( 4, fcvt.h.wu, 2.0, 2);
TEST_INT_FP_OP_H( 5, fcvt.h.wu, 0h:7c00, -2);
#if __riscv_xlen >= 64
TEST_INT_FP_OP_H( 6, fcvt.h.l, 2.0, 2);
TEST_INT_FP_OP_H( 7, fcvt.h.l, -2.0, -2);
TEST_INT_FP_OP_H( 8, fcvt.h.lu, 2.0, 2);
TEST_INT_FP_OP_H( 9, fcvt.h.lu, 0h:7c00, -2);
#endif
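# (interpreted as unsigned, -2 is 2^32-2 or 2^64-2, far above the binary16
# maximum of 65504, so tests 5 and 9 overflow to +inf, encoded as 0h:7c00)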
TEST_FCVT_H_S( 10, -1.5, -1.5)
#if __riscv_xlen >= 64
TEST_FCVT_H_D( 11, -1.5, -1.5)
#endif
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 4,416 | model/tests/riscv-tests/isa/rv64uzfh/fcvt_w.S | # See LICENSE for license details.
#*****************************************************************************
# fcvt_w.S
#-----------------------------------------------------------------------------
#
# Test fcvt{wu|w|lu|l}.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_INT_OP_H( 2, fcvt.w.h, 0x01, -1, -1.1, rtz);
TEST_FP_INT_OP_H( 3, fcvt.w.h, 0x00, -1, -1.0, rtz);
TEST_FP_INT_OP_H( 4, fcvt.w.h, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_H( 5, fcvt.w.h, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_H( 6, fcvt.w.h, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_H( 7, fcvt.w.h, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_H( 8, fcvt.w.h, 0x00, -2054, 0h:e803, rtz);
TEST_FP_INT_OP_H( 9, fcvt.w.h, 0x00, 2054, 0h:6803, rtz);
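# (0h:e803 is binary16 for -2054.0: sign 1, exponent 26-15 = 11,
# significand 1 + 3/1024, giving -(2048 + 6); 0h:6803 is +2054.0)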
TEST_FP_INT_OP_H(12, fcvt.wu.h, 0x10, 0, -3.0, rtz);
TEST_FP_INT_OP_H(13, fcvt.wu.h, 0x10, 0, -1.0, rtz);
TEST_FP_INT_OP_H(14, fcvt.wu.h, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_H(15, fcvt.wu.h, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_H(16, fcvt.wu.h, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_H(17, fcvt.wu.h, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_H(18, fcvt.wu.h, 0x10, 0, 0h:e803, rtz);
TEST_FP_INT_OP_H(19, fcvt.wu.h, 0x00, 2054, 0h:6803, rtz);
#if __riscv_xlen >= 64
TEST_FP_INT_OP_H(22, fcvt.l.h, 0x01, -1, -1.1, rtz);
TEST_FP_INT_OP_H(23, fcvt.l.h, 0x00, -1, -1.0, rtz);
TEST_FP_INT_OP_H(24, fcvt.l.h, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_H(25, fcvt.l.h, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_H(26, fcvt.l.h, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_H(27, fcvt.l.h, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_H(32, fcvt.lu.h, 0x10, 0, -3.0, rtz);
TEST_FP_INT_OP_H(33, fcvt.lu.h, 0x10, 0, -1.0, rtz);
TEST_FP_INT_OP_H(34, fcvt.lu.h, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_H(35, fcvt.lu.h, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_H(36, fcvt.lu.h, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_H(37, fcvt.lu.h, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_H(38, fcvt.lu.h, 0x10, 0, 0h:e483, rtz);
#endif
# test negative NaN, negative infinity conversion
TEST_CASE( 42, x1, 0x000000007fffffff, la x1, tdat ; flw f1, 0(x1); fcvt.w.h x1, f1)
TEST_CASE( 43, x1, 0xffffffff80000000, la x1, tdat ; flw f1, 8(x1); fcvt.w.h x1, f1)
#if __riscv_xlen >= 64
TEST_CASE( 44, x1, 0x7fffffffffffffff, la x1, tdat ; flw f1, 0(x1); fcvt.l.h x1, f1)
TEST_CASE( 45, x1, 0x8000000000000000, la x1, tdat ; flw f1, 8(x1); fcvt.l.h x1, f1)
#endif
# test positive NaN, positive infinity conversion
TEST_CASE( 52, x1, 0x000000007fffffff, la x1, tdat ; flw f1, 4(x1); fcvt.w.h x1, f1)
TEST_CASE( 53, x1, 0x000000007fffffff, la x1, tdat ; flw f1, 12(x1); fcvt.w.h x1, f1)
#if __riscv_xlen >= 64
TEST_CASE( 54, x1, 0x7fffffffffffffff, la x1, tdat ; flw f1, 4(x1); fcvt.l.h x1, f1)
TEST_CASE( 55, x1, 0x7fffffffffffffff, la x1, tdat ; flw f1, 12(x1); fcvt.l.h x1, f1)
#endif
# test NaN, infinity conversions to unsigned integer
TEST_CASE( 62, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 0(x1); fcvt.wu.h x1, f1)
TEST_CASE( 63, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 4(x1); fcvt.wu.h x1, f1)
TEST_CASE( 64, x1, 0, la x1, tdat ; flw f1, 8(x1); fcvt.wu.h x1, f1)
TEST_CASE( 65, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 12(x1); fcvt.wu.h x1, f1)
#if __riscv_xlen >= 64
TEST_CASE( 66, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 0(x1); fcvt.lu.h x1, f1)
TEST_CASE( 67, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 4(x1); fcvt.lu.h x1, f1)
TEST_CASE( 68, x1, 0, la x1, tdat ; flw f1, 8(x1); fcvt.lu.h x1, f1)
TEST_CASE( 69, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 12(x1); fcvt.lu.h x1, f1)
#endif
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
# -NaN, NaN, -inf, +inf
#tdat:
#.word 0xffffffff
#.word 0x7fffffff
#.word 0xff800000
#.word 0x7f800000
tdat:
.word 0xffffffff
.word 0xffff7fff
.word 0xfffffc00
.word 0xffff7c00
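# (each .word is a NaN-boxed binary16 value: the upper 16 bits are all
# ones, and the low halfword holds -NaN, NaN, -inf, +inf respectively)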
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,380 | model/tests/riscv-tests/isa/rv64uzfh/fadd.S | # See LICENSE for license details.
#*****************************************************************************
# fadd.S
#-----------------------------------------------------------------------------
#
# Test f{add|sub|mul}.h instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP2_H( 2, fadd.h, 0, 3.5, 2.5, 1.0 );
TEST_FP_OP2_H( 3, fadd.h, 1, -1234, -1235.1, 1.1 );
TEST_FP_OP2_H( 4, fadd.h, 1, 3.14, 3.13, 0.01 );
TEST_FP_OP2_H( 5, fsub.h, 0, 1.5, 2.5, 1.0 );
TEST_FP_OP2_H( 6, fsub.h, 1, -1234, -1235.1, -1.1 );
TEST_FP_OP2_H( 7, fsub.h, 1, 3.14, 3.15, 0.01 );
TEST_FP_OP2_H( 8, fmul.h, 0, 2.5, 2.5, 1.0 );
TEST_FP_OP2_H( 9, fmul.h, 0, 1235.1, -1235.1, -1.0 );
TEST_FP_OP2_H(10, fmul.h, 1, 1.1, 11.0, 0.1 );
# Is the canonical NaN generated for Inf - Inf?
TEST_FP_OP2_H(11, fsub.h, 0x10, qNaNh, Inf, Inf);
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,157 | model/tests/riscv-tests/isa/rv64mi/access.S | # See LICENSE for license details.
#*****************************************************************************
# access.S
#-----------------------------------------------------------------------------
#
# Test access-exception behavior.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64M
RVTEST_CODE_BEGIN
.align 2
# Flipping just the MSB should result in an illegal address for RV64.
la t2, fail
li t0, 1 << (__riscv_xlen - 1)
xor t0, t0, t2
# jalr to an illegal address should commit (hence should write rd).
# After the pc is set to rs1, an access exception should be raised.
li TESTNUM, 2
li t1, CAUSE_FETCH_ACCESS
la s1, 1f
li t2, 0
jalr t2, t0
1:
# A load to an illegal address should not commit.
li TESTNUM, 3
li t1, CAUSE_LOAD_ACCESS
la s1, 1f
mv t2, s1
lb t2, (t0)
j fail
1:
j pass
TEST_PASSFAIL
.align 2
.global mtvec_handler
mtvec_handler:
li a0, 2
beq TESTNUM, a0, 2f
li a0, 3
beq TESTNUM, a0, 2f
j fail
2:
bne t2, s1, fail
csrr t2, mcause
bne t2, t1, fail
csrw mepc, s1
mret
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 3,404 | model/tests/riscv-tests/isa/rv64mi/illegal.S | # See LICENSE for license details.
#*****************************************************************************
# illegal.S
#-----------------------------------------------------------------------------
#
# Test illegal instruction trap.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64M
RVTEST_CODE_BEGIN
.align 2
.option norvc
li TESTNUM, 2
bad2:
.word 0
j fail
# Skip the rest of the test if S-mode is not present.
li t0, MSTATUS_MPP
csrc mstatus, t0
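# ((MSTATUS_MPP & -MSTATUS_MPP) isolates the lowest set bit of the MPP
# field mask, so multiplying by PRV_S deposits the value PRV_S into MPP)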
li t1, (MSTATUS_MPP & -MSTATUS_MPP) * PRV_S
csrs mstatus, t1
csrr t2, mstatus
and t2, t2, t0
bne t1, t2, pass
# Test vectored interrupts if they are supported.
test_vectored_interrupts:
csrwi mip, MIP_SSIP
csrwi mie, MIP_SSIP
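# (the low bits of mtvec select the trap mode; adding 1 to the handler
# address requests MODE=1, i.e. vectored interrupts)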
la t0, mtvec_handler + 1
csrrw s0, mtvec, t0
csrr t0, mtvec
andi t0, t0, 1
beqz t0, msip
csrsi mstatus, MSTATUS_MIE
1:
j 1b
msip:
csrw mtvec, s0
# Delegate supervisor software interrupts so WFI won't stall.
csrwi mideleg, MIP_SSIP
# Enter supervisor mode.
la t0, 1f
csrw mepc, t0
li t0, MSTATUS_MPP
csrc mstatus, t0
li t1, (MSTATUS_MPP & -MSTATUS_MPP) * PRV_S
csrs mstatus, t1
mret
1:
# Make sure WFI doesn't trap when TW=0.
wfi
# Check if paging is supported (Set SUM & MXR and read it back)
and t0, t0, zero
li t0, (SSTATUS_SUM | SSTATUS_MXR)
csrc sstatus, t0
and t1, t1, zero
li t1, (SSTATUS_SUM | SSTATUS_MXR)
csrs sstatus, t1
csrr t2, sstatus
and t2, t2, t0
beqz t2, bare_s_1
csrc sstatus, t0
# Make sure SFENCE.VMA and sptbr don't trap when TVM=0.
sfence.vma
csrr t0, sptbr
bad5:
.word 0
j fail
bad6:
# Make sure SFENCE.VMA and sptbr do trap when TVM=1.
sfence.vma
j fail
bad7:
csrr t0, sptbr
j fail
test_tsr:
# Make sure SRET doesn't trap when TSR=0.
la t0, bad8
csrw sepc, t0
li t0, SSTATUS_SPP
csrs sstatus, t0
li t0, SSTATUS_SPIE
csrc sstatus, t0
sret
bad8:
.word 0
j fail
# Make sure SRET does trap when TSR=1.
la t0, 1f
csrw sepc, t0
bad9:
sret
1:
j fail
j skip_bare_s
bare_s_1:
# Make sure SFENCE.VMA traps when TVM=0 (paging is not supported).
sfence.vma
j fail
bare_s_2:
# Set TVM=1. TVM should stay 0 and SFENCE.VMA should still trap.
sfence.vma
j fail
# And access to satp should not trap
csrr t0, sptbr
bare_s_3:
.word 0
j fail
j test_tsr
skip_bare_s:
TEST_PASSFAIL
.align 8
.global mtvec_handler
mtvec_handler:
j synchronous_exception
j msip
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
j fail
synchronous_exception:
li t1, CAUSE_ILLEGAL_INSTRUCTION
csrr t0, mcause
bne t0, t1, fail
csrr t0, mepc
# Make sure mtval contains either 0 or the instruction word.
csrr t2, mbadaddr
beqz t2, 1f
lhu t1, 0(t0)
xor t2, t2, t1
lhu t1, 2(t0)
slli t1, t1, 16
xor t2, t2, t1
bnez t2, fail
1:
la t1, bad2
beq t0, t1, 2f
la t1, bad5
beq t0, t1, 5f
la t1, bad6
beq t0, t1, 6f
la t1, bad7
beq t0, t1, 7f
la t1, bad8
beq t0, t1, 8f
la t1, bad9
beq t0, t1, 9f
la t1, bare_s_1
beq t0, t1, 5f
la t1, bare_s_2
beq t0, t1, 7f
la t1, bare_s_3
beq t0, t1, 7f
j fail
2:
6:
7:
addi t0, t0, 8
csrw mepc, t0
mret
5:
li t1, MSTATUS_TVM
csrs mstatus, t1
j 2b
8:
li t1, MSTATUS_TSR
csrs mstatus, t1
j 2b
9:
j 2b
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,449 | model/tests/riscv-tests/isa/rv64mi/breakpoint.S | # See LICENSE for license details.
#*****************************************************************************
# breakpoint.S
#-----------------------------------------------------------------------------
#
# Test breakpoints, if they are implemented.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64M
RVTEST_CODE_BEGIN
# Set up breakpoint to trap on M-mode fetches.
li TESTNUM, 2
# Skip tselect if hard-wired.
csrw tselect, x0
csrr a1, tselect
bne x0, a1, pass
la a2, 1f
csrw tdata2, a2
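# (tdata1's top 4 bits hold the trigger type; type 2 is the "mcontrol"
# address/data match trigger, here set to match M-mode instruction fetches)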
li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_EXECUTE
csrw tdata1, a0
# Skip if breakpoint type is unsupported.
csrr a1, tdata1
bne a0, a1, 2f
.align 2
1:
# Trap handler should skip this instruction.
beqz x0, fail
# Make sure reads don't trap.
li TESTNUM, 3
lw a0, (a2)
2:
# Set up breakpoint to trap on M-mode reads.
li TESTNUM, 4
li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_LOAD
csrw tdata1, a0
# Skip if breakpoint type is unsupported.
csrr a1, tdata1
bne a0, a1, 2f
la a2, data1
csrw tdata2, a2
# Trap handler should skip this instruction.
lw a2, (a2)
beqz a2, fail
# Make sure writes don't trap.
li TESTNUM, 5
sw x0, (a2)
2:
# Set up breakpoint to trap on M-mode stores.
li TESTNUM, 6
li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_STORE
csrw tdata1, a0
# Skip if breakpoint type is unsupported.
csrr a1, tdata1
bne a0, a1, 2f
# Trap handler should skip this instruction.
sw a2, (a2)
# Make sure store didn't succeed.
li TESTNUM, 7
lw a2, (a2)
bnez a2, fail
# Try to set up a second breakpoint.
li a0, 1
csrw tselect, a0
csrr a1, tselect
bne a0, a1, pass
li a0, (2 << (__riscv_xlen - 4)) | MCONTROL_M | MCONTROL_LOAD
csrw tdata1, a0
la a3, data2
csrw tdata2, a3
# Make sure the second breakpoint triggers.
li TESTNUM, 8
lw a3, (a3)
beqz a3, fail
# Make sure the first breakpoint still triggers.
li TESTNUM, 10
la a2, data1
sw a2, (a2)
li TESTNUM, 11
lw a2, (a2)
bnez a2, fail
2:
TEST_PASSFAIL
.align 2
.global mtvec_handler
mtvec_handler:
# Only even-numbered tests should trap.
andi t0, TESTNUM, 1
bnez t0, fail
li t0, CAUSE_BREAKPOINT
csrr t1, mcause
bne t0, t1, fail
csrr t0, mepc
addi t0, t0, 4
csrw mepc, t0
mret
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
data1: .word 0
data2: .word 0
RVTEST_DATA_END
|
lizhirui/DreamCore | 3,189 | model/tests/riscv-tests/isa/rv64mi/ma_addr.S | # See LICENSE for license details.
#*****************************************************************************
# ma_addr.S
#-----------------------------------------------------------------------------
#
# Test misaligned ld/st trap.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64M
RVTEST_CODE_BEGIN
.align 2
.option norvc
la s0, data
# indicate it's a load test
li s1, CAUSE_MISALIGNED_LOAD
#define SEXT(x, n) ((-((x) >> ((n)-1)) << (n)) | ((x) & ((1 << (n))-1)))
/* Check that a misaligned load either writes the correct value, or
takes an exception and performs no writeback. */
#define MISALIGNED_LOAD_TEST(testnum, insn, base, offset, res) \
li TESTNUM, testnum; \
la t2, 1f; \
addi t1, base, offset; \
insn t1, offset(base); \
li t2, res; \
bne t1, t2, fail; \
1:
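# (e.g. with the little-endian data defined below, an lh at offset 1 reads
# bytes {0xcc, 0xbb} of 0xaabbccdd, i.e. 0xbbcc, sign-extended to XLEN)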
MISALIGNED_LOAD_TEST(2, lh, s0, 1, SEXT(0xbbcc, 16))
MISALIGNED_LOAD_TEST(3, lhu, s0, 1, 0xbbcc)
MISALIGNED_LOAD_TEST(4, lw, s0, 1, SEXT(0x99aabbcc, 32))
MISALIGNED_LOAD_TEST(5, lw, s0, 2, SEXT(0x8899aabb, 32))
MISALIGNED_LOAD_TEST(6, lw, s0, 3, SEXT(0x778899aa, 32))
#if __riscv_xlen == 64
MISALIGNED_LOAD_TEST(7, lwu, s0, 1, 0x99aabbcc)
MISALIGNED_LOAD_TEST(8, lwu, s0, 2, 0x8899aabb)
MISALIGNED_LOAD_TEST(9, lwu, s0, 3, 0x778899aa)
MISALIGNED_LOAD_TEST(10, ld, s0, 1, 0x5566778899aabbcc)
MISALIGNED_LOAD_TEST(11, ld, s0, 2, 0x445566778899aabb)
MISALIGNED_LOAD_TEST(12, ld, s0, 3, 0x33445566778899aa)
MISALIGNED_LOAD_TEST(13, ld, s0, 4, 0x2233445566778899)
MISALIGNED_LOAD_TEST(14, ld, s0, 5, 0x1122334455667788)
MISALIGNED_LOAD_TEST(15, ld, s0, 6, 0xee11223344556677)
MISALIGNED_LOAD_TEST(16, ld, s0, 7, 0xffee112233445566)
#endif
# indicate it's a store test
li s1, CAUSE_MISALIGNED_STORE
/* Check that a misaligned store has some effect and takes no exception,
or takes no effect and generates an exception. This is not very
thorough. */
#define MISALIGNED_STORE_TEST(testnum, insn, base, offset, size) \
li TESTNUM, testnum; \
la t2, 1f; \
addi t1, base, offset; \
insn x0, offset(base); \
lb t1, (offset - 1)(base); \
beqz t1, fail; \
lb t1, (offset + size)(base); \
beqz t1, fail; \
lb t1, (offset + 0)(base); \
bnez t1, fail; \
lb t1, (offset + size - 1)(base); \
bnez t1, fail; \
1:
MISALIGNED_STORE_TEST(22, sh, s0, 1, 2)
MISALIGNED_STORE_TEST(23, sw, s0, 5, 4)
MISALIGNED_STORE_TEST(24, sw, s0, 10, 4)
MISALIGNED_STORE_TEST(25, sw, s0, 15, 4)
#if __riscv_xlen == 64
MISALIGNED_STORE_TEST(26, sd, s0, 25, 8)
MISALIGNED_STORE_TEST(27, sd, s0, 34, 8)
MISALIGNED_STORE_TEST(28, sd, s0, 43, 8)
MISALIGNED_STORE_TEST(29, sd, s0, 52, 8)
MISALIGNED_STORE_TEST(30, sd, s0, 61, 8)
MISALIGNED_STORE_TEST(31, sd, s0, 70, 8)
MISALIGNED_STORE_TEST(32, sd, s0, 79, 8)
#endif
TEST_PASSFAIL
.align 3
.global mtvec_handler
mtvec_handler:
csrr t0, mcause
bne t0, s1, fail
csrr t0, mbadaddr
bne t0, t1, fail
lb t0, (t0)
beqz t0, fail
csrw mepc, t2
mret
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
data:
.align 3
.word 0xaabbccdd
.word 0x66778899
.word 0x22334455
.word 0xeeffee11
.fill 0xff, 1, 80
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,872 | model/tests/riscv-tests/isa/rv64uf/fmin.S | # See LICENSE for license details.
#*****************************************************************************
# fmin.S
#-----------------------------------------------------------------------------
#
# Test f{min|max}.s instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP2_S( 2, fmin.s, 0, 1.0, 2.5, 1.0 );
TEST_FP_OP2_S( 3, fmin.s, 0, -1235.1, -1235.1, 1.1 );
TEST_FP_OP2_S( 4, fmin.s, 0, -1235.1, 1.1, -1235.1 );
TEST_FP_OP2_S( 5, fmin.s, 0, -1235.1, NaN, -1235.1 );
TEST_FP_OP2_S( 6, fmin.s, 0, 0.00000001, 3.14159265, 0.00000001 );
TEST_FP_OP2_S( 7, fmin.s, 0, -2.0, -1.0, -2.0 );
TEST_FP_OP2_S(12, fmax.s, 0, 2.5, 2.5, 1.0 );
TEST_FP_OP2_S(13, fmax.s, 0, 1.1, -1235.1, 1.1 );
TEST_FP_OP2_S(14, fmax.s, 0, 1.1, 1.1, -1235.1 );
TEST_FP_OP2_S(15, fmax.s, 0, -1235.1, NaN, -1235.1 );
TEST_FP_OP2_S(16, fmax.s, 0, 3.14159265, 3.14159265, 0.00000001 );
TEST_FP_OP2_S(17, fmax.s, 0, -1.0, -1.0, -2.0 );
# FMAX(sNaN, x) = x
TEST_FP_OP2_S(20, fmax.s, 0x10, 1.0, sNaNf, 1.0);
# FMAX(qNaN, qNaN) = canonical NaN
TEST_FP_OP2_S(21, fmax.s, 0x00, qNaNf, NaN, NaN);
# -0.0 < +0.0
TEST_FP_OP2_S(30, fmin.s, 0, -0.0, -0.0, 0.0 );
TEST_FP_OP2_S(31, fmin.s, 0, -0.0, 0.0, -0.0 );
TEST_FP_OP2_S(32, fmax.s, 0, 0.0, -0.0, 0.0 );
TEST_FP_OP2_S(33, fmax.s, 0, 0.0, 0.0, -0.0 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,629 | model/tests/riscv-tests/isa/rv64uf/fmadd.S | # See LICENSE for license details.
#*****************************************************************************
# fmadd.S
#-----------------------------------------------------------------------------
#
# Test f[n]m{add|sub}.s and f[n]m{add|sub}.d instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP3_S( 2, fmadd.s, 0, 3.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_S( 3, fmadd.s, 1, 1236.2, -1.0, -1235.1, 1.1 );
TEST_FP_OP3_S( 4, fmadd.s, 0, -12.0, 2.0, -5.0, -2.0 );
TEST_FP_OP3_S( 5, fnmadd.s, 0, -3.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_S( 6, fnmadd.s, 1, -1236.2, -1.0, -1235.1, 1.1 );
TEST_FP_OP3_S( 7, fnmadd.s, 0, 12.0, 2.0, -5.0, -2.0 );
TEST_FP_OP3_S( 8, fmsub.s, 0, 1.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_S( 9, fmsub.s, 1, 1234, -1.0, -1235.1, 1.1 );
TEST_FP_OP3_S(10, fmsub.s, 0, -8.0, 2.0, -5.0, -2.0 );
TEST_FP_OP3_S(11, fnmsub.s, 0, -1.5, 1.0, 2.5, 1.0 );
TEST_FP_OP3_S(12, fnmsub.s, 1, -1234, -1.0, -1235.1, 1.1 );
TEST_FP_OP3_S(13, fnmsub.s, 0, 8.0, 2.0, -5.0, -2.0 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,504 | model/tests/riscv-tests/isa/rv64uf/move.S | # See LICENSE for license details.
#*****************************************************************************
# move.S
#-----------------------------------------------------------------------------
#
# This test verifies that the fmv.s.x, fmv.x.s, and fsgnj[x|n].s instructions
# and the fcsr work properly.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
TEST_CASE(2, a1, 1, csrwi fcsr, 1; li a0, 0x1234; fssr a1, a0)
TEST_CASE(3, a0, 0x34, frsr a0)
TEST_CASE(4, a0, 0x14, frflags a0)
TEST_CASE(5, a0, 0x01, csrrwi a0, frm, 2)
TEST_CASE(6, a0, 0x54, frsr a0)
TEST_CASE(7, a0, 0x14, csrrci a0, fflags, 4)
TEST_CASE(8, a0, 0x50, frsr a0)
#define TEST_FSGNJS(n, insn, new_sign, rs1_sign, rs2_sign) \
TEST_CASE(n, a0, 0x12345678 | (-(new_sign) << 31), \
li a1, ((rs1_sign) << 31) | 0x12345678; \
li a2, -(rs2_sign); \
fmv.s.x f1, a1; \
fmv.s.x f2, a2; \
insn f0, f1, f2; \
fmv.x.s a0, f0)
TEST_FSGNJS(10, fsgnj.s, 0, 0, 0)
TEST_FSGNJS(11, fsgnj.s, 1, 0, 1)
TEST_FSGNJS(12, fsgnj.s, 0, 1, 0)
TEST_FSGNJS(13, fsgnj.s, 1, 1, 1)
TEST_FSGNJS(20, fsgnjn.s, 1, 0, 0)
TEST_FSGNJS(21, fsgnjn.s, 0, 0, 1)
TEST_FSGNJS(22, fsgnjn.s, 1, 1, 0)
TEST_FSGNJS(23, fsgnjn.s, 0, 1, 1)
TEST_FSGNJS(30, fsgnjx.s, 0, 0, 0)
TEST_FSGNJS(31, fsgnjx.s, 1, 0, 1)
TEST_FSGNJS(32, fsgnjx.s, 1, 1, 0)
TEST_FSGNJS(33, fsgnjx.s, 0, 1, 1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,054 | model/tests/riscv-tests/isa/rv64uf/fdiv.S | # See LICENSE for license details.
#*****************************************************************************
# fdiv.S
#-----------------------------------------------------------------------------
#
# Test f{div|sqrt}.s instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP2_S(2, fdiv.s, 1, 1.1557273520668288, 3.14159265, 2.71828182 );
TEST_FP_OP2_S(3, fdiv.s, 1,-0.9991093838555584, -1234, 1235.1 );
TEST_FP_OP2_S(4, fdiv.s, 0, 3.14159265, 3.14159265, 1.0 );
TEST_FP_OP1_S(5, fsqrt.s, 1, 1.7724538498928541, 3.14159265 );
TEST_FP_OP1_S(6, fsqrt.s, 0, 100, 10000 );
TEST_FP_OP1_S_DWORD_RESULT(7, fsqrt.s, 0x10, 0x7FC00000, -1.0 );
TEST_FP_OP1_S(8, fsqrt.s, 1, 13.076696, 171.0);
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,122 | model/tests/riscv-tests/isa/rv64uf/recoding.S | # See LICENSE for license details.
#*****************************************************************************
# recoding.S
#-----------------------------------------------------------------------------
#
# Test corner cases of John Hauser's microarchitectural recoding scheme.
# There are twice as many recoded values as IEEE-754 values; some of these
# extras are redundant (e.g. Inf) and others are illegal (subnormals with
# too many bits set).
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
# Make sure infinities with different mantissas compare as equal.
flw f0, minf, a0
flw f1, three, a0
fmul.s f1, f1, f0
TEST_CASE( 2, a0, 1, feq.s a0, f0, f1)
TEST_CASE( 3, a0, 1, fle.s a0, f0, f1)
TEST_CASE( 4, a0, 0, flt.s a0, f0, f1)
# Likewise, but for zeroes.
fcvt.s.w f0, x0
li a0, 1
fcvt.s.w f1, a0
fmul.s f1, f1, f0
TEST_CASE(5, a0, 1, feq.s a0, f0, f1)
TEST_CASE(6, a0, 1, fle.s a0, f0, f1)
TEST_CASE(7, a0, 0, flt.s a0, f0, f1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
minf: .float -Inf
three: .float 3.0
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,414 | model/tests/riscv-tests/isa/rv64uf/fcmp.S | # See LICENSE for license details.
#*****************************************************************************
# fcmp.S
#-----------------------------------------------------------------------------
#
# Test f{eq|lt|le}.s instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_CMP_OP_S( 2, feq.s, 0x00, 1, -1.36, -1.36)
TEST_FP_CMP_OP_S( 3, fle.s, 0x00, 1, -1.36, -1.36)
TEST_FP_CMP_OP_S( 4, flt.s, 0x00, 0, -1.36, -1.36)
TEST_FP_CMP_OP_S( 5, feq.s, 0x00, 0, -1.37, -1.36)
TEST_FP_CMP_OP_S( 6, fle.s, 0x00, 1, -1.37, -1.36)
TEST_FP_CMP_OP_S( 7, flt.s, 0x00, 1, -1.37, -1.36)
# Only sNaN should signal invalid for feq.
TEST_FP_CMP_OP_S( 8, feq.s, 0x00, 0, NaN, 0)
TEST_FP_CMP_OP_S( 9, feq.s, 0x00, 0, NaN, NaN)
TEST_FP_CMP_OP_S(10, feq.s, 0x10, 0, sNaNf, 0)
# qNaN should signal invalid for fle/flt.
TEST_FP_CMP_OP_S(11, flt.s, 0x10, 0, NaN, 0)
TEST_FP_CMP_OP_S(12, flt.s, 0x10, 0, NaN, NaN)
TEST_FP_CMP_OP_S(13, flt.s, 0x10, 0, sNaNf, 0)
TEST_FP_CMP_OP_S(14, fle.s, 0x10, 0, NaN, 0)
TEST_FP_CMP_OP_S(15, fle.s, 0x10, 0, NaN, NaN)
TEST_FP_CMP_OP_S(16, fle.s, 0x10, 0, sNaNf, 0)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,127 | model/tests/riscv-tests/isa/rv64uf/fcvt.S | # See LICENSE for license details.
#*****************************************************************************
# fcvt.S
#-----------------------------------------------------------------------------
#
# Test fcvt.s.{wu|w|lu|l} instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_INT_FP_OP_S( 2, fcvt.s.w, 2.0, 2);
TEST_INT_FP_OP_S( 3, fcvt.s.w, -2.0, -2);
TEST_INT_FP_OP_S( 4, fcvt.s.wu, 2.0, 2);
TEST_INT_FP_OP_S( 5, fcvt.s.wu, 4.2949673e9, -2);
#if __riscv_xlen >= 64
TEST_INT_FP_OP_S( 6, fcvt.s.l, 2.0, 2);
TEST_INT_FP_OP_S( 7, fcvt.s.l, -2.0, -2);
TEST_INT_FP_OP_S( 8, fcvt.s.lu, 2.0, 2);
TEST_INT_FP_OP_S( 9, fcvt.s.lu, 1.8446744e19, -2);
#endif
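# (interpreted as unsigned, -2 is 2^32-2 ~ 4.2949673e9 for fcvt.s.wu and
# 2^64-2 ~ 1.8446744e19 for fcvt.s.lu, rounded to the nearest float)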
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 4,360 | model/tests/riscv-tests/isa/rv64uf/fcvt_w.S | # See LICENSE for license details.
#*****************************************************************************
# fcvt_w.S
#-----------------------------------------------------------------------------
#
# Test fcvt{wu|w|lu|l}.s instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_INT_OP_S( 2, fcvt.w.s, 0x01, -1, -1.1, rtz);
TEST_FP_INT_OP_S( 3, fcvt.w.s, 0x00, -1, -1.0, rtz);
TEST_FP_INT_OP_S( 4, fcvt.w.s, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_S( 5, fcvt.w.s, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_S( 6, fcvt.w.s, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_S( 7, fcvt.w.s, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_S( 8, fcvt.w.s, 0x10, -1<<31, -3e9, rtz);
TEST_FP_INT_OP_S( 9, fcvt.w.s, 0x10, (1<<31)-1, 3e9, rtz);
TEST_FP_INT_OP_S(12, fcvt.wu.s, 0x10, 0, -3.0, rtz);
TEST_FP_INT_OP_S(13, fcvt.wu.s, 0x10, 0, -1.0, rtz);
TEST_FP_INT_OP_S(14, fcvt.wu.s, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_S(15, fcvt.wu.s, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_S(16, fcvt.wu.s, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_S(17, fcvt.wu.s, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_S(18, fcvt.wu.s, 0x10, 0, -3e9, rtz);
TEST_FP_INT_OP_S(19, fcvt.wu.s, 0x00, 3000000000, 3e9, rtz);
#if __riscv_xlen >= 64
TEST_FP_INT_OP_S(22, fcvt.l.s, 0x01, -1, -1.1, rtz);
TEST_FP_INT_OP_S(23, fcvt.l.s, 0x00, -1, -1.0, rtz);
TEST_FP_INT_OP_S(24, fcvt.l.s, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_S(25, fcvt.l.s, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_S(26, fcvt.l.s, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_S(27, fcvt.l.s, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_S(32, fcvt.lu.s, 0x10, 0, -3.0, rtz);
TEST_FP_INT_OP_S(33, fcvt.lu.s, 0x10, 0, -1.0, rtz);
TEST_FP_INT_OP_S(34, fcvt.lu.s, 0x01, 0, -0.9, rtz);
TEST_FP_INT_OP_S(35, fcvt.lu.s, 0x01, 0, 0.9, rtz);
TEST_FP_INT_OP_S(36, fcvt.lu.s, 0x00, 1, 1.0, rtz);
TEST_FP_INT_OP_S(37, fcvt.lu.s, 0x01, 1, 1.1, rtz);
TEST_FP_INT_OP_S(38, fcvt.lu.s, 0x10, 0, -3e9, rtz);
#endif
# test negative NaN, negative infinity conversion
TEST_CASE( 42, x1, 0x000000007fffffff, la x1, tdat ; flw f1, 0(x1); fcvt.w.s x1, f1)
TEST_CASE( 44, x1, 0xffffffff80000000, la x1, tdat ; flw f1, 8(x1); fcvt.w.s x1, f1)
#if __riscv_xlen >= 64
TEST_CASE( 43, x1, 0x7fffffffffffffff, la x1, tdat ; flw f1, 0(x1); fcvt.l.s x1, f1)
TEST_CASE( 45, x1, 0x8000000000000000, la x1, tdat ; flw f1, 8(x1); fcvt.l.s x1, f1)
#endif
# test positive NaN, positive infinity conversion
TEST_CASE( 52, x1, 0x000000007fffffff, la x1, tdat ; flw f1, 4(x1); fcvt.w.s x1, f1)
TEST_CASE( 54, x1, 0x000000007fffffff, la x1, tdat ; flw f1, 12(x1); fcvt.w.s x1, f1)
#if __riscv_xlen >= 64
TEST_CASE( 53, x1, 0x7fffffffffffffff, la x1, tdat ; flw f1, 4(x1); fcvt.l.s x1, f1)
TEST_CASE( 55, x1, 0x7fffffffffffffff, la x1, tdat ; flw f1, 12(x1); fcvt.l.s x1, f1)
#endif
# test NaN, infinity conversions to unsigned integer
TEST_CASE( 62, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 0(x1); fcvt.wu.s x1, f1)
TEST_CASE( 63, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 4(x1); fcvt.wu.s x1, f1)
TEST_CASE( 64, x1, 0, la x1, tdat ; flw f1, 8(x1); fcvt.wu.s x1, f1)
TEST_CASE( 65, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 12(x1); fcvt.wu.s x1, f1)
#if __riscv_xlen >= 64
TEST_CASE( 66, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 0(x1); fcvt.lu.s x1, f1)
TEST_CASE( 67, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 4(x1); fcvt.lu.s x1, f1)
TEST_CASE( 68, x1, 0, la x1, tdat ; flw f1, 8(x1); fcvt.lu.s x1, f1)
TEST_CASE( 69, x1, 0xffffffffffffffff, la x1, tdat ; flw f1, 12(x1); fcvt.lu.s x1, f1)
#endif
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
# -NaN, NaN, -inf, +inf
tdat:
.word 0xffffffff
.word 0x7fffffff
.word 0xff800000
.word 0x7f800000
tdat_d:
.dword 0xffffffffffffffff
.dword 0x7fffffffffffffff
.dword 0xfff0000000000000
.dword 0x7ff0000000000000
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,380 | model/tests/riscv-tests/isa/rv64uf/fadd.S | # See LICENSE for license details.
#*****************************************************************************
# fadd.S
#-----------------------------------------------------------------------------
#
# Test f{add|sub|mul}.s instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64UF
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_FP_OP2_S( 2, fadd.s, 0, 3.5, 2.5, 1.0 );
TEST_FP_OP2_S( 3, fadd.s, 1, -1234, -1235.1, 1.1 );
TEST_FP_OP2_S( 4, fadd.s, 1, 3.14159265, 3.14159265, 0.00000001 );
TEST_FP_OP2_S( 5, fsub.s, 0, 1.5, 2.5, 1.0 );
TEST_FP_OP2_S( 6, fsub.s, 1, -1234, -1235.1, -1.1 );
TEST_FP_OP2_S( 7, fsub.s, 1, 3.14159265, 3.14159265, 0.00000001 );
TEST_FP_OP2_S( 8, fmul.s, 0, 2.5, 2.5, 1.0 );
TEST_FP_OP2_S( 9, fmul.s, 1, 1358.61, -1235.1, -1.1 );
TEST_FP_OP2_S(10, fmul.s, 1, 3.14159265e-8, 3.14159265, 0.00000001 );
# Is the canonical NaN generated for Inf - Inf?
TEST_FP_OP2_S(11, fsub.s, 0x10, qNaNf, Inf, Inf);
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,017 | model/tests/riscv-tests/isa/rv64ua/lrsc.S | # See LICENSE for license details.
#*****************************************************************************
# lrsc.S
#-----------------------------------------------------------------------------
#
# Test LR/SC instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
# get a unique core id
la a0, coreid
li a1, 1
amoadd.w a2, a1, (a0)
# for now, only run this on core 0
1:li a3, 1
bgeu a2, a3, 1b
1: lw a1, (a0)
bltu a1, a3, 1b
# make sure that sc without a reservation fails.
TEST_CASE( 2, a4, 1, \
la a0, foo; \
li a5, 0xdeadbeef; \
sc.w a4, a5, (a0); \
)
# make sure the failing sc did not commit into memory
TEST_CASE( 3, a4, 0, \
lw a4, foo; \
)
#
# Disable test case 4 for now. It assumes a <1K reservation granule, when
# in reality any size granule is valid. After discussion in issue #315,
# decided to simply disable the test for now.
# (See https://github.com/riscv/riscv-tests/issues/315)
#
## make sure that sc with the wrong reservation fails.
## TODO is this actually mandatory behavior?
#TEST_CASE( 4, a4, 1, \
# la a0, foo; \
# la a1, fooTest3; \
# lr.w a1, (a1); \
# sc.w a4, a1, (a0); \
#)
#define LOG_ITERATIONS 10
# have each core add its coreid+1 to foo 1024 times
la a0, foo
li a1, 1<<LOG_ITERATIONS
addi a2, a2, 1
1: lr.w a4, (a0)
add a4, a4, a2
sc.w a4, a4, (a0)
bnez a4, 1b
add a1, a1, -1
bnez a1, 1b
# wait for all cores to finish
la a0, barrier
li a1, 1
amoadd.w x0, a1, (a0)
1: lw a1, (a0)
blt a1, a3, 1b
fence
# each core adds (coreid+1) to foo 2^LOG_ITERATIONS = 1024 times, so the
# expected result is 1024 * (1 + 2 + ... + ncores) = 512*ncores*(ncores+1)
TEST_CASE( 5, a0, 0, \
lw a0, foo; \
slli a1, a3, LOG_ITERATIONS-1; \
1:sub a0, a0, a1; \
addi a3, a3, -1; \
bgez a3, 1b
)
# make sure that sc-after-successful-sc fails.
TEST_CASE( 6, a1, 1, \
la a0, foo; \
1:lr.w a1, (a0); \
sc.w a1, x0, (a0); \
bnez a1, 1b; \
sc.w a1, x0, (a0)
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
coreid: .word 0
barrier: .word 0
foo: .word 0
.skip 1024
fooTest3: .word 0
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,627 | model/tests/riscv-tests/isa/rv64si/dirty.S | # See LICENSE for license details.
#*****************************************************************************
# dirty.S
#-----------------------------------------------------------------------------
#
# Test VM referenced and dirty bits.
#
#include "riscv_test.h"
#include "test_macros.h"
#if (DRAM_BASE >> 30 << 30) != DRAM_BASE
# error This test requires DRAM_BASE be SV39 superpage-aligned
#endif
RVTEST_RV64M
RVTEST_CODE_BEGIN
# Turn on VM
li a0, (SATP_MODE & ~(SATP_MODE<<1)) * SATP_MODE_SV39
la a1, page_table_1
srl a1, a1, RISCV_PGSHIFT
or a1, a1, a0
csrw sptbr, a1
sfence.vma
# Set up MPRV with MPP=S, so loads and stores use S-mode
li a1, ((MSTATUS_MPP & ~(MSTATUS_MPP<<1)) * PRV_S) | MSTATUS_MPRV
csrs mstatus, a1
# Try a faulting store to make sure dirty bit is not set
li TESTNUM, 2
li t2, 1
sw t2, dummy - DRAM_BASE, a0
# Set SUM=1 so user memory access is permitted
li TESTNUM, 3
li a1, ((MSTATUS_MPP & ~(MSTATUS_MPP<<1)) * PRV_S) | MSTATUS_SUM
csrs mstatus, a1
# Make sure SUM=1 works
lw t0, dummy - DRAM_BASE
bnez t0, die
# Try a non-faulting store to make sure dirty bit is set
sw t2, dummy - DRAM_BASE, a0
# Make sure it succeeded
lw t0, dummy - DRAM_BASE
bne t0, t2, die
# Leave MPRV
li t0, MSTATUS_MPRV
csrc mstatus, t0
# Make sure D bit is set
lw t0, page_table_1
li a0, PTE_A | PTE_D
and t0, t0, a0
bne t0, a0, die
# Enter MPRV again
li t0, MSTATUS_MPRV
csrs mstatus, t0
# Make sure that superpage entries trap when PPN LSBs are set.
li TESTNUM, 4
lw a0, page_table_1 - DRAM_BASE
or a0, a0, 1 << PTE_PPN_SHIFT
sw a0, page_table_1 - DRAM_BASE, t0
sfence.vma
sw a0, page_table_1 - DRAM_BASE, t0
j die
RVTEST_PASS
TEST_PASSFAIL
.align 2
.global mtvec_handler
mtvec_handler:
csrr t0, mcause
add t0, t0, -CAUSE_STORE_PAGE_FAULT
bnez t0, die
li t1, 2
bne TESTNUM, t1, 1f
# Make sure D bit is clear
lw t0, page_table_1
and t1, t0, PTE_D
bnez t1, die
skip:
csrr t0, mepc
add t0, t0, 4
csrw mepc, t0
mret
1:
li t1, 3
bne TESTNUM, t1, 1f
# The implementation doesn't appear to set D bits in HW.
# Make sure the D bit really is clear.
lw t0, page_table_1
and t1, t0, PTE_D
bnez t1, die
# Set the D bit.
or t0, t0, PTE_D
sw t0, page_table_1, t1
sfence.vma
mret
1:
li t1, 4
bne TESTNUM, t1, 1f
j pass
1:
die:
RVTEST_FAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
.align 12
page_table_1: .dword (DRAM_BASE/RISCV_PGSIZE << PTE_PPN_SHIFT) | PTE_V | PTE_U | PTE_R | PTE_W | PTE_X | PTE_A
dummy: .dword 0
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,003 | model/tests/riscv-tests/isa/rv64si/scall.S | # See LICENSE for license details.
#*****************************************************************************
# scall.S
#-----------------------------------------------------------------------------
#
# Test syscall trap.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64S
RVTEST_CODE_BEGIN
#ifdef __MACHINE_MODE
#define sscratch mscratch
#define sstatus mstatus
#define scause mcause
#define stvec mtvec
#define sepc mepc
#define sret mret
#define stvec_handler mtvec_handler
#undef SSTATUS_SPP
#define SSTATUS_SPP MSTATUS_MPP
#endif
li TESTNUM, 2
# This is the expected trap code.
li t1, CAUSE_USER_ECALL
#ifdef __MACHINE_MODE
# If running in M mode, use mstatus.MPP to check existence of U mode.
# Otherwise, if in S mode, then U mode must exist and we don't need to check.
#li t0, MSTATUS_MPP
#csrc mstatus, t0
#csrr t2, mstatus
#and t0, t0, t2
#beqz t0, 1f
la t0, stvec_handler
csrw mtvec, t0
# If U mode doesn't exist, mcause should indicate ECALL from M mode.
li t1, CAUSE_MACHINE_ECALL
#endif
1:
#li t0, SSTATUS_SPP
#csrc sstatus, t0
la t0, 1f
csrw sepc, t0
sret
1:
li TESTNUM, 1
do_scall:
scall
j fail
TEST_PASSFAIL
# Depending on the test environment, the M-mode version of this test might
# not actually invoke the following handler. Instead, the usual ECALL
# handler in the test environment might detect the CAUSE_USER_ECALL or
# CAUSE_MACHINE_ECALL exception and mark the test as having passed.
# Either way, we'll get the coverage we desire: such a handler must check
# both mcause and TESTNUM, just like the following handler.
.align 2
.global stvec_handler
stvec_handler:
csrr t0, scause
# Check if CLIC mode
csrr t2, stvec
andi t2, t2, 2
# Skip masking if non-CLIC mode
beqz t2, skip_mask
andi t0, t0, 255
skip_mask:
bne t0, t1, fail
la t2, do_scall
csrr t0, sepc
bne t0, t2, fail
j pass
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,494 | model/tests/riscv-tests/isa/rv64si/icache-alias.S | # See LICENSE for license details.
#*****************************************************************************
# icache-alias.S
#-----------------------------------------------------------------------------
#
# Test that instruction memory appears to be physically addressed, i.e.,
# that disagreements in the low-order VPN and PPN bits don't cause the
# wrong instruction to be fetched. It also tests that changing a page
# mapping takes effect without executing FENCE.I.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64M
RVTEST_CODE_BEGIN
li TESTNUM, 2
# Set up intermediate page tables
la t0, page_table_3
srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
ori t0, t0, PTE_V
sd t0, page_table_2, t1
la t0, page_table_2
srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
ori t0, t0, PTE_V
sd t0, page_table_1, t1
# Set up leaf mappings where va[12] != pa[12]
la t0, code_page_1
srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
ori t0, t0, PTE_V | PTE_X | PTE_A
sd t0, page_table_3 + 8, t1
la t0, code_page_2
srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
ori t0, t0, PTE_V | PTE_X | PTE_A
sd t0, page_table_3 + 0, t1
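# (code_page_1 is 8KiB-aligned, so its pa[12]=0 but it is mapped at
# va[12]=1; code_page_2 has pa[12]=1 and is mapped at va[12]=0, so the
# low VPN and PPN bits disagree for both mappings)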
# Turn on VM
li a0, (SATP_MODE & ~(SATP_MODE<<1)) * SATP_MODE_SV39
la a1, page_table_1
srl a1, a1, RISCV_PGSHIFT
or a1, a1, a0
csrw sptbr, a1
sfence.vma
# Enter supervisor mode and make sure correct page is accessed
la a2, 1f
csrwi mepc, 0
li a1, ((MSTATUS_MPP & ~(MSTATUS_MPP<<1)) * PRV_S)
csrs mstatus, a1
mret
1:
li TESTNUM, 2
addi a0, a0, -321
bnez a0, fail
li TESTNUM, 3
la a2, 1f
li t0, RISCV_PGSIZE
csrw mepc, t0
mret
1:
addi a0, a0, -123
bnez a0, fail
li TESTNUM, 4
la a2, 1f
csrwi mepc, 0
mret
.align 2
1:
addi a0, a0, -321
bnez a0, fail
li TESTNUM, 5
# Change mapping and try again
la t0, code_page_1
srl t0, t0, RISCV_PGSHIFT - PTE_PPN_SHIFT
ori t0, t0, PTE_V | PTE_X | PTE_A
sd t0, page_table_3 + 0, t1
sfence.vma
la a2, 1f
csrwi mepc, 0
mret
.align 2
1:
addi a0, a0, -123
bnez a0, fail
RVTEST_PASS
TEST_PASSFAIL
.align 2
.global mtvec_handler
mtvec_handler:
csrr t0, mcause
add t0, t0, -CAUSE_STORE_PAGE_FAULT
bnez t0, fail
jr a2
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
.align 12
page_table_1: .dword 0
.align 12
page_table_2: .dword 0
.align 12
page_table_3: .dword 0
.align 13
code_page_1:
li a0, 123
sw x0, (x0)
.align 12
code_page_2:
li a0, 321
sw x0, (x0)
RVTEST_DATA_END
|
lizhirui/DreamCore | 3,380 | model/tests/riscv-tests/isa/rv64si/ma_fetch.S | # See LICENSE for license details.
#*****************************************************************************
# ma_fetch.S
#-----------------------------------------------------------------------------
#
# Test misaligned fetch trap.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64S
RVTEST_CODE_BEGIN
#ifdef __MACHINE_MODE
#define sscratch mscratch
#define sstatus mstatus
#define scause mcause
#define sbadaddr mbadaddr
#define sepc mepc
#define sret mret
#define stvec_handler mtvec_handler
#endif
.align 2
.option norvc
# Without RVC, the jalr should trap, and the handler will skip ahead.
# With RVC, the jalr should not trap, and "j fail" should get skipped.
li TESTNUM, 2
li t1, 0
la t0, 1f
jalr t1, t0, 2
1:
.option rvc
c.j 1f
c.j 2f
.option norvc
1:
j fail
2:
// This test should pass, since JALR ignores the target LSB
li TESTNUM, 3
la t0, 1f
jalr t1, t0, 1
1:
j 1f
j fail
1:
li TESTNUM, 4
li t1, 0
la t0, 1f
jalr t1, t0, 3
1:
.option rvc
c.j 1f
c.j 2f
.option norvc
1:
j fail
2:
# Like test 2, but with jal instead of jalr.
li TESTNUM, 5
li t1, 0
la t0, 1f
jal t1, 2f
1:
.option rvc
c.j 1f
2:
c.j 2f
.option norvc
1:
j fail
2:
# Like test 2, but with a taken branch instead of jalr.
li TESTNUM, 6
li t1, 0
la t0, 1f
beqz x0, 2f
1:
.option rvc
c.j 1f
2:
c.j 2f
.option norvc
1:
j fail
2:
# Not-taken branches should not trap, even without RVC.
li TESTNUM, 7
bnez x0, 1f
j 2f
.option rvc
c.j 1f
1:
c.j 1f
.option norvc
1:
j fail
2:
#ifdef __MACHINE_MODE
# RVC cannot be disabled if doing so would cause a misaligned instruction
# exception on the next instruction fetch. (This test assumes no other
# extensions that support misalignment are present.)
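# Illustration: the c.nop below leaves the csrci at an address that is
# 2 mod 4, so the instruction after it would be misaligned if RVC were
# disabled; the csrci must therefore take no effect, which the misa
# read-back below verifies.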
li TESTNUM, 8
csrr t2, misa
andi t2, t2, 1 << ('c' - 'a')
beqz t2, 2f
.option rvc
c.nop
csrci misa, 1 << ('c' - 'a')
1:
c.nop
.option norvc
csrr t2, misa
andi t2, t2, 1 << ('c' - 'a')
beqz t2, fail
# When RVC is disabled, mret to a misaligned mepc should succeed,
# masking off mepc[1].
la t0, 1f
addi t0, t0, -2
csrw mepc, t0
# Try to disable RVC; if it can't be disabled, skip the test.
csrci misa, 1 << ('c' - 'a')
csrr t2, misa
andi t2, t2, 1 << ('c' - 'a')
bnez t2, 2f
li t2, MSTATUS_MPP
csrs mstatus, t2
mret
# mret should transfer control to this branch. Otherwise, it will
# transfer control two bytes into the branch, which happens to be the
# illegal instruction c.unimp.
beqz x0, 1f
1:
csrsi misa, 1 << ('c' - 'a')
2:
#endif
j pass
TEST_PASSFAIL
.align 2
.global stvec_handler
stvec_handler:
# tests 2, 4, 5, and 6 should trap
li a0, 2
beq TESTNUM, a0, 1f
li a0, 4
beq TESTNUM, a0, 1f
li a0, 5
beq TESTNUM, a0, 1f
li a0, 6
beq TESTNUM, a0, 1f
j fail
1:
# verify that return address was not written
bnez t1, fail
# verify trap cause
li a1, CAUSE_MISALIGNED_FETCH
csrr a0, scause
bne a0, a1, fail
# verify that epc == &jalr (== t0 - 4)
csrr a1, sepc
addi a1, a1, 4
bne t0, a1, fail
# verify that badaddr == 0 or badaddr == t0+2.
csrr a0, sbadaddr
beqz a0, 1f
addi a0, a0, -2
bne a0, t0, fail
1:
addi a1, a1, 12
csrw sepc, a1
sret
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 4,918 | model/tests/riscv-tests/isa/rv64si/csr.S | # See LICENSE for license details.
#*****************************************************************************
# csr.S
#-----------------------------------------------------------------------------
#
# Test CSRRx and CSRRxI instructions.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64S
RVTEST_CODE_BEGIN
#ifdef __MACHINE_MODE
#define sscratch mscratch
#define sstatus mstatus
#define scause mcause
#define sepc mepc
#define sret mret
#define stvec_handler mtvec_handler
#undef SSTATUS_SPP
#define SSTATUS_SPP MSTATUS_MPP
#endif
# For RV64, make sure UXL encodes RV64. (UXL does not exist for RV32.)
#if __riscv_xlen == 64
# If running in M mode, use mstatus.MPP to check existence of U mode.
# Otherwise, if in S mode, then U mode must exist and we don't need to check.
#ifdef __MACHINE_MODE
li t0, MSTATUS_MPP
csrc mstatus, t0
csrr t1, mstatus
and t0, t0, t1
bnez t0, 1f
#endif
# If U mode is present, UXL should be 2 (XLEN = 64-bit)
TEST_CASE(18, a0, SSTATUS_UXL & (SSTATUS_UXL << 1), csrr a0, sstatus; li a1, SSTATUS_UXL; and a0, a0, a1)
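# (SSTATUS_UXL & (SSTATUS_UXL << 1) selects the upper bit of the two-bit
# UXL field, i.e. the value 2, which is the encoding for XLEN = 64.)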
#ifdef __MACHINE_MODE
j 2f
1:
# If U mode is not present, UXL should be 0
TEST_CASE(19, a0, 0, csrr a0, sstatus; li a1, SSTATUS_UXL; and a0, a0, a1)
2:
#endif
#endif
# Make sure reading the cycle counter in four ways doesn't trap.
#ifdef __MACHINE_MODE
#TEST_CASE(25, x0, 0, csrrc x0, cycle, x0);
#TEST_CASE(26, x0, 0, csrrs x0, cycle, x0);
#TEST_CASE(27, x0, 0, csrrci x0, cycle, 0);
#TEST_CASE(28, x0, 0, csrrsi x0, cycle, 0);
#endif
TEST_CASE(20, a0, 0, csrw sscratch, zero; csrr a0, sscratch);
TEST_CASE(21, a0, 0, csrrwi a0, sscratch, 0; csrrwi a0, sscratch, 0xF);
TEST_CASE(22, a0, 0x1f, csrrsi x0, sscratch, 0x10; csrr a0, sscratch);
csrwi sscratch, 3
TEST_CASE( 2, a0, 3, csrr a0, sscratch);
TEST_CASE( 3, a1, 3, csrrci a1, sscratch, 1);
TEST_CASE( 4, a2, 2, csrrsi a2, sscratch, 4);
TEST_CASE( 5, a3, 6, csrrwi a3, sscratch, 2);
TEST_CASE( 6, a1, 2, li a0, 0xbad1dea; csrrw a1, sscratch, a0);
TEST_CASE( 7, a1, 0xbad1dea, li a0, 0x0001dea; csrrc a1, sscratch, a0);
TEST_CASE( 8, a1, 0xbad0000, li a0, 0x000beef; csrrs a1, sscratch, a0);
TEST_CASE( 9, a0, 0xbadbeef, li a0, 0xbad1dea; csrrw a0, sscratch, a0);
TEST_CASE(10, a0, 0xbad1dea, li a0, 0x0001dea; csrrc a0, sscratch, a0);
TEST_CASE(11, a0, 0xbad0000, li a0, 0x000beef; csrrs a0, sscratch, a0);
TEST_CASE(12, a0, 0xbadbeef, csrr a0, sscratch);
#ifdef __MACHINE_MODE
# Is F extension present?
#csrr a0, misa
#andi a0, a0, (1 << ('F' - 'A'))
#beqz a0, 1f
j 1f
# If so, make sure FP stores have no effect when mstatus.FS is off.
li a1, MSTATUS_FS
csrs mstatus, a1
#ifdef __riscv_flen
fmv.s.x f0, x0
csrc mstatus, a1
la a1, fsw_data
TEST_CASE(13, a0, 1, fsw f0, (a1); lw a0, (a1));
#else
# Fail if this test is compiled without F but executed on a core with F.
TEST_CASE(13, zero, 1)
#endif
1:
# Figure out if 'U' is set in misa
#csrr a0, misa # a0 = csr(misa)
#srli a0, a0, 20 # a0 = a0 >> 20
#andi a0, a0, 1 # a0 = a0 & 1
#beqz a0, finish # if no user mode, skip the rest of these checks
j finish
# Enable access to the cycle counter
csrwi mcounteren, 1
# Figure out if 'S' is set in misa
csrr a0, misa # a0 = csr(misa)
srli a0, a0, 18 # a0 = a0 >> 18
andi a0, a0, 1 # a0 = a0 & 1
beqz a0, 1f
# Enable access to the cycle counter
csrwi scounteren, 1
1:
#endif /* __MACHINE_MODE */
# jump to user land
li t0, SSTATUS_SPP
csrc sstatus, t0
la t0, 1f
csrw sepc, t0
sret
1:
# Make sure writing the cycle counter causes an exception.
# Don't run in supervisor, as we don't delegate illegal instruction traps.
#ifdef __MACHINE_MODE
TEST_CASE(14, a0, 255, li a0, 255; csrrw a0, cycle, x0);
#endif
# Make sure reading status in user mode causes an exception.
# Don't run in supervisor, as we don't delegate illegal instruction traps.
#ifdef __MACHINE_MODE
TEST_CASE(15, a0, 255, li a0, 255; csrr a0, sstatus)
#else
TEST_CASE(15, x0, 0, nop)
#endif
finish:
RVTEST_PASS
# We should only fall through to this if scall failed.
TEST_PASSFAIL
.align 2
.global stvec_handler
stvec_handler:
# Trapping on tests 13-15 is good news.
# Note that since the test didn't complete, TESTNUM is smaller by 1.
li t0, 12
bltu TESTNUM, t0, 1f
li t0, 14
bleu TESTNUM, t0, privileged
1:
# catch RVTEST_PASS and kick it up to M-mode
csrr t0, scause
li t1, CAUSE_USER_ECALL
bne t0, t1, fail
RVTEST_PASS
privileged:
# Make sure scause indicates a lack of privilege.
csrr t0, scause
li t1, CAUSE_ILLEGAL_INSTRUCTION
bne t0, t1, fail
# Return to user mode, but skip the trapping instruction.
csrr t0, sepc
addi t0, t0, 4
csrw sepc, t0
sret
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
fsw_data: .word 1
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,960 | model/tests/riscv-tests/isa/rv32um/mulhu.S | # See LICENSE for license details.
#*****************************************************************************
# mulhu.S
#-----------------------------------------------------------------------------
#
# Test mulhu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulhu, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulhu, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulhu, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulhu, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mulhu, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mulhu, 0x7fffc000, 0x80000000, 0xffff8000 );
TEST_RR_OP(30, mulhu, 0x0001fefe, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mulhu, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(32, mulhu, 0xfe010000, 0xff000000, 0xff000000 );
TEST_RR_OP(33, mulhu, 0xfffffffe, 0xffffffff, 0xffffffff );
TEST_RR_OP(34, mulhu, 0x00000000, 0xffffffff, 0x00000001 );
TEST_RR_OP(35, mulhu, 0x00000000, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC2_EQ_DEST( 9, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_EQ_DEST( 10, mulhu, 43264, 13<<20 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 12, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 13, 2, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_ZEROSRC1( 26, mulhu, 0, 31<<26 );
TEST_RR_ZEROSRC2( 27, mulhu, 0, 32<<26 );
TEST_RR_ZEROSRC12( 28, mulhu, 0 );
TEST_RR_ZERODEST( 29, mulhu, 33<<20, 34<<20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,923 | model/tests/riscv-tests/isa/rv32um/mulh.S | # See LICENSE for license details.
#*****************************************************************************
# mulh.S
#-----------------------------------------------------------------------------
#
# Test mulh instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulh, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulh, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulh, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulh, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mulh, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mulh, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP(30, mulh, 0xffff0081, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mulh, 0xffff0081, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(32, mulh, 0x00010000, 0xff000000, 0xff000000 );
TEST_RR_OP(33, mulh, 0x00000000, 0xffffffff, 0xffffffff );
TEST_RR_OP(34, mulh, 0xffffffff, 0xffffffff, 0x00000001 );
TEST_RR_OP(35, mulh, 0xffffffff, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC2_EQ_DEST( 9, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_EQ_DEST( 10, mulh, 43264, 13<<20 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 12, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 13, 2, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_ZEROSRC1( 26, mulh, 0, 31<<26 );
TEST_RR_ZEROSRC2( 27, mulh, 0, 32<<26 );
TEST_RR_ZEROSRC12( 28, mulh, 0 );
TEST_RR_ZERODEST( 29, mulh, 33<<20, 34<<20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,997 | model/tests/riscv-tests/isa/rv32um/mulhsu.S | # See LICENSE for license details.
#*****************************************************************************
# mulhsu.S
#-----------------------------------------------------------------------------
#
# Test mulhsu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulhsu, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulhsu, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulhsu, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulhsu, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mulhsu, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mulhsu, 0x80004000, 0x80000000, 0xffff8000 );
TEST_RR_OP(30, mulhsu, 0xffff0081, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mulhsu, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(32, mulhsu, 0xff010000, 0xff000000, 0xff000000 );
TEST_RR_OP(33, mulhsu, 0xffffffff, 0xffffffff, 0xffffffff );
TEST_RR_OP(34, mulhsu, 0xffffffff, 0xffffffff, 0x00000001 );
TEST_RR_OP(35, mulhsu, 0x00000000, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC2_EQ_DEST( 9, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_EQ_DEST( 10, mulhsu, 43264, 13<<20 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 12, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 13, 2, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_ZEROSRC1( 26, mulhsu, 0, 31<<26 );
TEST_RR_ZEROSRC2( 27, mulhsu, 0, 32<<26 );
TEST_RR_ZEROSRC12( 28, mulhsu, 0 );
TEST_RR_ZERODEST( 29, mulhsu, 33<<20, 34<<20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 2,818 | model/tests/riscv-tests/isa/rv32um/mul.S | # See LICENSE for license details.
#*****************************************************************************
# mul.S
#-----------------------------------------------------------------------------
#
# Test mul instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP(32, mul, 0x00001200, 0x00007e00, 0xb6db6db7 );
TEST_RR_OP(33, mul, 0x00001240, 0x00007fc0, 0xb6db6db7 );
TEST_RR_OP( 2, mul, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mul, 0x00000001, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mul, 0x00000015, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mul, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mul, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mul, 0x00000000, 0x80000000, 0xffff8000 );
TEST_RR_OP(30, mul, 0x0000ff7f, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mul, 0x0000ff7f, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(34, mul, 0x00000000, 0xff000000, 0xff000000 );
TEST_RR_OP(35, mul, 0x00000001, 0xffffffff, 0xffffffff );
TEST_RR_OP(36, mul, 0xffffffff, 0xffffffff, 0x00000001 );
TEST_RR_OP(37, mul, 0xffffffff, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mul, 143, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 9, mul, 154, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 10, mul, 169, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mul, 143, 13, 11 );
TEST_RR_DEST_BYPASS( 12, 1, mul, 154, 14, 11 );
TEST_RR_DEST_BYPASS( 13, 2, mul, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mul, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mul, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mul, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mul, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mul, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mul, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mul, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mul, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mul, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mul, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mul, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mul, 165, 15, 11 );
TEST_RR_ZEROSRC1( 26, mul, 0, 31 );
TEST_RR_ZEROSRC2( 27, mul, 0, 32 );
TEST_RR_ZEROSRC12( 28, mul, 0 );
TEST_RR_ZERODEST( 29, mul, 33, 34 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
lizhirui/DreamCore | 1,794 | model/testcode/coremark/startup.S | # SPDX-License-Identifier: Apache-2.0
# Copyright 2019 Western Digital Corporation or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple start-up file for the reference design
.section ".text.init"
.global _start
.type _start, @function
_start:
#clear minstret
csrw minstret, zero
csrw minstreth, zero
#clear registers
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
#cache configuration
#li t1, 0x55555655
#csrw 0x7c0, t1
#setup MEIP and MTIP
#li t0, (1<<7 | 1<<11)
#csrw mie, t0
#li t0, (1<<3)
#csrw mstatus, t0
# initialize global pointer
.option push
.option norelax
la gp, __global_pointer$
.option pop
la sp, _sp
la t0, __bss_start
la t1, __bss_end
zero_bss:
sw x0, 0(t0)
addi t0, t0, 4
blt t0, t1, zero_bss
# hart id: park all harts except hart 0
csrr a0, mhartid
li a1, 1
1: bgeu a0, a1, 1b
# argc = argv = 0
li a0, 0
li a1, 0
call main
fence
csrw 0x804, 1
# loop here
2: j 2b
.global send_char
send_char:
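# a0 carries the character to print; CSR 0x800 appears to be a
# platform-specific console-output register (an assumption based on its
# use here). The C side would declare it as: extern void send_char(int c);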
csrw 0x800, a0
ret
|
lizhirui/DreamCore | 1,794 | model/testcode/dhrystone/startup.S | # SPDX-License-Identifier: Apache-2.0
# Copyright 2019 Western Digital Corporation or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple start-up file for the reference design
.section ".text.init"
.global _start
.type _start, @function
_start:
#clear minstret
csrw minstret, zero
csrw minstreth, zero
#clear registers
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10,0
li x11,0
li x12,0
li x13,0
li x14,0
li x15,0
li x16,0
li x17,0
li x18,0
li x19,0
li x20,0
li x21,0
li x22,0
li x23,0
li x24,0
li x25,0
li x26,0
li x27,0
li x28,0
li x29,0
li x30,0
li x31,0
#cache configuration
#li t1, 0x55555655
#csrw 0x7c0, t1
#setup MEIP and MTIP
#li t0, (1<<7 | 1<<11)
#csrw mie, t0
#li t0, (1<<3)
#csrw mstatus, t0
# initialize global pointer
.option push
.option norelax
la gp, __global_pointer$
.option pop
la sp, _sp
la t0, __bss_start
la t1, __bss_end
zero_bss:
sw x0, 0(t0)
addi t0, t0, 4
blt t0, t1, zero_bss
# hart id: park all harts except hart 0
csrr a0, mhartid
li a1, 1
1: bgeu a0, a1, 1b
# argc = argv = 0
li a0, 0
li a1, 0
call main
fence
csrw 0x804, 1
# loop here
2: j 2b
.global send_char
send_char:
csrw 0x800, a0
ret
|
lizhirui/DreamCore | 1,984 | model/testcode/rt-thread/bsp/MyRISCVCore/MyRISCVCore/board/startup.S | /*
* Copyright (c) 2020-2020, AnnikaChip Development Team
*
* Change Logs:
* Date Author Notes
* 2020-11-08 lizhirui first version
*
*/
#include "riscv_encoding.h"
.section .init
.globl _start
.type _start,@function
/**
* Reset Handler called on controller reset
*/
_start:
/* Initialize all registers */
li x1, 0
li x2, 0
li x3, 0
li x4, 0
li x5, 0
li x6, 0
li x7, 0
li x8, 0
li x9, 0
li x10, 0
li x11, 0
li x12, 0
li x13, 0
li x14, 0
li x15, 0
li x16, 0
li x17, 0
li x18, 0
li x19, 0
li x20, 0
li x21, 0
li x22, 0
li x23, 0
li x24, 0
li x25, 0
li x26, 0
li x27, 0
li x28, 0
li x29, 0
li x30, 0
li x31, 0
/* Initialize Normal Stack defined in linker.ld*/
la sp, _sp
/* ===== Startup Stage 1 ===== */
/* Disable Global Interrupt */
csrc CSR_MSTATUS, MSTATUS_MIE
/* Initialize GP */
.option push
.option norelax
la gp, __global_pointer$
.option pop
/*
* Set Exception Entry MTVEC to exc_entry
 * Due to the settings above, exceptions and NMIs
 * will share a common entry.
*/
la t0, exc_entry
csrw CSR_MTVEC, t0
/* ===== Startup Stage 2 ===== */
/* Disable lower-privilege access to the cycle and instret counters */
csrci CSR_MCOUNTEREN, 0x5
/* Load data section */
la a0, _data_lma
la a1, _data
la a2, _edata
bgeu a1, a2, 2f
1:
lw t0, (a0)
sw t0, (a1)
addi a0, a0, 4
addi a1, a1, 4
bltu a1, a2, 1b
2:
/* Clear bss section */
la a0, __bss_start
la a1, _end
bgeu a0, a1, 2f
1:
sw zero, (a0)
addi a0, a0, 4
bltu a0, a1, 1b
2:
/* Register global destructors (run via atexit) */
la a0, __libc_fini_array
call atexit
/* Call C/C++ constructor start up code */
call __libc_init_array
/* ===== Call Main Function ===== */
/* argc = argv = envp = 0 */
li a0, 0
li a1, 0
li a2, 0
call entry
1:
j 1b
|
lizhirui/DreamCore | 6,553 | model/testcode/rt-thread/bsp/MyRISCVCore/MyRISCVCore/board/intexc.S | /*
* Copyright (c) 2019 Nuclei Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************************************************
* \file intexc_gd32vf103.S
* \brief NMSIS Interrupt and Exception Handling Template File
* for Device gd32vf103
* \version V1.00
* \date 7 Jan 2020
*
******************************************************************************/
#include "riscv_encoding.h"
/**
* \brief Global interrupt disabled
* \details
 * This function disables the global interrupt.
 * \remarks
 * - All interrupt requests will be ignored by the CPU.
*/
.macro DISABLE_MIE
csrc CSR_MSTATUS, MSTATUS_MIE
.endm
/**
* \brief Macro for context save
* \details
 * This macro saves the ABI-defined caller-saved registers on the stack.
 * \remarks
 * - This macro can be used to save the context when entering an interrupt
 *   or exception handler.
*/
/* Save caller registers */
.macro SAVE_CONTEXT
/* Allocate stack space for context saving */
#ifndef __riscv_32e
addi sp, sp, -20*REGBYTES
#else
addi sp, sp, -14*REGBYTES
#endif /* __riscv_32e */
STORE x1, 0*REGBYTES(sp)
STORE x4, 1*REGBYTES(sp)
STORE x5, 2*REGBYTES(sp)
STORE x6, 3*REGBYTES(sp)
STORE x7, 4*REGBYTES(sp)
STORE x10, 5*REGBYTES(sp)
STORE x11, 6*REGBYTES(sp)
STORE x12, 7*REGBYTES(sp)
STORE x13, 8*REGBYTES(sp)
STORE x14, 9*REGBYTES(sp)
STORE x15, 10*REGBYTES(sp)
#ifndef __riscv_32e
STORE x16, 14*REGBYTES(sp)
STORE x17, 15*REGBYTES(sp)
STORE x28, 16*REGBYTES(sp)
STORE x29, 17*REGBYTES(sp)
STORE x30, 18*REGBYTES(sp)
STORE x31, 19*REGBYTES(sp)
#endif /* __riscv_32e */
.endm
/**
 * \brief Macro to restore caller registers
 * \details
 * This macro restores the ABI-defined caller-saved registers from the stack.
 * \remarks
 * - You can use this macro to restore the context before returning
 *   from an interrupt or exception handler.
*/
/* Restore caller registers */
.macro RESTORE_CONTEXT
LOAD x1, 0*REGBYTES(sp)
LOAD x4, 1*REGBYTES(sp)
LOAD x5, 2*REGBYTES(sp)
LOAD x6, 3*REGBYTES(sp)
LOAD x7, 4*REGBYTES(sp)
LOAD x10, 5*REGBYTES(sp)
LOAD x11, 6*REGBYTES(sp)
LOAD x12, 7*REGBYTES(sp)
LOAD x13, 8*REGBYTES(sp)
LOAD x14, 9*REGBYTES(sp)
LOAD x15, 10*REGBYTES(sp)
#ifndef __riscv_32e
LOAD x16, 14*REGBYTES(sp)
LOAD x17, 15*REGBYTES(sp)
LOAD x28, 16*REGBYTES(sp)
LOAD x29, 17*REGBYTES(sp)
LOAD x30, 18*REGBYTES(sp)
LOAD x31, 19*REGBYTES(sp)
/* De-allocate the stack space */
addi sp, sp, 20*REGBYTES
#else
/* De-allocate the stack space */
addi sp, sp, 14*REGBYTES
#endif /* __riscv_32e */
.endm
/**
 * \brief Macro to save necessary CSRs to the stack
 * \details
 * This macro stores MCAUSE, MEPC, and MSUBM to the stack.
*/
.macro SAVE_CSR_CONTEXT
/* Store CSR mcause to stack using pushmcause */
//csrrwi x0, CSR_PUSHMCAUSE, 11
/* Store CSR mepc to stack using pushmepc */
//csrrwi x0, CSR_PUSHMEPC, 12
/* Store CSR msub to stack using pushmsub */
//csrrwi x0, CSR_PUSHMSUBM, 13
.endm
/**
 * \brief Macro to restore necessary CSRs from the stack
 * \details
 * This macro restores MSUBM, MEPC, and MCAUSE from the stack.
*/
.macro RESTORE_CSR_CONTEXT
//LOAD x5, 13*REGBYTES(sp)
//csrw CSR_MSUBM, x5
LOAD x5, 12*REGBYTES(sp)
csrw CSR_MEPC, x5
LOAD x5, 11*REGBYTES(sp)
csrw CSR_MCAUSE, x5
.endm
/**
* \brief Exception/NMI Entry
* \details
 * This function provides the common entry for exceptions/NMIs.
 * \remarks
 * This is a default exception/NMI entry. The ABI-defined caller-saved
 * registers and some CSRs are saved before entering the handler and
 * restored before returning.
*/
.section .text.trap
/* In CLIC mode, the exception entry must be 64-byte aligned */
.align 6
.global exc_entry
.weak exc_entry
exc_entry:
/* Save the caller-saved registers (context) */
SAVE_CONTEXT
/* Save the necessary CSR registers */
SAVE_CSR_CONTEXT
/*
* Set the exception handler function arguments
* argument 1: mcause value
* argument 2: current stack point(SP) value
*/
csrr a0, mcause
mv a1, sp
/*
* TODO: Call the exception handler function
 * By default, the function template is provided in
 * system_Device.c; you can adjust it as you want
*/
call core_exception_handler
/* Restore the necessary CSR registers */
RESTORE_CSR_CONTEXT
/* Restore the caller-saved registers (context) */
RESTORE_CONTEXT
/* Return to regular code */
mret
/**
* \brief Non-Vector Interrupt Entry
* \details
 * This function provides the common entry for handling
 * non-vector interrupts.
 * \remarks
 * This is a default non-vector interrupt entry. The ABI-defined
 * caller-saved registers and some CSRs are saved before entering the
 * interrupt handler and restored before returning.
*/
.section .text.irq
/* In CLIC mode, the interrupt entry must be 4-byte aligned */
.align 2
.global irq_entry
.weak irq_entry
/* This label will be set to MTVT2 register */
irq_entry:
/* Save the caller-saved registers (context) */
SAVE_CONTEXT
/* Save the necessary CSR registers */
SAVE_CSR_CONTEXT
/* This special CSR read/write operation claims the highest-priority
 * pending interrupt ID from the CLIC; if the ID is not 0, it
 * automatically enables mstatus.MIE, jumps to the corresponding
 * vector entry label, and updates the link register.
 */
//csrrw ra, CSR_JALMNXTI, ra
/* Critical section with interrupts disabled */
DISABLE_MIE
/* Restore the necessary CSR registers */
RESTORE_CSR_CONTEXT
/* Restore the caller-saved registers (context) */
RESTORE_CONTEXT
/* Return to regular code */
mret
/* Default Handler for Exceptions / Interrupts */
.global default_intexc_handler
.weak default_intexc_handler
Undef_Handler:
default_intexc_handler:
1:
j 1b
|
lizhirui/DreamCore | 5,089 | model/testcode/rt-thread/libcpu/common/context_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018/10/28 Bernard The unify RISC-V porting implementation
* 2018/12/27 Jesven Add SMP support
*/
#include "cpuport.h"
#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable rt_hw_local_irq_enable
#endif
/*
* rt_base_t rt_hw_interrupt_disable(void);
*/
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
csrrci a0, mstatus, 8
ret
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
csrw mstatus, a0
ret
/*
* #ifdef RT_USING_SMP
* void rt_hw_context_switch_to(rt_ubase_t to, stuct rt_thread *to_thread);
* #else
* void rt_hw_context_switch_to(rt_ubase_t to);
* #endif
* a0 --> to
* a1 --> to_thread
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LOAD sp, (a0)
#ifdef RT_USING_SMP
mv a0, a1
jal rt_cpus_lock_status_restore
#endif
LOAD a0, 2 * REGBYTES(sp)
csrw mstatus, a0
j rt_hw_context_switch_exit
/*
* #ifdef RT_USING_SMP
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
* #else
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
* #endif
*
* a0 --> from
* a1 --> to
* a2 --> to_thread
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
/* saved from thread context
 * x1/ra -> sp(0) (used as the resume epc)
 * x1/ra -> sp(1)
 * mstatus.mie (saved as the MPIE bit) -> sp(2)
 * x(i) -> sp(i)
*/
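/* Worked example (assuming RV32, REGBYTES = 4): the frame allocated
 * below is 32 * 4 = 128 bytes; the saved epc sits at sp + 0, ra at
 * sp + 4, the MPIE flag at sp + 8, and x4..x31 at slots 4..31,
 * i.e. sp + 16 .. sp + 124.
 */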
addi sp, sp, -32 * REGBYTES
STORE sp, (a0)
STORE x1, 0 * REGBYTES(sp)
STORE x1, 1 * REGBYTES(sp)
csrr a0, mstatus
andi a0, a0, 8
beqz a0, save_mpie
li a0, 0x80
save_mpie:
STORE a0, 2 * REGBYTES(sp)
STORE x4, 4 * REGBYTES(sp)
STORE x5, 5 * REGBYTES(sp)
STORE x6, 6 * REGBYTES(sp)
STORE x7, 7 * REGBYTES(sp)
STORE x8, 8 * REGBYTES(sp)
STORE x9, 9 * REGBYTES(sp)
STORE x10, 10 * REGBYTES(sp)
STORE x11, 11 * REGBYTES(sp)
STORE x12, 12 * REGBYTES(sp)
STORE x13, 13 * REGBYTES(sp)
STORE x14, 14 * REGBYTES(sp)
STORE x15, 15 * REGBYTES(sp)
STORE x16, 16 * REGBYTES(sp)
STORE x17, 17 * REGBYTES(sp)
STORE x18, 18 * REGBYTES(sp)
STORE x19, 19 * REGBYTES(sp)
STORE x20, 20 * REGBYTES(sp)
STORE x21, 21 * REGBYTES(sp)
STORE x22, 22 * REGBYTES(sp)
STORE x23, 23 * REGBYTES(sp)
STORE x24, 24 * REGBYTES(sp)
STORE x25, 25 * REGBYTES(sp)
STORE x26, 26 * REGBYTES(sp)
STORE x27, 27 * REGBYTES(sp)
STORE x28, 28 * REGBYTES(sp)
STORE x29, 29 * REGBYTES(sp)
STORE x30, 30 * REGBYTES(sp)
STORE x31, 31 * REGBYTES(sp)
/* restore to thread context
* sp(0) -> epc;
* sp(1) -> ra;
* sp(i) -> x(i+2)
*/
LOAD sp, (a1)
#ifdef RT_USING_SMP
mv a0, a2
jal rt_cpus_lock_status_restore
#endif /*RT_USING_SMP*/
j rt_hw_context_switch_exit
#ifdef RT_USING_SMP
/*
* void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
*
* a0 --> context
* a1 --> from
* a2 --> to
* a3 --> to_thread
*/
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
STORE a0, 0(a1)
LOAD sp, 0(a2)
move a0, a3
call rt_cpus_lock_status_restore
j rt_hw_context_switch_exit
#endif
.global rt_hw_context_switch_exit
rt_hw_context_switch_exit:
#ifdef RT_USING_SMP
#ifdef RT_USING_SIGNALS
mv a0, sp
csrr t0, mhartid
/* switch interrupt stack of current cpu */
la sp, __stack_start__
addi t1, t0, 1
li t2, __STACKSIZE__
mul t1, t1, t2
add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */
call rt_signal_check
mv sp, a0
#endif
#endif
/* restore the saved epc from the stack to mepc */
LOAD a0, 0 * REGBYTES(sp)
csrw mepc, a0
LOAD x1, 1 * REGBYTES(sp)
li t0, 0x00001800
csrw mstatus, t0
LOAD a0, 2 * REGBYTES(sp)
csrs mstatus, a0
LOAD x4, 4 * REGBYTES(sp)
LOAD x5, 5 * REGBYTES(sp)
LOAD x6, 6 * REGBYTES(sp)
LOAD x7, 7 * REGBYTES(sp)
LOAD x8, 8 * REGBYTES(sp)
LOAD x9, 9 * REGBYTES(sp)
LOAD x10, 10 * REGBYTES(sp)
LOAD x11, 11 * REGBYTES(sp)
LOAD x12, 12 * REGBYTES(sp)
LOAD x13, 13 * REGBYTES(sp)
LOAD x14, 14 * REGBYTES(sp)
LOAD x15, 15 * REGBYTES(sp)
LOAD x16, 16 * REGBYTES(sp)
LOAD x17, 17 * REGBYTES(sp)
LOAD x18, 18 * REGBYTES(sp)
LOAD x19, 19 * REGBYTES(sp)
LOAD x20, 20 * REGBYTES(sp)
LOAD x21, 21 * REGBYTES(sp)
LOAD x22, 22 * REGBYTES(sp)
LOAD x23, 23 * REGBYTES(sp)
LOAD x24, 24 * REGBYTES(sp)
LOAD x25, 25 * REGBYTES(sp)
LOAD x26, 26 * REGBYTES(sp)
LOAD x27, 27 * REGBYTES(sp)
LOAD x28, 28 * REGBYTES(sp)
LOAD x29, 29 * REGBYTES(sp)
LOAD x30, 30 * REGBYTES(sp)
LOAD x31, 31 * REGBYTES(sp)
addi sp, sp, 32 * REGBYTES
mret
|
lizhirui/DreamCore | 5,319 | model/testcode/rt-thread/libcpu/risc-v/myriscvcore/context_gcc.S | /*
* Copyright (c) 2019-Present Nuclei Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020/03/26 Huaqi First Nuclei RISC-V porting implementation
*/
#include "riscv_encoding.h"
#ifndef __riscv_32e
#define RT_SAVED_REGNUM 30
#else
#define RT_SAVED_REGNUM 14
#endif
#define RT_CONTEXT_SIZE (RT_SAVED_REGNUM * REGBYTES)
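/* e.g. on RV32I (no __riscv_32e, REGBYTES = 4) the context frame is
 * 30 * 4 = 120 bytes */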
.extern rt_interrupt_from_thread
.extern rt_interrupt_to_thread
.section .text
/*
* void rt_hw_context_switch_to(rt_ubase_t to);
* a0 --> to_thread
*/
.globl rt_hw_context_switch_to
/* Start the first task. This also clears the bit that indicates the FPU is
in use, in case the FPU was used before the scheduler was started, which
would otherwise leave unnecessary space in the stack for lazy saving of
FPU registers. */
.align 3
rt_hw_context_switch_to:
/* Set up the interrupt stack: the stack that was used by entry()
before the scheduler started is no longer required afterwards,
so it is reused as the interrupt stack.
The interrupt stack pointer is stored in CSR_MSCRATCH */
la t0, _sp
csrw CSR_MSCRATCH, t0
LOAD sp, 0x0(a0) /* Read sp from first TCB member(a0) */
/* Pop PC from stack and set MEPC */
LOAD t0, 0 * REGBYTES(sp)
csrw CSR_MEPC, t0
/* Pop mstatus from stack and set it */
LOAD t0, (RT_SAVED_REGNUM - 1) * REGBYTES(sp)
csrw CSR_MSTATUS, t0
/* Interrupts are still disabled here */
/* Restore Registers from Stack */
LOAD x1, 1 * REGBYTES(sp) /* RA */
LOAD x5, 2 * REGBYTES(sp)
LOAD x6, 3 * REGBYTES(sp)
LOAD x7, 4 * REGBYTES(sp)
LOAD x8, 5 * REGBYTES(sp)
LOAD x9, 6 * REGBYTES(sp)
LOAD x10, 7 * REGBYTES(sp)
LOAD x11, 8 * REGBYTES(sp)
LOAD x12, 9 * REGBYTES(sp)
LOAD x13, 10 * REGBYTES(sp)
LOAD x14, 11 * REGBYTES(sp)
LOAD x15, 12 * REGBYTES(sp)
#ifndef __riscv_32e
LOAD x16, 13 * REGBYTES(sp)
LOAD x17, 14 * REGBYTES(sp)
LOAD x18, 15 * REGBYTES(sp)
LOAD x19, 16 * REGBYTES(sp)
LOAD x20, 17 * REGBYTES(sp)
LOAD x21, 18 * REGBYTES(sp)
LOAD x22, 19 * REGBYTES(sp)
LOAD x23, 20 * REGBYTES(sp)
LOAD x24, 21 * REGBYTES(sp)
LOAD x25, 22 * REGBYTES(sp)
LOAD x26, 23 * REGBYTES(sp)
LOAD x27, 24 * REGBYTES(sp)
LOAD x28, 25 * REGBYTES(sp)
LOAD x29, 26 * REGBYTES(sp)
LOAD x30, 27 * REGBYTES(sp)
LOAD x31, 28 * REGBYTES(sp)
#endif
addi sp, sp, RT_CONTEXT_SIZE
mret
.align 2
.global msip_handler
msip_handler:
addi sp, sp, -RT_CONTEXT_SIZE
STORE x1, 1 * REGBYTES(sp) /* RA */
STORE x5, 2 * REGBYTES(sp)
STORE x6, 3 * REGBYTES(sp)
STORE x7, 4 * REGBYTES(sp)
STORE x8, 5 * REGBYTES(sp)
STORE x9, 6 * REGBYTES(sp)
STORE x10, 7 * REGBYTES(sp)
STORE x11, 8 * REGBYTES(sp)
STORE x12, 9 * REGBYTES(sp)
STORE x13, 10 * REGBYTES(sp)
STORE x14, 11 * REGBYTES(sp)
STORE x15, 12 * REGBYTES(sp)
#ifndef __riscv_32e
STORE x16, 13 * REGBYTES(sp)
STORE x17, 14 * REGBYTES(sp)
STORE x18, 15 * REGBYTES(sp)
STORE x19, 16 * REGBYTES(sp)
STORE x20, 17 * REGBYTES(sp)
STORE x21, 18 * REGBYTES(sp)
STORE x22, 19 * REGBYTES(sp)
STORE x23, 20 * REGBYTES(sp)
STORE x24, 21 * REGBYTES(sp)
STORE x25, 22 * REGBYTES(sp)
STORE x26, 23 * REGBYTES(sp)
STORE x27, 24 * REGBYTES(sp)
STORE x28, 25 * REGBYTES(sp)
STORE x29, 26 * REGBYTES(sp)
STORE x30, 27 * REGBYTES(sp)
STORE x31, 28 * REGBYTES(sp)
#endif
/* Push mstatus to stack */
csrr t0, CSR_MSTATUS
STORE t0, (RT_SAVED_REGNUM - 1) * REGBYTES(sp)
/* Push additional registers */
/* Store sp to task stack */
LOAD t0, rt_interrupt_from_thread
STORE sp, 0(t0)
csrr t0, CSR_MEPC
STORE t0, 0(sp)
jal rt_hw_taskswitch
/* Switch task context */
LOAD t0, rt_interrupt_to_thread
LOAD sp, 0x0(t0)
/* Pop PC from stack and set MEPC */
LOAD t0, 0 * REGBYTES(sp)
csrw CSR_MEPC, t0
/* Pop additional registers */
/* Pop mstatus from stack and set it */
LOAD t0, (RT_SAVED_REGNUM - 1) * REGBYTES(sp)
csrw CSR_MSTATUS, t0
/* Interrupts are still disabled here */
/* Restore Registers from Stack */
LOAD x1, 1 * REGBYTES(sp) /* RA */
LOAD x5, 2 * REGBYTES(sp)
LOAD x6, 3 * REGBYTES(sp)
LOAD x7, 4 * REGBYTES(sp)
LOAD x8, 5 * REGBYTES(sp)
LOAD x9, 6 * REGBYTES(sp)
LOAD x10, 7 * REGBYTES(sp)
LOAD x11, 8 * REGBYTES(sp)
LOAD x12, 9 * REGBYTES(sp)
LOAD x13, 10 * REGBYTES(sp)
LOAD x14, 11 * REGBYTES(sp)
LOAD x15, 12 * REGBYTES(sp)
#ifndef __riscv_32e
LOAD x16, 13 * REGBYTES(sp)
LOAD x17, 14 * REGBYTES(sp)
LOAD x18, 15 * REGBYTES(sp)
LOAD x19, 16 * REGBYTES(sp)
LOAD x20, 17 * REGBYTES(sp)
LOAD x21, 18 * REGBYTES(sp)
LOAD x22, 19 * REGBYTES(sp)
LOAD x23, 20 * REGBYTES(sp)
LOAD x24, 21 * REGBYTES(sp)
LOAD x25, 22 * REGBYTES(sp)
LOAD x26, 23 * REGBYTES(sp)
LOAD x27, 24 * REGBYTES(sp)
LOAD x28, 25 * REGBYTES(sp)
LOAD x29, 26 * REGBYTES(sp)
LOAD x30, 27 * REGBYTES(sp)
LOAD x31, 28 * REGBYTES(sp)
#endif
addi sp, sp, RT_CONTEXT_SIZE
mret
|
lizhirui/DreamCore | 4,618 | model/testcode/rt-thread/libcpu/risc-v/myriscvcore/interrupt_gcc.S | /*
* Copyright (c) 2019-Present Nuclei Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020/03/26 hqfang First Nuclei RISC-V porting implementation
*/
#include "riscv_encoding.h"
.section .text.entry
.align 8
/**
* \brief Global interrupt disabled
* \details
 * This function disables the global interrupt.
 * \remarks
 * - All interrupt requests will be ignored by the CPU.
*/
.macro DISABLE_MIE
csrc CSR_MSTATUS, MSTATUS_MIE
.endm
/**
* \brief Macro for context save
* \details
 * This macro saves the ABI-defined caller-saved registers on the stack.
 * \remarks
 * - This macro can be used to save the context when entering an interrupt
 *   or exception handler.
*/
/* Save caller registers */
.macro SAVE_CONTEXT
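/* Swap sp with the dedicated interrupt stack pointer kept in
CSR_MSCRATCH (set up in rt_hw_context_switch_to), so the context
below is saved on the interrupt stack rather than the thread stack */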
csrrw sp, CSR_MSCRATCH, sp
/* Allocate stack space for context saving */
#ifndef __riscv_32e
addi sp, sp, -20*REGBYTES
#else
addi sp, sp, -14*REGBYTES
#endif /* __riscv_32e */
STORE x1, 0*REGBYTES(sp)
STORE x4, 1*REGBYTES(sp)
STORE x5, 2*REGBYTES(sp)
STORE x6, 3*REGBYTES(sp)
STORE x7, 4*REGBYTES(sp)
STORE x10, 5*REGBYTES(sp)
STORE x11, 6*REGBYTES(sp)
STORE x12, 7*REGBYTES(sp)
STORE x13, 8*REGBYTES(sp)
STORE x14, 9*REGBYTES(sp)
STORE x15, 10*REGBYTES(sp)
#ifndef __riscv_32e
STORE x16, 14*REGBYTES(sp)
STORE x17, 15*REGBYTES(sp)
STORE x28, 16*REGBYTES(sp)
STORE x29, 17*REGBYTES(sp)
STORE x30, 18*REGBYTES(sp)
STORE x31, 19*REGBYTES(sp)
#endif /* __riscv_32e */
.endm
/**
 * \brief Macro to restore caller registers
 * \details
 * This macro restores the ABI-defined caller-saved registers from the stack.
 * \remarks
 * - You can use this macro to restore the context before returning
 *   from an interrupt or exception handler.
*/
/* Restore caller registers */
.macro RESTORE_CONTEXT
LOAD x1, 0*REGBYTES(sp)
LOAD x4, 1*REGBYTES(sp)
LOAD x5, 2*REGBYTES(sp)
LOAD x6, 3*REGBYTES(sp)
LOAD x7, 4*REGBYTES(sp)
LOAD x10, 5*REGBYTES(sp)
LOAD x11, 6*REGBYTES(sp)
LOAD x12, 7*REGBYTES(sp)
LOAD x13, 8*REGBYTES(sp)
LOAD x14, 9*REGBYTES(sp)
LOAD x15, 10*REGBYTES(sp)
#ifndef __riscv_32e
LOAD x16, 14*REGBYTES(sp)
LOAD x17, 15*REGBYTES(sp)
LOAD x28, 16*REGBYTES(sp)
LOAD x29, 17*REGBYTES(sp)
LOAD x30, 18*REGBYTES(sp)
LOAD x31, 19*REGBYTES(sp)
/* De-allocate the stack space */
addi sp, sp, 20*REGBYTES
#else
/* De-allocate the stack space */
addi sp, sp, 14*REGBYTES
#endif /* __riscv_32e */
csrrw sp, CSR_MSCRATCH, sp
.endm
/**
 * \brief Macro to save necessary CSRs to the stack
 * \details
 * This macro stores MCAUSE and MEPC to the stack.
*/
.macro SAVE_CSR_CONTEXT
/* Store CSR mcause to the stack */
csrr x5, CSR_MCAUSE
STORE x5, 11*REGBYTES(sp)
/* Store CSR mepc to the stack */
csrr x5, CSR_MEPC
STORE x5, 12*REGBYTES(sp)
/* Reserved */
STORE x0, 13*REGBYTES(sp)
.endm
/**
 * \brief Macro to restore necessary CSRs from the stack
 * \details
 * This macro restores MEPC and MCAUSE from the stack.
*/
.macro RESTORE_CSR_CONTEXT
LOAD x5, 12*REGBYTES(sp)
csrw CSR_MEPC, x5
LOAD x5, 11*REGBYTES(sp)
csrw CSR_MCAUSE, x5
.endm
/**
* \brief Exception/NMI Entry
* \details
 * This function provides the common entry for exceptions/NMIs.
 * \remarks
 * This is a default exception/NMI entry. The ABI-defined caller-saved
 * registers and some CSRs are saved before entering the handler and
 * restored before returning.
*/
.section .text.trap
/* In CLIC mode, the exception entry must be 64-byte aligned */
.align 6
.global riscv_trap_handler_entry
riscv_trap_handler_entry:
/* Save the caller-saved registers (context) */
SAVE_CONTEXT
/* Save the necessary CSR registers */
SAVE_CSR_CONTEXT
/*
* Set the exception handler function arguments
* argument 1: mcause value
* argument 2: current stack point(SP) value
*/
csrr a0, mcause
mv a1, sp
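/*
 * Machine software interrupt check: on RV32, mcause = (1 << 31) | 3,
 * i.e. the interrupt bit plus exception code 3 (machine software
 * interrupt), which is what the three instructions below construct.
 */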
addi t0, zero, 1
slli t0, t0, 31
ori t0, t0, 3
beq a0, t0, msip_handler_entry
/*
* TODO: Call the exception handler function
 * By default, the function template is provided in
 * system_Device.c; you can adjust it as you want
*/
call core_exception_handler
/* Restore the necessary CSR registers */
RESTORE_CSR_CONTEXT
/* Restore the caller-saved registers (context) */
RESTORE_CONTEXT
/* Return to regular code */
mret
msip_handler_entry:
RESTORE_CSR_CONTEXT
RESTORE_CONTEXT
j msip_handler |
lizhirui/DreamCore | 3,020 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m7/lwp_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-30 heyuanjie first version
*/
.cpu cortex-m7
.syntax unified
.thumb
.text
/*
* void* lwp_get_sys_api(rt_uint32_t number);
*/
.global lwp_get_sys_api
.global lwp_get_kernel_sp
.global lwp_set_kernel_sp
/*
* void lwp_user_entry(args, text, data);
*/
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
PUSH {R0-R3} @; push text&data addr.
MOV R0, SP @; v1 = SP
BL lwp_set_kernel_sp @; lwp_set_kernel_sp(v1)
@; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 @; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} @; pop app address to R1.
@; set data address.
MOV R9, R2
@; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
/*
* void SVC_Handler(void);
*/
.global SVC_Handler
.type SVC_Handler, % function
SVC_Handler:
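@; Sketch of the kernel-stack frame built below (from low to high):
@;   copy of the app exception frame (r0-r3, r12, lr, pc, xpsr)
@;   r4-r5 (syscall parameters 5 and 6)
@;   saved app SP
@; LR in the copied frame is redirected to svc_exit and PC to the kernel
@; API, so the exception return runs the API on the kernel stack.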
PUSH {LR}
@; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} @; push app SP.
@; get SVC number.
mov R0, R7
@; get kernel system API
BL lwp_get_sys_api
PUSH {R0} @; push api
@; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} @; pop api to R2.
POP {R1} @; pop app SP to R1.
stmfd r0!, {r1} @; save app SP to kernel SP
@; push app params 5~6 to kernel SP
STMFD R0!, {R4 - R5}
@; copy R1(app SP) to R0(kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} @; pop exception_stack_frame to r4 - r11 registers
STMFD R0!, {R4 - R11} @; push exception_stack_frame to kernel SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] @; update LR
STR R2, [R0, #24] @; update api to PC
MSR PSP, R0 @; update SP, API is executed with kernel SP
@; set to thread-privilege mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} @; 0xFFFFFFED
ORR LR, LR, #0x10
BX LR
/*
* void svc_exit(void);
*/
.global svc_exit
.type svc_exit, % function
svc_exit:
@; get user SP.
PUSH {R0} @; push result to SP.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] @; load pc
add r3, #32 @; exception_stack_frame size
MSR PSP, R3 @; restore app stack pointer
@; restore to PSP & thread-unprivilege mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
@; return to lwp.
ORR R1, R1, #0x01 @; only Thumb-mode.
BX R1 @; return to user app.
|
lizhirui/DreamCore | 2,972 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m7/lwp_iar.S | ;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2018-10-30 heyuanjie first version
; */
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
IMPORT lwp_get_sys_api
IMPORT lwp_get_kernel_sp
IMPORT lwp_set_kernel_sp
;/*
; * void lwp_user_entry(args, text, data);
; */
EXPORT lwp_user_entry
lwp_user_entry:
PUSH {R0-R3} ; push text&data addr.
MOV R0, SP ; v1 = SP
BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1)
; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 ; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} ; pop app address to R1.
; set data address.
MOV R9, R2
; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
;/*
; * void SVC_Handler(void);
; */
EXPORT SVC_Handler
SVC_Handler:
PUSH {LR}
; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} ; push app SP.
; get SVC number.
mov R0, R7
; get kernel system API
BL lwp_get_sys_api
PUSH {R0} ; push api
; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} ; pop api to R2.
POP {R1} ; pop app SP to R1.
stmfd r0!, {r1} ; save app SP to kernel SP
;push app params 5~6 to kernel SP
STMFD R0!, {R4 - R5}
; copy R1(app SP) to R0(kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 registers
STMFD R0!, {R4 - R11} ; push exception_stack_frame to kernel SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] ; update LR
STR R2, [R0, #24] ; update api to PC
MSR PSP, R0 ; update SP, API is executed with kernel SP
; set to thread-privilege mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} ; 0xFFFFFFED
ORR LR, LR, #0x10
BX LR
;/*
; * void svc_exit(void);
; */
EXPORT svc_exit
svc_exit:
; get user SP.
PUSH {R0} ; push result to SP.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] ; load pc
add r3, r3, #32 ; exception_stack_frame size
MSR PSP, R3 ; restore app stack pointer
; restore to PSP & thread-unprivilege mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
; return to lwp.
ORR R1, R1, #0x01 ; only Thumb-mode.
BX R1 ; return to user app.
END
|
lizhirui/DreamCore | 3,071 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m7/lwp_rvds.S | ;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2018-10-30 heyuanjie first version
; */
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
IMPORT lwp_get_sys_api
IMPORT lwp_get_kernel_sp
IMPORT lwp_set_kernel_sp
;/*
; * void lwp_user_entry(args, text, data);
; */
lwp_user_entry PROC
EXPORT lwp_user_entry
PUSH {R0-R3} ; push text&data addr.
MOV R0, SP ; v1 = SP
BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1)
; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 ; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} ; pop app address to R1.
; set data address.
MOV R9, R2
; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
; never reach here!
ENDP
;/*
; * void SVC_Handler(void);
; */
SVC_Handler PROC
EXPORT SVC_Handler
PUSH {LR}
; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} ; push app SP.
; get SVC number.
mov R0, R7
; get kernel system API
BL lwp_get_sys_api
PUSH {R0} ; push api
; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} ; pop api to R2.
POP {R1} ; pop app SP to R1.
stmfd r0!, {r1} ; save app SP to kernel SP
;push app params 5~6 to kernel SP
STMFD R0!, {R4 - R5}
; copy R1(app SP) to R0(kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 registers
STMFD R0!, {R4 - R11} ; push exception_stack_frame to kernel SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] ; update LR
STR R2, [R0, #24] ; update api to PC
MSR PSP, R0 ; update SP, API is executed with kernel SP
; set to thread-privilege mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} ; 0xFFFFFFED
ORR LR, LR, #0x10
BX LR
ENDP
;/*
; * void svc_exit(void);
; */
svc_exit PROC
EXPORT svc_exit
; get user SP.
PUSH {R0} ; push result to SP.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] ; load pc
add r3, #32 ; exception_stack_frame size
MSR PSP, R3 ; restore app stack pointer
; restore to PSP & thread-unprivilege mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
; return to lwp.
ORR R1, R1, #0x01 ; only Thumb-mode.
BX R1 ; return to user app.
ENDP
ALIGN
END
|
lizhirui/DreamCore | 1,381 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-a/lwp_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
*/
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu cortex-a9
.syntax unified
.text
/*
* void lwp_user_entry(args, text, data);
*/
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
/* set data address. */
mov r9, r2
movs pc, r1
/*
* void SVC_Handler(void);
*/
.global vector_swi
.type vector_swi, % function
vector_swi:
push {lr}
mrs lr, spsr
push {r4, r5, lr}
cpsie i
push {r0 - r3, r12}
and r0, r7, #0xff
bl lwp_get_sys_api
cmp r0, #0 /* r0 = api */
mov lr, r0
pop {r0 - r3, r12}
beq svc_exit
blx lr
svc_exit:
cpsid i
pop {r4, r5, lr}
msr spsr_cxsf, lr
pop {lr}
movs pc, lr
|
lizhirui/DreamCore | 1,545 | model/testcode/rt-thread/components/lwp/arch/arm/arm926/lwp_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
*/
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu arm9
.syntax unified
.text
/*
* void lwp_user_entry(args, text, data);
*/
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
mrs r9, cpsr
mov r8, r9
bic r9, #0x1f
orr r9, #Mode_USR
orr r8, #I_Bit
msr cpsr_c, r8
msr spsr, r9
/* set data address. */
mov r9, r2
movs pc, r1
/*
* void SVC_Handler(void);
*/
.global SVC_Handler
.type SVC_Handler, % function
SVC_Handler:
push {lr}
mrs lr, spsr
push {r4, r5, lr}
mrs r4, cpsr
bic r4, #I_Bit
msr cpsr_c, r4
push {r0 - r3, r12}
and r0, r7, #0xff
bl lwp_get_sys_api
cmp r0, #0 /* r0 = api */
mov r4, r0
pop {r0 - r3, r12}
beq svc_exit
ldr lr, = svc_exit
bx r4
svc_exit:
mrs r4, cpsr
orr r4, #I_Bit
msr cpsr_c, r4
pop {r4, r5, lr}
msr spsr_cxsf, lr
pop {lr}
movs pc, lr
|
lizhirui/DreamCore | 3,020 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m3/lwp_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-30 heyuanjie first version
*/
.cpu cortex-m3
.syntax unified
.thumb
.text
/*
* void* lwp_get_sys_api(rt_uint32_t number);
*/
.global lwp_get_sys_api
.global lwp_get_kernel_sp
.global lwp_set_kernel_sp
/*
* void lwp_user_entry(args, text, data);
*/
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
PUSH {R0-R3} @; push text&data addr.
MOV R0, SP @; v1 = SP
BL lwp_set_kernel_sp @; lwp_set_kernel_sp(v1)
@; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 @; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} @; pop app address to R1.
@; set data address.
MOV R9, R2
@; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
/*
* void SVC_Handler(void);
*/
.global SVC_Handler
.type SVC_Handler, % function
SVC_Handler:
PUSH {LR}
@; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} @; push app SP.
@; get SVC number.
mov R0, R7
@; get kernel system API
BL lwp_get_sys_api
PUSH {R0} @; push api
@; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} @; pop api to R2.
POP {R1} @; pop app SP to R1.
stmfd r0!, {r1} @; save app SP to kernel SP
@; push app params 5~6 to kernel SP
STMFD R0!, {R4 - R5}
@; copy R1(app SP) to R0(kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} @; pop exception_stack_frame to r4 - r11 registers
STMFD R0!, {R4 - R11} @; push exception_stack_frame to kernel SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] @; update LR
STR R2, [R0, #24] @; update api to PC
MSR PSP, R0 @; update SP, API is executed with kernel SP
@; set to thread-privilege mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} @; 0xFFFFFFED
ORR LR, LR, #0x10
BX LR
/*
* void svc_exit(void);
*/
.global svc_exit
.type svc_exit, % function
svc_exit:
@; get user SP.
PUSH {R0} @; push result to SP.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] @; load pc
add r3, #32 @; exception_stack_frame size
MSR PSP, R3 @; restore app stack pointer
@; restore to PSP & thread-unprivilege mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
@; return to lwp.
ORR R1, R1, #0x01 @; only Thumb-mode.
BX R1 @; return to user app.
|
lizhirui/DreamCore | 2,972 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m3/lwp_iar.S | ;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2018-10-30 heyuanjie first version
; */
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
IMPORT lwp_get_sys_api
IMPORT lwp_get_kernel_sp
IMPORT lwp_set_kernel_sp
;/*
; * void lwp_user_entry(args, text, data);
; */
EXPORT lwp_user_entry
lwp_user_entry:
PUSH {R0-R3} ; push text&data addr.
MOV R0, SP ; v1 = SP
BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1)
; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 ; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} ; pop app address to R1.
; set data address.
MOV R9, R2
; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
;/*
; * void SVC_Handler(void);
; */
EXPORT SVC_Handler
SVC_Handler:
PUSH {LR}
; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} ; push app SP.
; get SVC number.
mov R0, R7
; get kernel system API
BL lwp_get_sys_api
PUSH {R0} ; push api
; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} ; pop api to R2.
POP {R1} ; pop app SP to R1.
stmfd r0!, {r1} ; save app SP to kernel SP
;push app params 5~6 to kernel SP
STMFD R0!, {R4 - R5}
; copy R1(app SP) to R0(kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 registers
STMFD R0!, {R4 - R11} ; push exception_stack_frame to kernel SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] ; update LR
STR R2, [R0, #24] ; update api to PC
MSR PSP, R0 ; update SP, API is executed with kernel SP
; set to thread-privilege mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} ; 0xFFFFFFED
ORR LR, LR, #0x10
BX LR
;/*
; * void svc_exit(void);
; */
EXPORT svc_exit
svc_exit:
; get user SP.
PUSH {R0} ; push result to SP.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] ; load pc
add r3, r3, #32 ; exception_stack_frame size
MSR PSP, R3 ; restore app stack pointer
; restore to PSP & thread-unprivilege mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
; return to lwp.
ORR R1, R1, #0x01 ; only Thumb-mode.
BX R1 ; return to user app.
END
|
lizhirui/DreamCore | 3,071 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m3/lwp_rvds.S | ;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2018-10-30 heyuanjie first version
; */
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
IMPORT lwp_get_sys_api
IMPORT lwp_get_kernel_sp
IMPORT lwp_set_kernel_sp
;/*
; * void lwp_user_entry(args, text, data);
; */
lwp_user_entry PROC
EXPORT lwp_user_entry
PUSH {R0-R3} ; push text&data addr.
MOV R0, SP ; v1 = SP
BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1)
; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 ; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} ; pop app address to R1.
; set data address.
MOV R9, R2
; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
; never reach here!
ENDP
;/*
; * void SVC_Handler(void);
; */
SVC_Handler PROC
EXPORT SVC_Handler
PUSH {LR}
; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} ; push app SP.
; get SVC number.
mov R0, R7
; get kernel system API
BL lwp_get_sys_api
PUSH {R0} ; push api
; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} ; pop api to R2.
POP {R1} ; pop app SP to R1.
stmfd r0!, {r1} ; save app SP to kernel SP
; push app params 5 and 6 (passed in R4-R5) to kernel SP
STMFD R0!, {R4 - R5}
; copy the exception stack frame from R1 (app SP) to R0 (kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register
STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] ; update LR
STR R2, [R0, #24] ; update api to PC
MSR PSP, R0 ; update SP, API is executed with kernel SP
; switch to privileged thread mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} ; restore EXC_RETURN (e.g. 0xFFFFFFED)
ORR LR, LR, #0x10
BX LR
ENDP
;/*
; * void svc_exit(void);
; */
svc_exit PROC
EXPORT svc_exit
; save the API result, then locate the saved app SP.
PUSH {R0} ; push API return value.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] ; load pc
add r3, #32 ; exception_stack_frame size
MSR PSP, R3 ; restore app stack pointer
; restore PSP & unprivileged thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
; return to lwp.
ORR R1, R1, #0x01 ; only Thumb-mode.
BX R1 ; return to user app.
ENDP
ALIGN
END
|
lizhirui/DreamCore | 3,020 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m4/lwp_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-10-30 heyuanjie first version
*/
.cpu cortex-m4
.syntax unified
.thumb
.text
/*
* void* lwp_get_sys_api(rt_uint32_t number);
*/
.global lwp_get_sys_api
.global lwp_get_kernel_sp
.global lwp_set_kernel_sp
/*
* void lwp_user_entry(args, text, data);
*/
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
PUSH {R0-R3} @; push text&data addr.
MOV R0, SP @; v1 = SP
BL lwp_set_kernel_sp @; lwp_set_kernel_sp(v1)
@; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 @; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} @; pop app address to R1.
@; set data address.
MOV R9, R2
@; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
/*
* void SVC_Handler(void);
*/
.global SVC_Handler
.type SVC_Handler, % function
SVC_Handler:
PUSH {LR}
@; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} @; push app SP.
@; get SVC number.
mov R0, R7
@; get kernel system API
BL lwp_get_sys_api
PUSH {R0} @; push api
@; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} @; pop api to R2.
POP {R1} @; pop app SP to R1.
stmfd r0!, {r1} @; save app SP to kernel SP
@; push app params 5 and 6 (passed in R4-R5) to kernel SP
STMFD R0!, {R4 - R5}
@; copy the exception stack frame from R1 (app SP) to R0 (kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} @; pop exception_stack_frame to r4 - r11 register
STMFD R0!, {R4 - R11} @; push exception_stack_frame to server SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] @; update LR
STR R2, [R0, #24] @; update api to PC
MSR PSP, R0 @; update SP, API is executed with kernel SP
@; switch to privileged thread mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} @; restore EXC_RETURN (e.g. 0xFFFFFFED)
ORR LR, LR, #0x10
BX LR
/*
* void svc_exit(void);
*/
.global svc_exit
.type svc_exit, % function
svc_exit:
@; save the API result, then locate the saved app SP.
PUSH {R0} @; push API return value.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] @; load pc
add r3, #32 @; exception_stack_frame size
MSR PSP, R3 @; restore app stack pointer
@; restore PSP & unprivileged thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
@; return to lwp.
ORR R1, R1, #0x01 @; only Thumb-mode.
BX R1 @; return to user app.
|
lizhirui/DreamCore | 2,972 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m4/lwp_iar.S | ;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2018-10-30 heyuanjie first version
; */
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
IMPORT lwp_get_sys_api
IMPORT lwp_get_kernel_sp
IMPORT lwp_set_kernel_sp
;/*
; * void lwp_user_entry(args, text, data);
; */
EXPORT lwp_user_entry
lwp_user_entry:
PUSH {R0-R3} ; push text&data addr.
MOV R0, SP ; v1 = SP
BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1)
; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 ; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} ; pop app address to R1.
; set data address.
MOV R9, R2
; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
;/*
; * void SVC_Handler(void);
; */
EXPORT SVC_Handler
SVC_Handler:
PUSH {LR}
; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} ; push app SP.
; get SVC number.
mov R0, R7
; get kernel system API
BL lwp_get_sys_api
PUSH {R0} ; push api
; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} ; pop api to R2.
POP {R1} ; pop app SP to R1.
stmfd r0!, {r1} ; save app SP to kernel SP
; push app params 5 and 6 (passed in R4-R5) to kernel SP
STMFD R0!, {R4 - R5}
; copy the exception stack frame from R1 (app SP) to R0 (kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register
STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] ; update LR
STR R2, [R0, #24] ; update api to PC
MSR PSP, R0 ; update SP, API is executed with kernel SP
; switch to privileged thread mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} ; restore EXC_RETURN (e.g. 0xFFFFFFED)
ORR LR, LR, #0x10
BX LR
;/*
; * void svc_exit(void);
; */
EXPORT svc_exit
svc_exit:
; save the API result, then locate the saved app SP.
PUSH {R0} ; push API return value.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] ; load pc
add r3, r3, #32 ; exception_stack_frame size
MSR PSP, R3 ; restore app stack pointer
; restore PSP & unprivileged thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
; return to lwp.
ORR R1, R1, #0x01 ; only Thumb-mode.
BX R1 ; return to user app.
END
|
lizhirui/DreamCore | 3,071 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-m4/lwp_rvds.S | ;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2018-10-30 heyuanjie first version
; */
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
IMPORT lwp_get_sys_api
IMPORT lwp_get_kernel_sp
IMPORT lwp_set_kernel_sp
;/*
; * void lwp_user_entry(args, text, data);
; */
lwp_user_entry PROC
EXPORT lwp_user_entry
PUSH {R0-R3} ; push text&data addr.
MOV R0, SP ; v1 = SP
BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1)
; set CPU to user-thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03 ; use PSP, user-thread mode.
MSR CONTROL, R2
POP {R0-R3} ; pop app address to R1.
; set data address.
MOV R9, R2
; run app, only Thumb-mode.
ORR R1, R1, #0x01
BX R1
; never reach here!
ENDP
;/*
; * void SVC_Handler(void);
; */
SVC_Handler PROC
EXPORT SVC_Handler
PUSH {LR}
; get user SP.
TST LR, #0x4
ITE EQ
MRSEQ R1, MSP
MRSNE R1, PSP
PUSH {R1} ; push app SP.
; get SVC number.
mov R0, R7
; get kernel system API
BL lwp_get_sys_api
PUSH {R0} ; push api
; get kernel SP to R0.
BL lwp_get_kernel_sp
POP {R2} ; pop api to R2.
POP {R1} ; pop app SP to R1.
stmfd r0!, {r1} ; save app SP to kernel SP
; push app params 5 and 6 (passed in R4-R5) to kernel SP
STMFD R0!, {R4 - R5}
; copy the exception stack frame from R1 (app SP) to R0 (kernel SP).
push {r8-r11}
LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register
STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP.
pop {r8-r11}
LDR R3, =svc_exit
STR R3, [R0, #20] ; update LR
STR R2, [R0, #24] ; update api to PC
MSR PSP, R0 ; update SP, API is executed with kernel SP
; switch to privileged thread mode.
MRS R3, CONTROL
BIC R3, R3, #0x01
ORR R3, R3, #0x02
MSR CONTROL, R3
POP {LR} ; restore EXC_RETURN (e.g. 0xFFFFFFED)
ORR LR, LR, #0x10
BX LR
ENDP
;/*
; * void svc_exit(void);
; */
svc_exit PROC
EXPORT svc_exit
; save the API result, then locate the saved app SP.
PUSH {R0} ; push API return value.
BL lwp_get_kernel_sp
ldr r3, [r0, #-4]
pop {r0}
ldr lr, [r3, #20]
ldr r1, [r3, #24] ; load pc
add r3, #32 ; exception_stack_frame size
MSR PSP, R3 ; restore app stack pointer
; restore PSP & unprivileged thread mode.
MRS R2, CONTROL
ORR R2, R2, #0x03
MSR CONTROL, R2
; return to lwp.
ORR R1, R1, #0x01 ; only Thumb-mode.
BX R1 ; return to user app.
ENDP
ALIGN
END
|
lizhirui/DreamCore | 1,380 | model/testcode/rt-thread/components/lwp/arch/arm/cortex-a9/lwp_gcc.S | /*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2018-12-10 Jesven first version
*/
#define Mode_USR 0x10
#define Mode_FIQ 0x11
#define Mode_IRQ 0x12
#define Mode_SVC 0x13
#define Mode_MON 0x16
#define Mode_ABT 0x17
#define Mode_UDF 0x1B
#define Mode_SYS 0x1F
#define A_Bit 0x100
#define I_Bit 0x80 @; when I bit is set, IRQ is disabled
#define F_Bit 0x40 @; when F bit is set, FIQ is disabled
#define T_Bit 0x20
.cpu cortex-a9
.syntax unified
.text
/*
* void lwp_user_entry(args, text, data);
*/
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
mrs r9, cpsr
bic r9, #0x1f
orr r9, #Mode_USR
cpsid i
msr spsr, r9
/* set data address. */
mov r9, r2
movs pc, r1
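/* "movs pc, r1" also copies SPSR into CPSR, so the core enters the
   app at r1 already switched to user mode as prepared above. */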
/*
* void vector_swi(void);
*/
.global vector_swi
.type vector_swi, % function
vector_swi:
push {lr}
mrs lr, spsr
push {r4, r5, lr}
cpsie i
push {r0 - r3, r12}
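/* The syscall number arrives in r7 (masked to 8 bits below); r0-r3
   and r12 are preserved around the lookup so the resolved API still
   sees the caller's original arguments. */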
and r0, r7, #0xff
bl lwp_get_sys_api
cmp r0, #0 /* r0 = api */
mov lr, r0
pop {r0 - r3, r12}
beq svc_exit
blx lr
svc_exit:
cpsid i
pop {r4, r5, lr}
msr spsr_cxsf, lr
pop {lr}
movs pc, lr
|
LoCryptEn/Key-security | 84,351 | Cache-bound/GMIn_Cache/Nortm/Kernel/sm4_dec_master.S | .section .data
sbox:
.byte 0xd6,0x90,0xe9,0xfe,0xcc,0xe1,0x3d,0xb7,0x16,0xb6,0x14,0xc2,0x28,0xfb,0x2c,0x05,0x2b,0x67,0x9a,0x76,0x2a,0xbe,0x04,0xc3,0xaa,0x44,0x13,0x26,0x49,0x86,0x06,0x99,0x9c,0x42,0x50,0xf4,0x91,0xef,0x98,0x7a,0x33,0x54,0x0b,0x43,0xed,0xcf,0xac,0x62,0xe4,0xb3,0x1c,0xa9,0xc9,0x08,0xe8,0x95,0x80,0xdf,0x94,0xfa,0x75,0x8f,0x3f,0xa6,0x47,0x07,0xa7,0xfc,0xf3,0x73,0x17,0xba,0x83,0x59,0x3c,0x19,0xe6,0x85,0x4f,0xa8,0x68,0x6b,0x81,0xb2,0x71,0x64,0xda,0x8b,0xf8,0xeb,0x0f,0x4b,0x70,0x56,0x9d,0x35,0x1e,0x24,0x0e,0x5e,0x63,0x58,0xd1,0xa2,0x25,0x22,0x7c,0x3b,0x01,0x21,0x78,0x87,0xd4,0x00,0x46,0x57,0x9f,0xd3,0x27,0x52,0x4c,0x36,0x02,0xe7,0xa0,0xc4,0xc8,0x9e,0xea,0xbf,0x8a,0xd2,0x40,0xc7,0x38,0xb5,0xa3,0xf7,0xf2,0xce,0xf9,0x61,0x15,0xa1,0xe0,0xae,0x5d,0xa4,0x9b,0x34,0x1a,0x55,0xad,0x93,0x32,0x30,0xf5,0x8c,0xb1,0xe3,0x1d,0xf6,0xe2,0x2e,0x82,0x66,0xca,0x60,0xc0,0x29,0x23,0xab,0x0d,0x53,0x4e,0x6f,0xd5,0xdb,0x37,0x45,0xde,0xfd,0x8e,0x2f,0x03,0xff,0x6a,0x72,0x6d,0x6c,0x5b,0x51,0x8d,0x1b,0xaf,0x92,0xbb,0xdd,0xbc,0x7f,0x11,0xd9,0x5c,0x41,0x1f,0x10,0x5a,0xd8,0x0a,0xc1,0x31,0x88,0xa5,0xcd,0x7b,0xbd,0x2d,0x74,0xd0,0x12,0xb8,0xe5,0xb4,0xb0,0x89,0x69,0x97,0x4a,0x0c,0x96,0x77,0x7e,0x65,0xb9,0xf1,0x09,0xc5,0x6e,0xc6,0x84,0x18,0xf0,0x7d,0xec,0x3a,0xdc,0x4d,0x20,0x79,0xee,0x5f,0x3e,0xd7,0xcb,0x39,0x48
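# (The 256 bytes above are the standard SM4 S-box.)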
.section .text
.type sm4_dec_master, @function
.globl sm4_dec_master
sm4_dec_master:
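# Overview: the 128-bit master key is read from debug registers
# %dr0/%dr1 (a ring-0-only access, so the key never sits in ordinary
# memory), the 32 round keys are derived by the fully unrolled
# schedule below, and rk16-rk31 are parked two per 64-bit register
# in %r8-%r15; rk0-rk15 are re-derived later for the last 16 rounds.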
# prologue
push %rbx # save callee-saved registers
push %r12
push %r13
push %r14
push %r15
push %rdx # save third argument register
push %rsi # save second argument register (input pointer)
# push %rdi
# KEY DERIVE
# xor %rax, %rax
mov %dr0, %rdx # k0 = mk0 xor fk0 # k1 = mk1 xor fk1
mov $0xA3B1BAC656AA3350, %rcx
xor %rcx, %rdx
mov %dr1, %rsi # k2 = mk2 xor fk2 # k3 = mk3 xor fk3
mov $0x677D9197B27022DC, %rcx
xor %rcx, %rsi
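# The two 64-bit immediates pack the standard SM4 system parameters
# FK0|FK1 and FK2|FK3, so after the xors %rdx holds k0 (high dword)
# and k1 (low dword), and %rsi holds k2|k3 the same way.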
# rk0 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck0
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x00070E15, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
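# Every "rkN generate" block repeats this pattern:
#   rk_N = k_N ^ T'(k_{N+1} ^ k_{N+2} ^ k_{N+3} ^ CK_N)
# where T' is the S-box lookup followed by the key-schedule linear
# map L'(B) = B ^ (B <<< 13) ^ (B <<< 23) (the rol $13 / rol $10
# pair applies the two rotations cumulatively), and the shr/shl/xor
# tail slides the key window: k0<-k1, k1<-k2, k2<-k3, k3<-rk_N.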
# rk1 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck1
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x1C232A31, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk2 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck2
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x383F464D, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk3 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck3
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x545B6269, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk4 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck4
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x70777E85, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk5 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck5
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x8C939AA1, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk6 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck6
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA8AFB6BD, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk7 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck7
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xC4CBD2D9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk8 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck8
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xE0E7EEF5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk9 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck9
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xFC030A11, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk10 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck10
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x181F262D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk11 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck11
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x343B4249, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk12 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck12
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x50575E65, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk13 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck13
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x6C737A81, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk14 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck14
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x888F969D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk15 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck15
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA4ABB2B9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk16 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck16
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xC0C7CED5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk17 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck17
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xDCE3EAF1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk18 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck18
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xF8FF060D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk19 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck19
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x141B2229, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov r16-r19 to %r8-%r9
mov %rdx, %r8
mov %rsi, %r9
# rk20 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck20
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x30373E45, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk21 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck21
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x4C535A61, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk22 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck22
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x686F767D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk23 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck23
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x848B9299, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk20-rk23 to %r10-%r11
mov %rdx, %r10
mov %rsi, %r11
# rk24 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck24
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA0A7AEB5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk25 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck25
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xBCC3CAD1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk26 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck26
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xD8DFE6ED, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk27 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck27
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xF4FB0209, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk24-rk27 to %r12-%r13
mov %rdx, %r12
mov %rsi, %r13
# rk28 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck28
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x10171E25, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk29 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck29
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x2C333A41, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk30 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck30
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x484F565D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk31 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck31
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x646B7279, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk28-rk31 to %r14-%r15
mov %rdx, %r14
mov %rsi, %r15
# get input
pop %rsi
# push %rdi
mov $0, %rcx # mov input0 to %rdi
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
mov %eax, %edi
shl $32, %rdi
inc %rcx # mov input1 to %rdi
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
xor %rax, %rdi
inc %rcx # mov input2 to %rbx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
mov %eax, %ebx
shl $32, %rbx
inc %rcx # mov input3 to %rbx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
xor %rax, %rbx
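# The 16 input bytes are gathered big-endian into four 32-bit words:
# X0|X1 in %rdi (X0 in the high dword) and X2|X3 in %rbx. SM4
# decryption uses the same round function as encryption but applies
# the round keys in reverse order, rk31 first.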
# ROUND 0
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk31
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
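# The rol $2/$8/$8/$6 chain above computes the cipher's linear map
# L(B) = B ^ (B<<<2) ^ (B<<<10) ^ (B<<<18) ^ (B<<<24); rounds 1-15
# repeat this block with the next round key in descending order.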
# ROUND 1
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk30
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r15
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 2
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk29
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 3
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk28
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r14
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 4
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk27
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 5
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk26
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r13
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 6
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk25
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 7
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk24
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r12
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 8
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk23
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 9
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk22
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r11
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 10
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk21
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 11
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk20
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r10
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 12
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk19
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 13
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk18
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r9
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 14
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk17
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 15
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk16
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r8
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# store round state: stash %rdi, %rbx in %r14, %r15
mov %rdi, %r14
mov %rbx, %r15
# regenerate rk0-rk15
# pop %rdi
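# Annotation: the preceding rounds walked the stored round keys in
# descending order (this appears to be the decryption direction), so
# rk0-rk15 are now rebuilt from the master key with
#   rk_i = k_i xor T'(k_{i+1} xor k_{i+2} xor k_{i+3} xor CK_i).
# The 128-bit master key apparently lives in debug registers
# %dr0/%dr1 (a register-bound key-storage scheme, usable only in
# ring 0), and the two 64-bit constants below are the packed SM4
# system parameters FK0||FK1 and FK2||FK3.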
mov %dr0, %rdx # k0 = mk0 xor fk0, k1 = mk1 xor fk1
mov $0xA3B1BAC656AA3350, %rcx
xor %rcx, %rdx
mov %dr1, %rsi # k2 = mk2 xor fk2, k3 = mk3 xor fk3
mov $0x677D9197B27022DC, %rcx
xor %rcx, %rsi
# rk0 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck0
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x00070E15, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
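# Annotation: the key schedule uses the lighter linear transform
# L'(B) = B xor (B<<<13) xor (B<<<23); rol $13 followed by rol $10
# visits B<<<13 and then B<<<23.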
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk1 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck1
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x1C232A31, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk2 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck2
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x383F464D, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk3 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck3
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x545B6269, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk0-rk3 to %r8-%r9
mov %rdx, %r8
mov %rsi, %r9
# rk4 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck4
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x70777E85, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk5 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck5
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x8C939AA1, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk6 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck6
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA8AFB6BD, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk7 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck7
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xC4CBD2D9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk4-rk7 to %r10-%r11
mov %rdx, %r10
mov %rsi, %r11
# rk8 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck8
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xE0E7EEF5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk9 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck9
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xFC030A11, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk10 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck10
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x181F262D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk11 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck11
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x343B4249, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk8-rk11 to %r12-%r13
mov %rdx, %r12
mov %rsi, %r13
# rk12 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck12
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x50575E65, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk13 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck13
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x6C737A81, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk14 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck14
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x888F969D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk15 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck15
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA4ABB2B9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# swap back: restore the round state, park rk12-rk15 in %r14/%r15
mov %r14, %rdi
mov %r15, %rbx
mov %rdx, %r14
mov %rsi, %r15
# ROUND 16
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk15
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 17
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk14
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r15
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 18
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk13
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 19
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk12
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r14
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 20
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk11
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 21
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk10
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r13
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 22
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk9
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 23
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk8
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r12
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 24
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk7
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 25
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk6
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r11
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 26
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk5
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 27
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk4
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r10
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 28
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk3
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 29
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk2
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r9
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 30
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk1
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 31
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk0
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r8
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# output
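# Annotation: each state register still holds two 32-bit words with
# the earlier word in the high half; every word is emitted most
# significant byte first, so the 16-byte result comes out big-endian
# per word, matching how the input block was read in.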
pop %rdx # restore output pointer; mov rbx to output[0-7]
mov $3, %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
mov $7, %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
mov $11, %rcx # mov rdi to output[8-15]
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
mov $15, %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbx
mov $1, %rax
ret |
LoCryptEn/Key-security | 55,142 | Cache-bound/GMIn_Cache/Nortm/Kernel/sm4_enc.S | .section .data
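# The standard SM4 S-box (256 bytes).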
sbox:
.byte 0xd6,0x90,0xe9,0xfe,0xcc,0xe1,0x3d,0xb7,0x16,0xb6,0x14,0xc2,0x28,0xfb,0x2c,0x05,0x2b,0x67,0x9a,0x76,0x2a,0xbe,0x04,0xc3,0xaa,0x44,0x13,0x26,0x49,0x86,0x06,0x99,0x9c,0x42,0x50,0xf4,0x91,0xef,0x98,0x7a,0x33,0x54,0x0b,0x43,0xed,0xcf,0xac,0x62,0xe4,0xb3,0x1c,0xa9,0xc9,0x08,0xe8,0x95,0x80,0xdf,0x94,0xfa,0x75,0x8f,0x3f,0xa6,0x47,0x07,0xa7,0xfc,0xf3,0x73,0x17,0xba,0x83,0x59,0x3c,0x19,0xe6,0x85,0x4f,0xa8,0x68,0x6b,0x81,0xb2,0x71,0x64,0xda,0x8b,0xf8,0xeb,0x0f,0x4b,0x70,0x56,0x9d,0x35,0x1e,0x24,0x0e,0x5e,0x63,0x58,0xd1,0xa2,0x25,0x22,0x7c,0x3b,0x01,0x21,0x78,0x87,0xd4,0x00,0x46,0x57,0x9f,0xd3,0x27,0x52,0x4c,0x36,0x02,0xe7,0xa0,0xc4,0xc8,0x9e,0xea,0xbf,0x8a,0xd2,0x40,0xc7,0x38,0xb5,0xa3,0xf7,0xf2,0xce,0xf9,0x61,0x15,0xa1,0xe0,0xae,0x5d,0xa4,0x9b,0x34,0x1a,0x55,0xad,0x93,0x32,0x30,0xf5,0x8c,0xb1,0xe3,0x1d,0xf6,0xe2,0x2e,0x82,0x66,0xca,0x60,0xc0,0x29,0x23,0xab,0x0d,0x53,0x4e,0x6f,0xd5,0xdb,0x37,0x45,0xde,0xfd,0x8e,0x2f,0x03,0xff,0x6a,0x72,0x6d,0x6c,0x5b,0x51,0x8d,0x1b,0xaf,0x92,0xbb,0xdd,0xbc,0x7f,0x11,0xd9,0x5c,0x41,0x1f,0x10,0x5a,0xd8,0x0a,0xc1,0x31,0x88,0xa5,0xcd,0x7b,0xbd,0x2d,0x74,0xd0,0x12,0xb8,0xe5,0xb4,0xb0,0x89,0x69,0x97,0x4a,0x0c,0x96,0x77,0x7e,0x65,0xb9,0xf1,0x09,0xc5,0x6e,0xc6,0x84,0x18,0xf0,0x7d,0xec,0x3a,0xdc,0x4d,0x20,0x79,0xee,0x5f,0x3e,0xd7,0xcb,0x39,0x48
.section .text
.type sm4_enc, @function
.globl sm4_enc
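# Assumed C prototype (System V AMD64 ABI):
#   void sm4_enc(const unsigned char key[16],
#                const unsigned char in[16],
#                unsigned char out[16]);
# %rdi = key, %rsi = plaintext block, %rdx = ciphertext block.
# Round keys are derived on the fly, one per round, so the expanded
# key schedule apparently never touches memory.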
sm4_enc:
# ready
push %rbx # save the callee-saved registers we clobber
push %r12
push %r13
push %r14
push %r15
push %rdx # save the output pointer (third argument register)
# mov input0-input3 into %r8d-%r11d (big-endian words)
mov $0, %rcx # mov input0 to %r8d
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
mov %eax, %r8d
inc %rcx # mov input1 to %r9d
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
mov %eax, %r9d
inc %rcx # mov input2 to %r10d
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
mov %eax, %r10d
inc %rcx # mov input3 to %r11d
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
mov %eax, %r11d
# mov k0-k3 into %r12d-%r15d
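# Annotation: the constants xored in below are SM4's fixed system
# parameters FK0-FK3; k_i = mk_i xor FK_i seeds the key schedule.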
mov $0, %rbx # k0 = mk0 xor fk0
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0xA3B1BAC6, %ecx
xor %ecx, %eax
mov %eax, %r12d
inc %rbx # k1 = mk1 xor fk1
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0x56AA3350, %ecx
xor %ecx, %eax
mov %eax, %r13d
inc %rbx # k2 = mk2 xor fk2
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0x677D9197, %ecx
xor %ecx, %eax
mov %eax, %r14d
inc %rbx # k3 = mk3 xor fk3
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0xB27022DC, %ecx
xor %ecx, %eax
mov %eax, %r15d
# ROUND 0
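# Annotation: each of the 32 rounds first derives its round key
#   rk_i = k_i xor T'(k_{i+1} xor k_{i+2} xor k_{i+3} xor CK_i)
# and then applies the round function
#   x_{i+4} = x_i xor T(x_{i+1} xor x_{i+2} xor x_{i+3} xor rk_i),
# where T = L(tao(.)) and T' = L'(tao(.)) share the S-box lookup
# but differ in the linear transform.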
# rk0 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x00070E15, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
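# Annotation: L'(B) = B xor (B<<<13) xor (B<<<23); the rotates are
# cumulative (13, then 13+10 = 23).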
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 0
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
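# Annotation: L(B) = B xor (B<<<2) xor (B<<<10) xor (B<<<18) xor
# (B<<<24); the rotate counts 2, 8, 8, 6 accumulate to 2, 10, 18, 24.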
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 1
# rk1 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck1
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x1C232A31, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 1
mov %r9d, %eax # x1 xor x2 xor x3 xor rk1
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 2
# rk2 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck2
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x383F464D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 2
mov %r9d, %eax # x1 xor x2 xor x3 xor rk2
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 3
# rk3 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck3
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x545B6269, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 3
mov %r9d, %eax # x1 xor x2 xor x3 xor rk3
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 4
# rk4 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck4
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x70777E85, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 4
mov %r9d, %eax # x1 xor x2 xor x3 xor rk4
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 5
# rk5 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck5
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x8C939AA1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 5
mov %r9d, %eax # x1 xor x2 xor x3 xor rk5
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 6
# rk6 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck6
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xA8AFB6BD, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 6
mov %r9d, %eax # x1 xor x2 xor x3 xor rk6
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 7
# rk7 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck7
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xC4CBD2D9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 7
mov %r9d, %eax # x1 xor x2 xor x3 xor rk7
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 8
# rk8 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck8
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xE0E7EEF5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 8
mov %r9d, %eax # x1 xor x2 xor x3 xor rk8
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 9
# rk9 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck9
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xFC030A11, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 9
mov %r9d, %eax # x1 xor x2 xor x3 xor rk9
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 10
# rk10 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck10
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x181F262D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 10
mov %r9d, %eax # x1 xor x2 xor x3 xor rk10
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 11
# rk11 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck11
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x343B4249, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 11
mov %r9d, %eax # x1 xor x2 xor x3 xor rk11
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 12
# rk12 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck12
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x50575E65, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 12
mov %r9d, %eax # x1 xor x2 xor x3 xor rk12
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 13
# rk13 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck13
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x6C737A81, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk13
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 13
mov %r9d, %eax # x1 xor x2 xor x3 xor rk13
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 14
# rk14 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck14
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x888F969D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk14
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 14
mov %r9d, %eax # x1 xor x2 xor x3 xor rk14
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 15
# rk15 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck15
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xA4ABB2B9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk15
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 15
mov %r9d, %eax # x1 xor x2 xor x3 xor rk15
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 16
# rk16 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck16
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xC0C7CED5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk16
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 16
mov %r9d, %eax # x1 xor x2 xor x3 xor rk16
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 17
# rk17 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck17
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xDCE3EAF1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk17
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 17
mov %r9d, %eax # x1 xor x2 xor x3 xor rk17
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 18
# rk18 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck18
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xF8FF060D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk18
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 18
mov %r9d, %eax # x1 xor x2 xor x3 xor rk18
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 19
# rk19 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck19
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x141B2229, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk19
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 19
mov %r9d, %eax # x1 xor x2 xor x3 xor rk19
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 20
# rk20 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck20
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x30373E45, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk20
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 20
mov %r9d, %eax # x1 xor x2 xor x3 xor rk20
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 21
# rk21 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck21
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x4C535A61, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk21
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 21
mov %r9d, %eax # x1 xor x2 xor x3 xor rk21
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 22
# rk22 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck22
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x686F767D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk22
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 22
mov %r9d, %eax # x1 xor x2 xor x3 xor rk22
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 23
# rk23 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck23
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x848B9299, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk23
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 23
mov %r9d, %eax # x1 xor x2 xor x3 xor rk23
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 24
# rk24 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck24
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xA0A7AEB5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk24
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 24
mov %r9d, %eax # x1 xor x2 xor x3 xor rk24
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 25
# rk25 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck25
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xBCC3CAD1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk25
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 25
mov %r9d, %eax # x1 xor x2 xor x3 xor rk25
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 26
# rk26 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck26
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xD8DFE6ED, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk26
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 26
mov %r9d, %eax # x1 xor x2 xor x3 xor rk26
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 27
# rk27 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck27
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xF4FB0209, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk27
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 27
mov %r9d, %eax # x1 xor x2 xor x3 xor rk27
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 28
# rk28 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck28
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x10171E25, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk28
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 28
mov %r9d, %eax # x1 xor x2 xor x3 xor rk28
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 29
# rk29 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck29
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x2C333A41, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk29
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 29
mov %r9d, %eax # x1 xor x2 xor x3 xor rk29
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 30
# rk30 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck30
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x484F565D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk30
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 30
mov %r9d, %eax # x1 xor x2 xor x3 xor rk30
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 31
# rk31 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck31
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x646B7279, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk31
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 31
mov %r9d, %eax # x1 xor x2 xor x3 xor rk31
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %edx, %eax
# xor %eax, %eax
# mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
# mov %r10d, %r9d
# mov %r11d, %r10d
# mov %edx, %r11d
# output
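# The final round left x35 in %eax (note the reversed xor operands and the
# skipped register rotation above); the output block is the reverse transform
# (x35, x34, x33, x32) = (%eax, %r11d, %r10d, %r9d), each word written
# big-endian from byte offset 3 down to byte offset 0.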
pop %rdx # recover the output pointer saved in the prologue
mov $3, %rcx # output 0
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %eax
dec %rcx
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %eax
dec %rcx
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %eax
dec %rcx
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
mov $7, %rcx # output 1
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r11d
dec %rcx
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r11d
dec %rcx
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r11d
dec %rcx
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
mov $11, %rcx # output 2
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r10d
dec %rcx
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r10d
dec %rcx
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r10d
dec %rcx
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
mov $15, %rcx # output 3
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r9d
dec %rcx
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r9d
dec %rcx
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r9d
dec %rcx
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
pop %r15 # restore callee-saved registers
pop %r14
pop %r13
pop %r12
pop %rbx
ret |
LoCryptEn/Key-security | 86,021 | Cache-bound/GMIn_Cache/Nortm/Kernel/sm4_dec.S | .section .data
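# SM4 S-box: 256-byte substitution table for tao, indexed by the input byte.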
sbox:
.byte 0xd6,0x90,0xe9,0xfe,0xcc,0xe1,0x3d,0xb7,0x16,0xb6,0x14,0xc2,0x28,0xfb,0x2c,0x05,0x2b,0x67,0x9a,0x76,0x2a,0xbe,0x04,0xc3,0xaa,0x44,0x13,0x26,0x49,0x86,0x06,0x99,0x9c,0x42,0x50,0xf4,0x91,0xef,0x98,0x7a,0x33,0x54,0x0b,0x43,0xed,0xcf,0xac,0x62,0xe4,0xb3,0x1c,0xa9,0xc9,0x08,0xe8,0x95,0x80,0xdf,0x94,0xfa,0x75,0x8f,0x3f,0xa6,0x47,0x07,0xa7,0xfc,0xf3,0x73,0x17,0xba,0x83,0x59,0x3c,0x19,0xe6,0x85,0x4f,0xa8,0x68,0x6b,0x81,0xb2,0x71,0x64,0xda,0x8b,0xf8,0xeb,0x0f,0x4b,0x70,0x56,0x9d,0x35,0x1e,0x24,0x0e,0x5e,0x63,0x58,0xd1,0xa2,0x25,0x22,0x7c,0x3b,0x01,0x21,0x78,0x87,0xd4,0x00,0x46,0x57,0x9f,0xd3,0x27,0x52,0x4c,0x36,0x02,0xe7,0xa0,0xc4,0xc8,0x9e,0xea,0xbf,0x8a,0xd2,0x40,0xc7,0x38,0xb5,0xa3,0xf7,0xf2,0xce,0xf9,0x61,0x15,0xa1,0xe0,0xae,0x5d,0xa4,0x9b,0x34,0x1a,0x55,0xad,0x93,0x32,0x30,0xf5,0x8c,0xb1,0xe3,0x1d,0xf6,0xe2,0x2e,0x82,0x66,0xca,0x60,0xc0,0x29,0x23,0xab,0x0d,0x53,0x4e,0x6f,0xd5,0xdb,0x37,0x45,0xde,0xfd,0x8e,0x2f,0x03,0xff,0x6a,0x72,0x6d,0x6c,0x5b,0x51,0x8d,0x1b,0xaf,0x92,0xbb,0xdd,0xbc,0x7f,0x11,0xd9,0x5c,0x41,0x1f,0x10,0x5a,0xd8,0x0a,0xc1,0x31,0x88,0xa5,0xcd,0x7b,0xbd,0x2d,0x74,0xd0,0x12,0xb8,0xe5,0xb4,0xb0,0x89,0x69,0x97,0x4a,0x0c,0x96,0x77,0x7e,0x65,0xb9,0xf1,0x09,0xc5,0x6e,0xc6,0x84,0x18,0xf0,0x7d,0xec,0x3a,0xdc,0x4d,0x20,0x79,0xee,0x5f,0x3e,0xd7,0xcb,0x39,0x48
.section .text
.type sm4_dec, @function
.globl sm4_dec
sm4_dec:
# ready
push %rbx # save callee-saved registers
push %r12
push %r13
push %r14
push %r15
push %rdx # save the third argument (output pointer)
push %rsi # save the second argument (input pointer)
# push %rdi
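# Argument registers (System V AMD64 calling convention assumed):
# %rdi = 16-byte master key, %rsi = 16-byte input block,
# %rdx = 16-byte output buffer.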
# KEY DERIVE
# xor %rax, %rax
mov $0, %rbx # k0 = mk0 xor fk0
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0xA3B1BAC6, %ecx
xor %ecx, %eax
mov %eax, %edx
shl $32, %rdx
inc %rbx # k1 = mk1 xor fk1
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0x56AA3350, %ecx
xor %ecx, %eax
xor %rax, %rdx
inc %rbx # k2 = mk2 xor fk2
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0x677D9197, %ecx
xor %ecx, %eax
mov %eax, %esi
shl $32, %rsi
inc %rbx # k3 = mk3 xor fk3
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0xB27022DC, %ecx
xor %ecx, %eax
xor %rax, %rsi
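# Each key word k_i is read big-endian from the master key and xored with
# the FK_i constant; the words are packed two per 64-bit register:
# %rdx = k0:k1 (k0 in the high half), %rsi = k2:k3 (k2 high).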
# rk0 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck0
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x00070E15, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
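# Packed window shift: the shr/shl and xors above leave %rdx = k1:k2 and
# %rsi = k3:k4 (with k4 = rk0), so the identical sequence can generate the
# next round key; rol $32 exposes each packed half for the 32-bit xors.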
# rk1 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck1
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x1C232A31, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk1
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk2 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck2
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x383F464D, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk2
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk3 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck3
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x545B6269, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk3
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk4 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck4
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x70777E85, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk4
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk5 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck5
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x8C939AA1, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk5
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk6 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck6
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA8AFB6BD, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk6
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk7 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck7
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xC4CBD2D9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk7
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk8 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck8
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xE0E7EEF5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk8
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk9 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck9
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xFC030A11, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk9
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk10 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck10
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x181F262D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk10
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk11 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck11
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x343B4249, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk11
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk12 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck12
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x50575E65, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk12
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk13 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck13
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x6C737A81, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk13
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk14 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck14
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x888F969D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk14
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk15 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck15
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA4ABB2B9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk15
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk16 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck16
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xC0C7CED5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk16
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk17 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck17
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xDCE3EAF1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk17
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk18 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck18
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xF8FF060D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk18
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk19 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck19
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x141B2229, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk19
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk16-rk19 to %r8-%r9
mov %rdx, %r8
mov %rsi, %r9
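# SM4 decryption applies the round keys in reverse order (rk31 .. rk0), so
# the later keys are parked as they are generated: rk16-rk19 in %r8:%r9
# here, then rk20-rk23 in %r10:%r11 and rk24-rk27 in %r12:%r13 below.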
# rk20 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck20
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x30373E45, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk20
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk21 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck21
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x4C535A61, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk21
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk22 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck22
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x686F767D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk22
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk23 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck23
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x848B9299, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk23
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk20-rk23 to %r10-%r11
mov %rdx, %r10
mov %rsi, %r11
# rk24 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck24
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA0A7AEB5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk24
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk25 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck25
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xBCC3CAD1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk25
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk26 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck26
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xD8DFE6ED, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk26
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk27 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck27
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xF4FB0209, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk24-rk27 to %r12-%r13
mov %rdx, %r12
mov %rsi, %r13
# rk28 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck28
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x10171E25, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk29 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck29
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x2C333A41, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk30 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck30
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x484F565D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk31 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck31
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x646B7279, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk28-rk31 to %r14-%r15
mov %rdx, %r14
mov %rsi, %r15
# get input
pop %rsi
push %rdi
mov $0, %rcx # mov input0 to %rdi
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
mov %eax, %edi
shl $32, %rdi
inc %rcx # mov input1 to %rdi
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
xor %rax, %rdi
inc %rcx # mov input2 to %rbx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
mov %eax, %ebx
shl $32, %rbx
inc %rcx # mov input3 to %rbx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
mov (%rsi,%rcx,1), %al
xor %rax, %rbx
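# Input convention: each 32-bit word is assembled big-endian, one byte at a
# time (load into %al, shift %eax left by 8, repeat); the stray high bits of
# %eax are shifted out after the three shifts. The four plaintext words end
# up packed as x0||x1 in %rdi and x2||x3 in %rbx, mirroring the
# two-words-per-register layout used for the round keys.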
# ROUND 0
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk31
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
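# That is one full round: x4 = x0 ^ T(x1 ^ x2 ^ x3 ^ rk), where T = L(tao(.)),
# tao is the byte-wise sbox, and L(B) = B ^ (B<<<2) ^ (B<<<10) ^ (B<<<18) ^
# (B<<<24). The rotate chain builds L incrementally: rol $2, then 8, 8 and 6
# more positions give the 2/10/18/24 rotations. A C sketch (rotl32 assumed):
#   uint32_t t = x1 ^ x2 ^ x3 ^ rk;
#   t = tao(t);
#   t ^= rotl32(t, 2) ^ rotl32(t, 10) ^ rotl32(t, 18) ^ rotl32(t, 24);
#   x4 = x0 ^ t;
# Note the round keys are consumed newest-first (rk31 down to rk0), i.e. in
# SM4 decryption order.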
# ROUND 1
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk30
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r15
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 2
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk29
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 3
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk28
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r14
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 4
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk27
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 5
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk26
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r13
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 6
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk25
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 7
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk24
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r12
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 8
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk23
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 9
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk22
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r11
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 10
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk21
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 11
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk20
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r10
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 12
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk19
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 13
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk18
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r9
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 14
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk17
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 15
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk16
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r8
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# store rdi, rbx
mov %rdi, %r14
mov %rbx, %r15
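# Only sixteen 32-bit subkeys fit in %r8-%r15, so the cipher runs in two
# halves: rounds 0-15 consumed rk31..rk16 above, and the intermediate state
# is now parked in %r14/%r15 while rk0-rk15 are re-derived from the caller's
# key (its pointer was kept on the stack by the earlier "push %rdi"). The
# schedule is recomputed rather than stored so no subkey is ever spilled to
# memory.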
# regenerate rk0-rk15
pop %rdi
mov $0, %rbx # k0 = mk0 xor fk0
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0xA3B1BAC6, %ecx
xor %ecx, %eax
mov %eax, %edx
shl $32, %rdx
inc %rbx # k1 = mk1 xor fk1
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0x56AA3350, %ecx
xor %ecx, %eax
xor %rax, %rdx
inc %rbx # k2 = mk2 xor fk2
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0x677D9197, %ecx
xor %ecx, %eax
mov %eax, %esi
shl $32, %rsi
inc %rbx # k3 = mk3 xor fk3
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
shl $8, %eax
inc %rbx
movb (%rdi,%rbx,1), %al
mov $0xB27022DC, %ecx
xor %ecx, %eax
xor %rax, %rsi
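# The four constants xored in above are the SM4 system parameters FK0..FK3
# (A3B1BAC6, 56AA3350, 677D9197, B27022DC); the schedule starts from
# k_i = mk_i ^ FK_i for i = 0..3.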
# rk0 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck0
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x00070E15, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
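# The per-step constant (0x00070E15 here, 0x1C232A31 for rk1, and so on) is
# CK_i, whose bytes are ck_{i,j} = (4i + j) * 7 mod 256. A C sketch of the
# generator, useful for cross-checking the immediates used throughout:
#   for (int i = 0; i < 32; i++) {
#       uint32_t ck = 0;
#       for (int j = 0; j < 4; j++)
#           ck = (ck << 8) | (((4 * i + j) * 7) & 0xFF);
#       CK[i] = ck;
#   }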
# rk1 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck1
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x1C232A31, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk2 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck2
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x383F464D, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk3 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck3
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x545B6269, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk0-rk3 to %r8-%r9
mov %rdx, %r8
mov %rsi, %r9
# rk4 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck4
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x70777E85, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk5 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck5
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x8C939AA1, %ecx
xor %ecx, %eax
# xor %ecx, %ecx
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk6 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck6
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA8AFB6BD, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk7 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck7
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xC4CBD2D9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk4-rk7 to %r10-%r11
mov %rdx, %r10
mov %rsi, %r11
# rk8 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck8
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xE0E7EEF5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk9 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck9
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xFC030A11, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk10 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck10
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x181F262D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk11 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck11
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x343B4249, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# mov rk8-rk11 to %r12-%r13
mov %rdx, %r12
mov %rsi, %r13
# rk12 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck12
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x50575E65, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk13 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck13
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x6C737A81, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk14 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck14
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0x888F969D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# rk15 generate
mov $0xFFFFFFFF, %eax # k1 xor k3 xor k2 xor ck15
and %edx, %eax
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
rol $32, %rsi
mov $0xFFFFFFFF, %ecx
and %esi, %ecx
xor %ecx, %eax
mov $0xA4ABB2B9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
rol $32, %rdx # k0 xor T'
mov $0xFFFFFFFF, %eax
and %edx, %eax
xor %eax, %ecx
shr $32, %rdx # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
shl $32, %rdx
mov $0xFFFFFFFF, %eax
and %esi, %eax
xor %rax, %rdx
shr $32, %rsi
shl $32, %rsi
xor %rcx, %rsi
# move the round state back out, move the newest round keys in
mov %r14, %rdi
mov %r15, %rbx
mov %rdx, %r14
mov %rsi, %r15
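# Handover: the parked cipher state returns to %rdi/%rbx while the freshest
# pair rk14||rk15 takes over %r15 (and rk12||rk13 takes %r14), so rounds
# 16-31 can walk the first half of the schedule from rk15 down to rk0
# exactly as rounds 0-15 walked rk31 down to rk16.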
# ROUND 16
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk15
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 17
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk14
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r15
and %r15, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 18
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk13
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 19
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk12
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r14
and %r14, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 20
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk11
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 21
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk10
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r13
and %r13, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 22
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk9
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 23
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk8
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r12
and %r12, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 24
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk7
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 25
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk6
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r11
and %r11, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 26
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk5
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 27
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk4
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r10
and %r10, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 28
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk3
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 29
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk2
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r9
and %r9, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 30
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk1
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# ROUND 31
mov $0xFFFFFFFF, %eax # x1 xor x3 xor x2 xor rk0
and %edi, %eax
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
rol $32, %rbx
mov $0xFFFFFFFF, %ecx
and %ebx, %ecx
xor %ecx, %eax
mov $0xFFFFFFFF, %ecx
shr $32, %r8
and %r8, %rcx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
xor %rcx, %rcx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
rol $32, %rdi # x4 = x0 xor T
mov $0xFFFFFFFF, %eax
and %edi, %eax
xor %eax, %edx
mov $0xFFFFFFFF, %eax # mov x0 = x1, x1 = x2, x2 = x3, x3 = x4
and %ebx, %eax
shr $32, %rdi
shl $32, %rdi
xor %rax, %rdi
shr $32, %rbx
shl $32, %rbx
xor %rdx, %rbx
# output
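# Final reverse transform R: SM4 emits the last four state words in reverse
# order. The low word of %rbx is the newest state word and goes to
# output[0..3], its high word to output[4..7], then %rdi supplies
# output[8..15]; each word is peeled off a byte at a time with and/shr and
# stored big-endian (least-significant byte at the highest offset of each
# 4-byte group).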
pop %rdx # mov rbx to output[0-7]
mov $3, %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
mov $7, %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rbx
dec %rcx
movb $0xFF, %al
and %rbx, %rax
movb %al, (%rdx,%rcx,1)
mov $11, %rcx # mov rdi to output[8-15]
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
mov $15, %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
shr $8, %rdi
dec %rcx
movb $0xFF, %al
and %rdi, %rax
movb %al, (%rdx,%rcx,1)
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbx
mov $1, %rax
ret |
LoCryptEn/Key-security | 54,545 | Cache-bound/GMIn_Cache/Nortm/Kernel/sm4_enc_master.S | .section .data
sbox:
.byte 0xd6,0x90,0xe9,0xfe,0xcc,0xe1,0x3d,0xb7,0x16,0xb6,0x14,0xc2,0x28,0xfb,0x2c,0x05,0x2b,0x67,0x9a,0x76,0x2a,0xbe,0x04,0xc3,0xaa,0x44,0x13,0x26,0x49,0x86,0x06,0x99,0x9c,0x42,0x50,0xf4,0x91,0xef,0x98,0x7a,0x33,0x54,0x0b,0x43,0xed,0xcf,0xac,0x62,0xe4,0xb3,0x1c,0xa9,0xc9,0x08,0xe8,0x95,0x80,0xdf,0x94,0xfa,0x75,0x8f,0x3f,0xa6,0x47,0x07,0xa7,0xfc,0xf3,0x73,0x17,0xba,0x83,0x59,0x3c,0x19,0xe6,0x85,0x4f,0xa8,0x68,0x6b,0x81,0xb2,0x71,0x64,0xda,0x8b,0xf8,0xeb,0x0f,0x4b,0x70,0x56,0x9d,0x35,0x1e,0x24,0x0e,0x5e,0x63,0x58,0xd1,0xa2,0x25,0x22,0x7c,0x3b,0x01,0x21,0x78,0x87,0xd4,0x00,0x46,0x57,0x9f,0xd3,0x27,0x52,0x4c,0x36,0x02,0xe7,0xa0,0xc4,0xc8,0x9e,0xea,0xbf,0x8a,0xd2,0x40,0xc7,0x38,0xb5,0xa3,0xf7,0xf2,0xce,0xf9,0x61,0x15,0xa1,0xe0,0xae,0x5d,0xa4,0x9b,0x34,0x1a,0x55,0xad,0x93,0x32,0x30,0xf5,0x8c,0xb1,0xe3,0x1d,0xf6,0xe2,0x2e,0x82,0x66,0xca,0x60,0xc0,0x29,0x23,0xab,0x0d,0x53,0x4e,0x6f,0xd5,0xdb,0x37,0x45,0xde,0xfd,0x8e,0x2f,0x03,0xff,0x6a,0x72,0x6d,0x6c,0x5b,0x51,0x8d,0x1b,0xaf,0x92,0xbb,0xdd,0xbc,0x7f,0x11,0xd9,0x5c,0x41,0x1f,0x10,0x5a,0xd8,0x0a,0xc1,0x31,0x88,0xa5,0xcd,0x7b,0xbd,0x2d,0x74,0xd0,0x12,0xb8,0xe5,0xb4,0xb0,0x89,0x69,0x97,0x4a,0x0c,0x96,0x77,0x7e,0x65,0xb9,0xf1,0x09,0xc5,0x6e,0xc6,0x84,0x18,0xf0,0x7d,0xec,0x3a,0xdc,0x4d,0x20,0x79,0xee,0x5f,0x3e,0xd7,0xcb,0x39,0x48
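# (The 256 bytes above are the standard SM4 S-box, indexed one zero-extended
# byte at a time via sbox(,%reg,1) in the code below.)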
.section .text
.type sm4_enc_master, @function
.globl sm4_enc_master
# SYX: provides a key-expansion routine for reuse
# rdi = NULL, rsi = input plaintext, rdx = output ciphertext
sm4_enc_master:
# ready
push %rbx # save callee-saved registers
push %r12
push %r13
push %r14
push %r15
push %rdx # push third argument register
# mov input0-input3 into %r8d-%r11d
mov $0, %rcx # mov input0 to %r8d
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
mov %eax, %r8d
inc %rcx # mov input1 to %r9d
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
mov %eax, %r9d
inc %rcx # mov input2 to %r10d
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
mov %eax, %r10d
inc %rcx # mov input3 to %r11d
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
shl $8, %eax
inc %rcx
movb (%rsi,%rcx,1), %al
mov %eax, %r11d
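# Equivalent C for the load above (a sketch; in[] is the 16-byte block
# at %rsi, read big-endian into X0..X3 = %r8d-%r11d):
#   X[j] = in[4*j]<<24 | in[4*j+1]<<16 | in[4*j+2]<<8 | in[4*j+3];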
# mov k0-k3 into %r12d-%r15d
mov %dr0, %rax # low half: k1 = mk1 xor fk1
mov $0xA3B1BAC656AA3350, %rcx # FK0||FK1
xor %rcx, %rax
mov %eax, %r13d
shr $32, %rax # high half: k0 = mk0 xor fk0
mov %eax, %r12d
mov %dr1, %rax # low half: k3 = mk3 xor fk3
mov $0x677D9197B27022DC, %rcx # FK2||FK3
xor %rcx, %rax
mov %eax, %r15d
shr $32, %rax # high half: k2 = mk2 xor fk2
mov %eax, %r14d
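# Key-schedule init sketch (assuming the 128-bit master key sits in
# %dr0/%dr1 with mk0 in the high half of %dr0, which the constants imply):
#   static const uint32_t FK[4] =
#       {0xA3B1BAC6, 0x56AA3350, 0x677D9197, 0xB27022DC};
#   K[i] = MK[i] ^ FK[i];   /* i = 0..3, kept in %r12d-%r15d */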
# ROUND 0 (note: the comments in the 31 rounds below were copy-pasted
# from this one; in ROUND N read ckN for "ck0", rkN for "rk0", and the
# key-shift comment's "r0" means the freshly generated rkN)
# rk0 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x00070E15, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2, k2 = k3, k3 = k4 = rk0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 0
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3 = x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
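# Reference model of one unrolled round (a minimal C sketch; rotl is a
# 32-bit rotate-left and Sbox is the table in .data):
#   uint32_t tau(uint32_t a) {                    /* "tao": per-byte S-box */
#       return Sbox[a >> 24] << 24 | Sbox[(a >> 16) & 0xFF] << 16
#            | Sbox[(a >> 8) & 0xFF] << 8 | Sbox[a & 0xFF];
#   }
#   /* key schedule (rol 13 then rol 10 above = rotations by 13 and 23):
#      rk[i] = K[i] ^ Lp(tau(K[i+1] ^ K[i+2] ^ K[i+3] ^ CK[i]));
#      Lp(b) = b ^ rotl(b,13) ^ rotl(b,23);
#      encryption (rol 2/8/8/6 above = rotations by 2, 10, 18, 24):
#      X[i+4] = X[i] ^ L(tau(X[i+1] ^ X[i+2] ^ X[i+3] ^ rk[i]));
#      L(b)   = b ^ rotl(b,2) ^ rotl(b,10) ^ rotl(b,18) ^ rotl(b,24); */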
# ROUND 1
# rk1 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x1C232A31, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 1
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 2
# rk2 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x383F464D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 2
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 3
# rk3 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x545B6269, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 3
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 4
# rk4 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x70777E85, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 4
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 5
# rk5 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x8C939AA1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 5
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 6
# rk6 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xA8AFB6BD, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 6
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 7
# rk7 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xC4CBD2D9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 7
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 8
# rk8 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xE0E7EEF5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 8
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 9
# rk9 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xFC030A11, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 9
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 10
# rk10 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x181F262D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 10
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 11
# rk11 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x343B4249, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 11
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 12
# rk12 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x50575E65, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 12
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 13
# rk13 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x6C737A81, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 13
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 14
# rk14 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x888F969D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 14
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 15
# rk15 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xA4ABB2B9, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 15
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 16
# rk16 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xC0C7CED5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 16
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 17
# rk17 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xDCE3EAF1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 17
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 18
# rk18 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xF8FF060D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 18
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 19
# rk19 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x141B2229, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 19
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 20
# rk20 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x30373E45, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 20
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 21
# rk21 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x4C535A61, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 21
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 22
# rk22 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x686F767D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 22
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 23
# rk23 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x848B9299, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 23
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 24
# rk24 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xA0A7AEB5, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 24
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 25
# rk25 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xBCC3CAD1, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 25
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 26
# rk26 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xD8DFE6ED, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 26
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 27
# rk27 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0xF4FB0209, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 27
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 28
# rk28 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x10171E25, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 28
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 29
# rk29 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x2C333A41, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 29
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 30
# rk30 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x484F565D, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 30
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x4 = x0 xor T
xor %eax, %edx
mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
mov %r10d, %r9d
mov %r11d, %r10d
mov %edx, %r11d
# ROUND 31
# rk31 generate
mov %r13d, %eax # k1 xor k2 xor k3 xor ck0
mov %r14d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov $0x646B7279, %ecx
xor %ecx, %eax
mov %eax, %ebx # tao
and $0xFF000000, %ebx
shr $24, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x00FF0000, %ebx
shr $16, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x0000FF00, %ebx
shr $8, %ebx
movb sbox(,%rbx,1), %cl
shl $8, %ecx
mov %eax, %ebx
and $0x000000FF, %ebx
movb sbox(,%rbx,1), %cl
mov %ecx, %eax # T' = L'
rol $13, %eax
xor %eax, %ecx
rol $10, %eax
xor %eax, %ecx
mov %r12d, %eax # k0 xor T'
xor %eax, %ecx
mov %r13d, %r12d # k0 = k1, k1 = k2 , k2 = k3, k3 = k4 = r0
mov %r14d, %r13d
mov %r15d, %r14d
mov %ecx, %r15d
# enc 31
mov %r9d, %eax # x1 xor x2 xor x3 xor rk0
mov %r10d, %ecx
xor %ecx, %eax
mov %r11d, %ecx
xor %ecx, %eax
mov %r15d, %ecx
xor %ecx, %eax
mov %eax, %ecx # tao
and $0xFF000000, %ecx
shr $24, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x00FF0000, %ecx
shr $16, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x0000FF00, %ecx
shr $8, %ecx
movb sbox(,%rcx,1), %dl
shl $8, %edx
mov %eax, %ecx
and $0x000000FF, %ecx
movb sbox(,%rcx,1), %dl
mov %edx, %eax # T = L
rol $2, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $8, %eax
xor %eax, %edx
rol $6, %eax
xor %eax, %edx
mov %r8d, %eax # x35 = x0 xor T, kept in %eax (no register shift after the final round)
xor %edx, %eax
# xor %eax, %eax
# mov %r9d, %r8d # x0 = x1, x1 = x2, x2 = x3, x3= x4
# mov %r10d, %r9d
# mov %r11d, %r10d
# mov %edx, %r11d
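# Output sketch: the ciphertext is the reversed last four state words,
# (Y0,Y1,Y2,Y3) = (X35,X34,X33,X32) = (%eax,%r11d,%r10d,%r9d), each
# stored big-endian below:  out[4*j + k] = (Y[j] >> (24 - 8*k)) & 0xFF;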
# output
pop %rdx
mov $3, %rcx # output 0
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %eax
dec %rcx
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %eax
dec %rcx
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %eax
dec %rcx
movb $0xFF, %bl
and %eax, %ebx
movb %bl, (%rdx,%rcx,1)
mov $7, %rcx # output 1
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r11d
dec %rcx
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r11d
dec %rcx
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r11d
dec %rcx
movb $0xFF, %bl
and %r11d, %ebx
movb %bl, (%rdx,%rcx,1)
mov $11, %rcx # output 2
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r10d
dec %rcx
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r10d
dec %rcx
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r10d
dec %rcx
movb $0xFF, %bl
and %r10d, %ebx
movb %bl, (%rdx,%rcx,1)
mov $15, %rcx # output 3
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r9d
dec %rcx
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r9d
dec %rcx
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
shr $8, %r9d
dec %rcx
movb $0xFF, %bl
and %r9d, %ebx
movb %bl, (%rdx,%rcx,1)
pop %r15 # restore the callee-saved registers
pop %r14
pop %r13
pop %r12
pop %rbx
ret |
LoCryptEn/Key-security | 4,561 | Cache-bound/DilithiumIn_Cache/Nortm/Kernel/aesni.S | .section .text
.type aes_enc, @function
.type aes_dec, @function
.type aes_enc_master, @function
.type aes_dec_master, @function
.globl aes_enc
.globl aes_dec
.globl aes_enc_master
.globl aes_dec_master
.macro key_expand RCON DEST INV=0
aeskeygenassist \RCON, %xmm0, %xmm1
call key_combine
.if \INV
aesimc \%xmm0, \DEST # InvMixColumns: convert to a decryption round key
.else
movapd %xmm0, \DEST
.endif
.endm
key_combine:
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm2
pxor %xmm2, %xmm0
shufps $0b10001100, %xmm0, %xmm2
pxor %xmm2, %xmm0
pxor %xmm1, %xmm0
ret
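# Sketch of what key_expand/key_combine compute (the usual AES-NI
# AES-128 expansion idiom; w0..w3 are the words of round key i-1):
#   t   = SubWord(RotWord(w3)) ^ RCON;   /* from AESKEYGENASSIST */
#   w0' = w0 ^ t;  w1' = w1 ^ w0';  w2' = w2 ^ w1';  w3' = w3 ^ w2';
# The two SHUFPS/PXOR pairs form the prefix-XORs w0, w0^w1, w0^w1^w2,
# w0^w1^w2^w3 in-register; %xmm2 is scratch, zeroed once by the caller.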
aes_enc:
push %rdi
push %rsi
push %rdx
movupd (%rdi), %xmm0
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6
key_expand $2, %xmm7
key_expand $4, %xmm8
key_expand $8, %xmm9
key_expand $16, %xmm10
key_expand $32, %xmm11
key_expand $64, %xmm12
key_expand $128, %xmm13
key_expand $27, %xmm14
key_expand $54, %xmm15
movupd (%rsi), %xmm0
pxor %xmm5, %xmm0
aesenc %xmm6, %xmm0
aesenc %xmm7, %xmm0
aesenc %xmm8, %xmm0
aesenc %xmm9, %xmm0
aesenc %xmm10, %xmm0
aesenc %xmm11, %xmm0
aesenc %xmm12, %xmm0
aesenc %xmm13, %xmm0
aesenc %xmm14, %xmm0
aesenclast %xmm15, %xmm0
movupd %xmm0, (%rdx)
pop %rdx
pop %rsi
pop %rdi
mov $1, %rax
ret
aes_dec:
push %rdi
push %rsi
push %rdx
movupd (%rdi), %xmm0 # mov key to xmm0
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6, 1
key_expand $2, %xmm7, 1
key_expand $4, %xmm8, 1
key_expand $8, %xmm9, 1
key_expand $16, %xmm10, 1
key_expand $32, %xmm11, 1
key_expand $64, %xmm12, 1
key_expand $128, %xmm13, 1
key_expand $27, %xmm14, 1
key_expand $54, %xmm15, 0 # No AESIMC on the last round.
movupd (%rsi), %xmm0
pxor %xmm15, %xmm0
aesdec %xmm14, %xmm0
aesdec %xmm13, %xmm0
aesdec %xmm12, %xmm0
aesdec %xmm11, %xmm0
aesdec %xmm10, %xmm0
aesdec %xmm9, %xmm0
aesdec %xmm8, %xmm0
aesdec %xmm7, %xmm0
aesdec %xmm6, %xmm0
aesdeclast %xmm5, %xmm0
movupd %xmm0, (%rdx)
pop %rdx
pop %rsi
pop %rdi
mov $1, %rax
ret
aes_enc_master:
push %r12
push %r13
push %rdi
push %rsi
mov %dr0, %r12
mov %dr1, %r13
sub $16, %rsp
mov %r12, (%rsp)
mov %r13, 8(%rsp)
movupd (%rsp), %xmm0 # mov key to xmm0
movq $0, (%rsp)
movq $0, 8(%rsp) # clean key in stack
add $16, %rsp
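# Note: unlike aes_enc, the key here never comes from user memory; it is
# read from the debug registers %dr0/%dr1, staged on the stack just long
# enough to reach %xmm0, and the stack slots are zeroed right away.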
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6
key_expand $2, %xmm7
key_expand $4, %xmm8
key_expand $8, %xmm9
key_expand $16, %xmm10
key_expand $32, %xmm11
key_expand $64, %xmm12
key_expand $128, %xmm13
key_expand $27, %xmm14
key_expand $54, %xmm15
movupd (%rdi), %xmm0
pxor %xmm5, %xmm0
aesenc %xmm6, %xmm0
aesenc %xmm7, %xmm0
aesenc %xmm8, %xmm0
aesenc %xmm9, %xmm0
aesenc %xmm10, %xmm0
aesenc %xmm11, %xmm0
aesenc %xmm12, %xmm0
aesenc %xmm13, %xmm0
aesenc %xmm14, %xmm0
aesenclast %xmm15, %xmm0
movupd %xmm0, (%rsi)
pop %rsi
pop %rdi
pop %r13
pop %r12
mov $1, %eax
ret
aes_dec_master:
push %r12
push %r13
push %rdi
push %rsi
mov %dr0, %r12
mov %dr1, %r13
sub $16, %rsp
mov %r12, (%rsp)
mov %r13, 8(%rsp)
movupd (%rsp), %xmm0 # mov key to xmm0
movq $0, (%rsp)
movq $0, 8(%rsp) # clean key in stack
add $16, %rsp
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6, 1
key_expand $2, %xmm7, 1
key_expand $4, %xmm8, 1
key_expand $8, %xmm9, 1
key_expand $16, %xmm10, 1
key_expand $32, %xmm11, 1
key_expand $64, %xmm12, 1
key_expand $128, %xmm13, 1
key_expand $27, %xmm14, 1
key_expand $54, %xmm15, 0 # No AESIMC on the last round.
movupd (%rdi), %xmm0
pxor %xmm15, %xmm0
aesdec %xmm14, %xmm0
aesdec %xmm13, %xmm0
aesdec %xmm12, %xmm0
aesdec %xmm11, %xmm0
aesdec %xmm10, %xmm0
aesdec %xmm9, %xmm0
aesdec %xmm8, %xmm0
aesdec %xmm7, %xmm0
aesdec %xmm6, %xmm0
aesdeclast %xmm5, %xmm0
movupd %xmm0, (%rsi)
pop %rsi
pop %rdi
pop %r13
pop %r12
mov $1, %eax
ret
|
LoCryptEn/Key-security | 4,561 | Cache-bound/ECDSA_Cache/Nortm/Kernel/aesni.S | .section .text
.type aes_enc, @function
.type aes_dec, @function
.type aes_enc_master, @function
.type aes_dec_master, @function
.globl aes_enc
.globl aes_dec
.globl aes_enc_master
.globl aes_dec_master
.macro key_expand RCON DEST INV=0
aeskeygenassist \RCON, %xmm0, %xmm1
call key_combine
.if \INV
aesimc \%xmm0, \DEST # InvMixColumns: convert to a decryption round key
.else
movapd %xmm0, \DEST
.endif
.endm
key_combine:
pshufd $0b11111111, %xmm1, %xmm1
shufps $0b00010000, %xmm0, %xmm2
pxor %xmm2, %xmm0
shufps $0b10001100, %xmm0, %xmm2
pxor %xmm2, %xmm0
pxor %xmm1, %xmm0
ret
aes_enc:
push %rdi
push %rsi
push %rdx
movupd (%rdi), %xmm0
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6
key_expand $2, %xmm7
key_expand $4, %xmm8
key_expand $8, %xmm9
key_expand $16, %xmm10
key_expand $32, %xmm11
key_expand $64, %xmm12
key_expand $128, %xmm13
key_expand $27, %xmm14
key_expand $54, %xmm15
movupd (%rsi), %xmm0
pxor %xmm5, %xmm0
aesenc %xmm6, %xmm0
aesenc %xmm7, %xmm0
aesenc %xmm8, %xmm0
aesenc %xmm9, %xmm0
aesenc %xmm10, %xmm0
aesenc %xmm11, %xmm0
aesenc %xmm12, %xmm0
aesenc %xmm13, %xmm0
aesenc %xmm14, %xmm0
aesenclast %xmm15, %xmm0
movupd %xmm0, (%rdx)
pop %rdx
pop %rsi
pop %rdi
mov $1, %rax
ret
aes_dec:
push %rdi
push %rsi
push %rdx
movupd (%rdi), %xmm0 # mov key to xmm0
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6, 1
key_expand $2, %xmm7, 1
key_expand $4, %xmm8, 1
key_expand $8, %xmm9, 1
key_expand $16, %xmm10, 1
key_expand $32, %xmm11, 1
key_expand $64, %xmm12, 1
key_expand $128, %xmm13, 1
key_expand $27, %xmm14, 1
key_expand $54, %xmm15, 0 # No AESIMC on the last round.
movupd (%rsi), %xmm0
pxor %xmm15, %xmm0
aesdec %xmm14, %xmm0
aesdec %xmm13, %xmm0
aesdec %xmm12, %xmm0
aesdec %xmm11, %xmm0
aesdec %xmm10, %xmm0
aesdec %xmm9, %xmm0
aesdec %xmm8, %xmm0
aesdec %xmm7, %xmm0
aesdec %xmm6, %xmm0
aesdeclast %xmm5, %xmm0
movupd %xmm0, (%rdx)
pop %rdx
pop %rsi
pop %rdi
mov $1, %rax
ret
aes_enc_master:
push %r12
push %r13
push %rdi
push %rsi
mov %dr0, %r12
mov %dr1, %r13
sub $16, %rsp
mov %r12, (%rsp)
mov %r13, 8(%rsp)
movupd (%rsp), %xmm0 # mov key to xmm0
movq $0, (%rsp)
movq $0, 8(%rsp) # clean key in stack
add $16, %rsp
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6
key_expand $2, %xmm7
key_expand $4, %xmm8
key_expand $8, %xmm9
key_expand $16, %xmm10
key_expand $32, %xmm11
key_expand $64, %xmm12
key_expand $128, %xmm13
key_expand $27, %xmm14
key_expand $54, %xmm15
movupd (%rdi), %xmm0
pxor %xmm5, %xmm0
aesenc %xmm6, %xmm0
aesenc %xmm7, %xmm0
aesenc %xmm8, %xmm0
aesenc %xmm9, %xmm0
aesenc %xmm10, %xmm0
aesenc %xmm11, %xmm0
aesenc %xmm12, %xmm0
aesenc %xmm13, %xmm0
aesenc %xmm14, %xmm0
aesenclast %xmm15, %xmm0
movupd %xmm0, (%rsi)
pop %rsi
pop %rdi
pop %r13
pop %r12
mov $1, %eax
ret
aes_dec_master:
push %r12
push %r13
push %rdi
push %rsi
mov %dr0, %r12
mov %dr1, %r13
sub $16, %rsp
mov %r12, (%rsp)
mov %r13, 8(%rsp)
movupd (%rsp), %xmm0 # mov key to xmm0
movq $0, (%rsp)
movq $0, 8(%rsp) # clean key in stack
add $16, %rsp
movapd %xmm0, %xmm5
pxor %xmm2, %xmm2
key_expand $1, %xmm6, 1
key_expand $2, %xmm7, 1
key_expand $4, %xmm8, 1
key_expand $8, %xmm9, 1
key_expand $16, %xmm10, 1
key_expand $32, %xmm11, 1
key_expand $64, %xmm12, 1
key_expand $128, %xmm13, 1
key_expand $27, %xmm14, 1
key_expand $54, %xmm15, 0 # No AESIMC on the last round.
movupd (%rdi), %xmm0
pxor %xmm15, %xmm0
aesdec %xmm14, %xmm0
aesdec %xmm13, %xmm0
aesdec %xmm12, %xmm0
aesdec %xmm11, %xmm0
aesdec %xmm10, %xmm0
aesdec %xmm9, %xmm0
aesdec %xmm8, %xmm0
aesdec %xmm7, %xmm0
aesdec %xmm6, %xmm0
aesdeclast %xmm5, %xmm0
movupd %xmm0, (%rsi)
pop %rsi
pop %rdi
pop %r13
pop %r12
mov $1, %eax
ret
|
LoCryptEn/Key-security | 106,919 | Register-bound/RSAIn_Register/Kernel/RSA_function.S |
#include "montexp_AES.S"
.file "RSA_function.S"
.text
##################################################################################################
### ###
### mul1024(A,B): ###
### ###
### R=A*B ###
### ###
### ###
##################################################################################################
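# mul1024 computes the 2048-bit product in three passes over the register
# file: 1st = A[0-7]*B[0-7], 2nd = A[8-15]*B[0-7] + A[0-7]*B[8-15],
# 3rd = A[8-15]*B[8-15]. Partial sums are carried between the passes in the
# scalar window s0-s9 and stay in registers throughout.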
/* Arguments use only A0-A3 and B0-B3; M0-M3 serve as temporary storage */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
/* the result is stored in A and B: R[0]-R[15] in A, R[16]-R[31] in B */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 25 27 29 31 #
# 24 26 28 30 #
# 17 19 21 23 #
# 16 18 20 22 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
.macro mul1024_1st
##################################################################################################
### ###
### 1st part: A[0-7]*B[0-7] ###
### ###
### sum 576=65+73*7 ###
### ###
##################################################################################################
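###########################################################
###   A sketch of the operand-scanning pattern used    ###
###   by the eight blocks below (64-bit limbs):        ###
###     for j in 0..7:                                 ###
###       s[j..j+9] += A[0..7]*B[j]  # mulx + adc      ###
###       R[j] = s[j]                # retired (vmovq) ###
###########################################################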
### ###
### 1st_0: A[0-7]*B[0] ###
### sum 65=11+3+17*3 ###
### ###
###########################################################
##### A[0 2 4 6]*B[0] #####
xorq s8, s8
xorq s9, s9
vpextrq $1, A0xmm, bi #B[0]
vmovq A0xmm, ai #A[0]
mulx bi, s0, s1 #A[0]*B[0]
vmovq A1xmm, ai #A[2]
mulx bi, s2, s3 #A[2]*B[0]
vmovq A2xmm, ai #A[4]
mulx bi, s4, s5 #A[4]*B[0]
vmovq A3xmm, ai #A[6]
mulx bi, s6, s7 #A[6]*B[0]
##### A[1 3 5 7]*B[0] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[0]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[0]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[0]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[0]
adc rl, s7
adc rh, s8
adc $0, s9
vmovq s0, M0xmm #R[0]
##################################################################################################
### ###
###                   1st_1:   A[0-7]*B[1]                      ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[1] #####
xorq s0, s0
vpextrq $1, B0xmm, bi #B[1]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[1]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[1]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[1]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[1]
adc rl, s7
adc rh, s8
adc $0, s9
##### A[1 3 5 7]*B[1] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[1]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[1]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[1]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[1]
adc rl, s8
adc rh, s9
adc $0, s0
vmovq s1, T0xmm #R[1]
##################################################################################################
### ###
### 1st_2: A[0-7]*B[2] ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[2] #####
xorq s1, s1
vpextrq $1, A1xmm, bi #B[2]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[2]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[2]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[2]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[2]
adc rl, s8
adc rh, s9
adc $0, s0
##### A[1 3 5 7]*B[2] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[2]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[2]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[2]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[2]
adc rl, s9
adc rh, s0
adc $0, s1
vmovq s2, M1xmm #R[2]
##################################################################################################
### ###
### 1st_3: A[0-7]*B[3] ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[3] #####
xorq s2, s2
vpextrq $1, B1xmm, bi #B[3]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[3]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[3]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[3]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[3]
adc rl, s9
adc rh, s0
adc $0, s1
##### A[1 3 5 7]*B[3] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[3]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[3]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[3]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[3]
adc rl, s0
adc rh, s1
adc $0, s2
vmovq s3, T1xmm #R[3]
##################################################################################################
### ###
### 1st_4: A[0-7]*B[4] ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[4] #####
xorq s3, s3
vpextrq $1, A2xmm, bi #B[4]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[4]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[4]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[4]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[4]
adc rl, s0
adc rh, s1
adc $0, s2
##### A[1 3 5 7]*B[4] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[4]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[4]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[4]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[4]
adc rl, s1
adc rh, s2
adc $0, s3
vmovq s4, M2xmm #R[4]
##################################################################################################
### ###
### 1st_5: A[0-7]*B[5] ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[5] #####
xorq s4, s4
vpextrq $1, B2xmm, bi #B[5]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[5]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[5]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[5]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[5]
adc rl, s1
adc rh, s2
adc $0, s3
##### A[1 3 5 7]*B[5] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[5]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[5]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[5]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[5]
adc rl, s2
adc rh, s3
adc $0, s4
vmovq s5, T2xmm #R[5]
##################################################################################################
### ###
### 1st_6: A[0-7]*B[6] ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[6] #####
xorq s5, s5
vpextrq $1, A3xmm, bi #B[6]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[6]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[6]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[6]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[6]
adc rl, s2
adc rh, s3
adc $0, s4
##### A[1 3 5 7]*B[6] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[6]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[6]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[6]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[6]
adc rl, s3
adc rh, s4
adc $0, s5
vmovq s6, M3xmm #R[6]
##################################################################################################
### ###
### 1st_7: A[0-7]*B[7] ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[7] #####
xorq s6, s6
vpextrq $1, B3xmm, bi #B[7]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[7]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[7]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[7]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[7]
adc rl, s3
adc rh, s4
adc $0, s5
##### A[1 3 5 7]*B[7] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[7]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[7]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[7]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[7]
adc rl, s4
adc rh, s5
adc $0, s6
vmovq s7, T3xmm #R[7]
##################################################################################################
### ###
### 1st part END ###
### ###
### low high ###
### ###
### s8 s9 s0 s1 s2 s3 s4 s5 s6 ###
### ###
##################################################################################################
.endm
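# Note: s0-s9 act as a rolling ten-limb accumulator. Each 1st_j block
# zeroes the register that just left the window (xorq sK, sK), accumulates
# the new column of partial products, and retires one finished limb R[j]
# into an xmm lane, so the window advances one limb per block.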
##################################################################################################
/* 16 256bit vector registers */
#########################################################
### al and ah can be used as temporaries for vector value exchange ###
### bi and q can be used as well ###
#################################################################################
# AL0 AL1 AL2 AL3 # BL0 BL1 BL2 BL3 #
# # #
# A[i] X X X X # B[i] 1 3 5 7 #
# X X X X # 0 2 4 6 #
# 1 3 5 7 # X X X X #
# 0 2 4 6 # X X X X #
#################################################################################
# AH0 AH1 AH2 AH3 # BH0 BH1 BH2 BH3 #
# # #
# A[i] X X X X # B[i] 9 11 13 15 #
# X X X X # 8 10 12 14 #
# 9 11 13 15 # X X X X #
# 8 10 12 14 # X X X X #
#################################################################################
# ML0 ML1 ML2 ML3 # TL0 TL1 TL2 TL3 #
# # #
# M[i] X X X X # T[i] 1 3 5 7 #
# X X X X # 0 2 4 6 #
# 1 3 5 7 # X X X X #
# 0 2 4 6 # X X X X #
#################################################################################
# MH0 MH1 MH2 MH3 # TH0 TH1 TH2 TH3 #
# # #
# M[i] X X X X # T[i] 9 11 13 15 #
# X X X X # 8 10 12 14 #
# 9 11 13 15 # X X X X #
# 8 10 12 14 # X X X X #
#################################################################################
#########################################################
.macro mul1024_2nd
##################################################################################################
### ###
### 2nd part: ###
### A[8-15]*B[0-7] + A[0-7]*B[8-15] ###
### ###
### sum 1248=56+149*8 ###
### ###
##################################################################################################
### ###
### 2nd_arrange_vector ###
### sum 56=7*8 ###
### ###
###########################################################
vpermq $0xD8, A0, A0 #imm=3120
vpermq $0xD8, A1, A1 #imm=3120
vpermq $0xD8, A2, A2 #imm=3120
vpermq $0xD8, A3, A3 #imm=3120
vpermq $0xD8, B0, B0 #imm=3120
vpermq $0xD8, B1, B1 #imm=3120
vpermq $0xD8, B2, B2 #imm=3120
vpermq $0xD8, B3, B3 #imm=3120
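# vpermq $0xD8 reorders the qwords of each ymm from [0,1,2,3] to [0,2,1,3],
# so the limbs this part needs are reachable one at a time via movq/vpextrq
# on the low 128 bits and via a vperm2i128 $1 half-swap for the high ones.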
##################################################################################################
### ###
### 2nd_0: ###
### A[8-15]*B[0] + A[0-7]*B[8] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[0] #####
xorq s7, s7
vperm2i128 $1, A0, A0, A0 #imm=01
vmovq A0xmm, bi #B[0]
vperm2i128 $1, A0, A0, A0 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[0]
add rl, s8
adc rh, s9
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[0]
adc rl, s0
adc rh, s1
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[0]
adc rl, s2
adc rh, s3
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[0]
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[0] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[0]
add rl, s9
adc rh, s0
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[0]
adc rl, s1
adc rh, s2
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[0]
adc rl, s3
adc rh, s4
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[0]
adc rl, s5
adc rh, s6
adc $0, s7
###################################################################
###################################################################
##### A[0 2 4 6]*B[8] #####
vperm2i128 $1, A0, A0, A0 #imm=01
vpextrq $1, A0xmm, bi #B[8]
vperm2i128 $1, A0, A0, A0 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[8]
add rl, s8
adc rh, s9
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[8]
adc rl, s0
adc rh, s1
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[8]
adc rl, s2
adc rh, s3
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[8]
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### A[1 3 5 7]*B[8] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[8]
add rl, s9
adc rh, s0
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[8]
adc rl, s1
adc rh, s2
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[8]
adc rl, s3
adc rh, s4
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[8]
adc rl, s5
adc rh, s6
adc $0, s7
vpinsrq $1, s8, M0xmm, M0xmm #R[8]
##################################################################################################
### ###
### 2nd_1: ###
### A[8-15]*B[1] + A[0-7]*B[9] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[1] #####
xorq s8, s8
vperm2i128 $1, B0, B0, B0 #imm=01
vmovq B0xmm, bi #B[1]
vperm2i128 $1, B0, B0, B0 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[1]
add rl, s9
adc rh, s0
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[1]
adc rl, s1
adc rh, s2
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[1]
adc rl, s3
adc rh, s4
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[1]
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[1] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[1]
add rl, s0
adc rh, s1
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[1]
adc rl, s2
adc rh, s3
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[1]
adc rl, s4
adc rh, s5
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[1]
adc rl, s6
adc rh, s7
adc $0, s8
###################################################################
###################################################################
##### A[0 2 4 6]*B[9] #####
vperm2i128 $1, B0, B0, B0 #imm=01
vpextrq $1, B0xmm, bi #B[9]
vperm2i128 $1, B0, B0, B0 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[9]
add rl, s9
adc rh, s0
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[9]
adc rl, s1
adc rh, s2
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[9]
adc rl, s3
adc rh, s4
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[9]
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### A[1 3 5 7]*B[9] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[9]
add rl, s0
adc rh, s1
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[9]
adc rl, s2
adc rh, s3
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[9]
adc rl, s4
adc rh, s5
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[9]
adc rl, s6
adc rh, s7
adc $0, s8
vpinsrq $1, s9, T0xmm, T0xmm #R[9]
##################################################################################################
### ###
### 2nd_2: ###
### A[8-15]*B[2] + A[0-7]*B[10] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[2] #####
xorq s9, s9
vperm2i128 $1, A1, A1, A1 #imm=01
vmovq A1xmm, bi #B[2]
vperm2i128 $1, A1, A1, A1 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[2]
add rl, s0
adc rh, s1
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[2]
adc rl, s2
adc rh, s3
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[2]
adc rl, s4
adc rh, s5
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[2]
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[2] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[2]
add rl, s1
adc rh, s2
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[2]
adc rl, s3
adc rh, s4
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[2]
adc rl, s5
adc rh, s6
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[2]
adc rl, s7
adc rh, s8
adc $0, s9
###################################################################
###################################################################
##### A[0 2 4 6]*B[10] #####
vperm2i128 $1, A1, A1, A1 #imm=01
vpextrq $1, A1xmm, bi #B[10]
vperm2i128 $1, A1, A1, A1 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[10]
add rl, s0
adc rh, s1
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[10]
adc rl, s2
adc rh, s3
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[10]
adc rl, s4
adc rh, s5
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[10]
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### A[1 3 5 7]*B[10] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[10]
add rl, s1
adc rh, s2
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[10]
adc rl, s3
adc rh, s4
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[10]
adc rl, s5
adc rh, s6
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[10]
adc rl, s7
adc rh, s8
adc $0, s9
vpinsrq $1, s0, M1xmm, M1xmm #R[10]
##################################################################################################
### ###
### 2nd_3: ###
### A[8-15]*B[3]+ A[0-7]*B[11] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[3] #####
xorq s0, s0
vperm2i128 $1, B1, B1, B1 #imm=01
vmovq B1xmm, bi #B[3]
vperm2i128 $1, B1, B1, B1 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[3]
add rl, s1
adc rh, s2
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[3]
adc rl, s3
adc rh, s4
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[3]
adc rl, s5
adc rh, s6
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[3]
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[3] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[3]
add rl, s2
adc rh, s3
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[3]
adc rl, s4
adc rh, s5
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[3]
adc rl, s6
adc rh, s7
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[3]
adc rl, s8
adc rh, s9
adc $0, s0
###################################################################
###################################################################
##### A[0 2 4 6]*B[11] #####
vperm2i128 $1, B1, B1, B1 #imm=01
vpextrq $1, B1xmm, bi #B[11]
vperm2i128 $1, B1, B1, B1 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[11]
add rl, s1
adc rh, s2
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[11]
adc rl, s3
adc rh, s4
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[11]
adc rl, s5
adc rh, s6
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[11]
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### A[1 3 5 7]*B[11] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[11]
add rl, s2
adc rh, s3
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[11]
adc rl, s4
adc rh, s5
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[11]
adc rl, s6
adc rh, s7
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[11]
adc rl, s8
adc rh, s9
adc $0, s0
vpinsrq $1, s1, T1xmm, T1xmm #R[11]
##################################################################################################
### ###
### 2nd_4: ###
### A[8-15]*B[4] + A[0-7]*B[12] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[4] #####
xorq s1, s1
vperm2i128 $1, A2, A2, A2 #imm=01
vmovq A2xmm, bi #B[4]
vperm2i128 $1, A2, A2, A2 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[4]
add rl, s2
adc rh, s3
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[4]
adc rl, s4
adc rh, s5
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[4]
adc rl, s6
adc rh, s7
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[4]
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[4] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[4]
add rl, s3
adc rh, s4
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[4]
adc rl, s5
adc rh, s6
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[4]
adc rl, s7
adc rh, s8
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[4]
adc rl, s9
adc rh, s0
adc $0, s1
###################################################################
###################################################################
##### A[0 2 4 6]*B[12] #####
vperm2i128 $1, A2, A2, A2 #imm=01
vpextrq $1, A2xmm, bi #B[12]
vperm2i128 $1, A2, A2, A2 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[12]
add rl, s2
adc rh, s3
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[12]
adc rl, s4
adc rh, s5
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[12]
adc rl, s6
adc rh, s7
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[12]
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### A[1 3 5 7]*B[12] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[12]
add rl, s3
adc rh, s4
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[12]
adc rl, s5
adc rh, s6
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[12]
adc rl, s7
adc rh, s8
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[12]
adc rl, s9
adc rh, s0
adc $0, s1
vpinsrq $1, s2, M2xmm, M2xmm #R[12]
##################################################################################################
### ###
### 2nd_5: ###
### A[8-15]*B[5] + A[0-7]*B[13] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[5] #####
xorq s2, s2
vperm2i128 $1, B2, B2, B2 #imm=01
vmovq B2xmm, bi #B[5]
vperm2i128 $1, B2, B2, B2 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[5]
add rl, s3
adc rh, s4
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[5]
adc rl, s5
adc rh, s6
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[5]
adc rl, s7
adc rh, s8
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[5]
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[5] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[5]
add rl, s4
adc rh, s5
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[5]
adc rl, s6
adc rh, s7
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[5]
adc rl, s8
adc rh, s9
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[5]
adc rl, s0
adc rh, s1
adc $0, s2
###################################################################
###################################################################
##### A[0 2 4 6]*B[13] #####
vperm2i128 $1, B2, B2, B2 #imm=01
vpextrq $1, B2xmm, bi #B[13]
vperm2i128 $1, B2, B2, B2 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[13]
add rl, s3
adc rh, s4
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[13]
adc rl, s5
adc rh, s6
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[13]
adc rl, s7
adc rh, s8
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[13]
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### A[1 3 5 7]*B[13] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[13]
add rl, s4
adc rh, s5
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[13]
adc rl, s6
adc rh, s7
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[13]
adc rl, s8
adc rh, s9
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[13]
adc rl, s0
adc rh, s1
adc $0, s2
vpinsrq $1, s3, T2xmm, T2xmm #R[13]
##################################################################################################
### ###
### 2nd_6: ###
### A[8-15]*B[6] + A[0-7]*B[14] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[6] #####
xorq s3, s3
vperm2i128 $1, A3, A3, A3 #imm=01
vmovq A3xmm, bi #B[6]
vperm2i128 $1, A3, A3, A3 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[6]
add rl, s4
adc rh, s5
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[6]
adc rl, s6
adc rh, s7
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[6]
adc rl, s8
adc rh, s9
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[6]
adc rl, s0
adc rh, s1
adc $0, s2
##### A[9 11 13 15]*B[6] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[6]
add rl, s5
adc rh, s6
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[6]
adc rl, s7
adc rh, s8
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[6]
adc rl, s9
adc rh, s0
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[6]
adc rl, s1
adc rh, s2
adc $0, s3
###################################################################
###################################################################
##### A[0 2 4 6]*B[14] #####
vperm2i128 $1, A3, A3, A3 #imm=01
vpextrq $1, A3xmm, bi #B[14]
vperm2i128 $1, A3, A3, A3 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[14]
add rl, s4
adc rh, s5
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[14]
adc rl, s6
adc rh, s7
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[14]
adc rl, s8
adc rh, s9
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[14]
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### A[1 3 5 7]*B[14] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[14]
add rl, s5
adc rh, s6
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[14]
adc rl, s7
adc rh, s8
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[14]
adc rl, s9
adc rh, s0
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[14]
adc rl, s1
adc rh, s2
adc $0, s3
vpinsrq $1, s4, M3xmm, M3xmm #R[14]
##################################################################################################
### ###
### 2nd_7: ###
### A[8-15]*B[7] + A[0-7]*B[15] ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[7] #####
xorq s4, s4
vperm2i128 $1, B3, B3, B3 #imm=01
vmovq B3xmm, bi #B[7]
vperm2i128 $1, B3, B3, B3 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[7]
add rl, s5
adc rh, s6
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[7]
adc rl, s7
adc rh, s8
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[7]
adc rl, s9
adc rh, s0
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[7]
adc rl, s1
adc rh, s2
adc $0, s3
##### A[9 11 13 15]*B[7] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[7]
add rl, s6
adc rh, s7
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[7]
adc rl, s8
adc rh, s9
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[7]
adc rl, s0
adc rh, s1
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[7]
adc rl, s2
adc rh, s3
adc $0, s4
###################################################################
###################################################################
##### A[0 2 4 6]*B[15] #####
vperm2i128 $1, B3, B3, B3 #imm=01
vpextrq $1, B3xmm, bi #B[15]
vperm2i128 $1, B3, B3, B3 #imm=01
movq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[15]
add rl, s5
adc rh, s6
movq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[15]
adc rl, s7
adc rh, s8
movq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[15]
adc rl, s9
adc rh, s0
movq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[15]
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### A[1 3 5 7]*B[15] #####
movq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[15]
add rl, s6
adc rh, s7
movq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[15]
adc rl, s8
adc rh, s9
movq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[15]
adc rl, s0
adc rh, s1
movq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[15]
adc rl, s2
adc rh, s3
adc $0, s4
vpinsrq $1, s5, T3xmm, T3xmm #R[15]
##################################################################################################
### ###
### 2nd part END ###
### ###
### low high ###
### ###
### s6 s7 s8 s9 s0 s1 s2 s3 s4 ###
### ###
##################################################################################################
.endm
##################################################################################################
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 1 3 5 7 #
# 0 2 4 6 #
# 9 11 13 15 #
# 8 10 12 14 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 1 3 5 7 #
# 0 2 4 6 #
# 9 11 13 15 #
# 8 10 12 14 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 1 3 5 7 #
# 0 2 4 6 #
# 9 11 13 15 #
# 8 10 12 14 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
.macro mul1024_3rd
##################################################################################################
### ###
### 3rd part: A[8-15]*B[8-15] ###
### ###
### sum 628=52+72*8 ###
### ###
##################################################################################################
### ###
### 3rd_arrange_vector ###
### sum 52=7*4+6*4 ###
### ###
###########################################################
vpermq $0x8D, A0, A0 #imm=2031
vpermq $0x8D, A1, A1 #imm=2031
vpermq $0x8D, A2, A2 #imm=2031
vpermq $0x8D, A3, A3 #imm=2031
vpermq $0x8D, B0, B0 #imm=2031
vpermq $0x8D, B1, B1 #imm=2031
vpermq $0x8D, B2, B2 #imm=2031
vpermq $0x8D, B3, B3 #imm=2031
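# vpermq $0x8D reorders the qwords from [0,1,2,3] to [1,3,0,2], bringing
# the high-half limbs (index 8-15) into the low 128 bits for the plain
# movq/vpextrq accesses of this part.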
##################################################################################################
### ###
### 3rd_0: ###
### A[8-15]*B[8] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[8] #####
xorq s5, s5
vpextrq $1, A0xmm, bi #B[8]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[8]
add rl, s6
adc rh, s7
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[8]
adc rl, s8
adc rh, s9
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[8]
adc rl, s0
adc rh, s1
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[8]
adc rl, s2
adc rh, s3
adc $0, s4
##### A[9 11 13 15]*B[8] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[8]
add rl, s7
adc rh, s8
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[8]
adc rl, s9
adc rh, s0
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[8]
adc rl, s1
adc rh, s2
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[8]
adc rl, s3
adc rh, s4
adc $0, s5
movq s6, r0 #R[16]
##################################################################################################
### ###
### 3rd_1: ###
### A[8-15]*B[9] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[9] #####
xorq s6, s6
vpextrq $1, B0xmm, bi #B[9]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[9]
add rl, s7
adc rh, s8
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[9]
adc rl, s9
adc rh, s0
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[9]
adc rl, s1
adc rh, s2
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[9]
adc rl, s3
adc rh, s4
adc $0, s5
##### A[9 11 13 15]*B[9] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[9]
add rl, s8
adc rh, s9
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[9]
adc rl, s0
adc rh, s1
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[9]
adc rl, s2
adc rh, s3
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[9]
adc rl, s4
adc rh, s5
adc $0, s6
movq s7, r1 #R[17]
##################################################################################################
### ###
### 3rd_2: ###
### A[8-15]*B[10] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[10] #####
xorq s7, s7
vpextrq $1, A1xmm, bi #B[10]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[10]
add rl, s8
adc rh, s9
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[10]
adc rl, s0
adc rh, s1
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[10]
adc rl, s2
adc rh, s3
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[10]
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[10] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[10]
add rl, s9
adc rh, s0
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[10]
adc rl, s1
adc rh, s2
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[10]
adc rl, s3
adc rh, s4
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[10]
adc rl, s5
adc rh, s6
adc $0, s7
movq s8, r2 #R[18]
##################################################################################################
### ###
### 3rd_3: ###
### A[8-15]*B[11] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[11] #####
xorq s8, s8
vpextrq $1, B1xmm, bi #B[11]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[11]
add rl, s9
adc rh, s0
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[11]
adc rl, s1
adc rh, s2
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[11]
adc rl, s3
adc rh, s4
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[11]
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[11] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[11]
add rl, s0
adc rh, s1
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[11]
adc rl, s2
adc rh, s3
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[11]
adc rl, s4
adc rh, s5
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[11]
adc rl, s6
adc rh, s7
adc $0, s8
movq s9, r3 #R[19]
##################################################################################################
### ###
### 3rd_4: ###
### A[8-15]*B[12] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[12] #####
xorq s9, s9
vpextrq $1, A2xmm, bi #B[12]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[12]
add rl, s0
adc rh, s1
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[12]
adc rl, s2
adc rh, s3
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[12]
adc rl, s4
adc rh, s5
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[12]
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[12] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[12]
add rl, s1
adc rh, s2
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[12]
adc rl, s3
adc rh, s4
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[12]
adc rl, s5
adc rh, s6
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[12]
adc rl, s7
adc rh, s8
adc $0, s9
movq s0, r4 #R[20]
##################################################################################################
### ###
### 3rd_5: ###
### A[8-15]*B[13] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[13] #####
xorq s0, s0
vpextrq $1, B2xmm, bi #B[13]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[13]
add rl, s1
adc rh, s2
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[13]
adc rl, s3
adc rh, s4
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[13]
adc rl, s5
adc rh, s6
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[13]
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[13] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[13]
add rl, s2
adc rh, s3
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[13]
adc rl, s4
adc rh, s5
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[13]
adc rl, s6
adc rh, s7
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[13]
adc rl, s8
adc rh, s9
adc $0, s0
movq s1, r5 #R[21]
##################################################################################################
### ###
### 3rd_6: ###
### A[8-15]*B[14] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[14] #####
xorq s1, s1
vpextrq $1, A3xmm, bi #B[14]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[14]
add rl, s2
adc rh, s3
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[14]
adc rl, s4
adc rh, s5
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[14]
adc rl, s6
adc rh, s7
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[14]
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[14] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[14]
add rl, s3
adc rh, s4
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[14]
adc rl, s5
adc rh, s6
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[14]
adc rl, s7
adc rh, s8
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[14]
adc rl, s9
adc rh, s0
adc $0, s1
movq s2, r6 #R[22]
##################################################################################################
### ###
### 3rd_7: ###
### A[8-15]*B[15] ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[15] #####
xorq s2, s2
vpextrq $1, B3xmm, bi #B[15]
movq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[15]
add rl, s3
adc rh, s4
movq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[15]
adc rl, s5
adc rh, s6
movq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[15]
adc rl, s7
adc rh, s8
movq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[15]
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[15] #####
movq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[15]
add rl, s4
adc rh, s5
movq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[15]
adc rl, s6
adc rh, s7
movq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[15]
adc rl, s8
adc rh, s9
movq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[15]
adc rl, s0
adc rh, s1
adc $0, s2
movq s3, r7 #R[23]
##################################################################################################
### last result limbs remain in the s-register window (see 3rd part END below) ###
##################################################################################################
### ###
### 3rd part END ###
### ###
### low high ###
### ###
### s4 s5 s6 s7 s8 s9 s0 s1 s2 ###
### ###
##################################################################################################
.endm
.globl mul1024
.type mul1024, @function
.align 64
mul1024:
#.macro mul1024
##################################################################################################
### ###
### mul1024: 1st 2nd 3rd last ###
### ###
### result store in A B ###
### ###
##################################################################################################
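# The scalar names used throughout (s0-s9, r0-r15, ai, bi, rl, rh, rsp, q)
# are register aliases defined in montexp_AES.S, included at the top of
# this file. In particular the kernel presumably reuses %rsp as one of the
# limb registers, so the real stack pointer is parked in %mm7 across the
# three passes and restored afterwards.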
movq %rsp, rsp # park the real stack pointer in %mm7 (rsp is an alias)
mul1024_1st
mul1024_2nd
mul1024_3rd
movq rsp, %rsp # restore the real stack pointer from %mm7
##################################################################################################
### ###
### mul1024 END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
#.endm
ret
.size mul1024, .-mul1024
##################################################################################################
##################################################################################################
##################################################################################################
.globl sub_mp_mq
.type sub_mp_mq, @function
.align 64
sub_mp_mq:
### Rp: result of montexp1024_AES_p ###
### Rq: result of montexp1024_AES_q ###
/*
movq %mm0, %rsi #for RRp
movq %mm1, %rax #for Rq
### save %rax; it is clobbered inside montmul1024 ###
pushq %rax
#####################################################################
### load RRp to A B and Dec ###
addq $256, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
### rearrange RRp to B ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x0A, A0, T3, B0 #imm=1010
vshufpd $0x00, A0, T3, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A1, T3, B1 #imm=1010
vshufpd $0x00, A1, T3, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A2, T3, B2 #imm=1010
vshufpd $0x00, A2, T3, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A3, T3, B3 #imm=1010
vshufpd $0x00, A3, T3, A3 #imm=0000
#### store B ####
store_B
#####################################################################
#####################################################################
popq %rax
pushq %rax
### load Rq to A B ###
vmovdqu 128(%rax), A0xmm
vmovdqu 16+128(%rax), A1xmm
vmovdqu 32+128(%rax), A2xmm
vmovdqu 48+128(%rax), A3xmm
vmovdqu 64+128(%rax), B0xmm
vmovdqu 80+128(%rax), B1xmm
vmovdqu 96+128(%rax), B2xmm
vmovdqu 112+128(%rax), B3xmm
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
### rearrange Rq to A ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### Transfer Rq to Montgomery form mod p ###
### Compute Rq' = Rq*RRp*R^(-1) mod p ###
call montmul1024
### movq Rq to B ###
vshufpd $0x05, A0, A0, A0 #imm=0101
vshufpd $0x05, A1, A1, A1 #imm=0101
vshufpd $0x05, A2, A2, A2 #imm=0101
vshufpd $0x05, A3, A3, A3 #imm=0101
vshufpd $0x05, B0, B0, B0 #imm=0101
vshufpd $0x05, B1, B1, B1 #imm=0101
vshufpd $0x05, B2, B2, B2 #imm=0101
vshufpd $0x05, B3, B3, B3 #imm=0101
#####################################################################
#####################################################################
#### store B ####
store_B
popq %rax
### load Rp to A B ###
vmovdqu 256(%rax), A0xmm
vmovdqu 16+256(%rax), A1xmm
vmovdqu 32+256(%rax), A2xmm
vmovdqu 48+256(%rax), A3xmm
vmovdqu 64+256(%rax), B0xmm
vmovdqu 80+256(%rax), B1xmm
vmovdqu 96+256(%rax), B2xmm
vmovdqu 112+256(%rax), B3xmm
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
### rearrange Rp to A ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
#### restore B ####
restore_B
*/
#####################################################################
#####################################################################
### A - B, result stored in A ###
### (Rp - Rq) mod p ###
### A = A - B ###
xorq bi, bi
vpextrq $0, A0xmm, rh
vpextrq $1, A0xmm, rl
subq rl, rh
movq rh, r0 #R[0]
vpextrq $0, B0xmm, rh
vpextrq $1, B0xmm, rl
sbbq rl, rh
movq rh, r1 #R[1]
vpextrq $0, A1xmm, rh
vpextrq $1, A1xmm, rl
sbbq rl, rh
movq rh, r2 #R[2]
vpextrq $0, B1xmm, rh
vpextrq $1, B1xmm, rl
sbbq rl, rh
movq rh, r3 #R[3]
vpextrq $0, A2xmm, rh
vpextrq $1, A2xmm, rl
sbbq rl, rh
movq rh, r4 #R[4]
vpextrq $0, B2xmm, rh
vpextrq $1, B2xmm, rl
sbbq rl, rh
movq rh, r5 #R[5]
vpextrq $0, A3xmm, rh
vpextrq $1, A3xmm, rl
sbbq rl, rh
movq rh, r6 #R[6]
vpextrq $0, B3xmm, rh
vpextrq $1, B3xmm, rl
sbbq rl, rh
movq rh, r7 #R[7]
vperm2i128 $0x1, A0, A0, A0
vperm2i128 $0x1, A1, A1, A1
vperm2i128 $0x1, A2, A2, A2
vperm2i128 $0x1, A3, A3, A3
vperm2i128 $0x1, B0, B0, B0
vperm2i128 $0x1, B1, B1, B1
vperm2i128 $0x1, B2, B2, B2
vperm2i128 $0x1, B3, B3, B3
vpextrq $0, A0xmm, rh
vpextrq $1, A0xmm, rl
sbbq rl, rh
movq rh, r8 #R[8]
vpextrq $0, B0xmm, rh
vpextrq $1, B0xmm, rl
sbbq rl, rh
movq rh, r9 #R[9]
vpextrq $0, A1xmm, rh
vpextrq $1, A1xmm, rl
sbbq rl, rh
movq rh, r10 #R[10]
vpextrq $0, B1xmm, rh
vpextrq $1, B1xmm, rl
sbbq rl, rh
movq rh, r11 #R[11]
vpextrq $0, A2xmm, rh
vpextrq $1, A2xmm, rl
sbbq rl, rh
movq rh, r12 #R[12]
vpextrq $0, B2xmm, rh
vpextrq $1, B2xmm, rl
sbbq rl, rh
movq rh, r13 #R[13]
vpextrq $0, A3xmm, rh
vpextrq $1, A3xmm, rl
sbbq rl, rh
movq rh, r14 #R[14]
vpextrq $0, B3xmm, rh
vpextrq $1, B3xmm, rl
sbbq rl, rh
movq rh, r15 #R[15]
sbbq $0, bi
jnb 1f
#.sub_mp_mq_1:
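# bi holds the final borrow of R = Rp - Rq. If the subtraction underflowed
# (carry set, so jnb falls through), the modulus p -- kept limb-wise in
# M0-M3/T0-T3 -- is added back once:
#   if (Rp < Rq) R += p;   # R = (Rp - Rq) mod p for inputs already < p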
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### R=R+P ###
xorq bi, bi
vpextrq $0, M0xmm, rl
movq r0, rh
addq rl, rh
movq rh, r0
vpextrq $0, T0xmm, rl
movq r1, rh
adcq rl, rh
movq rh, r1
vpextrq $0, M1xmm, rl
movq r2, rh
adcq rl, rh
movq rh, r2
vpextrq $0, T1xmm, rl
movq r3, rh
adcq rl, rh
movq rh, r3
vpextrq $0, M2xmm, rl
movq r4, rh
adcq rl, rh
movq rh, r4
vpextrq $0, T2xmm, rl
movq r5, rh
adcq rl, rh
movq rh, r5
vpextrq $0, M3xmm, rl
movq r6, rh
adcq rl, rh
movq rh, r6
vpextrq $0, T3xmm, rl
movq r7, rh
adcq rl, rh
movq rh, r7
vpextrq $1, M0xmm, rl
adcq rl, r8
vpextrq $1, T0xmm, rl
adcq rl, r9
vpextrq $1, M1xmm, rl
adcq rl, r10
vpextrq $1, T1xmm, rl
adcq rl, r11
vpextrq $1, M2xmm, rl
adcq rl, r12
vpextrq $1, T2xmm, rl
adcq rl, r13
vpextrq $1, M3xmm, rl
adcq rl, r14
vpextrq $1, T3xmm, rl
adcq rl, r15
/*
adcq $0, bi
subq $1, bi
jnb 1f
#.sub_mp_mq_2:
### R=R+P ###
xorq bi, bi
vpextrq $0, M0xmm, rl
movq rh, r0
addq rl, rh
vpextrq $0, T0xmm, rl
movq rh, r1
addq rl, rh
vpextrq $0, M1xmm, rl
movq rh, r2
addq rl, rh
vpextrq $0, T1xmm, rl
movq rh, r3
addq rl, rh
vpextrq $0, M2xmm, rl
movq rh, r4
addq rl, rh
vpextrq $0, T2xmm, rl
movq rh, r5
addq rl, rh
vpextrq $0, M3xmm, rl
movq rh, r6
addq rl, rh
vpextrq $0, T3xmm, rl
movq rh, r7
addq rl, rh
vpextrq $1, M0xmm, rl
adcq rl, r8
vpextrq $1, T0xmm, rl
adcq rl, r9
vpextrq $1, M1xmm, rl
adcq rl, r10
vpextrq $1, T1xmm, rl
adcq rl, r11
vpextrq $1, M2xmm, rl
adcq rl, r12
vpextrq $1, T2xmm, rl
adcq rl, r13
vpextrq $1, M3xmm, rl
adcq rl, r14
vpextrq $1, T3xmm, rl
adcq rl, r15
*/
#.sub_mp_mq_end$:
1:
####################################################################
####################################################################
#### store the subtraction result back into A/B ####
movq r0, rl
vmovq rl, A0xmm
vpinsrq $1, r8, A0xmm, A0xmm
movq r1, rl
vmovq rl, B0xmm
vpinsrq $1, r9, B0xmm, B0xmm
movq r2, rl
vmovq rl, A1xmm
vpinsrq $1, r10, A1xmm, A1xmm
movq r3, rl
vmovq rl, B1xmm
vpinsrq $1, r11, B1xmm, B1xmm
movq r4, rl
vmovq rl, A2xmm
vpinsrq $1, r12, A2xmm, A2xmm
movq r5, rl
vmovq rl, B2xmm
vpinsrq $1, r13, B2xmm, B2xmm
movq r6, rl
vmovq rl, A3xmm
vpinsrq $1, r14, A3xmm, A3xmm
movq r7, rl
vmovq rl, B3xmm
vpinsrq $1, r15, B3xmm, B3xmm
/*
##################################################################
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### enc result ####
xor_arg_128_128bit
key_expansion_128 rk_128 0x1 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x2 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x4 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x8 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x10 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x20 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x40 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x80 rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x1b rhelp_128
aesenc_arg_128_128bit
key_expansion_128 rk_128 0x36 rhelp_128
aesenclast_arg_128_128bit
*/
ret
.size sub_mp_mq, .-sub_mp_mq
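########################################################################################
### Taken together, sub_mp_mq and mul_qinv appear to implement Garner's CRT          ###
### recombination for RSA: h = ((m_p - m_q) mod p) * qinv mod p, with the final      ###
### m = m_q + h*q presumably assembled by the caller.                                ###
########################################################################################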
########################################################################################
########################################################################################
########################################################################################
########################################################################################
.globl mul_qinv
.type mul_qinv, @function
.align 64
mul_qinv:
/*
### R=(m1-m2)mod p, stored in A ###
movq %mm1, %rax #for Rp-Rq
pushq %rax
movq %mm0, %rsi
pushq %rsi
*/
#####################################################################
#####################################################################
### load qinv to A B and Dec ###
addq $1280, %rsi # qinv is stored at 1280(%rsi)
vpxorq %ymm1, %ymm1, %ymm1
movq $0x0123456789ABCDEF, %rax
vmovq %rax, %xmm1
valignq $1, %ymm0, %ymm1, %ymm0
movq $0xFEDCBA9876543210, %rax
vmovq %rax, %xmm1
valignq $3, %ymm0, %ymm1, %ymm0
vmovdqu64 %zmm15, %zmm7 # save %zmm15 -- do not forget; restored below
vmovdqu64 (%rsi), %zmm16
vmovdqu64 64(%rsi), %zmm17
mov $0x03, %eax # 0000 0011
kmovd %eax, %k1
mov $0x0C, %eax # 0000 1100
kmovd %eax, %k2
mov $0x30, %eax # 0011 0000
kmovd %eax, %k3
mov $0xC0, %eax # 1100 0000
kmovd %eax, %k4
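# k1-k4 each select one 128-bit block (a qword pair) of a zmm register. The
# loop below compresses one block of %zmm16/%zmm17 into the low lane of
# %zmm15, decrypts it via aes_dec (from montexp_AES.S, which presumably
# operates on %xmm15 in this build), then expands the plaintext back into
# the block's original position.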
vpcompressq %zmm16,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k1}
vpcompressq %zmm16,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k2}
vpcompressq %zmm16,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k3}
vpcompressq %zmm16,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k4}
vpcompressq %zmm17,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k1}
vpcompressq %zmm17,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k2}
vpcompressq %zmm17,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k3}
vpcompressq %zmm17,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k4}
vmovdqu64 %zmm7, %zmm15 # restore %zmm15 saved above
vpxorq %zmm0, %zmm0, %zmm0
valignq $0x00,%zmm16 ,%zmm0, %zmm0{%k1}{z} #shift 0*64
vpxorq %zmm1, %zmm1, %zmm1
valignq $0x02,%zmm16 ,%zmm1, %zmm1{%k1}{z} #shift 2*64
vpxorq %zmm2, %zmm2, %zmm2
valignq $0x04,%zmm16,%zmm2, %zmm2{%k1}{z} #shift 4*64
vpxorq %zmm3, %zmm3, %zmm3
valignq $0x06,%zmm16,%zmm3, %zmm3{%k1}{z} #shift 6*64
vpxorq %zmm4, %zmm4, %zmm4
valignq $0x00,%zmm17,%zmm4, %zmm4{%k1}{z} #shift 0*64
vpxorq %zmm5, %zmm5, %zmm5
valignq $0x02,%zmm17,%zmm5, %zmm5{%k1}{z} #shift 2*64
vpxorq %zmm6, %zmm6, %zmm6
valignq $0x04,%zmm17, %zmm6, %zmm6{%k1}{z} #shift 4*64
vpxorq %zmm7, %zmm7, %zmm7
valignq $0x06,%zmm17,%zmm7, %zmm7{%k1}{z} #shift 6*64
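# Each masked valignq shifts the decrypted qinv right by 2k qwords and
# keeps only the low pair ({%k1}{z}), i.e. it peels qwords (2k, 2k+1) of
# %zmm16/%zmm17 into the low 128 bits of %zmm0-%zmm7 (presumably the A*/B*
# ymm aliases used by the rearrangement below).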
### rearrange qinv to A ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
call montmul1024
/*
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
### store qinv ###
vmovq B0xmm, s0 #q0=B[0] s0=B[8]
vpextrq $1, B0xmm, s1
vmovq B1xmm, s2
vpextrq $1, B1xmm, s3
vmovq B2xmm, s4
vpextrq $1, B2xmm, s5
vmovq B3xmm, s6
vpextrq $1, B3xmm, s7
vmovq A0xmm, rl
movq rl, q0
vpextrq $1, A0xmm, rl
movq rl, q1
vmovq A1xmm, rl
movq rl, q2
vpextrq $1, A1xmm, rl
movq rl, q3
vmovq A2xmm, rl
movq rl, q4
vpextrq $1, A2xmm, rl
movq rl, q5
vmovq A3xmm, rl
movq rl, q6
vpextrq $1, A3xmm, rl
movq rl, q7
#####################################################################
#####################################################################
popq %rsi
### load RRp to A B and Dec ###
popq %rsi
pushq %rsi
addq $256, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
### rearrange RRp to A ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### Transfer qinv to montgomery form in p ###
### qinv*RRp*R^(-1)mod p ###
call montmul1024
####################################################################
####################################################################
#### store A ####
store_A
#### load sub result ####
popq %rax
### load Rq to A B ###
vmovdqu 256(%rax), A0xmm
vmovdqu 16+256(%rax), A1xmm
vmovdqu 32+256(%rax), A2xmm
vmovdqu 48+256(%rax), A3xmm
vmovdqu 64+256(%rax), B0xmm
vmovdqu 80+256(%rax), B1xmm
vmovdqu 96+256(%rax), B2xmm
vmovdqu 112+256(%rax), B3xmm
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
### rerange RRp to B ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x0A, A0, T3, B0 #imm=1010
vshufpd $0x00, A0, T3, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A1, T3, B1 #imm=1010
vshufpd $0x00, A1, T3, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A2, T3, B2 #imm=1010
vshufpd $0x00, A2, T3, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A3, T3, B3 #imm=1010
vshufpd $0x00, A3, T3, A3 #imm=0000
#### restore A ####
restore_A
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### compute R*qinv ###
call montmul1024
####################################################################
####################################################################
### rerange RRp to B ###
vshufpd $0x05, A0, A0, A0 #imm=0101
vshufpd $0x05, A1, A1, A1 #imm=0101
vshufpd $0x05, A2, A2, A2 #imm=0101
vshufpd $0x05, A3, A3, A3 #imm=0101
vshufpd $0x05, B0, B0, B0 #imm=0101
vshufpd $0x05, B1, B1, B1 #imm=0101
vshufpd $0x05, B2, B2, B2 #imm=0101
vshufpd $0x05, B3, B3, B3 #imm=0101
### set A to 1 ###
vpxor T3, T3, T3
vblendpd $0x05, T3, A1, A1 #imm=0101
vblendpd $0x05, T3, A2, A2 #imm=0101
vblendpd $0x05, T3, A3, A3 #imm=0101
vblendpd $0x05, T3, B0, B0 #imm=0101
vblendpd $0x05, T3, B1, B1 #imm=0101
vblendpd $0x05, T3, B2, B2 #imm=0101
vblendpd $0x05, T3, B3, B3 #imm=0101
movq $1, %rax
vmovq %rax, T3xmm
vblendpd $0x01, T3, A0, A0 #imm=0001
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### compute result*1*R^(-1)mod p ###
call montmul1024
*/
ret
.size mul_qinv, .-mul_qinv
########################################################################################
########################################################################################
########################################################################################
########################################################################################
.globl mul_h_q_add_Rq
.type mul_h_q_add_Rq, @function
.align 64
mul_h_q_add_Rq:
### h stored in A ###
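# Contract (as read from the code below): compute the 2048-bit product
# m*h via mul1024, then add the value Rq into it with a full add/adc
# carry chain, storing the 2048-bit sum through the pointer recovered
# from %xmm31 into %rdi.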
/*
movq %mm0, %rsi
movq %mm1, %rax
pushq %rax
*/
########################################################################################
########################################################################################
#### load q ####
#### store A ####
store_A
#####################################################################
### load q to A B and Dec ###
vpxorq %ymm1, %ymm1, %ymm1
movq $0x0123456789ABCDEF, %rax
vmovq %rax, %xmm1
valignq $1, %ymm0, %ymm1, %ymm0
movq $0xFEDCBA9876543210, %rax
vmovq %rax, %xmm1
valignq $3, %ymm0, %ymm1, %ymm0
vmovdqu64 %zmm15, %zmm7 # save zmm15 in zmm7; do not forget to restore it below!
vmovdqu64 (%rsi), %zmm16
vmovdqu64 64(%rsi), %zmm17
mov $0x03, %eax # 0000 0011
kmovd %eax, %k1
mov $0x0C, %eax # 0000 1100
kmovd %eax, %k2
mov $0x30, %eax # 0011 0000
kmovd %eax, %k3
mov $0xC0, %eax # 1100 0000
kmovd %eax, %k4
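# Round-trip pattern for register-bound material: each mask k1..k4
# selects one aligned qword pair (one 128-bit AES block) of the source
# zmm; vpcompressq packs that pair into the low lanes of zmm15, the
# aes_dec macro (defined elsewhere in this kernel) decrypts the block in
# place, and vpexpandq scatters the plaintext back to its original
# lanes, so the protected value need not round-trip through memory.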
vpcompressq %zmm16,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k1}
vpcompressq %zmm16,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k2}
vpcompressq %zmm16,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k3}
vpcompressq %zmm16,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k4}
vpcompressq %zmm17,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k1}
vpcompressq %zmm17,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k2}
vpcompressq %zmm17,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k3}
vpcompressq %zmm17,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k4}
vmovdqu64 %zmm7, %zmm15 # restore zmm15
### load q to A B ###
vpxorq %zmm0, %zmm0, %zmm0
valignq $0x00, %zmm16, %zmm0, %zmm0{%k1}{z} #shift 0*64
vpxorq %zmm1, %zmm1, %zmm1
valignq $0x02, %zmm16, %zmm1, %zmm1{%k1}{z} #shift 2*64
vpxorq %zmm2, %zmm2, %zmm2
valignq $0x04, %zmm16, %zmm2, %zmm2{%k1}{z} #shift 4*64
vpxorq %zmm3, %zmm3, %zmm3
valignq $0x06, %zmm16, %zmm3, %zmm3{%k1}{z} #shift 6*64
vpxorq %zmm4, %zmm4, %zmm4
valignq $0x00, %zmm17, %zmm4, %zmm4{%k1}{z} #shift 0*64
vpxorq %zmm5, %zmm5, %zmm5
valignq $0x02, %zmm17, %zmm5, %zmm5{%k1}{z} #shift 2*64
vpxorq %zmm6, %zmm6, %zmm6
valignq $0x04, %zmm17, %zmm6, %zmm6{%k1}{z} #shift 4*64
vpxorq %zmm7, %zmm7, %zmm7
valignq $0x06, %zmm17, %zmm7, %zmm7{%k1}{z} #shift 6*64
#################################
/* # for debugging:
vmovdqu64 A0xmm, (%rsi)
vmovdqu64 A1xmm, 16(%rsi)
vmovdqu64 A2xmm, 32(%rsi)
vmovdqu64 A3xmm, 48(%rsi)
vmovdqu64 B0xmm, 64(%rsi)
vmovdqu64 B1xmm, 80(%rsi)
vmovdqu64 B2xmm, 96(%rsi)
vmovdqu64 B3xmm, 112(%rsi)
*/
###############################
/*
#### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
vpxor rk_128, M0xmm, M0xmm
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
vaesdec rhelp_128, M0xmm, M0xmm
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
vaesdeclast rhelp_128, M0xmm, M0xmm
*/
### rerange q to B ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x0A, A0, T3, B0 #imm=1010
vshufpd $0x00, A0, T3, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A1, T3, B1 #imm=1010
vshufpd $0x00, A1, T3, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A2, T3, B2 #imm=1010
vshufpd $0x00, A2, T3, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A3, T3, B3 #imm=1010
vshufpd $0x00, A3, T3, A3 #imm=0000
#### restore A ####
restore_A
/*
vmovdqu64 A0, %ymm24
vmovdqu64 A1, %ymm25
vmovdqu64 A2, %ymm26
vmovdqu64 A3, %ymm27
*/
### compute m*h ###
call mul1024
/* ### rerange mul result to M ###
vperm2i128 $0x20, T0, M0, M0
vperm2i128 $0x20, T1, M1, M1
vperm2i128 $0x20, T2, M2, M2
vperm2i128 $0x20, T3, M3, M3
*/
/*
vmovdqu64 M0, %ymm24 ##R[0] R[8] 0 0
vmovdqu64 M1, %ymm25
vmovdqu64 M2, %ymm26
vmovdqu64 M3, %ymm27
*/
########################################################################################
########################################################################################
# popq %rax
### load Rq to A B ###
vmovdqu64 %zmm21, %zmm24
vmovdqu64 %zmm27, %zmm25
vpxorq %zmm26, %zmm26, %zmm26
vmovdqu64 %xmm24, %xmm0
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm24, %xmm1
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm24, %xmm2
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm24, %xmm3
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm25, %xmm4
valignq $2, %zmm25, %zmm26, %zmm25
vmovdqu64 %xmm25, %xmm5
valignq $2, %zmm25, %zmm26, %zmm25
vmovdqu64 %xmm25, %xmm6
valignq $2, %zmm25, %zmm26, %zmm25
vmovdqu64 %xmm25, %xmm7
valignq $2, %zmm25, %zmm26, %zmm25
/*
vmovdqu 128(%rax), A0xmm
vmovdqu 16+128(%rax), A1xmm
vmovdqu 32+128(%rax), A2xmm
vmovdqu 48+128(%rax), A3xmm
vmovdqu 64+128(%rax), B0xmm
vmovdqu 80+128(%rax), B1xmm
vmovdqu 96+128(%rax), B2xmm
vmovdqu 112+128(%rax), B3xmm
*/
/* #### load key ####
vperm2i128 $0x10, M0, M0, T2
#### dec load arg ####
key_schedule_128_128bit
#10
xor_arg_128_128bit
#9
inv_key_expansion_128_128bit rk_128 0x36 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#8
inv_key_expansion_128_128bit rk_128 0x1b rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#7
inv_key_expansion_128_128bit rk_128 0x80 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#6
inv_key_expansion_128_128bit rk_128 0x40 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#5
inv_key_expansion_128_128bit rk_128 0x20 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#4
inv_key_expansion_128_128bit rk_128 0x10 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#3
inv_key_expansion_128_128bit rk_128 0x8 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#2
inv_key_expansion_128_128bit rk_128 0x4 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#1
inv_key_expansion_128_128bit rk_128 0x2 rhelp_128
vaesimc rk_128, rhelp_128
aesdec_arg_128_128bit
#0
inv_key_expansion_128_128bit rk_128 0x1 rhelp_128
vaesimc rk_128, rhelp_128
aesdeclast_arg_128_128bit
########################################################################################
########################################################################################
### add Rq and result(A,B) ###
### rerange mul result from M to M T ###
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
*/
### A=A+P ###
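/*
 * A hedged C sketch of the add/adc ladder below (illustrative names,
 * not from this file): a 2048-bit addition done limb by limb with one
 * propagated carry flag. The second operand's high 1024 bits are zero
 * here, which is why the upper half of the ladder reduces to
 * "adcq $0, ..." carry absorption through r0..r7 and the s registers.
 *
 * #include <stdint.h>
 *
 * static void add2048(uint64_t r[32], const uint64_t x[32],
 *                     const uint64_t y[32])    // y[16..31] == 0 here
 * {
 *     unsigned carry = 0;
 *     for (int i = 0; i < 32; i++) {
 *         unsigned __int128 t = (unsigned __int128)x[i] + y[i] + carry;
 *         r[i] = (uint64_t)t;                  // movq %rax, 8*i(%rdi)
 *         carry = (unsigned)(t >> 64);         // the add/adc carry chain
 *     }
 * }
 */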
xorq %rcx, %rcx
movq s8, %rcx
vmovq %xmm31, %rdi
### low 1024bit ###
vmovq A0xmm, %rax
vmovq M0xmm, %rbx
addq %rbx, %rax
# vpinsrq $0, %rax, A0xmm, A0xmm #R[0]
movq %rax, (%rdi)
vmovq B0xmm, %rax
vmovq T0xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, B0xmm, B0xmm #R[1]
movq %rax, 8(%rdi)
vmovq A1xmm, %rax
vmovq M1xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, A1xmm, A1xmm #R[2]
movq %rax, 16(%rdi)
vmovq B1xmm, %rax
vmovq T1xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, B1xmm, B1xmm #R[3]
movq %rax, 24(%rdi)
vmovq A2xmm, %rax
vmovq M2xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, A2xmm, A2xmm #R[4]
movq %rax, 32(%rdi)
vmovq B2xmm, %rax
vmovq T2xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, B2xmm, B2xmm #R[5]
movq %rax, 40(%rdi)
vmovq A3xmm, %rax
vmovq M3xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, A3xmm, A3xmm #R[6]
movq %rax, 48(%rdi)
vmovq B3xmm, %rax
vmovq T3xmm, %rbx
adcq %rbx, %rax
# vpinsrq $0, %rax, B3xmm, B3xmm #R[7]
movq %rax, 56(%rdi)
vpextrq $1, A0xmm, %rax
vpextrq $1, M0xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, A0xmm, A0xmm #R[8]
movq %rax, 64(%rdi)
vpextrq $1, B0xmm, %rax
vpextrq $1, T0xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, B0xmm, B0xmm #R[9]
movq %rax, 72(%rdi)
vpextrq $1, A1xmm, %rax
vpextrq $1, M1xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, A1xmm, A1xmm #R[10]
movq %rax, 80(%rdi)
vpextrq $1, B1xmm, %rax
vpextrq $1, T1xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, B1xmm, B1xmm #R[11]
movq %rax, 88(%rdi)
vpextrq $1, A2xmm, %rax
vpextrq $1, M2xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, A2xmm, A2xmm #R[12]
movq %rax, 96(%rdi)
vpextrq $1, B2xmm, %rax
vpextrq $1, T2xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, B2xmm, B2xmm #R[13]
movq %rax, 104(%rdi)
vpextrq $1, A3xmm, %rax
vpextrq $1, M3xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, A3xmm, A3xmm #R[14]
movq %rax, 112(%rdi)
vpextrq $1, B3xmm, %rax
vpextrq $1, T3xmm, %rbx
adcq %rbx, %rax
# vpinsrq $1, %rax, B3xmm, B3xmm #R[15]
movq %rax, 120(%rdi)
################################
### high 1024bit, for carry ###
movq r0, %rax
adcq $0, %rax
# vpinsrq $0, %rax, M0xmm, M0xmm #R[16]
movq %rax, 128(%rdi)
movq r1, %rax
adcq $0, %rax
# vpinsrq $0, %rax, T0xmm, T0xmm #R[17]
movq %rax, 136(%rdi)
movq r2, %rax
adcq $0, %rax
# vpinsrq $0, %rax, M1xmm, M1xmm #R[18]
movq %rax, 144(%rdi)
movq r3, %rax
adcq $0, %rax
# vpinsrq $0, %rax, T1xmm, T1xmm #R[19]
movq %rax, 152(%rdi)
movq r4, %rax
adcq $0, %rax
# vpinsrq $0, %rax, M2xmm, M2xmm #R[20]
movq %rax, 160(%rdi)
movq r5, %rax
adcq $0, %rax
# vpinsrq $0, %rax, T2xmm, T2xmm #R[21]
movq %rax, 168(%rdi)
movq r6, %rax
adcq $0, %rax
# vpinsrq $0, %rax, M3xmm, M3xmm #R[22]
movq %rax, 176(%rdi)
movq r7, %rax
adcq $0, %rax
# vpinsrq $0, %rax, T3xmm, T3xmm #R[23]
movq %rax, 184(%rdi)
adcq $0, s4
# vpinsrq $1, s4, M0xmm, M0xmm #R[24]
movq s4, 192(%rdi)
adcq $0, s5
# vpinsrq $1, s5, T0xmm, T0xmm #R[25]
movq s5, 200(%rdi)
adcq $0, s6
# vpinsrq $1, s6, M1xmm, M1xmm #R[26]
movq s6, 208(%rdi)
adcq $0, s7
# vpinsrq $1, s7, T1xmm, T1xmm #R[27]
movq s7, 216(%rdi)
adcq $0, %rcx
# vpinsrq $1, s8, M2xmm, M2xmm #R[28]
movq %rcx, 224(%rdi)
adcq $0, s9
# vpinsrq $1, s9, T2xmm, T2xmm #R[29]
movq s9, 232(%rdi)
adcq $0, s0
# vpinsrq $1, s0, M3xmm, M3xmm #R[30]
movq s0, 240(%rdi)
adcq $0, s1
# vpinsrq $1, s1, T3xmm, T3xmm #R[31]
movq s1, 248(%rdi)
/*
vmovdqu64 A0, %ymm24
vmovdqu64 A1, %ymm25
vmovdqu64 A2, %ymm26
vmovdqu64 A3, %ymm27
*/
/*
### rerange final result ###
vperm2i128 $0x20, B0, A0, A0
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vperm2i128 $0x20, T0, M0, M0
vperm2i128 $0x20, T1, M1, M1
vperm2i128 $0x20, T2, M2, M2
vperm2i128 $0x20, T3, M3, M3
vpermq $0xD8, A0, A0 #imm=3120
vpermq $0xD8, A1, A1 #imm=3120
vpermq $0xD8, A2, A2 #imm=3120
vpermq $0xD8, A3, A3 #imm=3120
vpermq $0xD8, M0, M0 #imm=3120
vpermq $0xD8, M1, M1 #imm=3120
vpermq $0xD8, M2, M2 #imm=3120
vpermq $0xD8, M3, M3 #imm=3120
vmovdqa M0, B0
vmovdqa M1, B1
vmovdqa M2, B2
vmovdqa M3, B3
*/
ret
.size mul_h_q_add_Rq, .-mul_h_q_add_Rq
|
LoCryptEn/Key-security | 137,966 | Register-bound/RSAIn_Register/Kernel/montmul.S | #include "rsa_head.S"
.file "montmul.S"
.text
##################################################################################################
### ###
### montmul(A,B,M,n0): ###
### ###
### R=A*B*R^(-1) mod M ###
### ###
### ###
##################################################################################################
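###  For reference, a hedged C sketch of the same computation follows. It is
###  not the scheduling used below (which interleaves the A*B and M*q limb
###  products column by column and keeps all state in registers); it only
###  pins down the math. n0 is taken to be the usual Montgomery constant
###  n0 = -M^(-1) mod 2^64, which is what the q_i = s_i*n0 steps below imply.
/*
#include <stdint.h>

#define NLIMBS 16                                  // 1024-bit operands

static void montmul_ref(uint64_t r[NLIMBS], const uint64_t a[NLIMBS],
                        const uint64_t b[NLIMBS], const uint64_t m[NLIMBS],
                        uint64_t n0)
{
    uint64_t s[NLIMBS + 2] = {0};                  // accumulator + carry limbs
    for (int i = 0; i < NLIMBS; i++) {
        unsigned __int128 c = 0;
        for (int j = 0; j < NLIMBS; j++) {         // s += a * b[i]
            c += (unsigned __int128)a[j] * b[i] + s[j];
            s[j] = (uint64_t)c;  c >>= 64;
        }
        c += s[NLIMBS];
        s[NLIMBS] = (uint64_t)c;  s[NLIMBS + 1] += (uint64_t)(c >> 64);

        uint64_t q = s[0] * n0;                    // cf. q0..q15 below
        c = 0;
        for (int j = 0; j < NLIMBS; j++) {         // s += q_i * m
            c += (unsigned __int128)q * m[j] + s[j];
            s[j] = (uint64_t)c;  c >>= 64;
        }
        c += s[NLIMBS];
        s[NLIMBS] = (uint64_t)c;  s[NLIMBS + 1] += (uint64_t)(c >> 64);

        for (int j = 0; j < NLIMBS + 1; j++)       // s >>= 64 (s[0] is now 0)
            s[j] = s[j + 1];
        s[NLIMBS + 1] = 0;
    }
    for (int i = 0; i < NLIMBS; i++)
        r[i] = s[i];                               // may still need one final
}                                                  // conditional subtract of M
*/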
/* 16 256-bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
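### Reading each column bottom-up as ymm lanes 0..3: vector register
### X(k) holds limbs 2k, 2k+1, 2k+8, 2k+9 of operand X, i.e. an
### adjacent limb pair from each half of the 1024-bit number.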
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.macro montmul_1st_movq
/* 16 256-bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# B[8] B[10] B[12] B[14] #
# A[8] A[10] A[12] A[14] #
# B[0] B[2] B[4] B[6] #
# A[0] A[2] A[4] A[6] #
#########################################
# B0 B1 B2 B3 #
# #
# B[9] B[11] B[13] B[15] #
# A[9] A[11] A[13] A[15] #
# B[1] B[3] B[5] B[7] #
# A[1] A[3] A[5] A[7] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### 1st part: A[0-7]*B[0-7] + M[0-7]*(q0-q7) ###
### ###
### sum 576=65+73*7 ###
### ###
##################################################################################################
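# In terms of the C reference above, the 1st part runs the first eight
# Montgomery steps restricted to limbs 0..7 of A and M; the cross terms
# with limbs 8..15 are deferred to the 2nd part:
#
#     for (i = 0; i < 8; i++) {
#         s += A[0..7] * B[i];
#         q[i] = s[0] * n0;
#         s += M[0..7] * q[i];      // zeroes s[0]
#         s >>= 64;                 // realized by renaming s0..s9
#     }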
### ###
### 1st_0: A[0-7]*B[0] + M[0-7]*q0 ###
### sum 65=11+3+17*3 ###
### ###
###########################################################
##### A[0 2 4 6]*B[0] #####
xorq s8, s8
xorq s9, s9
vpextrq $1, A0xmm, bi #B[0]
vmovq A0xmm, ai #A[0]
mulx bi, s0, s1 #A[0]*B[0]
vmovq A1xmm, ai #A[2]
mulx bi, s2, s3 #A[2]*B[0]
vmovq A2xmm, ai #A[4]
mulx bi, s4, s5 #A[4]*B[0]
vmovq A3xmm, ai #A[6]
mulx bi, s6, s7 #A[6]*B[0]
##### q0 #####
movq n0, %rdx
mulx s0, q, rh #q0=s0*n0
movq q, q0 #q0
##### M[0 2 4 6]*q0 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q0
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q0
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q0
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q0
adc rl, s6
adc rh, s7
adc $0, s8
##### A[1 3 5 7]*B[0] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[0]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[0]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[0]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[0]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[1 3 5 7]*q0 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q0
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q0
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q0
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q0
adc rl, s7
adc rh, s8
adc $0, s9
##################################################################################################
### ###
### 1st_1: A[0-7]*B[1] + M[0-7]*q1 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[1] #####
xorq s0, s0
vpextrq $1, B0xmm, bi #B[1]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[1]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[1]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[1]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[1]
adc rl, s7
adc rh, s8
adc $0, s9
##### q1 #####
movq n0, %rdx
mulx s1, q, rh #q1=s1*n0
movq q, q1 #q1
##### M[0 2 4 6]*q1 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q1
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q1
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q1
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q1
adc rl, s7
adc rh, s8
adc $0, s9
##### A[1 3 5 7]*B[1] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[1]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[1]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[1]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[1]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[1 3 5 7]*q1 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q1
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q1
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q1
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q1
adc rl, s8
adc rh, s9
adc $0, s0
##################################################################################################
### ###
### 1st_2: A[0-7]*B[2] + M[0-7]*q2 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[2] #####
xorq s1, s1
vpextrq $1, A1xmm, bi #B[2]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[2]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[2]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[2]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[2]
adc rl, s8
adc rh, s9
adc $0, s0
##### q2 #####
movq n0, %rdx
mulx s2, q, rh #q2=s2*n0
movq q, q2 #q2
##### M[0 2 4 6]*q2 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q2
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q2
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q2
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q2
adc rl, s8
adc rh, s9
adc $0, s0
##### A[1 3 5 7]*B[2] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[2]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[2]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[2]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[2]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[1 3 5 7]*q2 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q2
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q2
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q2
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q2
adc rl, s9
adc rh, s0
adc $0, s1
##################################################################################################
### ###
### 1st_3: A[0-7]*B[3] + M[0-7]*q3 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[3] #####
xorq s2, s2
vpextrq $1, B1xmm, bi #B[3]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[3]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[3]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[3]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[3]
adc rl, s9
adc rh, s0
adc $0, s1
##### q3 #####
movq n0, %rdx
mulx s3, q, rh #q3=s3*n0
movq q, q3 #q3
##### M[0 2 4 6]*q3 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q3
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q3
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q3
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q3
adc rl, s9
adc rh, s0
adc $0, s1
##### A[1 3 5 7]*B[3] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[3]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[3]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[3]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[3]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[1 3 5 7]*q3 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q3
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q3
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q3
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q3
adc rl, s0
adc rh, s1
adc $0, s2
##################################################################################################
### ###
### 1st_4: A[0-7]*B[4] + M[0-7]*q4 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[4] #####
xorq s3, s3
vpextrq $1, A2xmm, bi #B[4]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[4]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[4]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[4]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[4]
adc rl, s0
adc rh, s1
adc $0, s2
##### q4 #####
movq n0, %rdx
mulx s4, q, rh #q4=s4*n0
movq q, q4 #q4
##### M[0 2 4 6]*q4 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q4
add rl, s4
adc rh, s5
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q4
adc rl, s6
adc rh, s7
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q4
adc rl, s8
adc rh, s9
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q4
adc rl, s0
adc rh, s1
adc $0, s2
##### A[1 3 5 7]*B[4] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[4]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[4]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[4]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[4]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[1 3 5 7]*q4 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q4
add rl, s5
adc rh, s6
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q4
adc rl, s7
adc rh, s8
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q4
adc rl, s9
adc rh, s0
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q4
adc rl, s1
adc rh, s2
adc $0, s3
##################################################################################################
### ###
### 1st_5: A[0-7]*B[5] + M[0-7]*q5 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[5] #####
xorq s4, s4
vpextrq $1, B2xmm, bi #B[5]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[5]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[5]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[5]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[5]
adc rl, s1
adc rh, s2
adc $0, s3
##### q5 #####
movq n0, %rdx
mulx s5, q, rh #q5=s5*n0
movq q, q5 #q5
##### M[0 2 4 6]*q5 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q5
add rl, s5
adc rh, s6
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q5
adc rl, s7
adc rh, s8
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q5
adc rl, s9
adc rh, s0
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q5
adc rl, s1
adc rh, s2
adc $0, s3
##### A[1 3 5 7]*B[5] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[5]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[5]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[5]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[5]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[1 3 5 7]*q5 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q5
add rl, s6
adc rh, s7
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q5
adc rl, s8
adc rh, s9
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q5
adc rl, s0
adc rh, s1
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q5
adc rl, s2
adc rh, s3
adc $0, s4
##################################################################################################
### ###
### 1st_6: A[0-7]*B[6] + M[0-7]*q6 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[6] #####
xorq s5, s5
vpextrq $1, A3xmm, bi #B[6]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[6]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[6]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[6]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[6]
adc rl, s2
adc rh, s3
adc $0, s4
##### q6 #####
movq n0, %rdx
mulx s6, q, rh #q6=s6*n0
movq q, q6 #q6
##### M[0 2 4 6]*q6 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q6
add rl, s6
adc rh, s7
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q6
adc rl, s8
adc rh, s9
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q6
adc rl, s0
adc rh, s1
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q6
adc rl, s2
adc rh, s3
adc $0, s4
##### A[1 3 5 7]*B[6] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[6]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[6]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[6]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[6]
adc rl, s3
adc rh, s4
adc $0, s5
##### M[1 3 5 7]*q6 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q6
add rl, s7
adc rh, s8
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q6
adc rl, s9
adc rh, s0
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q6
adc rl, s1
adc rh, s2
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q6
adc rl, s3
adc rh, s4
adc $0, s5
##################################################################################################
### ###
### 1st_7: A[0-7]*B[7] + M[0-7]*q7 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[7] #####
xorq s6, s6
vpextrq $1, B3xmm, bi #B[7]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[7]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[7]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[7]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[7]
adc rl, s3
adc rh, s4
adc $0, s5
##### q7 #####
movq n0, %rdx
mulx s7, q, rh #q7=s7*n0
movq q, q7 #q7
##### M[0 2 4 6]*q7 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q7
add rl, s7
adc rh, s8
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q7
adc rl, s9
adc rh, s0
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q7
adc rl, s1
adc rh, s2
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q7
adc rl, s3
adc rh, s4
adc $0, s5
##### A[1 3 5 7]*B[7] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[7]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[7]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[7]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[7]
adc rl, s4
adc rh, s5
adc $0, s6
##### M[1 3 5 7]*q7 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q7
add rl, s8
adc rh, s9
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q7
adc rl, s0
adc rh, s1
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q7
adc rl, s2
adc rh, s3
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q7
adc rl, s4
adc rh, s5
adc $0, s6
##################################################################################################
### ###
### 1st part END ###
### ###
### low high ###
### ###
### s8 s9 s0 s1 s2 s3 s4 s5 s6 ###
### ###
##################################################################################################
.endm
.macro montmul_2nd_movq
##################################################################################################
### ###
### 2nd part: ###
### A[8-15]*B[0-7] + M[8-15]*(q0-q7) + A[0-7]*B[8-15] + M[0-7]*(q8-q15) ###
### ###
### sum 1248=56+149*8 ###
### ###
##################################################################################################
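# Continuing the sketch: step i of the 2nd part adds the high-half terms
# deferred from step i of the 1st part, plus one fresh Montgomery step
# for limb 8+i, still over half-width operand slices:
#
#     for (i = 0; i < 8; i++) {
#         s += A[8..15] * B[i] + M[8..15] * q[i];   // deferred halves
#         s += A[0..7] * B[8+i];
#         q[8+i] = s[0] * n0;
#         s += M[0..7] * q[8+i];                    // zeroes s[0]
#         s >>= 64;
#     }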
### ###
### 2nd_arrange_vector ###
### sum 8 ###
### ###
###########################################################
vpermq $0xD8, A0, A0 #imm=3120
vpermq $0xD8, A1, A1 #imm=3120
vpermq $0xD8, A2, A2 #imm=3120
vpermq $0xD8, A3, A3 #imm=3120
vpermq $0xD8, B0, B0 #imm=3120
vpermq $0xD8, B1, B1 #imm=3120
vpermq $0xD8, B2, B2 #imm=3120
vpermq $0xD8, B3, B3 #imm=3120
/* 16 256-bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# B[8] B[10] B[12] B[14] #
# B[0] B[2] B[4] B[6] #
# A[8] A[10] A[12] A[14] #
# A[0] A[2] A[4] A[6] #
#########################################
# B0 B1 B2 B3 #
# #
# B[9] B[11] B[13] B[15] #
# B[1] B[3] B[5] B[7] #
# A[9] A[11] A[13] A[15] #
# A[1] A[3] A[5] A[7] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### 2nd_0: ###
### A[8-15]*B[0] + M[8-15]*q0 + A[0-7]*B[8] + M[0-7]*q8 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[0] #####
xorq s7, s7
vperm2i128 $1, A0, A0, A0 #imm=01
vmovq A0xmm, bi #B[0]
vperm2i128 $1, A0, A0, A0 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[0]
add rl, s8
adc rh, s9
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[0]
adc rl, s0
adc rh, s1
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[0]
adc rl, s2
adc rh, s3
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[0]
adc rl, s4
adc rh, s5
adc $0, s6
##### q0 #####
movq q0, q
##### M[8 10 12 14]*q0 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q0
add rl, s8
adc rh, s9
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q0
adc rl, s0
adc rh, s1
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q0
adc rl, s2
adc rh, s3
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q0
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[0] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[0]
add rl, s9
adc rh, s0
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[0]
adc rl, s1
adc rh, s2
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[0]
adc rl, s3
adc rh, s4
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[0]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[9 11 13 15]*q0 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q0
add rl, s9
adc rh, s0
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q0
adc rl, s1
adc rh, s2
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q0
adc rl, s3
adc rh, s4
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q0
adc rl, s5
adc rh, s6
adc $0, s7
###################################################################
###################################################################
##### A[0 2 4 6]*B[8] #####
vperm2i128 $1, A0, A0, A0 #imm=01
vpextrq $1, A0xmm, bi #B[8]
vperm2i128 $1, A0, A0, A0 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[8]
add rl, s8
adc rh, s9
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[8]
adc rl, s0
adc rh, s1
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[8]
adc rl, s2
adc rh, s3
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[8]
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### q8 #####
movq n0, %rdx
mulx s8, q, rh #q8=s8*n0
movq q, q8 #q8
##### M[0 2 4 6]*q8 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q8
add rl, s8
adc rh, s9
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q8
adc rl, s0
adc rh, s1
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q8
adc rl, s2
adc rh, s3
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q8
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### A[1 3 5 7]*B[8] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[8]
add rl, s9
adc rh, s0
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[8]
adc rl, s1
adc rh, s2
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[8]
adc rl, s3
adc rh, s4
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[8]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[1 3 5 7]*q8 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q8
add rl, s9
adc rh, s0
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q8
adc rl, s1
adc rh, s2
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q8
adc rl, s3
adc rh, s4
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q8
adc rl, s5
adc rh, s6
adc $0, s7
##################################################################################################
### ###
### 2nd_1: ###
### A[8-15]*B[1] + M[8-15]*q1 + A[0-7]*B[9] + M[0-7]*q9 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[1] #####
xorq s8, s8
vperm2i128 $1, B0, B0, B0 #imm=01
vmovq B0xmm, bi #B[1]
vperm2i128 $1, B0, B0, B0 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[1]
add rl, s9
adc rh, s0
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[1]
adc rl, s1
adc rh, s2
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[1]
adc rl, s3
adc rh, s4
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[1]
adc rl, s5
adc rh, s6
adc $0, s7
##### q1 #####
movq q1, q
##### M[8 10 12 14]*q1 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q1
add rl, s9
adc rh, s0
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q1
adc rl, s1
adc rh, s2
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q1
adc rl, s3
adc rh, s4
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q1
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[1] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[1]
add rl, s0
adc rh, s1
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[1]
adc rl, s2
adc rh, s3
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[1]
adc rl, s4
adc rh, s5
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[1]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[9 11 13 15]*q1 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q1
add rl, s0
adc rh, s1
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q1
adc rl, s2
adc rh, s3
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q1
adc rl, s4
adc rh, s5
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q1
adc rl, s6
adc rh, s7
adc $0, s8
###################################################################
###################################################################
##### A[0 2 4 6]*B[9] #####
vperm2i128 $1, B0, B0, B0 #imm=01
vpextrq $1, B0xmm, bi #B[9]
vperm2i128 $1, B0, B0, B0 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[9]
add rl, s9
adc rh, s0
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[9]
adc rl, s1
adc rh, s2
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[9]
adc rl, s3
adc rh, s4
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[9]
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### q9 #####
movq n0, %rdx
mulx s9, q, rh #q9=s9*n0
movq q, q9 #q9
##### M[0 2 4 6]*q9 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q9
add rl, s9
adc rh, s0
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q9
adc rl, s1
adc rh, s2
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q9
adc rl, s3
adc rh, s4
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q9
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### A[1 3 5 7]*B[9] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[9]
add rl, s0
adc rh, s1
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[9]
adc rl, s2
adc rh, s3
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[9]
adc rl, s4
adc rh, s5
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[9]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[1 3 5 7]*q9 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q9
add rl, s0
adc rh, s1
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q9
adc rl, s2
adc rh, s3
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q9
adc rl, s4
adc rh, s5
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q9
adc rl, s6
adc rh, s7
adc $0, s8
##################################################################################################
### ###
### 2nd_2: ###
### A[8-15]*B[2] + M[8-15]*q2 + A[0-7]*B[10] + M[0-7]*q10 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[2] #####
xorq s9, s9
vperm2i128 $1, A1, A1, A1 #imm=01
vmovq A1xmm, bi #B[2]
vperm2i128 $1, A1, A1, A1 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[2]
add rl, s0
adc rh, s1
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[2]
adc rl, s2
adc rh, s3
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[2]
adc rl, s4
adc rh, s5
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[2]
adc rl, s6
adc rh, s7
adc $0, s8
##### q2 #####
movq q2, q
##### M[8 10 12 14]*q2 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q2
add rl, s0
adc rh, s1
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q2
adc rl, s2
adc rh, s3
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q2
adc rl, s4
adc rh, s5
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q2
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[2] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[2]
add rl, s1
adc rh, s2
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[2]
adc rl, s3
adc rh, s4
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[2]
adc rl, s5
adc rh, s6
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[2]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[9 11 13 15]*q2 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q2
add rl, s1
adc rh, s2
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q2
adc rl, s3
adc rh, s4
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q2
adc rl, s5
adc rh, s6
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q2
adc rl, s7
adc rh, s8
adc $0, s9
###################################################################
###################################################################
##### A[0 2 4 6]*B[10] #####
vperm2i128 $1, A1, A1, A1 #imm=01
vpextrq $1, A1xmm, bi #B[10]
vperm2i128 $1, A1, A1, A1 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[10]
add rl, s0
adc rh, s1
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[10]
adc rl, s2
adc rh, s3
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[10]
adc rl, s4
adc rh, s5
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[10]
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### q10 #####
movq n0, %rdx
mulx s0, q, rh #q10=s0*n0
movq q, q10 #q10
##### M[0 2 4 6]*q10 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q10
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q10
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q10
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q10
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### A[1 3 5 7]*B[10] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[10]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[10]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[10]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[10]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[1 3 5 7]*q10 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q10
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q10
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q10
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q10
adc rl, s7
adc rh, s8
adc $0, s9
##################################################################################################
### ###
### 2nd_3: ###
### A[8-15]*B[3] + M[8-15]*q3 + A[0-7]*B[11] + M[0-7]*q11 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[3] #####
xorq s0, s0
vperm2i128 $1, B1, B1, B1 #imm=01
vmovq B1xmm, bi #B[3]
vperm2i128 $1, B1, B1, B1 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[3]
add rl, s1
adc rh, s2
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[3]
adc rl, s3
adc rh, s4
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[3]
adc rl, s5
adc rh, s6
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[3]
adc rl, s7
adc rh, s8
adc $0, s9
##### q3 #####
movq q3, q
##### M[8 10 12 14]*q3 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q3
add rl, s1
adc rh, s2
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q3
adc rl, s3
adc rh, s4
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q3
adc rl, s5
adc rh, s6
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q3
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[3] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[3]
add rl, s2
adc rh, s3
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[3]
adc rl, s4
adc rh, s5
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[3]
adc rl, s6
adc rh, s7
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[3]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[9 11 13 15]*q3 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q3
add rl, s2
adc rh, s3
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q3
adc rl, s4
adc rh, s5
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q3
adc rl, s6
adc rh, s7
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q3
adc rl, s8
adc rh, s9
adc $0, s0
###################################################################
###################################################################
##### A[0 2 4 6]*B[11] #####
vperm2i128 $1, B1, B1, B1 #imm=01
vpextrq $1, B1xmm, bi #B[11]
vperm2i128 $1, B1, B1, B1 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[11]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[11]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[11]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[11]
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### q11 #####
movq n0, %rdx
mulx s1, q, rh #q11=s1*n0
movq q, q11 #q11
##### M[0 2 4 6]*q11 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q11
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q11
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q11
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q11
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### A[1 3 5 7]*B[11] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[11]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[11]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[11]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[11]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[1 3 5 7]*q11 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q11
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q11
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q11
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q11
adc rl, s8
adc rh, s9
adc $0, s0
##################################################################################################
### ###
### 2nd_4: ###
### A[8-15]*B[4] + M[8-15]*q4 + A[0-7]*B[12] + M[0-7]*q12 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[4] #####
xorq s1, s1
vperm2i128 $1, A2, A2, A2 #imm=01
vmovq A2xmm, bi #B[4]
vperm2i128 $1, A2, A2, A2 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[4]
add rl, s2
adc rh, s3
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[4]
adc rl, s4
adc rh, s5
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[4]
adc rl, s6
adc rh, s7
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[4]
adc rl, s8
adc rh, s9
adc $0, s0
##### q4 #####
movq q4, q
##### M[8 10 12 14]*q4 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q4
add rl, s2
adc rh, s3
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q4
adc rl, s4
adc rh, s5
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q4
adc rl, s6
adc rh, s7
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q4
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[4] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[4]
add rl, s3
adc rh, s4
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[4]
adc rl, s5
adc rh, s6
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[4]
adc rl, s7
adc rh, s8
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[4]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[9 11 13 15]*q4 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q4
add rl, s3
adc rh, s4
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q4
adc rl, s5
adc rh, s6
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q4
adc rl, s7
adc rh, s8
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q4
adc rl, s9
adc rh, s0
adc $0, s1
###################################################################
###################################################################
##### A[0 2 4 6]*B[12] #####
vperm2i128 $1, A2, A2, A2 #imm=01
vpextrq $1, A2xmm, bi #B[12]
vperm2i128 $1, A2, A2, A2 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[12]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[12]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[12]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[12]
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### q12 #####
movq n0, %rdx
mulx s2, q, rh #q12=s2*n0
movq q, q12 #q12
##### M[0 2 4 6]*q12 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q12
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q12
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q12
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q12
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### A[1 3 5 7]*B[12] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[12]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[12]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[12]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[12]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[1 3 5 7]*q12 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q12
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q12
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q12
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q12
adc rl, s9
adc rh, s0
adc $0, s1
##################################################################################################
### ###
### 2nd_5: ###
### A[8-15]*B[5] + M[8-15]*q5 + A[0-7]*B[13] + M[0-7]*q13 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[5] #####
xorq s2, s2
vperm2i128 $1, B2, B2, B2 #imm=01
vmovq B2xmm, bi #B[5]
vperm2i128 $1, B2, B2, B2 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[5]
add rl, s3
adc rh, s4
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[5]
adc rl, s5
adc rh, s6
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[5]
adc rl, s7
adc rh, s8
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[5]
adc rl, s9
adc rh, s0
adc $0, s1
##### q5 #####
movq q5, q
##### M[8 10 12 14]*q5 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q5
add rl, s3
adc rh, s4
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q5
adc rl, s5
adc rh, s6
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q5
adc rl, s7
adc rh, s8
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q5
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[5] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[5]
add rl, s4
adc rh, s5
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[5]
adc rl, s6
adc rh, s7
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[5]
adc rl, s8
adc rh, s9
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[5]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[9 11 13 15]*q5 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q5
add rl, s4
adc rh, s5
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q5
adc rl, s6
adc rh, s7
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q5
adc rl, s8
adc rh, s9
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q5
adc rl, s0
adc rh, s1
adc $0, s2
###################################################################
###################################################################
##### A[0 2 4 6]*B[13] #####
vperm2i128 $1, B2, B2, B2 #imm=01
vpextrq $1, B2xmm, bi #B[13]
vperm2i128 $1, B2, B2, B2 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[13]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[13]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[13]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[13]
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### q13 #####
movq n0, %rdx
mulx s3, q, rh #q13=s3*n0
movq q, q13 #q13
##### M[0 2 4 6]*q13 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q13
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q13
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q13
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q13
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### A[1 3 5 7]*B[13] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[13]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[13]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[13]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[13]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[1 3 5 7]*q13 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q13
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q13
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q13
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q13
adc rl, s0
adc rh, s1
adc $0, s2
##################################################################################################
### ###
### 2nd_6: ###
### A[8-15]*B[6] + M[8-15]*q6 + A[0-7]*B[14] + M[0-7]*q14 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[6] #####
xorq s3, s3
vperm2i128 $1, A3, A3, A3 #imm=01
vmovq A3xmm, bi #B[6]
vperm2i128 $1, A3, A3, A3 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[6]
add rl, s4
adc rh, s5
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[6]
adc rl, s6
adc rh, s7
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[6]
adc rl, s8
adc rh, s9
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[6]
adc rl, s0
adc rh, s1
adc $0, s2
##### q6 #####
movq q6, q
##### M[8 10 12 14]*q6 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q6
add rl, s4
adc rh, s5
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q6
adc rl, s6
adc rh, s7
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q6
adc rl, s8
adc rh, s9
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q6
adc rl, s0
adc rh, s1
adc $0, s2
##### A[9 11 13 15]*B[6] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[6]
add rl, s5
adc rh, s6
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[6]
adc rl, s7
adc rh, s8
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[6]
adc rl, s9
adc rh, s0
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[6]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[9 11 13 15]*q6 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q6
add rl, s5
adc rh, s6
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q6
adc rl, s7
adc rh, s8
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q6
adc rl, s9
adc rh, s0
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q6
adc rl, s1
adc rh, s2
adc $0, s3
###################################################################
###################################################################
##### A[0 2 4 6]*B[14] #####
vperm2i128 $1, A3, A3, A3 #imm=01
vpextrq $1, A3xmm, bi #B[14]
vperm2i128 $1, A3, A3, A3 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[14]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[14]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[14]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[14]
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### q14 #####
movq n0, %rdx
mulx s4, q, rh #q14=s4*n0
movq q, q14 #q14
##### M[0 2 4 6]*q14 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q14
add rl, s4
adc rh, s5
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q14
adc rl, s6
adc rh, s7
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q14
adc rl, s8
adc rh, s9
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q14
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### A[1 3 5 7]*B[14] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[14]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[14]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[14]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[14]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[1 3 5 7]*q14 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q14
add rl, s5
adc rh, s6
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q14
adc rl, s7
adc rh, s8
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q14
adc rl, s9
adc rh, s0
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q14
adc rl, s1
adc rh, s2
adc $0, s3
##################################################################################################
### ###
### 2nd_7: ###
### A[8-15]*B[7] + M[8-15]*q7 + A[0-7]*B[15] + M[0-7]*q15 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[7] #####
xorq s4, s4
vperm2i128 $1, B3, B3, B3 #imm=01
vmovq B3xmm, bi #B[7]
vperm2i128 $1, B3, B3, B3 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[7]
add rl, s5
adc rh, s6
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[7]
adc rl, s7
adc rh, s8
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[7]
adc rl, s9
adc rh, s0
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[7]
adc rl, s1
adc rh, s2
adc $0, s3
##### q7 #####
movq q7, q
##### M[8 10 12 14]*q7 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q7
add rl, s5
adc rh, s6
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q7
adc rl, s7
adc rh, s8
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q7
adc rl, s9
adc rh, s0
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q7
adc rl, s1
adc rh, s2
adc $0, s3
##### A[9 11 13 15]*B[7] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[7]
add rl, s6
adc rh, s7
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[7]
adc rl, s8
adc rh, s9
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[7]
adc rl, s0
adc rh, s1
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[7]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[9 11 13 15]*q7 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q7
add rl, s6
adc rh, s7
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q7
adc rl, s8
adc rh, s9
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q7
adc rl, s0
adc rh, s1
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q7
adc rl, s2
adc rh, s3
adc $0, s4
###################################################################
###################################################################
##### A[0 2 4 6]*B[15] #####
vperm2i128 $1, B3, B3, B3 #imm=01
vpextrq $1, B3xmm, bi #B[15]
vperm2i128 $1, B3, B3, B3 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[15]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[15]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[15]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[15]
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### q15 #####
movq n0, %rdx
mulx s5, q, rh #q15=s5*n0
movq q, q15 #q15
##### M[0 2 4 6]*q15 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q15
add rl, s5
adc rh, s6
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q15
adc rl, s7
adc rh, s8
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q15
adc rl, s9
adc rh, s0
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q15
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### A[1 3 5 7]*B[15] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[15]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[15]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[15]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[15]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[1 3 5 7]*q15 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q15
add rl, s6
adc rh, s7
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q15
adc rl, s8
adc rh, s9
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q15
adc rl, s0
adc rh, s1
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q15
adc rl, s2
adc rh, s3
adc $0, s4
##################################################################################################
### ###
### 2nd part END ###
### ###
### low high ###
### ###
### s6 s7 s8 s9 s0 s1 s2 s3 s4 ###
### ###
##################################################################################################
.endm
.macro montmul_3rd_movq
##################################################################################################
### ###
### 3rd part: A[8-15]*B[8-15] + M[8-15]*(q8-q15) ###
### ###
### sum 628=52+72*8 ###
### ###
##################################################################################################
### ###
### 3rd_arrange_vector ###
### sum 52=7*4+6*4 ###
### ###
###########################################################
vpermq $0x8D, A0, A0 #imm=2031
vpermq $0x8D, A1, A1 #imm=2031
vpermq $0x8D, A2, A2 #imm=2031
vpermq $0x8D, A3, A3 #imm=2031
vpermq $0x8D, B0, B0 #imm=2031
vpermq $0x8D, B1, B1 #imm=2031
vpermq $0x8D, B2, B2 #imm=2031
vpermq $0x8D, B3, B3 #imm=2031
/*
vpermq $0x72, A0, A0 #imm=01 11 00 10
vpermq $0x72, A1, A1 #imm=01 11 00 10
vpermq $0x72, A2, A2 #imm=01 11 00 10
vpermq $0x72, A3, A3 #imm=01 11 00 10
vpermq $0x72, B0, B0 #imm=01 11 00 10
vpermq $0x72, B1, B1 #imm=01 11 00 10
vpermq $0x72, B2, B2 #imm=01 11 00 10
vpermq $0x72, B3, B3 #imm=01 11 00 10
*/
### inverse M ###
vshufpd $0x05, M0, M0, M0 #imm=0101
vshufpd $0x05, M1, M1, M1 #imm=0101
vshufpd $0x05, M2, M2, M2 #imm=0101
vshufpd $0x05, M3, M3, M3 #imm=0101
vshufpd $0x05, T0, T0, T0 #imm=0101
vshufpd $0x05, T1, T1, T1 #imm=0101
vshufpd $0x05, T2, T2, T2 #imm=0101
vshufpd $0x05, T3, T3, T3 #imm=0101
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# B[0] B[2] B[4] B[6] #
# A[0] A[2] A[4] A[6] #
# B[8] B[10] B[12] B[14] #
# A[8] A[10] A[12] A[14] #
#########################################
# B0 B1 B2 B3 #
# #
# B[1] B[3] B[5] B[7] #
# A[1] A[3] A[5] A[7] #
# B[9] B[11] B[13] B[15] #
# A[9] A[11] A[13] A[15] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[0] M[2] M[4] M[6] #
# M[8] M[10] M[12] M[14] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[1] M[3] M[5] M[7] #
# M[9] M[11] M[13] M[15] #
#########################################
#########################################################
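##### note: vmovq and vpextrq $1 only reach the low 128 bits of a #####
##### ymm register, so the vpermq/vshufpd pass above moves the    #####
##### high limbs of A, B and M into the low lanes for the 3rd     #####
##### part; the "reverse M" shuffle at the end undoes it for M.   #####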
##################################################################################################
### ###
### 3rd_0: ###
### A[8-15]*B[8] + M[8-15]*q8 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[8] #####
xorq s5, s5
vpextrq $1, A0xmm, bi #B[8]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[8]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[8]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[8]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[8]
adc rl, s2
adc rh, s3
adc $0, s4
##### q8 #####
movq q8, q
##### M[8 10 12 14]*q8 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q8
add rl, s6
adc rh, s7
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q8
adc rl, s8
adc rh, s9
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q8
adc rl, s0
adc rh, s1
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q8
adc rl, s2
adc rh, s3
adc $0, s4
##### A[9 11 13 15]*B[8] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[8]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[8]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[8]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[8]
adc rl, s3
adc rh, s4
adc $0, s5
##### M[9 11 13 15]*q8 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q8
add rl, s7
adc rh, s8
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q8
adc rl, s9
adc rh, s0
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q8
adc rl, s1
adc rh, s2
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q8
adc rl, s3
adc rh, s4
adc $0, s5
movq s6, r0 #result[0]
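##### each 3rd_i step retires the lowest finished limb into r0-r7 #####
##### (mmx/%rbp aliases, see rsa_head.S) and re-zeroes the freed  #####
##### s-register as the new top of the accumulator window.        #####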
##################################################################################################
### ###
### 3rd_1: ###
### A[8-15]*B[9] + M[8-15]*q9 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[9] #####
xorq s6, s6
vpextrq $1, B0xmm, bi #B[9]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[9]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[9]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[9]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[9]
adc rl, s3
adc rh, s4
adc $0, s5
##### q9 #####
movq q9, q
##### M[8 10 12 14]*q9 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q9
add rl, s7
adc rh, s8
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q9
adc rl, s9
adc rh, s0
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q9
adc rl, s1
adc rh, s2
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q9
adc rl, s3
adc rh, s4
adc $0, s5
##### A[9 11 13 15]*B[9] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[9]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[9]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[9]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[9]
adc rl, s4
adc rh, s5
adc $0, s6
##### M[9 11 13 15]*q9 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q9
add rl, s8
adc rh, s9
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q9
adc rl, s0
adc rh, s1
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q9
adc rl, s2
adc rh, s3
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q9
adc rl, s4
adc rh, s5
adc $0, s6
movq s7, r1 #result[1]
##################################################################################################
### ###
### 3rd_2: ###
### A[8-15]*B[10] + M[8-15]*q10 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[10] #####
xorq s7, s7
vpextrq $1, A1xmm, bi #B[10]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[10]
add rl, s8
adc rh, s9
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[10]
adc rl, s0
adc rh, s1
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[10]
adc rl, s2
adc rh, s3
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[10]
adc rl, s4
adc rh, s5
adc $0, s6
##### q10 #####
movq q10, q
##### M[8 10 12 14]*q10 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q10
add rl, s8
adc rh, s9
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q10
adc rl, s0
adc rh, s1
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q10
adc rl, s2
adc rh, s3
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q10
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[10] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[10]
add rl, s9
adc rh, s0
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[10]
adc rl, s1
adc rh, s2
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[10]
adc rl, s3
adc rh, s4
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[10]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[9 11 13 15]*q10 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q10
add rl, s9
adc rh, s0
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q10
adc rl, s1
adc rh, s2
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q10
adc rl, s3
adc rh, s4
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q10
adc rl, s5
adc rh, s6
adc $0, s7
movq s8, r2 #result[2]
##################################################################################################
### ###
### 3rd_3: ###
### A[8-15]*B[11] + M[8-15]*q11 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[11] #####
xorq s8, s8
vpextrq $1, B1xmm, bi #B[11]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[11]
add rl, s9
adc rh, s0
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[11]
adc rl, s1
adc rh, s2
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[11]
adc rl, s3
adc rh, s4
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[11]
adc rl, s5
adc rh, s6
adc $0, s7
##### q11 #####
movq q11, q
##### M[8 10 12 14]*q11 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q11
add rl, s9
adc rh, s0
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q11
adc rl, s1
adc rh, s2
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q11
adc rl, s3
adc rh, s4
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q11
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[11] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[11]
add rl, s0
adc rh, s1
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[11]
adc rl, s2
adc rh, s3
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[11]
adc rl, s4
adc rh, s5
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[11]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[9 11 13 15]*q11 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q11
add rl, s0
adc rh, s1
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q11
adc rl, s2
adc rh, s3
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q11
adc rl, s4
adc rh, s5
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q11
adc rl, s6
adc rh, s7
adc $0, s8
movq s9, r3 #result[3]
##################################################################################################
### ###
### 3rd_4: ###
### A[8-15]*B[12] + M[8-15]*q12 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[12] #####
xorq s9, s9
vpextrq $1, A2xmm, bi #B[12]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[12]
add rl, s0
adc rh, s1
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[12]
adc rl, s2
adc rh, s3
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[12]
adc rl, s4
adc rh, s5
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[12]
adc rl, s6
adc rh, s7
adc $0, s8
##### q12 #####
movq q12, q
##### M[8 10 12 14]*q12 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q12
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q12
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q12
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q12
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[12] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[12]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[12]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[12]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[12]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[9 11 13 15]*q12 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q12
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q12
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q12
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q12
adc rl, s7
adc rh, s8
adc $0, s9
movq s0, r4 #result[4]
##################################################################################################
### ###
### 3rd_5: ###
### A[8-15]*B[13] + M[8-15]*q13 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[13] #####
xorq s0, s0
vpextrq $1, B2xmm, bi #B[13]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[13]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[13]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[13]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[13]
adc rl, s7
adc rh, s8
adc $0, s9
##### q13 #####
movq q13, q
##### M[8 10 12 14]*q13 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q13
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q13
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q13
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q13
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[13] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[13]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[13]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[13]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[13]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[9 11 13 15]*q13 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q13
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q13
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q13
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q13
adc rl, s8
adc rh, s9
adc $0, s0
movq s1, r5 #result[5]
##################################################################################################
### ###
### 3rd_6: ###
### A[8-15]*B[14] + M[8-15]*q14 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[14] #####
xorq s1, s1
vpextrq $1, A3xmm, bi #B[14]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[14]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[14]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[14]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[14]
adc rl, s8
adc rh, s9
adc $0, s0
##### q14 #####
movq q14, q
##### M[8 10 12 14]*q14 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q14
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q14
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q14
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q14
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[14] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[14]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[14]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[14]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[14]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[9 11 13 15]*q14 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q14
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q14
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q14
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q14
adc rl, s9
adc rh, s0
adc $0, s1
movq s2, r6 #result[6]
##################################################################################################
### ###
### 3rd_7: ###
### A[8-15]*B[15] + M[8-15]*q15 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[15] #####
xorq s2, s2
vpextrq $1, B3xmm, bi #B[15]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[15]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[15]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[15]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[15]
adc rl, s9
adc rh, s0
adc $0, s1
##### q15 #####
movq q15, q
##### M[8 10 12 14]*q15 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q15
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q15
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q15
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q15
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[15] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[15]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[15]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[15]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[15]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[9 11 13 15]*q15 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q15
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q15
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q15
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q15
adc rl, s0
adc rh, s1
adc $0, s2
movq s3, r7 #result[7]
##################################################################################################
### reverse M ###
vshufpd $0x05, M0, M0, M0 #imm=0101
vshufpd $0x05, M1, M1, M1 #imm=0101
vshufpd $0x05, M2, M2, M2 #imm=0101
vshufpd $0x05, M3, M3, M3 #imm=0101
vshufpd $0x05, T0, T0, T0 #imm=0101
vshufpd $0x05, T1, T1, T1 #imm=0101
vshufpd $0x05, T2, T2, T2 #imm=0101
vshufpd $0x05, T3, T3, T3 #imm=0101
/* 16 256bit vector registers */
#########################################################
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### 3rd part END ###
### ###
### low high ###
### ###
### s4 s5 s6 s7 s8 s9 s0 s1 s2 ###
### ###
##################################################################################################
.endm
.macro montmul_last_movq
##################################################################################################
### ###
### last part: reduce and store result ###
### ###
### sum 102=8+94 ###
### ###
##################################################################################################
### ###
### reduce ###
### sum 94=4+62+28 ###
### ###
###########################################################
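##### while the carry word s2 is non-zero (the subq $1 / jb test  #####
##### skips to 1: when s2 == 0), subtract M from R[0-15] with a   #####
##### subq/sbbq borrow chain, folding the final borrow into s2;   #####
##### two such passes are unrolled below.                         #####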
xorq rh, rh
movq s2, rh
subq $1, rh
#jb .montmul_last_end
jb 1f
#.montmul_last_sub_1$:
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
sbbq $0, s2
xorq rh, rh
movq s2, rh
subq $1, rh
jb 1f
#.montmul_last_sub_2:
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
sbbq $0, s2
#.montmul_last_end$:
1:
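##### snapshot R[0-7] into %zmm19 and R[8-15] into %zmm18 via     #####
##### valignq shifts, so the pre-subtraction value can be         #####
##### restored at 3: if the trial R - M below borrows.            #####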
vpxorq %zmm18, %zmm18, %zmm18
vpxorq %zmm19, %zmm19, %zmm19
vpxorq %zmm20, %zmm20, %zmm20
movq r0, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r1, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r2, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r3, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r4, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r5, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r6, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r7, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
vmovq r8, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r9, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r10, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r11, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r12, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r13, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r14, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r15, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
jb 3f
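##### no borrow: R - M is the reduced result; the blends below    #####
##### pack R[0-7]/R[8-15] into the A0-A3 / B0-B3 lanes shown in   #####
##### the layout diagram after 4:.                                #####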
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
vblendpd $0x3, T3, T2, T3 #imm=0011
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
jmp 4f
3:
vpxorq %zmm20, %zmm20, %zmm20
vmovq %xmm19, rl
movq rl, r0
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r1
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r2
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r3
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r4
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r5
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r6
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r7
valignq $1, %zmm19, %zmm20, %zmm19
vpxorq %zmm20, %zmm20, %zmm20
vmovq %xmm18, r8
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r9
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r10
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r11
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r12
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r13
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r14
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r15
valignq $1, %zmm18, %zmm20, %zmm18
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
vblendpd $0x3, T3, T2, T3 #imm=0011
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
4:
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# x x x x #
# R[8] R[10] R[12] R[14] #
# x x x x #
# R[0] R[2] R[4] R[6] #
#########################################
# B0 B1 B2 B3 #
# #
# x x x x #
# R[9] R[11] R[13] R[15] #
# x x x x #
# R[1] R[3] R[5] R[7] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### last part END ###
### ###
### result A0 A1 A2 A3 B0 B1 B2 B3 ###
### ###
##################################################################################################
.endm
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.globl montmul1024
.type montmul1024, @function
.align 64
montmul1024:
#.macro montmul1024
##################################################################################################
### ###
### montmul1024: 1st 2nd 3rd last ###
### ###
### sum 2554=576+1248+628+102 ###
### ###
##################################################################################################
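/*
 * A rough C-level sketch (comment only, never assembled) of the
 * word-serial Montgomery multiplication the four macros implement.
 * The names N, a, b, m, n0, t, r are illustrative assumptions, not
 * symbols of this file; N = 16 64-bit limbs, n0 = M'[0] = -m[0]^-1
 * mod 2^64.  The 1st/2nd/3rd macros compute the same double sum
 * block by block (low*low, cross terms, high*high) and
 * montmul_last_movq is the final reduction.
 *
 *   #include <stdint.h>
 *   #define N 16
 *   void montmul(uint64_t r[N], const uint64_t a[N],
 *                const uint64_t b[N], const uint64_t m[N],
 *                uint64_t n0) {           // r = a*b*2^(-64*N) mod m
 *       uint64_t t[N + 2] = {0};
 *       for (int i = 0; i < N; i++) {
 *           uint64_t c = 0;
 *           for (int j = 0; j < N; j++) { // t += a * b[i]
 *               __uint128_t p = (__uint128_t)a[j] * b[i] + t[j] + c;
 *               t[j] = (uint64_t)p;  c = (uint64_t)(p >> 64);
 *           }
 *           __uint128_t p = (__uint128_t)t[N] + c;
 *           t[N] = (uint64_t)p;  t[N + 1] += (uint64_t)(p >> 64);
 *           uint64_t q = t[0] * n0;       // quotient digit q_i
 *           p = (__uint128_t)q * m[0] + t[0];   // low limb becomes 0
 *           c = (uint64_t)(p >> 64);
 *           for (int j = 1; j < N; j++) { // t = (t + q*m) >> 64
 *               p = (__uint128_t)q * m[j] + t[j] + c;
 *               t[j - 1] = (uint64_t)p;  c = (uint64_t)(p >> 64);
 *           }
 *           p = (__uint128_t)t[N] + c;
 *           t[N - 1] = (uint64_t)p;
 *           t[N] = t[N + 1] + (uint64_t)(p >> 64);
 *           t[N + 1] = 0;
 *       }
 *       uint64_t u[N], borrow = 0;        // conditional final r -= m
 *       for (int j = 0; j < N; j++) {
 *           __uint128_t d = (__uint128_t)t[j] - m[j] - borrow;
 *           u[j] = (uint64_t)d;  borrow = (uint64_t)(d >> 64) & 1;
 *       }
 *       int sub = t[N] != 0 || borrow == 0;
 *       for (int j = 0; j < N; j++) r[j] = sub ? u[j] : t[j];
 *   }
 */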
# movq %rsp, rsp #mm7 # kept from the older revision that parked %rsp in %mm7
/*
montmul_1st
montmul_2nd
montmul_3rd
montmul_last
*/
montmul_1st_movq
montmul_2nd_movq
montmul_3rd_movq
montmul_last_movq
/*
//montmul_1st
//montmul_1st_movq
//first
pinsrq $0, s8, A0xmm #R[0]
pinsrq $0, s9, A1xmm #R[1]
pinsrq $0, s0, A2xmm #R[2]
pinsrq $0, s1, A3xmm #R[3]
pinsrq $0, s2, B0xmm #R[4]
pinsrq $0, s3, B1xmm #R[5]
pinsrq $0, s4, B2xmm #R[6]
pinsrq $0, s5, B3xmm #R[7]
//montmul_2nd
//Second
pinsrq $0, s6, A0xmm #R[0]
pinsrq $0, s7, A1xmm #R[1]
pinsrq $0, s8, A2xmm #R[2]
pinsrq $0, s9, A3xmm #R[3]
pinsrq $0, s0, B0xmm #R[4]
pinsrq $0, s1, B1xmm #R[5]
pinsrq $0, s2, B2xmm #R[6]
pinsrq $0, s3, B3xmm #R[7]
//montmul_3rd
//third
pinsrq $0, s4, A0xmm #R[0]
pinsrq $0, s5, A1xmm #R[1]
pinsrq $0, s6, A2xmm #R[2]
pinsrq $0, s7, A3xmm #R[3]
pinsrq $0, s8, B0xmm #R[4]
pinsrq $0, s9, B1xmm #R[5]
pinsrq $0, s0, B2xmm #R[6]
pinsrq $0, s1, B3xmm #R[7]
*/
# movq rsp, %rsp #mm7
##################################################################################################
### ###
### montmul1024 END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
#.endm
ret
.size montmul1024, .-montmul1024
|
LoCryptEn/Key-security | 7,059 | Register-bound/RSAIn_Register/Kernel/rsa_head.S | .file "rsa_head.S"
.text
##################################################################################################
### ###
### montmul_1st montsqu_1st ###
### ###
##################################################################################################
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
/* A0-A3 */
.set A0, %ymm0
.set A1, %ymm1
.set A2, %ymm2
.set A3, %ymm3
/* B0-B3 */
.set B0, %ymm4
.set B1, %ymm5
.set B2, %ymm6
.set B3, %ymm7
/* M0-M3 */
.set M0, %ymm8
.set M1, %ymm9
.set M2, %ymm10
.set M3, %ymm11
/* T0-T3 Exponent */
.set T0, %ymm12
.set T1, %ymm13
.set T2, %ymm14
.set T3, %ymm15
##### A0xmm is the xmm alias: the low 128 bits of the ymm register #####
##### A0xmm is the operand form used by vpextrq and vmovq #####
/* A0-A3 */
.set A0xmm, %xmm0
.set A1xmm, %xmm1
.set A2xmm, %xmm2
.set A3xmm, %xmm3
/* B0-B3 */
.set B0xmm, %xmm4
.set B1xmm, %xmm5
.set B2xmm, %xmm6
.set B3xmm, %xmm7
/* M0-M3 */
.set M0xmm, %xmm8
.set M1xmm, %xmm9
.set M2xmm, %xmm10
.set M3xmm, %xmm11
/* T0-T3 Exponent */
.set T0xmm, %xmm12
.set T1xmm, %xmm13
.set T2xmm, %xmm14
.set T3xmm, %xmm15
/* 8 64bit mmx registers */
/* q0-q7 */
.set q0, %mm0
.set q1, %mm1
.set q2, %mm2
.set q3, %mm3
.set q4, %mm4
.set q5, %mm5
#.set q6, %rbp
#.set q7, %rsp
.set q6, %mm6
.set q7, %rbp
.set q8, %mm0
.set q9, %mm1
.set q10, %mm2
.set q11, %mm3
.set q12, %mm4
.set q13, %mm5
#.set q14, %rbp
#.set q15, %rsp
.set q14, %mm6
.set q15, %rbp
#.set n0, %mm6 #n0=M'[0]
#.set rsp, %mm7 #%rsp
.set n0, %mm7 #n0=M'[0]
.set rsp, %rsp #keep the real stack pointer so montmul/montsqu can be called as functions
/* 16 64bit scalar registers */
/* B */
.set bi, %rax
/* q */
.set q, %rbx
/* A M */
.set ai, %rdx
.set mi, %rdx
/* rl rh */
.set rl, %rcx
.set rh, %rdx
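# note: ai, mi and rh deliberately alias %rdx: mulx reads its implicit
# multiplicand from %rdx, so each operand is first loaded into ai/mi,
# and writing the high product half to rh clobbers %rdx only after the
# multiply has consumed it.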
/* s0-s9 */
.set s0, %r8
.set s1, %r9
.set s2, %r10
.set s3, %r11
.set s4, %r12
.set s5, %r13
.set s6, %r14
.set s7, %r15
.set s8, %rdi
.set s9, %rsi
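# note: with bi, q, rl, ai/mi/rh and s0-s9 the general-purpose file is
# exhausted except for %rbp and %rsp, which is why n0 and the quotient
# digits q0-q6 live in mmx registers while q7/q15 fall back to %rbp.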
##################################################################################################
##################################################################################################
### ###
### montmul_1st montsqu_1st ###
### ###
##################################################################################################
/* 16 256bit vector registers */
##################################################################################################
### al and ah can be used as temporary storage for vector value exchange       ###
### bi and q can also be used                                                  ###
#################################################################################
# AL0 AL1 AL2 AL3 # BL0 BL1 BL2 BL3 #
# # #
# A[i] X X X X # B[i] 1 3 5 7 #
# X X X X # 0 2 4 6 #
# 1 3 5 7 # X X X X #
# 0 2 4 6 # X X X X #
#################################################################################
# AH0 AH1 AH2 AH3 # BH0 BH1 BH2 BH3 #
# # #
# A[i] X X X X # B[i] 9 11 13 15 #
# X X X X # 8 10 12 14 #
# 9 11 13 15 # X X X X #
# 8 10 12 14 # X X X X #
#################################################################################
# ML0 ML1 ML2 ML3 # TL0 TL1 TL2 TL3 #
# # #
# M[i] X X X X # T[i] 1 3 5 7 #
# X X X X # 0 2 4 6 #
# 1 3 5 7 # X X X X #
# 0 2 4 6 # X X X X #
#################################################################################
# MH0 MH1 MH2 MH3 # TH0 TH1 TH2 TH3 #
# # #
# M[i] X X X X # T[i] 9 11 13 15 #
# X X X X # 8 10 12 14 #
# 9 11 13 15 # X X X X #
# 8 10 12 14 # X X X X #
#################################################################################
#########################################################
/* AL0-AL3 */
.set AL0, A0
.set AL1, A1
.set AL2, A2
.set AL3, A3
/* AH0-AH3 */
.set AH0, B0
.set AH1, B1
.set AH2, B2
.set AH3, B3
/* BL0-BL3 */
.set BL0, A0
.set BL1, A1
.set BL2, A2
.set BL3, A3
/* BH0-BH3 */
.set BH0, B0
.set BH1, B1
.set BH2, B2
.set BH3, B3
/* ML0-ML3 */
.set ML0, M0
.set ML1, M1
.set ML2, M2
.set ML3, M3
/* MH0-MH3 */
.set MH0, T0
.set MH1, T1
.set MH2, T2
.set MH3, T3
/* TL0-TL3 */
.set TL0, M0
.set TL1, M1
.set TL2, M2
.set TL3, M3
/* TH0-TH3 */
.set TH0, T0
.set TH1, T1
.set TH2, T2
.set TH3, T3
##### AL0xmm is the xmm register, low 128bit of ymm register #####
##### AL0xmm is used for vpextrq #####
/*** T only stores the exponent and takes no part in the montmul1024 computation, so no Txmm aliases are needed ***/
/* AL0-AL3 */
.set AL0xmm, A0xmm
.set AL1xmm, A1xmm
.set AL2xmm, A2xmm
.set AL3xmm, A3xmm
/* AH0-AH3 */
.set AH0xmm, B0xmm
.set AH1xmm, B1xmm
.set AH2xmm, B2xmm
.set AH3xmm, B3xmm
/* BL0-BL3 */
.set BL0xmm, A0xmm
.set BL1xmm, A1xmm
.set BL2xmm, A2xmm
.set BL3xmm, A3xmm
/* BH0-BH3 */
.set BH0xmm, B0xmm
.set BH1xmm, B1xmm
.set BH2xmm, B2xmm
.set BH3xmm, B3xmm
/* ML0-ML3 */
.set ML0xmm, M0xmm
.set ML1xmm, M1xmm
.set ML2xmm, M2xmm
.set ML3xmm, M3xmm
/* MH0-MH3 */
.set MH0xmm, T0xmm
.set MH1xmm, T1xmm
.set MH2xmm, T2xmm
.set MH3xmm, T3xmm
##################################################################################################
### ###
### montmul_3rd montsqu_3rd ###
### ###
##################################################################################################
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 1 3 5 7 #
# 0 2 4 6 #
# 9 11 13 15 #
# 8 10 12 14 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 1 3 5 7 #
# 0 2 4 6 #
# 9 11 13 15 #
# 8 10 12 14 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 1 3 5 7 #
# 0 2 4 6 #
# 9 11 13 15 #
# 8 10 12 14 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
/* r0-r15: montmul1024 result */
.set r0, q8
.set r1, q9
.set r2, q10
.set r3, q11
.set r4, q12
.set r5, q13
.set r6, q14
.set r7, q15 #rbp
.set r8, s4
.set r9, s5
.set r10, s6
.set r11, s7
.set r12, s8
.set r13, s9
.set r14, s0
.set r15, s1
|
LoCryptEn/Key-security | 139,044 | Register-bound/RSAIn_Register/Kernel/montsqu.S | #include "rsa_head.S"
.file "montsqu.S"
.text
##################################################################################################
### ###
### montsqu(A,M,n0): ###
### ###
### R=A*A*R^(-1) mod M ###
### ###
### ###
##################################################################################################
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# A[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# B0 B1 B2 B3 #
# #
# B[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# M0 M1 M2 M3 #
# #
# M[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
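##### note: montsqu reuses the montmul1024 schedule with the B    #####
##### registers holding a copy of A (the bi loads below are       #####
##### annotated #A[i]); the symmetry of the cross terms in A*A    #####
##### is not exploited.                                           #####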
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.macro montsqu_1st_movq
##################################################################################################
### ###
### 1st part: A[0-7]*A[0-7] + M[0-7]*(q0-q7) ###
### ###
### sum 576=65+73*7 ###
### ###
##################################################################################################
### ###
### 1st_0: A[0-7]*A[0] + M[0-7]*q0 ###
### sum 65=11+3+17*3 ###
### ###
###########################################################
##### A[0 2 4 6]*B[0] #####
xorq s8, s8
xorq s9, s9
vmovq A0xmm, bi #A[0]
vmovq A0xmm, ai #A[0]
mulx bi, s0, s1 #A[0]*B[0]
vmovq A1xmm, ai #A[2]
mulx bi, s2, s3 #A[2]*B[0]
vmovq A2xmm, ai #A[4]
mulx bi, s4, s5 #A[4]*B[0]
vmovq A3xmm, ai #A[6]
mulx bi, s6, s7 #A[6]*B[0]
##### q0 #####
movq n0, %rdx
mulx s0, q, rh #q0=s0*n0
movq q, q0 #q0
##### M[0 2 4 6]*q0 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q0
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q0
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q0
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q0
adc rl, s6
adc rh, s7
adc $0, s8
##### A[1 3 5 7]*B[0] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[0]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[0]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[0]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[0]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[1 3 5 7]*q0 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q0
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q0
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q0
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q0
adc rl, s7
adc rh, s8
adc $0, s9
##################################################################################################
### ###
### 1st_1: A[0-7]*B[1] + M[0-7]*q1 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[1] #####
xorq s0, s0
vmovq B0xmm, bi #A[1]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[1]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[1]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[1]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[1]
adc rl, s7
adc rh, s8
adc $0, s9
##### q1 #####
movq n0, %rdx
mulx s1, q, rh #q1=s1*n0
movq q, q1 #q1
##### M[0 2 4 6]*q1 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q1
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q1
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q1
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q1
adc rl, s7
adc rh, s8
adc $0, s9
##### A[1 3 5 7]*B[1] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[1]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[1]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[1]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[1]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[1 3 5 7]*q1 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q1
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q1
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q1
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q1
adc rl, s8
adc rh, s9
adc $0, s0
##################################################################################################
### ###
### 1st_2: A[0-7]*B[2] + M[0-7]*q2 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[2] #####
xorq s1, s1
vmovq A1xmm, bi #A[2]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[2]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[2]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[2]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[2]
adc rl, s8
adc rh, s9
adc $0, s0
##### q2 #####
movq n0, %rdx
mulx s2, q, rh #q2=s2*n0
movq q, q2 #q2
##### M[0 2 4 6]*q2 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q2
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q2
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q2
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q2
adc rl, s8
adc rh, s9
adc $0, s0
##### A[1 3 5 7]*B[2] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[2]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[2]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[2]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[2]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[1 3 5 7]*q2 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q2
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q2
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q2
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q2
adc rl, s9
adc rh, s0
adc $0, s1
##################################################################################################
### ###
### 1st_3: A[0-7]*B[3] + M[0-7]*q3 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[3] #####
xorq s2, s2
vmovq B1xmm, bi #A[3]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[3]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[3]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[3]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[3]
adc rl, s9
adc rh, s0
adc $0, s1
##### q3 #####
movq n0, %rdx
mulx s3, q, rh #q3=s3*n0
movq q, q3 #q3
##### M[0 2 4 6]*q3 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q3
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q3
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q3
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q3
adc rl, s9
adc rh, s0
adc $0, s1
##### A[1 3 5 7]*B[3] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[3]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[3]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[3]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[3]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[1 3 5 7]*q3 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q3
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q3
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q3
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q3
adc rl, s0
adc rh, s1
adc $0, s2
##################################################################################################
### ###
### 1st_4: A[0-7]*B[4] + M[0-7]*q4 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[4] #####
xorq s3, s3
vmovq A2xmm, bi #A[4]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[4]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[4]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[4]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[4]
adc rl, s0
adc rh, s1
adc $0, s2
##### q4 #####
movq n0, %rdx
mulx s4, q, rh #q4=s4*n0
movq q, q4 #q4
##### M[0 2 4 6]*q4 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q4
add rl, s4
adc rh, s5
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q4
adc rl, s6
adc rh, s7
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q4
adc rl, s8
adc rh, s9
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q4
adc rl, s0
adc rh, s1
adc $0, s2
##### A[1 3 5 7]*B[4] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[4]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[4]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[4]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[4]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[1 3 5 7]*q4 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q4
add rl, s5
adc rh, s6
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q4
adc rl, s7
adc rh, s8
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q4
adc rl, s9
adc rh, s0
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q4
adc rl, s1
adc rh, s2
adc $0, s3
##################################################################################################
### ###
### 1st_5: A[0-7]*B[5] + M[0-7]*q5 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[5] #####
xorq s4, s4
vmovq B2xmm, bi #A[5]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[5]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[5]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[5]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[5]
adc rl, s1
adc rh, s2
adc $0, s3
##### q5 #####
movq n0, %rdx
mulx s5, q, rh #q5=s5*n0
movq q, q5 #q5
##### M[0 2 4 6]*q5 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q5
add rl, s5
adc rh, s6
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q5
adc rl, s7
adc rh, s8
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q5
adc rl, s9
adc rh, s0
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q5
adc rl, s1
adc rh, s2
adc $0, s3
##### A[1 3 5 7]*B[5] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[5]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[5]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[5]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[5]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[1 3 5 7]*q5 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q5
add rl, s6
adc rh, s7
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q5
adc rl, s8
adc rh, s9
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q5
adc rl, s0
adc rh, s1
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q5
adc rl, s2
adc rh, s3
adc $0, s4
##################################################################################################
### ###
### 1st_6: A[0-7]*B[6] + M[0-7]*q6 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[6] #####
xorq s5, s5
vmovq A3xmm, bi #A[6]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[6]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[6]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[6]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[6]
adc rl, s2
adc rh, s3
adc $0, s4
##### q6 #####
movq n0, %rdx
mulx s6, q, rh #q6=s6*n0
movq q, q6 #q6
##### M[0 2 4 6]*q6 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q6
add rl, s6
adc rh, s7
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q6
adc rl, s8
adc rh, s9
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q6
adc rl, s0
adc rh, s1
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q6
adc rl, s2
adc rh, s3
adc $0, s4
##### A[1 3 5 7]*B[6] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[6]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[6]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[6]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[6]
adc rl, s3
adc rh, s4
adc $0, s5
##### M[1 3 5 7]*q6 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q6
add rl, s7
adc rh, s8
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q6
adc rl, s9
adc rh, s0
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q6
adc rl, s1
adc rh, s2
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q6
adc rl, s3
adc rh, s4
adc $0, s5
##################################################################################################
### ###
### 1st_7: A[0-7]*B[7] + M[0-7]*q7 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[7] #####
xorq s6, s6
vmovq B3xmm, bi #A[7]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[7]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[7]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[7]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[7]
adc rl, s3
adc rh, s4
adc $0, s5
##### q7 #####
movq n0, %rdx
mulx s7, q, rh #q7=s7*n0
movq q, q7 #q7
##### M[0 2 4 6]*q7 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q7
add rl, s7
adc rh, s8
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q7
adc rl, s9
adc rh, s0
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q7
adc rl, s1
adc rh, s2
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q7
adc rl, s3
adc rh, s4
adc $0, s5
##### A[1 3 5 7]*B[7] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[7]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[7]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[7]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[7]
adc rl, s4
adc rh, s5
adc $0, s6
##### M[1 3 5 7]*q7 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q7
add rl, s8
adc rh, s9
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q7
adc rl, s0
adc rh, s1
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q7
adc rl, s2
adc rh, s3
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q7
adc rl, s4
adc rh, s5
adc $0, s6
##################################################################################################
### ###
### 1st part END ###
### ###
### low high ###
### ###
### s8 s9 s0 s1 s2 s3 s4 s5 s6 ###
### ###
##################################################################################################
.endm
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.macro montsqu_2nd_movq
##################################################################################################
### ###
### 2nd part: ###
### A[8-15]*B[0-7] + M[8-15]*(q0-q7) + A[0-7]*B[8-15] + M[0-7]*(q8-q15) ###
### ###
### sum 1248=56+149*8 ###
### ###
##################################################################################################
### ###
### 2nd_arrange_vector ###
### sum 56=7*8 ###
### ###
###########################################################
vpermq $0xD8, A0, A0 #imm=3120
vpermq $0xD8, A1, A1 #imm=3120
vpermq $0xD8, A2, A2 #imm=3120
vpermq $0xD8, A3, A3 #imm=3120
vpermq $0xD8, B0, B0 #imm=3120
vpermq $0xD8, B1, B1 #imm=3120
vpermq $0xD8, B2, B2 #imm=3120
vpermq $0xD8, B3, B3 #imm=3120
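##### the vpermq $0xD8 pass re-pairs the qwords of each ymm so    #####
##### the limbs consumed by the 2nd-part schedule sit in the low  #####
##### 128 bits, where vmovq and vpextrq $1 can reach them.        #####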
### store T3 ###
# vperm2i128 $0x20, T3, T0, T0
/*
vmovq T3xmm, bi
vpextrq $1, T3xmm, q
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, rl
vpextrq $1, T3xmm, rh
*/
/*
vperm2i128 $0x01, A0, A0, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, A0, A0 #imm=0110
vperm2i128 $0x01, A1, A1, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, A1, A1 #imm=0110
vperm2i128 $0x01, A2, A2, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, A2, A2 #imm=0110
vperm2i128 $0x01, A3, A3, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, A3, A3 #imm=0110
vperm2i128 $0x01, B0, B0, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, B0, B0 #imm=0110
vperm2i128 $0x01, B1, B1, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, B1, B1 #imm=0110
vperm2i128 $0x01, B2, B2, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, B2, B2 #imm=0110
vperm2i128 $0x01, B3, B3, T3
vshufpd $0x05, T3, T3, T3 #imm=0101
vblendpd $0x06, T3, B3, B3 #imm=0110
*/
### restore T3 ###
# vperm2i128 $0x31, T3, T0, T3
/*
vmovq rl, T3xmm
vpinsrq $1, rh, T3xmm, T3xmm
vperm2i128 $0x01, T3, T3, T3
pinsrq $0, bi, T3xmm
pinsrq $1, q, T3xmm
*/
##################################################################################################
### ###
### 2nd_0: ###
### A[8-15]*B[0] + M[8-15]*q0 + A[0-7]*B[8] + M[0-7]*q8 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[0] #####
xorq s7, s7
vmovq A0xmm, bi #B[0]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[0]
add rl, s8
adc rh, s9
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[0]
adc rl, s0
adc rh, s1
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[0]
adc rl, s2
adc rh, s3
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[0]
adc rl, s4
adc rh, s5
adc $0, s6
##### q0 #####
movq q0, q
##### M[8 10 12 14]*q0 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q0
add rl, s8
adc rh, s9
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q0
adc rl, s0
adc rh, s1
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q0
adc rl, s2
adc rh, s3
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q0
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[0] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[0]
add rl, s9
adc rh, s0
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[0]
adc rl, s1
adc rh, s2
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[0]
adc rl, s3
adc rh, s4
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[0]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[9 11 13 15]*q0 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q0
add rl, s9
adc rh, s0
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q0
adc rl, s1
adc rh, s2
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q0
adc rl, s3
adc rh, s4
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q0
adc rl, s5
adc rh, s6
adc $0, s7
###################################################################
###################################################################
##### A[0 2 4 6]*B[8] #####
vpextrq $1, A0xmm, bi #B[8]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[8]
add rl, s8
adc rh, s9
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[8]
adc rl, s0
adc rh, s1
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[8]
adc rl, s2
adc rh, s3
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[8]
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### q8 #####
movq n0, %rdx
mulx s8, q, rh #q8=s8*n0
movq q, q8 #q8
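##### q8 is the Montgomery quotient digit: q8 = s8*n0 mod 2^64 with
##### n0 = -M^-1 mod 2^64, chosen so the += q8*M below clears limb s8.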
##### M[0 2 4 6]*q8 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q8
add rl, s8
adc rh, s9
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q8
adc rl, s0
adc rh, s1
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q8
adc rl, s2
adc rh, s3
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q8
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### A[1 3 5 7]*B[8] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[8]
add rl, s9
adc rh, s0
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[8]
adc rl, s1
adc rh, s2
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[8]
adc rl, s3
adc rh, s4
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[8]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[1 3 5 7]*q8 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q8
add rl, s9
adc rh, s0
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q8
adc rl, s1
adc rh, s2
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q8
adc rl, s3
adc rh, s4
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q8
adc rl, s5
adc rh, s6
adc $0, s7
##################################################################################################
### ###
### 2nd_1: ###
### A[8-15]*B[1] + M[8-15]*q1 + A[0-7]*B[9] + M[0-7]*q9 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[1] #####
xorq s8, s8
vmovq B0xmm, bi #A[1]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[1]
add rl, s9
adc rh, s0
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[1]
adc rl, s1
adc rh, s2
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[1]
adc rl, s3
adc rh, s4
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[1]
adc rl, s5
adc rh, s6
adc $0, s7
##### q1 #####
movq q1, q
##### M[8 10 12 14]*q1 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q1
add rl, s9
adc rh, s0
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q1
adc rl, s1
adc rh, s2
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q1
adc rl, s3
adc rh, s4
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q1
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[1] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[1]
add rl, s0
adc rh, s1
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[1]
adc rl, s2
adc rh, s3
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[1]
adc rl, s4
adc rh, s5
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[1]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[9 11 13 15]*q1 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q1
add rl, s0
adc rh, s1
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q1
adc rl, s2
adc rh, s3
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q1
adc rl, s4
adc rh, s5
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q1
adc rl, s6
adc rh, s7
adc $0, s8
###################################################################
###################################################################
##### A[0 2 4 6]*B[9] #####
vpextrq $1, B0xmm, bi #A[9]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[9]
add rl, s9
adc rh, s0
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[9]
adc rl, s1
adc rh, s2
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[9]
adc rl, s3
adc rh, s4
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[9]
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### q9 #####
movq n0, %rdx
mulx s9, q, rh #q9=s9*n0
movq q, q9 #q9
##### M[0 2 4 6]*q9 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q9
add rl, s9
adc rh, s0
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q9
adc rl, s1
adc rh, s2
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q9
adc rl, s3
adc rh, s4
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q9
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### A[1 3 5 7]*B[9] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[9]
add rl, s0
adc rh, s1
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[9]
adc rl, s2
adc rh, s3
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[9]
adc rl, s4
adc rh, s5
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[9]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[1 3 5 7]*q9 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q9
add rl, s0
adc rh, s1
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q9
adc rl, s2
adc rh, s3
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q9
adc rl, s4
adc rh, s5
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q9
adc rl, s6
adc rh, s7
adc $0, s8
##################################################################################################
### ###
### 2nd_2: ###
### A[8-15]*B[2] + M[8-15]*q2 + A[0-7]*B[10] + M[0-7]*q10 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[2] #####
xorq s9, s9
vmovq A1xmm, bi #B[2]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[2]
add rl, s0
adc rh, s1
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[2]
adc rl, s2
adc rh, s3
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[2]
adc rl, s4
adc rh, s5
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[2]
adc rl, s6
adc rh, s7
adc $0, s8
##### q2 #####
movq q2, q
##### M[8 10 12 14]*q2 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q2
add rl, s0
adc rh, s1
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q2
adc rl, s2
adc rh, s3
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q2
adc rl, s4
adc rh, s5
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q2
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[2] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[2]
add rl, s1
adc rh, s2
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[2]
adc rl, s3
adc rh, s4
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[2]
adc rl, s5
adc rh, s6
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[2]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[9 11 13 15]*q2 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q2
add rl, s1
adc rh, s2
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q2
adc rl, s3
adc rh, s4
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q2
adc rl, s5
adc rh, s6
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q2
adc rl, s7
adc rh, s8
adc $0, s9
###################################################################
###################################################################
##### A[0 2 4 6]*B[10] #####
vpextrq $1, A1xmm, bi #B[10]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[10]
add rl, s0
adc rh, s1
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[10]
adc rl, s2
adc rh, s3
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[10]
adc rl, s4
adc rh, s5
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[10]
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### q10 #####
movq n0, %rdx
mulx s0, q, rh #q10=s0*n0
movq q, q10 #q10
##### M[0 2 4 6]*q10 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q10
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q10
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q10
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q10
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### A[1 3 5 7]*B[10] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[10]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[10]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[10]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[10]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[1 3 5 7]*q10 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q10
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q10
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q10
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q10
adc rl, s7
adc rh, s8
adc $0, s9
##################################################################################################
### ###
### 2nd_3: ###
### A[8-15]*B[3] + M[8-15]*q3 + A[0-7]*B[11] + M[0-7]*q11 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[3] #####
xorq s0, s0
vmovq B1xmm, bi #B[3]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[3]
add rl, s1
adc rh, s2
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[3]
adc rl, s3
adc rh, s4
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[3]
adc rl, s5
adc rh, s6
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[3]
adc rl, s7
adc rh, s8
adc $0, s9
##### q3 #####
movq q3, q
##### M[8 10 12 14]*q3 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q3
add rl, s1
adc rh, s2
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q3
adc rl, s3
adc rh, s4
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q3
adc rl, s5
adc rh, s6
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q3
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[3] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[3]
add rl, s2
adc rh, s3
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[3]
adc rl, s4
adc rh, s5
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[3]
adc rl, s6
adc rh, s7
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[3]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[9 11 13 15]*q3 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q3
add rl, s2
adc rh, s3
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q3
adc rl, s4
adc rh, s5
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q3
adc rl, s6
adc rh, s7
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q3
adc rl, s8
adc rh, s9
adc $0, s0
###################################################################
###################################################################
##### A[0 2 4 6]*B[11] #####
vpextrq $1, B1xmm, bi #B[11]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[11]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[11]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[11]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[11]
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### q11 #####
movq n0, %rdx
mulx s1, q, rh #q11=s1*n0
movq q, q11 #q11
##### M[0 2 4 6]*q11 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q11
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q11
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q11
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q11
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### A[1 3 5 7]*B[11] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[11]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[11]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[11]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[11]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[1 3 5 7]*q11 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q11
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q11
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q11
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q11
adc rl, s8
adc rh, s9
adc $0, s0
##################################################################################################
### ###
### 2nd_4: ###
### A[8-15]*B[4] + M[8-15]*q4 + A[0-7]*B[12] + M[0-7]*q12 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[4] #####
xorq s1, s1
vmovq A2xmm, bi #B[4]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[4]
add rl, s2
adc rh, s3
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[4]
adc rl, s4
adc rh, s5
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[4]
adc rl, s6
adc rh, s7
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[4]
adc rl, s8
adc rh, s9
adc $0, s0
##### q4 #####
movq q4, q
##### M[8 10 12 14]*q4 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q4
add rl, s2
adc rh, s3
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q4
adc rl, s4
adc rh, s5
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q4
adc rl, s6
adc rh, s7
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q4
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[4] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[4]
add rl, s3
adc rh, s4
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[4]
adc rl, s5
adc rh, s6
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[4]
adc rl, s7
adc rh, s8
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[4]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[9 11 13 15]*q4 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q4
add rl, s3
adc rh, s4
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q4
adc rl, s5
adc rh, s6
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q4
adc rl, s7
adc rh, s8
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q4
adc rl, s9
adc rh, s0
adc $0, s1
###################################################################
###################################################################
##### A[0 2 4 6]*B[12] #####
vpextrq $1, A2xmm, bi #B[12]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[12]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[12]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[12]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[12]
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### q12 #####
movq n0, %rdx
mulx s2, q, rh #q12=s2*n0
movq q, q12 #q12
##### M[0 2 4 6]*q12 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q12
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q12
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q12
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q12
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### A[1 3 5 7]*B[12] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[12]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[12]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[12]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[12]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[1 3 5 7]*q12 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q12
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q12
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q12
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q12
adc rl, s9
adc rh, s0
adc $0, s1
##################################################################################################
### ###
### 2nd_5: ###
### A[8-15]*B[5] + M[8-15]*q5 + A[0-7]*B[13] + M[0-7]*q13 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[5] #####
xorq s2, s2
vmovq B2xmm, bi #B[5]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[5]
add rl, s3
adc rh, s4
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[5]
adc rl, s5
adc rh, s6
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[5]
adc rl, s7
adc rh, s8
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[5]
adc rl, s9
adc rh, s0
adc $0, s1
##### q5 #####
movq q5, q
##### M[8 10 12 14]*q5 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q5
add rl, s3
adc rh, s4
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q5
adc rl, s5
adc rh, s6
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q5
adc rl, s7
adc rh, s8
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q5
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[5] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[5]
add rl, s4
adc rh, s5
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[5]
adc rl, s6
adc rh, s7
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[5]
adc rl, s8
adc rh, s9
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[5]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[9 11 13 15]*q5 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q5
add rl, s4
adc rh, s5
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q5
adc rl, s6
adc rh, s7
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q5
adc rl, s8
adc rh, s9
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q5
adc rl, s0
adc rh, s1
adc $0, s2
###################################################################
###################################################################
##### A[0 2 4 6]*B[13] #####
vpextrq $1, B2xmm, bi #B[13]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[13]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[13]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[13]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[13]
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### q13 #####
movq n0, %rdx
mulx s3, q, rh #q13=s3*n0
movq q, q13 #q13
##### M[0 2 4 6]*q13 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q13
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q13
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q13
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q13
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### A[1 3 5 7]*B[13] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[13]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[13]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[13]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[13]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[1 3 5 7]*q13 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q13
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q13
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q13
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q13
adc rl, s0
adc rh, s1
adc $0, s2
##################################################################################################
### ###
### 2nd_6: ###
### A[8-15]*B[6] + M[8-15]*q6 + A[0-7]*B[14] + M[0-7]*q14 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[6] #####
xorq s3, s3
vmovq A3xmm, bi #B[6]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[6]
add rl, s4
adc rh, s5
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[6]
adc rl, s6
adc rh, s7
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[6]
adc rl, s8
adc rh, s9
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[6]
adc rl, s0
adc rh, s1
adc $0, s2
##### q6 #####
movq q6, q
##### M[8 10 12 14]*q6 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q6
add rl, s4
adc rh, s5
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q6
adc rl, s6
adc rh, s7
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q6
adc rl, s8
adc rh, s9
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q6
adc rl, s0
adc rh, s1
adc $0, s2
##### A[9 11 13 15]*B[6] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[6]
add rl, s5
adc rh, s6
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[6]
adc rl, s7
adc rh, s8
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[6]
adc rl, s9
adc rh, s0
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[6]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[9 11 13 15]*q6 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q6
add rl, s5
adc rh, s6
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q6
adc rl, s7
adc rh, s8
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q6
adc rl, s9
adc rh, s0
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q6
adc rl, s1
adc rh, s2
adc $0, s3
###################################################################
###################################################################
##### A[0 2 4 6]*B[14] #####
vpextrq $1, A3xmm, bi #B[14]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[14]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[14]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[14]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[14]
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### q14 #####
movq n0, %rdx
mulx s4, q, rh #q14=s4*n0
movq q, q14 #q14
##### M[0 2 4 6]*q14 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q14
add rl, s4
adc rh, s5
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q14
adc rl, s6
adc rh, s7
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q14
adc rl, s8
adc rh, s9
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q14
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### A[1 3 5 7]*B[14] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[14]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[14]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[14]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[14]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[1 3 5 7]*q14 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q14
add rl, s5
adc rh, s6
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q14
adc rl, s7
adc rh, s8
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q14
adc rl, s9
adc rh, s0
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q14
adc rl, s1
adc rh, s2
adc $0, s3
##################################################################################################
### ###
### 2nd_7: ###
### A[8-15]*B[7] + M[8-15]*q7 + A[0-7]*B[15] + M[0-7]*q15 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[7] #####
xorq s4, s4
vmovq B3xmm, bi #B[7]
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[7]
add rl, s5
adc rh, s6
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[7]
adc rl, s7
adc rh, s8
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[7]
adc rl, s9
adc rh, s0
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[7]
adc rl, s1
adc rh, s2
adc $0, s3
##### q7 #####
movq q7, q
##### M[8 10 12 14]*q7 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q7
add rl, s5
adc rh, s6
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q7
adc rl, s7
adc rh, s8
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q7
adc rl, s9
adc rh, s0
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q7
adc rl, s1
adc rh, s2
adc $0, s3
##### A[9 11 13 15]*B[7] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[7]
add rl, s6
adc rh, s7
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[7]
adc rl, s8
adc rh, s9
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[7]
adc rl, s0
adc rh, s1
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[7]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[9 11 13 15]*q7 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q7
add rl, s6
adc rh, s7
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q7
adc rl, s8
adc rh, s9
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q7
adc rl, s0
adc rh, s1
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q7
adc rl, s2
adc rh, s3
adc $0, s4
###################################################################
###################################################################
##### A[0 2 4 6]*B[15] #####
vpextrq $1, B3xmm, bi #B[15]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[15]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[15]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[15]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[15]
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### q15 #####
movq n0, %rdx
mulx s5, q, rh #q15=s5*n0
movq q, q15 #q15
##### M[0 2 4 6]*q15 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q15
add rl, s5
adc rh, s6
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q15
adc rl, s7
adc rh, s8
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q15
adc rl, s9
adc rh, s0
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q15
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### A[1 3 5 7]*B[15] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[15]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[15]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[15]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[15]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[1 3 5 7]*q15 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q15
add rl, s6
adc rh, s7
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q15
adc rl, s8
adc rh, s9
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q15
adc rl, s0
adc rh, s1
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q15
adc rl, s2
adc rh, s3
adc $0, s4
##################################################################################################
### ###
### 2nd part END ###
### ###
### low high ###
### ###
### s6 s7 s8 s9 s0 s1 s2 s3 s4 ###
### ###
##################################################################################################
.endm
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.macro montsqu_3rd_movq
##################################################################################################
### ###
### 3rd part: A[8-15]*B[8-15] + M[8-15]*(q8-q15) ###
### ###
### sum 628=52+72*8 ###
### ###
##################################################################################################
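### hedged sketch: after the eight 3rd_k steps every quotient digit
### q0-q15 has been applied, the low 16 limbs of the 2048-bit sum are
### zero, and r0-r7 plus the s-register window hold the pre-reduction
### result R = (A*A + Q*M) / 2^1024, which the last part reduces below M.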
### ###
### 3rd_arrange_vector ###
### sum 52=7*4+6*4 ###
### ###
###########################################################
/*
### store T3 ###
vperm2i128 $0x20, T3, T0, T0
vperm2i128 $0x01, A0, A0, T3
vshufpd $0x05, A0, A0, A0 #imm=0101
vblendpd $0x06, A0, T3, A0 #imm=0110
vperm2i128 $0x01, A1, A1, T3
vshufpd $0x05, A1, A1, A1 #imm=0101
vblendpd $0x06, A1, T3, A1 #imm=0110
vperm2i128 $0x01, A2, A2, T3
vshufpd $0x05, A2, A2, A2 #imm=0101
vblendpd $0x06, A2, T3, A2 #imm=0110
vperm2i128 $0x01, A3, A3, T3
vshufpd $0x05, A3, A3, A3 #imm=0101
vblendpd $0x06, A3, T3, A3 #imm=0110
vperm2i128 $0x01, B0, B0, T3
vshufpd $0x05, B0, B0, B0 #imm=0101
vblendpd $0x06, B0, T3, B0 #imm=0110
vperm2i128 $0x01, B1, B1, T3
vshufpd $0x05, B1, B1, B1 #imm=0101
vblendpd $0x06, B1, T3, B1 #imm=0110
vperm2i128 $0x01, B2, B2, T3
vshufpd $0x05, B2, B2, B2 #imm=0101
vblendpd $0x06, B2, T3, B2 #imm=0110
vperm2i128 $0x01, B3, B3, T3
vshufpd $0x05, B3, B3, B3 #imm=0101
vblendpd $0x06, B3, T3, B3 #imm=0110
### restore T3 ###
vperm2i128 $0x31, T3, T0, T3
*/
vpermq $0x8D, A0, A0 #imm=2031
vpermq $0x8D, A1, A1 #imm=2031
vpermq $0x8D, A2, A2 #imm=2031
vpermq $0x8D, A3, A3 #imm=2031
vpermq $0x8D, B0, B0 #imm=2031
vpermq $0x8D, B1, B1 #imm=2031
vpermq $0x8D, B2, B2 #imm=2031
vpermq $0x8D, B3, B3 #imm=2031
/*
vpermq $0x72, A0, A0 #imm=01 11 00 10
vpermq $0x72, A1, A1 #imm=01 11 00 10
vpermq $0x72, A2, A2 #imm=01 11 00 10
vpermq $0x72, A3, A3 #imm=01 11 00 10
vpermq $0x72, B0, B0 #imm=01 11 00 10
vpermq $0x72, B1, B1 #imm=01 11 00 10
vpermq $0x72, B2, B2 #imm=01 11 00 10
vpermq $0x72, B3, B3 #imm=01 11 00 10
*/
### inverse M ###
vshufpd $0x05, M0, M0, M0 #imm=0101
vshufpd $0x05, M1, M1, M1 #imm=0101
vshufpd $0x05, M2, M2, M2 #imm=0101
vshufpd $0x05, M3, M3, M3 #imm=0101
vshufpd $0x05, T0, T0, T0 #imm=0101
vshufpd $0x05, T1, T1, T1 #imm=0101
vshufpd $0x05, T2, T2, T2 #imm=0101
vshufpd $0x05, T3, T3, T3 #imm=0101
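### vshufpd $0x05 swaps the two qwords inside each 128-bit lane, bringing
### M[8..15] down into the vmovq (qword 0) position for this part; the
### matching "reverse M" block after 3rd_7 swaps them back.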
##################################################################################################
### ###
### 3rd_0: ###
### A[8-15]*B[8] + M[8-15]*q8 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[8] #####
xorq s5, s5
vmovq A0xmm, bi #A[8]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[8]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[8]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[8]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[8]
adc rl, s2
adc rh, s3
adc $0, s4
##### q8 #####
movq q8, q
##### M[8 10 12 14]*q8 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q8
add rl, s6
adc rh, s7
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q8
adc rl, s8
adc rh, s9
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q8
adc rl, s0
adc rh, s1
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q8
adc rl, s2
adc rh, s3
adc $0, s4
##### A[9 11 13 15]*B[8] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[8]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[8]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[8]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[8]
adc rl, s3
adc rh, s4
adc $0, s5
##### M[9 11 13 15]*q8 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q8
add rl, s7
adc rh, s8
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q8
adc rl, s9
adc rh, s0
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q8
adc rl, s1
adc rh, s2
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q8
adc rl, s3
adc rh, s4
adc $0, s5
movq s6, r0 #result[0]
#vpinsrq $0, s6, T0xmm, T0xmm #result[0]
#pinsrq $0, s6, T0xmm #result[0]
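# once q8 has been folded in, s6 holds final result limb 0 and is retired
# to r0; the register is then recycled as the new top of the window
# (xorq s6, s6 in 3rd_1), and each later 3rd_k retires the next limb.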
##################################################################################################
### ###
### 3rd_1: ###
### A[8-15]*B[9] + M[8-15]*q9 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[9] #####
xorq s6, s6
vmovq B0xmm, bi #A[9]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[9]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[9]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[9]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[9]
adc rl, s3
adc rh, s4
adc $0, s5
##### q9 #####
movq q9, q
##### M[8 10 12 14]*q9 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q9
add rl, s7
adc rh, s8
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q9
adc rl, s9
adc rh, s0
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q9
adc rl, s1
adc rh, s2
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q9
adc rl, s3
adc rh, s4
adc $0, s5
##### A[9 11 13 15]*B[9] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[9]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[9]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[9]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[9]
adc rl, s4
adc rh, s5
adc $0, s6
##### M[9 11 13 15]*q9 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q9
add rl, s8
adc rh, s9
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q9
adc rl, s0
adc rh, s1
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q9
adc rl, s2
adc rh, s3
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q9
adc rl, s4
adc rh, s5
adc $0, s6
movq s7, r1 #result[1]
#vpinsrq $1, s7, T0xmm, T0xmm #result[1]
#pinsrq $1, s7, T0xmm #result[1]
##################################################################################################
### ###
### 3rd_2: ###
### A[8-15]*B[10] + M[8-15]*q10 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[10] #####
xorq s7, s7
vmovq A1xmm, bi #A[10]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[10]
add rl, s8
adc rh, s9
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[10]
adc rl, s0
adc rh, s1
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[10]
adc rl, s2
adc rh, s3
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[10]
adc rl, s4
adc rh, s5
adc $0, s6
##### q10 #####
movq q10, q
##### M[8 10 12 14]*q10 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q10
add rl, s8
adc rh, s9
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q10
adc rl, s0
adc rh, s1
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q10
adc rl, s2
adc rh, s3
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q10
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[10] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[10]
add rl, s9
adc rh, s0
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[10]
adc rl, s1
adc rh, s2
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[10]
adc rl, s3
adc rh, s4
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[10]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[9 11 13 15]*q10 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q10
add rl, s9
adc rh, s0
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q10
adc rl, s1
adc rh, s2
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q10
adc rl, s3
adc rh, s4
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q10
adc rl, s5
adc rh, s6
adc $0, s7
movq s8, r2 #result[2]
#vpinsrq $0, s8, T1xmm, T1xmm #result[2]
#pinsrq $0, s8, T1xmm #result[2]
##################################################################################################
### ###
### 3rd_3: ###
### A[8-15]*B[11] + M[8-15]*q11 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[11] #####
xorq s8, s8
vmovq B1xmm, bi #A[11]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[11]
add rl, s9
adc rh, s0
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[11]
adc rl, s1
adc rh, s2
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[11]
adc rl, s3
adc rh, s4
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[11]
adc rl, s5
adc rh, s6
adc $0, s7
##### q11 #####
movq q11, q
##### M[8 10 12 14]*q11 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q11
add rl, s9
adc rh, s0
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q11
adc rl, s1
adc rh, s2
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q11
adc rl, s3
adc rh, s4
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q11
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[11] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[11]
add rl, s0
adc rh, s1
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[11]
adc rl, s2
adc rh, s3
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[11]
adc rl, s4
adc rh, s5
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[11]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[9 11 13 15]*q11 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q11
add rl, s0
adc rh, s1
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q11
adc rl, s2
adc rh, s3
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q11
adc rl, s4
adc rh, s5
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q11
adc rl, s6
adc rh, s7
adc $0, s8
movq s9, r3 #result[3]
#vpinsrq $1, s9, T1xmm, T1xmm #result[3]
#pinsrq $1, s9, T1xmm #result[3]
##################################################################################################
### ###
### 3rd_4: ###
### A[8-15]*B[12] + M[8-15]*q12 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[12] #####
xorq s9, s9
vmovq A2xmm, bi #A[12]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[12]
add rl, s0
adc rh, s1
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[12]
adc rl, s2
adc rh, s3
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[12]
adc rl, s4
adc rh, s5
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[12]
adc rl, s6
adc rh, s7
adc $0, s8
##### q12 #####
movq q12, q
##### M[8 10 12 14]*q12 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q12
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q12
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q12
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q12
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[12] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[12]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[12]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[12]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[12]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[9 11 13 15]*q12 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q12
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q12
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q12
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q12
adc rl, s7
adc rh, s8
adc $0, s9
movq s0, r4 #result[4]
#vpinsrq $0, s0, T2xmm, T2xmm #result[4]
#pinsrq $0, s0, T2xmm #result[4]
##################################################################################################
### ###
### 3rd_5: ###
### A[8-15]*B[13] + M[8-15]*q13 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[13] #####
xorq s0, s0
vmovq B2xmm, bi #A[13]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[13]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[13]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[13]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[13]
adc rl, s7
adc rh, s8
adc $0, s9
##### q13 #####
movq q13, q
##### M[8 10 12 14]*q13 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q13
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q13
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q13
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q13
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[13] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[13]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[13]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[13]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[13]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[9 11 13 15]*q13 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q13
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q13
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q13
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q13
adc rl, s8
adc rh, s9
adc $0, s0
movq s1, r5 #result[5]
#vpinsrq $1, s1, T2xmm, T2xmm #result[5]
#pinsrq $1, s1, T2xmm #result[5]
##################################################################################################
### ###
### 3rd_6: ###
### A[8-15]*B[14] + M[8-15]*q14 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[14] #####
xorq s1, s1
vmovq A3xmm, bi #A[14]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[14]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[14]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[14]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[14]
adc rl, s8
adc rh, s9
adc $0, s0
##### q14 #####
movq q14, q
##### M[8 10 12 14]*q14 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q14
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q14
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q14
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q14
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[14] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[14]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[14]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[14]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[14]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[9 11 13 15]*q14 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q14
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q14
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q14
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q14
adc rl, s9
adc rh, s0
adc $0, s1
movq s2, r6 #result[6]
#vpinsrq $0, s2, T3xmm, T3xmm #result[6]
#pinsrq $0, s2, T3xmm #result[6]
##################################################################################################
### ###
### 3rd_7: ###
### A[8-15]*B[15] + M[8-15]*q15 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[15] #####
xorq s2, s2
vmovq B3xmm, bi #A[15]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[15]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[15]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[15]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[15]
adc rl, s9
adc rh, s0
adc $0, s1
##### q15 #####
movq q15, q
##### M[8 10 12 14]*q15 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q15
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q15
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q15
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q15
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[15] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[15]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[15]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[15]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[15]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[9 11 13 15]*q15 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q15
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q15
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q15
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q15
adc rl, s0
adc rh, s1
adc $0, s2
movq s3, r7 #result[7]
#vpinsrq $1, s3, T3xmm, T3xmm #result[7]
#pinsrq $1, s3, T3xmm #result[7]
##################################################################################################
### reverse M ###
vshufpd $0x05, M0, M0, M0 #imm=0101
vshufpd $0x05, M1, M1, M1 #imm=0101
vshufpd $0x05, M2, M2, M2 #imm=0101
vshufpd $0x05, M3, M3, M3 #imm=0101
vshufpd $0x05, T0, T0, T0 #imm=0101
vshufpd $0x05, T1, T1, T1 #imm=0101
vshufpd $0x05, T2, T2, T2 #imm=0101
vshufpd $0x05, T3, T3, T3 #imm=0101
##################################################################################################
### ###
### 3rd part END ###
### ###
### low high ###
### ###
### s4 s5 s6 s7 s8 s9 s0 s1 s2 ###
### ###
##################################################################################################
.endm
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.macro montsqu_last_movq
##################################################################################################
### ###
### last part: reduce and store result ###
### ###
### sum 102=8+94 ###
### ###
##################################################################################################
### ###
### last_arrange_vector ###
### sum 8 ###
### ###
###########################################################
##################################################################################################
### ###
### reduce ###
### sum 94=4+62+28 ###
### ###
###########################################################
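### hedged sketch of the flow below (branchy, not constant-time):
###   while (carry word s2 != 0)  R -= M;  # the two guarded subtractions
###   snapshot R into zmm19:zmm18;
###   R -= M;                              # trial subtraction
###   if (borrow) restore R from the snapshot;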
xorq rh, rh
movq s2, rh
subq $1, rh #CF=1 iff the carry word s2 is zero
#jb .montsqu_last_end
jb 1f #no overflow word, skip this subtraction of M
#.montsqu_last_sub_1:
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
sbbq $0, s2
xorq rh, rh
movq s2, rh
subq $1, rh #CF=1 iff s2 is zero after the first subtraction
#jb .montsqu_last_end
jb 1f
#.montsqu_last_sub_2:
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
sbbq $0, s2
#.montsqu_last_end:
1:
/*
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, r8
vmovq r8, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r1, r9
vmovq r9, T3xmm #R[1]
vblendpd $0x1, T3, A1, A1 #imm=0001
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r2, r10
vmovq r10, T3xmm #R[2]
vblendpd $0x1, T3, A2, A2 #imm=0001
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r3, r11
vmovq r11, T3xmm #R[3]
vblendpd $0x1, T3, A3, A3 #imm=0001
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r4, r12
vmovq r12, T3xmm #R[4]
vblendpd $0x1, T3, B0, B0 #imm=0001
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r5, r13
vmovq r13, T3xmm #R[5]
vblendpd $0x1, T3, B1, B1 #imm=0001
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r6, r14
vmovq r14, T3xmm #R[6]
vblendpd $0x1, T3, B2, B2 #imm=0001
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
vmovq r7, T3xmm #R[7] #q15 rbp
vblendpd $0x1, T3, B3, B3 #imm=0001
//*/
/*
###################################################################
#### for all reg exp ####
### B3 can be used ###
vshufpd $0xF, B3, B2, B2 #1111
vmovq r8, B3xmm #R[8]
vblendpd $0x1, B3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, r8
vmovq r8, B3xmm #R[0]
vblendpd $0x1, B3, A0, A0 #imm=0001
vmovq r9, B3xmm #R[9]
vblendpd $0x1, B3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r1, r9
vmovq r9, B3xmm #R[1]
vblendpd $0x1, B3, A1, A1 #imm=0001
vmovq r10, B3xmm #R[10]
vblendpd $0x1, B3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r2, r10
vmovq r10, B3xmm #R[2]
vblendpd $0x1, B3, A2, A2 #imm=0001
vmovq r11, B3xmm #R[11]
vblendpd $0x1, B3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r3, r11
vmovq r11, B3xmm #R[3]
vblendpd $0x1, B3, A3, A3 #imm=0001
vmovq r12, B3xmm #R[12]
vblendpd $0x1, B3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r4, r12
vmovq r12, B3xmm #R[4]
vblendpd $0x1, B3, B0, B0 #imm=0001
vmovq r13, B3xmm #R[13]
vblendpd $0x1, B3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r5, r13
vmovq r13, B3xmm #R[5]
vblendpd $0x1, B3, B1, B1 #imm=0001
vmovq r15, B3xmm #R[15]
movq r7, r15
vpinsrq $0x1, r15, B3xmm, B3xmm #R[7]
vpermq $0x10, B3, B3 #imm=0100
vblendpd $0x5, B3, B2, B3 #imm=0101
vperm2i128 $0x1, B3, B3, B3 #imm=1
vshufpd $0x05, B2, B2, B2 #imm=0101
pinsrq $0x0, r14, B2xmm #R[14]
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r6, r14
pinsrq $0x0, r14, B2xmm #R[6]
*/
vpxorq %zmm18, %zmm18, %zmm18
vpxorq %zmm19, %zmm19, %zmm19
vpxorq %zmm20, %zmm20, %zmm20
movq r0, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r1, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r2, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r3, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r4, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r5, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r6, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r7, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
vmovq r8, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r9, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r10, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r11, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r12, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r13, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r14, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r15, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
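# hedged note: valignq $1 shifts the destination vector down one qword
# and feeds the new value in at the top, so these movq/vmovq/valignq
# triples act as a shift register that snapshots r0-r15 into zmm19:zmm18
# without spilling the (potentially key-dependent) values to memory.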
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
jb 3f
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
vblendpd $0x3, T3, T2, T3 #imm=0011
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
jmp 4f
3:
vpxorq %zmm20, %zmm20, %zmm20
vmovq %xmm19, rl
movq rl, r0
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r1
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r2
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r3
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r4
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r5
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r6
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r7
valignq $1, %zmm19, %zmm20, %zmm19
vpxorq %zmm20, %zmm20, %zmm20
vmovq %xmm18, r8
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r9
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r10
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r11
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r12
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r13
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r14
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r15
valignq $1, %zmm18, %zmm20, %zmm18
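# the pre-subtraction snapshot has now been rotated back out of
# zmm19:zmm18 into r0-r15, undoing the trial subtraction before the
# repack below.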
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
vblendpd $0x3, T3, T2, T3 #imm=0011
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
4:
##################################################################################################
### ###
### last part END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
.endm
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
##################################################################################################
.globl montsqu1024
.type montsqu1024, @function
.align 64
montsqu1024:
#.macro montsqu1024
##################################################################################################
### ###
### montsqu1024: 1st 2nd 3rd last ###
### ###
### sum 2554=576+1248+628+102 ###
### ###
##################################################################################################
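### hedged usage sketch (operand layout taken from the macros above):
###   inputs:  A in A0-A3/B0-B3, modulus M in M0-M3/T0-T3, n0 in a GPR
###   output:  A*A*2^-1024 mod M packed back into A0-A3 (see END banner)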
# movq %rsp, rsp #mm7
/*
montmul_1st
montmul_2nd
montmul_3rd
montmul_last
*/
montsqu_1st_movq
montsqu_2nd_movq
montsqu_3rd_movq
montsqu_last_movq
/*
//montmul_1st
//montmul_1st_movq
//first
pinsrq $0, s8, A0xmm #R[0]
pinsrq $0, s9, A1xmm #R[1]
pinsrq $0, s0, A2xmm #R[2]
pinsrq $0, s1, A3xmm #R[3]
pinsrq $0, s2, B0xmm #R[4]
pinsrq $0, s3, B1xmm #R[5]
pinsrq $0, s4, B2xmm #R[6]
pinsrq $0, s5, B3xmm #R[7]
//montmul_2nd
//Second
pinsrq $0, s6, A0xmm #R[0]
pinsrq $0, s7, A1xmm #R[1]
pinsrq $0, s8, A2xmm #R[2]
pinsrq $0, s9, A3xmm #R[3]
pinsrq $0, s0, B0xmm #R[4]
pinsrq $0, s1, B1xmm #R[5]
pinsrq $0, s2, B2xmm #R[6]
pinsrq $0, s3, B3xmm #R[7]
//montmul_3rd
//third
pinsrq $0, s4, A0xmm #R[0]
pinsrq $0, s5, A1xmm #R[1]
pinsrq $0, s6, A2xmm #R[2]
pinsrq $0, s7, A3xmm #R[3]
pinsrq $0, s8, B0xmm #R[4]
pinsrq $0, s9, B1xmm #R[5]
pinsrq $0, s0, B2xmm #R[6]
pinsrq $0, s1, B3xmm #R[7]
*/
# montmul_1st
# movq rsp, %rsp #mm7 ## the old version had this
##################################################################################################
### ###
### montsqu1024 END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
#.endm
ret
.size montsqu1024, .-montsqu1024
|
LoCryptEn/Key-security | 17,343 | Register-bound/RSAIn_Register/Kernel/montexp_AES.S | #include "montexp.S"
#include "aesni.S"
.file "montexp_AES.S"
.text
##################################################################################################
### ###
### montexp1024_AES_p: load exp argument ###
### Dec argument ###
### Call montexp ###
### Enc result ###
### Store result ###
### ###
##################################################################################################
##################################################################################################
### ###
### exp arg: 0-1023 :p :M ###
### 1024-2047 :Cp :A ###
### 2048-3071 :RRp :B ###
### 3072-4095 :dmp1 :T ###
### 4096-4159 :p0 :n0 ###
### ###
##################################################################################################
.globl montexp1024_AES_p
.type montexp1024_AES_p, @function
.align 64
montexp1024_AES_p:
##################################################################################################
### ###
### Load and Dec ###
### ###
### sum ###
### ###
##################################################################################################
### restore rsi ###
movq %mm0, %rsi
push %rsi
### load arg ###
#####################################################################
###########AES_DEC###########
vpxorq %ymm1, %ymm1, %ymm1
movq $0x0123456789ABCDEF, %rax
vmovq %rax, %xmm1
valignq $1, %ymm0, %ymm1, %ymm0
movq $0xFEDCBA9876543210, %rax
vmovq %rax, %xmm1
valignq $3, %ymm0, %ymm1, %ymm0
vmovdqu64 %zmm15, %zmm7 # save %zmm15 (restored below) -- do not forget!
### load p0 to M0 and Dec ###
vmovdqu 512(%rsi), M0xmm #M0xmm is xmm8
vmovdqu M0xmm, %xmm15
aes_dec
vmovdqu %xmm15, M0xmm
### mov p0 from M0 to n0 ###
### the high 64 bits are zero padding ###
vmovq M0xmm, %rax
movq %rax, n0
vmovdqu64 (%rsi), %zmm16
vmovdqu64 64(%rsi), %zmm17
mov $0x03, %eax # 0000 0011
kmovd %eax, %k1
mov $0x0C, %eax # 0000 1100
kmovd %eax, %k2
mov $0x30, %eax # 0011 0000
kmovd %eax, %k3
mov $0xC0, %eax # 1100 0000
kmovd %eax, %k4
vpcompressq %zmm16,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k1}
vpcompressq %zmm16,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k2}
vpcompressq %zmm16,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k3}
vpcompressq %zmm16,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k4}
vpcompressq %zmm17,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k1}
vpcompressq %zmm17,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k2}
vpcompressq %zmm17,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k3}
vpcompressq %zmm17,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k4}
vmovdqu64 %zmm7 ,%zmm15
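# Editor's note (added): the vpcompressq / aes_dec / vpexpandq pattern above
# decrypts one 512-bit register two qwords (one AES block) at a time.  A
# hedged C-intrinsics sketch, with aes_dec128() standing in for the aes_dec
# macro:
/*
#include <immintrin.h>

extern __m128i aes_dec128(__m128i block);    // assumed block-decrypt helper

// decrypt the qword pair selected by `pair` (0x03, 0x0C, 0x30 or 0xC0)
static __m512i dec_pair(__m512i v, __mmask8 pair)
{
    __m512i low = _mm512_maskz_compress_epi64(pair, v); // pair -> lanes 0,1
    __m128i blk = aes_dec128(_mm512_castsi512_si128(low));
    return _mm512_mask_expand_epi64(v, pair,            // put the pair back
                                    _mm512_castsi128_si512(blk));
}
*/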
### load p to A B ###
vpxorq %zmm0, %zmm0, %zmm0
valignq $0x00,%zmm16 ,%zmm0, %zmm0{%k1}{z} #shift 0*64
vpxorq %zmm1, %zmm1, %zmm1
valignq $0x02,%zmm16 ,%zmm1, %zmm1{%k1}{z} #shift 2*64
vpxorq %zmm2, %zmm2, %zmm2
valignq $0x04,%zmm16,%zmm2, %zmm2{%k1}{z} #shift 4*64
vpxorq %zmm3, %zmm3, %zmm3
valignq $0x06,%zmm16,%zmm3, %zmm3{%k1}{z} #shift 6*64
vpxorq %zmm4, %zmm4, %zmm4
valignq $0x00,%zmm17,%zmm4, %zmm4{%k1}{z} #shift 0*64
vpxorq %zmm5, %zmm5, %zmm5
valignq $0x02,%zmm17,%zmm5, %zmm5{%k1}{z} #shift 2*64
vpxorq %zmm6, %zmm6, %zmm6
valignq $0x04,%zmm17, %zmm6, %zmm6{%k1}{z} #shift 4*64
vpxorq %zmm7, %zmm7, %zmm7
valignq $0x06,%zmm17,%zmm7, %zmm7{%k1}{z} #shift 6*64
### load p0 to M0 and Dec ###
addq $512, %rsi
#vmovdqu (%rsi), M0xmm
### mov p0 from M0 to n0 ###
### the high 64 bits are zero padding ###
#vmovq M0xmm, %rax
#movq %rax, n0
### rerange p to M ###
vperm2i128 $0x20, B0, A0, M0
vperm2i128 $0x20, B1, A1, M1
vperm2i128 $0x20, B2, A2, M2
vperm2i128 $0x20, B3, A3, M3
vpermq $0xD8, M0, M0 #imm=3120
vpermq $0xD8, M1, M1 #imm=3120
vpermq $0xD8, M2, M2 #imm=3120
vpermq $0xD8, M3, M3 #imm=3120
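# Editor's note (added): vperm2i128 $0x20 followed by vpermq $0xD8 interleaves
# one qword from each source register; sketch (C intrinsics, name assumed):
/*
#include <immintrin.h>

// returns { a0, b0, a1, b1 } for a = { a0, a1, .. }, b = { b0, b1, .. }
static __m256i interleave_low(__m256i a, __m256i b)
{
    __m256i m = _mm256_permute2x128_si256(a, b, 0x20);  // { a0, a1, b0, b1 }
    return _mm256_permute4x64_epi64(m, 0xD8);           // pick qwords 0,2,1,3
}
*/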
#####################################################################
### load RRp to A B and Dec ###
popq %rsi
pushq %rsi
addq $256, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
### rerange RRp to B ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x0A, A0, T3, B0 #imm=1010
vshufpd $0x00, A0, T3, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A1, T3, B1 #imm=1010
vshufpd $0x00, A1, T3, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A2, T3, B2 #imm=1010
vshufpd $0x00, A2, T3, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A3, T3, B3 #imm=1010
vshufpd $0x00, A3, T3, A3 #imm=0000
### set A to 1 ###
movq $1, %rax
vmovq %rax, T3xmm
vblendpd $0x01, T3, A0, A0 #imm=0001
#####################################################################
### Compute R=1*RRp*R^(-1)mod p ###
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
#### compute ####
call montmul1024
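# Editor's note (added): montmul(1, RRp) = 1*R^2*R^-1 = R mod p, i.e. the
# constant 1 in Montgomery form.  The montmul1024 calls in this function
# follow the usual enter/compute/leave pattern sketched below (reusing the
# CIOS montmul() sketched earlier in this document; the exact operands
# differ slightly between the p and q paths):
/*
void domain_demo(uint64_t out[NW], const uint64_t c[NW],
                 const uint64_t RR[NW], const uint64_t m[NW], uint64_t n0)
{
    uint64_t one[NW] = {1}, R[NW], cR[NW];
    montmul(R,  one, RR, m, n0);   // this call:  1*RR*R^-1 = R mod m
    montmul(cR, c,   RR, m, n0);   // next call:  c*RR*R^-1 = c*R mod m
    // ... montexp1024: exponentiation inside the Montgomery domain ...
    montmul(out, cR, one, m, n0);  // final call: leave the domain (*R^-1)
}
*/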
#####################################################################
### Enc and Store R ###
#### store A ####
# store_B
store_A # A holds R before it is stored
vmovq %xmm31, %rcx
vmovdqu64 A0, (%rcx)
vmovdqu64 A1, 32(%rcx)
vmovdqu64 A2, 64(%rcx)
vmovdqu64 A3, 96(%rcx)
vmovdqu64 B0, 128(%rcx)
vmovdqu64 B1, 160(%rcx)
vmovdqu64 B2, 192(%rcx)
vmovdqu64 B3, 224(%rcx)
##################################################################
##################################################################
### load Cp*R to A B and Dec ###
popq %rsi
pushq %rsi
addq $128, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
subq $128, %rsi
### rerange Cp*R to A ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### Compute CpR=CpR * Rp*R^(-1)mod p ###
# Cp*R is in A; the result CpR is stored back in A
call montmul1024 # Cp*R is now in A
vmovq %xmm31, %rcx
addq $256, %rcx
vmovdqu64 A0, (%rcx)
vmovdqu64 A1, 32(%rcx)
vmovdqu64 A2, 64(%rcx)
vmovdqu64 A3, 96(%rcx)
vmovdqu64 B0, 128(%rcx)
vmovdqu64 B1, 160(%rcx)
vmovdqu64 B2, 192(%rcx)
vmovdqu64 B3, 224(%rcx)
##################################################################
#### transfer %rsi ####
popq %rsi
movq %rsi, %mm0
### compute Cp^dmp1 mod p ###
#result in A
call montexp1024
##################################################################
##################################################################
#### set B to 1 ####
vpxor T3, T3, T3
movq $1, %rax
vmovq %rax, T3xmm
vshufpd $0x00, T3, A0, A0 #imm=0000
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
#result*1*R(-1) mod p#
call montmul1024
##################################################################
#####################################################################
### Enc Result ###
#### prepare result for enc ####
##################################################################################################
### ###
### montexp1024_AES_p END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
ret
.size montexp1024_AES_p, .-montexp1024_AES_p
##################################################################################################
### ###
### montexp1024_AES_q: load exp argument ###
### Dec argument ###
### Call montexp ###
### Enc result ###
### Store result ###
### ###
##################################################################################################
##################################################################################################
### ###
### exp arg: 0-1023 :q :M ###
### 1024-2047 :Cq :A ###
### 2048-3071 :RRq :B ###
### 3072-4095 :dmq1 :T ###
### 4096-4159 :q0 :n0 ###
### ###
##################################################################################################
.globl montexp1024_AES_q
.type montexp1024_AES_q, @function
.align 64
montexp1024_AES_q:
##################################################################################################
### ###
### Load and Dec ###
### ###
### sum ###
### ###
##################################################################################################
### restore rsi ###
movq %mm0, %rsi
push %rsi
subq $640, %rsi
### newly added: aes_dec ###
########## aes_dec end ###############
addq $640, %rsi
### newly added: aes_dec ###
vpxorq %ymm1, %ymm1, %ymm1
movq $0x0123456789ABCDEF, %rax
vmovq %rax, %xmm1
valignq $1, %ymm0, %ymm1, %ymm0
movq $0xFEDCBA9876543210, %rax
vmovq %rax, %xmm1
valignq $3, %ymm0, %ymm1, %ymm0
vmovdqu64 %zmm15, %zmm7 # save %zmm15 (restored below) -- do not forget!
### load q0 to M0 and Dec ###
vmovdqu 512(%rsi), M0xmm #M0xmm is xmm8
vmovdqu M0xmm, %xmm15
aes_dec
vmovdqu %xmm15, M0xmm
### mov q0 from M0 to n0 ###
### the high 64 bits are zero padding ###
vmovq M0xmm, %rax
movq %rax, n0
vmovdqu64 (%rsi), %zmm16
vmovdqu64 64(%rsi), %zmm17
mov $0x03, %eax # 0000 0011
kmovd %eax, %k1
mov $0x0C, %eax # 0000 1100
kmovd %eax, %k2
mov $0x30, %eax # 0011 0000
kmovd %eax, %k3
mov $0xC0, %eax # 1100 0000
kmovd %eax, %k4
vpcompressq %zmm16,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k1}
vpcompressq %zmm16,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k2}
vpcompressq %zmm16,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k3}
vpcompressq %zmm16,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm16 {%k4}
vpcompressq %zmm17,%zmm15 {%k1}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k1}
vpcompressq %zmm17,%zmm15 {%k2}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k2}
vpcompressq %zmm17,%zmm15 {%k3}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k3}
vpcompressq %zmm17,%zmm15 {%k4}{z}
aes_dec
vpexpandq %zmm15,%zmm17 {%k4}
vmovdqu64 %zmm7 ,%zmm15
### load q to A B ###
vpxorq %zmm0, %zmm0, %zmm0
valignq $0x00,%zmm16 ,%zmm0, %zmm0{%k1}{z} #shift 0*64
vpxorq %zmm1, %zmm1, %zmm1
valignq $0x02,%zmm16 ,%zmm1, %zmm1{%k1}{z} #shift 2*64
vpxorq %zmm2, %zmm2, %zmm2
valignq $0x04,%zmm16,%zmm2, %zmm2{%k1}{z} #shift 4*64
vpxorq %zmm3, %zmm3, %zmm3
valignq $0x06,%zmm16,%zmm3, %zmm3{%k1}{z} #shift 6*64
vpxorq %zmm4, %zmm4, %zmm4
valignq $0x00,%zmm17,%zmm4, %zmm4{%k1}{z} #shift 0*64
vpxorq %zmm5, %zmm5, %zmm5
valignq $0x02,%zmm17,%zmm5, %zmm5{%k1}{z} #shift 2*64
vpxorq %zmm6, %zmm6, %zmm6
valignq $0x04,%zmm17, %zmm6, %zmm6{%k1}{z} #shift 4*64
vpxorq %zmm7, %zmm7, %zmm7
valignq $0x06,%zmm17,%zmm7, %zmm7{%k1}{z} #shift 6*64
########## aes_dec end###############
### load q0 to M0 and Dec ###
addq $512, %rsi
#vmovdqu (%rsi), M0xmm
### mov q0 from M0 to n0 ###
### the high 64 bits are zero padding ###
#vmovq M0xmm, %rax
#movq %rax, n0
### rerange q to M ###
vperm2i128 $0x20, B0, A0, M0
vperm2i128 $0x20, B1, A1, M1
vperm2i128 $0x20, B2, A2, M2
vperm2i128 $0x20, B3, A3, M3
vpermq $0xD8, M0, M0 #imm=3120
vpermq $0xD8, M1, M1 #imm=3120
vpermq $0xD8, M2, M2 #imm=3120
vpermq $0xD8, M3, M3 #imm=3120
#####################################################################
### load RRq to A B and Dec ###
popq %rsi
pushq %rsi
addq $256, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
### rerange RRq to B ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x0A, A0, T3, B0 #imm=1010
vshufpd $0x00, A0, T3, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A1, T3, B1 #imm=1010
vshufpd $0x00, A1, T3, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A2, T3, B2 #imm=1010
vshufpd $0x00, A2, T3, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x0A, A3, T3, B3 #imm=1010
vshufpd $0x00, A3, T3, A3 #imm=0000
### set A to 1 ###
movq $1, %rax
vmovq %rax, T3xmm
vblendpd $0x01, T3, A0, A0 #imm=0001
#####################################################################
### Compute R=1*RRq*R^(-1)mod q ###
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
#### compute ####
call montmul1024
#####################################################################
### Enc and Store R ###
#### store A ####
# store_B
store_A
vmovq %xmm31, %rcx
vmovdqu64 A0, (%rcx)
vmovdqu64 A1, 32(%rcx)
vmovdqu64 A2, 64(%rcx)
vmovdqu64 A3, 96(%rcx)
vmovdqu64 B0, 128(%rcx)
vmovdqu64 B1, 160(%rcx)
vmovdqu64 B2, 192(%rcx)
vmovdqu64 B3, 224(%rcx)
##################################################################
##################################################################
### load Cq to A B and Dec ###
popq %rsi
pushq %rsi
addq $128, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
subq $128, %rsi
### rerange Cq to A ###
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
### Compute CqR=Cq*RRq*R^(-1)mod q ###
# Cq is in A; the result CqR is stored back in A
call montmul1024
vmovq %xmm31, %rcx
addq $256, %rcx
vmovdqu64 A0, (%rcx)
vmovdqu64 A1, 32(%rcx)
vmovdqu64 A2, 64(%rcx)
vmovdqu64 A3, 96(%rcx)
vmovdqu64 B0, 128(%rcx)
vmovdqu64 B1, 160(%rcx)
vmovdqu64 B2, 192(%rcx)
vmovdqu64 B3, 224(%rcx)
##################################################################
#### transfer %rsi ####
popq %rsi
movq %rsi, %mm0
### compute Cq^dmq1 mod q ###
#result in A
call montexp1024
##################################################################
##################################################################
#### set B to 1 ####
vpxor T3, T3, T3
movq $1, %rax
vmovq %rax, T3xmm
vshufpd $0x00, T3, A0, A0 #imm=0000
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
#result*1*R(-1) mod q#
call montmul1024
#####################################################################
### Enc Result ###
#### prepare result for enc ####
vpermq $0x08, A0, A0 #imm=0020
vpermq $0x08, A1, A1 #imm=0020
vpermq $0x08, A2, A2 #imm=0020
vpermq $0x08, A3, A3 #imm=0020
vpermq $0x08, B0, B0 #imm=0020
vpermq $0x08, B1, B1 #imm=0020
vpermq $0x08, B2, B2 #imm=0020
vpermq $0x08, B3, B3 #imm=0020
##################################################################################################
### ###
### montexp1024_AES_q END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
ret
.size montexp1024_AES_q, .-montexp1024_AES_q
|
LoCryptEn/Key-security | 63,351 | Register-bound/RSAIn_Register/Kernel/call_rrmontmul.S | .file "call_rrmontmul.s"
.text
# register aliases for %zmm0 - %zmm31
.set A0, %zmm0
.set B0, %zmm1
.set R0, %zmm3
.set T, %zmm4
#TT is a temporary register
.set TT, %zmm9
.set A1, %zmm5
.set A2, %zmm6
.set A3, %zmm7
.set A4, %zmm8
.set B1, %zmm11
.set B2, %zmm12
.set B3, %zmm13
.set B4, %zmm14
.set R5, %zmm15
.set R6, %zmm16
.set R7, %zmm17
.set RR8, %zmm18
.set RR9, %zmm23
.set RR10, %zmm24
.set RR11, %zmm25
.set RR12, %zmm26
.set RR13, %zmm27
.set RR14, %zmm28
.set R1, %zmm19
.set R2, %zmm20
.set R3, %zmm21
.set R4, %zmm22
.set A0xmm, %xmm0
.set A1xmm, %xmm5
.set A2xmm, %xmm6
.set A3xmm, %xmm7
.set A4xmm, %xmm8
.set B0xmm, %xmm1
.set B1xmm, %xmm11
.set B2xmm, %xmm12
.set B3xmm, %xmm13
.set B4xmm, %xmm14
## .set M0xmm, %xmm2
.set R0xmm, %xmm3
.set R1xmm, %xmm19
.set R2xmm, %xmm20
.set R3xmm, %xmm21
.set R4xmm, %xmm22
.set R5xmm, %xmm15
.set R6xmm, %xmm16
.set R7xmm, %xmm17
.set R8xmm, %xmm18
.set R9xmm, %xmm23
.set R10xmm, %xmm24
.set R11xmm, %xmm25
.set R12xmm, %xmm26
.set R13xmm, %xmm27
.set R14xmm, %xmm28
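# Editor's summary (added): A0-A4/B0-B4 hold the two 37-digit operands,
# R0-R4 the sliding accumulator, R5-R7/RR8/RR9 the emitted low product
# digits, and RR10-RR13 the repacked 64-bit result; R8xmm-R14xmm are the
# xmm views of RR8-RR14.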
.global call_rrmontmul
.type call_rrmontmul, @function
.align 64
call_rrmontmul:
# save caller registers on the stack (restored before ret)
subq $128, %rsp
movq %r12, (%rsp)
movq %r13, 8(%rsp)
movq %rdi, 16(%rsp)
movq %rsi, 24(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 40(%rsp)
movq %rbx, 48(%rsp)
movq %r14, 56(%rsp)
movq %r15, 64(%rsp)
movq %rbp, 72(%rsp)
#zero register
vpxorq A0, A0, A0
vpxorq B0, B0, B0
vpxorq R0, R0, R0
vpxorq A1, A1, A1
vpxorq B1, B1, B1
vpxorq R1, R1, R1
vpxorq A2, A2, A2
vpxorq B2, B2, B2
vpxorq R2, R2, R2
vpxorq A3, A3, A3
vpxorq B3, B3, B3
vpxorq R3, R3, R3
vpxorq A4, A4, A4
vpxorq B4, B4, B4
vpxorq R4, R4, R4
vpxorq T, T, T
vpxorq TT, TT, TT
vpxorq R5, R5, R5
vpxorq R6, R6, R6
vpxorq R7, R7, R7
vpxorq RR8, RR8, RR8
vpxorq RR9, RR9, RR9
vpxorq RR10, RR10, RR10
vpxorq RR11, RR11, RR11
vpxorq RR12, RR12, RR12
vpxorq RR13, RR13, RR13
xorq %rax, %rax
xorq %rbp, %rbp
xorq %r9, %r9
xorq %r10, %r10
xorq %r12, %r12
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
xorq %rbx, %rbx
# load A B
vmovdqu64 (%rsi), A0
vmovdqu64 (%rdx), B0
vmovdqu64 64(%rsi), A1
vmovdqu64 64(%rdx), B1
vmovdqu64 128(%rsi), A2
vmovdqu64 128(%rdx), B2
vmovdqu64 192(%rsi), A3
vmovdqu64 192(%rdx), B3
vmovdqu64 256(%rsi), A4
vmovdqu64 256(%rdx), B4
vpxorq %zmm10, %zmm10, %zmm10
#load result R, used to hold the running digits X0~Xq
vmovdqu64 (%rdi), R0
vmovdqu64 64(%rdi), R1
vmovdqu64 128(%rdi), R2
vmovdqu64 192(%rdi), R3
vmovdqu64 256(%rdi), R4
### start big integer multiply ###
# The first round b0
#mul: multiply %rax by an r/m64; the result is stored in %rdx (high 64 bits) and %rax (low 64 bits)
#add: add src1 to src2; the result is stored in src2
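# Editor's note (added): the 37 unrolled rounds below implement the
# operand-scanning schoolbook multiply sketched in C inside this comment.
# Digits are 28-bit values stored one per 64-bit lane (vpmuludq multiplies
# the low 32 bits of each lane), so lane sums never overflow; the sketch
# and its names are illustrative only.
/*
#include <stdint.h>
#define ND 37                          // base-2^28 digits per operand

static void bigmul(uint64_t x[2 * ND], const uint64_t a[ND],
                   const uint64_t b[ND])
{
    uint64_t r[ND] = {0};              // sliding accumulator (R0..R4)
    for (int i = 0; i < ND; i++) {     // one "round" per b digit
        for (int j = 0; j < ND; j++)   // vpbroadcastq + vpmuludq + vpaddq
            r[j] += a[j] * b[i];       // 28x28-bit products: 37 sums fit in 64 bits
        x[i] = r[0];                   // "store R0[0]" into R5..RR9
        for (int j = 0; j + 1 < ND; j++)
            r[j] = r[j + 1];           // the valignq cascade: shift down
        r[ND - 1] = 0;
    }
    for (int j = 0; j < ND; j++)       // the remaining high digits stay in
        x[ND + j] = r[j];              // R0..R4 (see the comment further down)
}
*/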
vmovq B0xmm, %r14
# broadcast b0
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
#The first round finish
#The second round b1
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b1
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
#The second round finish
# 3 round start b2
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b2
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 3 round finish
# 4 round start b3
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b3
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 4 round finish
# 5 round start b4
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b4
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 5 round finish
# 6 round start b5
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b5
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 6 round finish
# 7 round start b6
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b6
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 7 round finish
# 8 round start b7
valignq $1, B0, %zmm10, B0
vmovq B0xmm, %r14
# broadcast b7
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R5, %zmm10, R5
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 8 round finish
# 9 round start b8
vmovq B1xmm, %r14
# broadcast b8
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 9 round finish
# 10 round start b9
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b9
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 10 round finish
# 11 round start b10
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b10
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 11 round finish
# 12 round start b11
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b11
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 12 round finish
# 13 round start b12
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b12
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 13 round finish
# 14 round start b13
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b13
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 14 round finish
# 15 round start b14
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b14
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 15 round finish
# 16 round start b15
valignq $1, B1, %zmm10, B1
vmovq B1xmm, %r14
# broadcast b15
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R6, %zmm10, R6
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 16 round finish
# 17 round start b16
vmovq B2xmm, %r14
# broadcast b16
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 17 round finish
# 18 round start b17
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b17
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 18 round finish
# 19 round start b18
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b18
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 19 round finish
# 20 round start b19
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b19
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 20 round finish
# 21 round start b20
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b20
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 21 round finish
# 22 round start b21
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b21
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 22 round finish
# 23 round start b22
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b22
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 23 round finish
# 24 round start b23
valignq $1, B2, %zmm10, B2
vmovq B2xmm, %r14
# broadcast b23
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, R7, %zmm10, R7
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 24 round finish
# 25 round start b24
vmovq B3xmm, %r14
# broadcast b24
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 25 round finish
# 26 round start b25
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b25
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 26 round finish
# 27 round start b26
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b26
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 27 round finish
# 28 round start b27
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b27
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 28 round finish
# 29 round start b28
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b28
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 29 round finish
# 30 round start b29
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b29
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 30 round finish
# 31 round start b30
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b30
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 31 round finish
# 32 round start b31
valignq $1, B3, %zmm10, B3
vmovq B3xmm, %r14
# broadcast b31
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR8, %zmm10, RR8
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 32 round finish
# 33 round start b32
vmovq B4xmm, %r14
# broadcast b32
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR9, %zmm10, RR9
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 33 round finish
# 34 round start b33
valignq $1, B4, %zmm10, B4
vmovq B4xmm, %r14
# broadcast b33
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR9, %zmm10, RR9
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 34 round finish
# 35 round start b34
valignq $1, B4, %zmm10, B4
vmovq B4xmm, %r14
# broadcast b34
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR9, %zmm10, RR9
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 35 round finish
# 36 round start b35
valignq $1, B4, %zmm10, B4
vmovq B4xmm, %r14
# broadcast b35
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR9, %zmm10, RR9
vpxorq %zmm10, %zmm10, %zmm10
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# 36 round finish
#The last round : 37 round start b36
valignq $1, B4, %zmm10, B4
vmovq B4xmm, %r14
# broadcast b36
vpbroadcastq %r14, T
#compute for j=1~q, X[j] = X[j] + A[j] * T
vpmuludq A0, T, TT
vpaddq R0, TT, R0
vpmuludq A1, T, TT
vpaddq R1, TT, R1
vpmuludq A2, T, TT
vpaddq R2, TT, R2
vpmuludq A3, T, TT
vpaddq R3, TT, R3
vpmuludq A4, T, TT
vpaddq R4, TT, R4
#store R0[0]
vmovq R0xmm, %rax
vmovq %rax, %xmm10
valignq $1, RR9, %zmm10, RR9
vpxorq %zmm10, %zmm10, %zmm10
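# editor's note: three extra shifts follow so that the five digits stored in
# RR9 (rounds 33-37) end up in lanes 0-4, matching the eight-digit banks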
valignq $1, RR9, %zmm10, RR9
valignq $1, RR9, %zmm10, RR9
valignq $1, RR9, %zmm10, RR9
# Xq...X1=Xq...X1>>64
valignq $1, R0, R1, R0
valignq $1, R1, R2, R1
valignq $1, R2, R3, R2
valignq $1, R3, R4, R3
valignq $1, R4, %zmm10, R4
# The last round : 37 round finish
### The big integer multiply is finished ###
## The digit sequence is R5 R6 R7 RR8 RR9 R0 R1 R2 R3 R4 (R4 holds 4 digits and RR9 holds 5 digits)
## RR10 RR11 RR12 RR13 are used to store the results
### resolve the redundant base-2^28 digits and repack them into base 2^64
# %rbp carries the running carry between digits
# In every step, 28-bit digits are packed into 64-bit words; %r11 assembles the current word.
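# Editor's note (added): a compact C sketch of this repacking loop
# (hypothetical name; the unrolled code below additionally spreads the
# output words across RR10..RR13 with vmovq + valignq):
/*
#include <stdint.h>

static void repack_28_to_64(uint64_t *out, const uint64_t *v, int nv)
{
    uint64_t carry = 0, word = 0;
    int bits = 0, o = 0;
    for (int i = 0; i < nv; i++) {
        carry += v[i];                        // add %rbx, %rbp
        uint64_t digit = carry & 0x0fffffff;  // keep the low 28 bits
        carry >>= 28;                         // shr $28, %rbp
        word |= digit << bits;                // shl + add into %r11
        bits += 28;
        if (bits >= 64) {                     // word complete: emit it and
            out[o++] = word;                  // keep the spilled high bits
            bits -= 64;
            word = bits ? digit >> (28 - bits) : 0;
        }
    }
    if (bits) out[o] = word;
}
*/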
xorq %r11, %r11
xorq %rbp, %rbp
#v0 rbx for u0
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
movq %rax, %r11
shr $28, %rbp
#v1 rbx for u1
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $28, %rax
add %rax, %r11
shr $28, %rbp
#v2 rbx for u2
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000000ff, %rax
shl $56, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000fffff00, %rax
shr $8, %rax
movq %rax, %r11
shr $28, %rbp
#v3 rbx for u3
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $20, %rax
add %rax, %r11
shr $28, %rbp
#v4 rbx for u4
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000ffff, %rax
shl $48, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000fff0000, %rax
shr $16, %rax
movq %rax, %r11
shr $28, %rbp
#v5 rbx for u5
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $12, %rax
add %rax, %r11
shr $28, %rbp
#v6 rbx for u6
vmovq R5xmm, %rbx
valignq $1, R5, %zmm10, R5
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000ffffff, %rax
shl $40, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000f000000, %rax
shr $24, %rax
movq %rax, %r11
shr $28, %rbp
#v7 rbx for u7
vmovq R5xmm, %rbx
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $4, %rax
add %rax, %r11
shr $28, %rbp
#v8 rbx for u8
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $32, %rax
add %rax, %r11
shr $28, %rbp
#v9 rbx for u9
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000000f, %rax
shl $60, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000ffffff0, %rax
shr $4, %rax
movq %rax, %r11
shr $28, %rbp
#v10 rbx for u10
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $24, %rax
add %rax, %r11
shr $28, %rbp
#v11 rbx for u11
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000000fff, %rax
shl $52, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000ffff000, %rax
shr $12, %rax
movq %rax, %r11
shr $28, %rbp
#v12 rbx for u12
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $16, %rax
add %rax, %r11
shr $28, %rbp
#v13 rbx for u13
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000fffff, %rax
shl $44, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000ff00000, %rax
shr $20, %rax
movq %rax, %r11
shr $28, %rbp
#v14 rbx for u14
vmovq R6xmm, %rbx
valignq $1, R6, %zmm10, R6
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $8, %rax
add %rax, %r11
shr $28, %rbp
#v15 rbx for u15
vmovq R6xmm, %rbx
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $36, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
shr $28, %rbp
#v16 rbx for u16
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
movq %rax, %r11
shr $28, %rbp
#v17 rbx for u17
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $28, %rax
add %rax, %r11
shr $28, %rbp
#v18 rbx for u18
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000000ff, %rax
shl $56, %rax
add %rax, %r11
vmovq %r11, R11xmm
valignq $1, RR10, RR11, RR10
movq %rbp, %rax
and $0x000000000fffff00, %rax
shr $8, %rax
movq %rax, %r11
shr $28, %rbp
#v19 rbx for u19
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $20, %rax
add %rax, %r11
shr $28, %rbp
#v20 rbx for u20
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000ffff, %rax
shl $48, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
movq %rbp, %rax
and $0x000000000fff0000, %rax
shr $16, %rax
movq %rax, %r11
shr $28, %rbp
#v21 rbx for u21
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $12, %rax
add %rax, %r11
shr $28, %rbp
#v22 rbx for u22
vmovq R7xmm, %rbx
valignq $1, R7, %zmm10, R7
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000ffffff, %rax
shl $40, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
movq %rbp, %rax
and $0x000000000f000000, %rax
shr $24, %rax
movq %rax, %r11
shr $28, %rbp
#v23 rbx for u23
vmovq R7xmm, %rbx
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $4, %rax
add %rax, %r11
shr $28, %rbp
#v24 rbx for u24
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $32, %rax
add %rax, %r11
shr $28, %rbp
#v25 rbx for u25
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000000f, %rax
shl $60, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
movq %rbp, %rax
and $0x000000000ffffff0, %rax
shr $4, %rax
movq %rax, %r11
shr $28, %rbp
#v26 rbx for u26
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $24, %rax
add %rax, %r11
shr $28, %rbp
#v27 rbx for u27
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000000fff, %rax
shl $52, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
movq %rbp, %rax
and $0x000000000ffff000, %rax
shr $12, %rax
movq %rax, %r11
shr $28, %rbp
#v28 rbx for u28
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $16, %rax
add %rax, %r11
shr $28, %rbp
#v29 rbx for u29
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000fffff, %rax
shl $44, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
movq %rbp, %rax
and $0x000000000ff00000, %rax
shr $20, %rax
movq %rax, %r11
shr $28, %rbp
#v30 rbx for u30
vmovq R8xmm, %rbx
valignq $1, RR8, %zmm10, RR8
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $8, %rax
add %rax, %r11
shr $28, %rbp
#v31 rbx for u31
vmovq R8xmm, %rbx
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $36, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
shr $28, %rbp
#v32 rbx for u32
vmovq R9xmm, %rbx
valignq $1, RR9, %zmm10, RR9
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
movq %rax, %r11
shr $28, %rbp
#v33 rbx for u33
vmovq R9xmm, %rbx
valignq $1, RR9, %zmm10, RR9
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $28, %rax
add %rax, %r11
shr $28, %rbp
#v34 rbx for u34
vmovq R9xmm, %rbx
valignq $1, RR9, %zmm10, RR9
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000000ff, %rax
shl $56, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
movq %rbp, %rax
and $0x000000000fffff00, %rax
shr $8, %rax
movq %rax, %r11
shr $28, %rbp
#v35 rbx for u35
vmovq R9xmm, %rbx
valignq $1, RR9, %zmm10, RR9
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $20, %rax
add %rax, %r11
shr $28, %rbp
#v36 rbx for u36
vmovq R9xmm, %rbx
valignq $1, RR9, %zmm10, RR9
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000ffff, %rax
shl $48, %rax
add %rax, %r11
vmovq %r11, R12xmm
valignq $1, RR11, RR12, RR11
vpxorq %zmm10, %zmm10, %zmm10
movq %rbp, %rax
and $0x000000000fff0000, %rax
shr $16, %rax
movq %rax, %rbp
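# editor's note: unlike the other word boundaries, the 12 leftover bits go to
# %rbp here and %r11 is not reset; the same deviation recurs at the v52/v53
# boundary below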
#v37 rbx for u37
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $12, %rax
add %rax, %r11
shr $28, %rbp
#v38 rbx for u38
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000ffffff, %rax
shl $40, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
movq %rbp, %rax
and $0x000000000f000000, %rax
shr $24, %rax
movq %rax, %r11
shr $28, %rbp
#v39 rbx for u39
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $4, %rax
add %rax, %r11
shr $28, %rbp
#v40 rbx for u40
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $32, %rax
add %rax, %r11
shr $28, %rbp
#v41 rbx for u41
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000000f, %rax
shl $60, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
movq %rbp, %rax
and $0x000000000ffffff0, %rax
shr $4, %rax
movq %rax, %r11
shr $28, %rbp
#v42 rbx for u42
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $24, %rax
add %rax, %r11
shr $28, %rbp
#v43 rbx for u43
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000000fff, %rax
shl $52, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
movq %rbp, %rax
and $0x000000000ffff000, %rax
shr $12, %rax
movq %rax, %r11
shr $28, %rbp
#v44 rbx for u44
vmovq R0xmm, %rbx
valignq $1, R0, %zmm10, R0
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $16, %rax
add %rax, %r11
shr $28, %rbp
#v45 rbx for u45
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000fffff, %rax
shl $44, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
movq %rbp, %rax
and $0x000000000ff00000, %rax
shr $20, %rax
movq %rax, %r11
shr $28, %rbp
#v46 rbx for u46
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $8, %rax
add %rax, %r11
shr $28, %rbp
#v47 rbx for u47
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $36, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
shr $28, %rbp
#v48 rbx for u48
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
movq %rax, %r11
shr $28, %rbp
#v49 rbx for u49
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $28, %rax
add %rax, %r11
shr $28, %rbp
#v50 rbx for u50
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000000ff, %rax
shl $56, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
movq %rbp, %rax
and $0x000000000fffff00, %rax
shr $8, %rax
movq %rax, %r11
shr $28, %rbp
#v51 rbx for u51
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $20, %rax
add %rax, %r11
shr $28, %rbp
#v52 rbx for u52
vmovq R1xmm, %rbx
valignq $1, R1, %zmm10, R1
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000ffff, %rax
shl $48, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
vpxorq %zmm10, %zmm10, %zmm10
movq %rbp, %rax
and $0x000000000fff0000, %rax
shr $16, %rax
movq %rax, %rbp
#v53 rbx for u53
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $12, %rax
add %rax, %r11
shr $28, %rbp
#v54 rbx for u54
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000ffffff, %rax
shl $40, %rax
add %rax, %r11
vmovq %r11, R13xmm
valignq $1, RR12, RR13, RR12
movq %rbp, %rax
and $0x000000000f000000, %rax
shr $24, %rax
movq %rax, %r11
shr $28, %rbp
#v55 rbx for u55
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $4, %rax
add %rax, %r11
shr $28, %rbp
#v56 rbx for u56
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $32, %rax
add %rax, %r11
shr $28, %rbp
#v57 rbx for u57
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000000f, %rax
shl $60, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
movq %rbp, %rax
and $0x000000000ffffff0, %rax
shr $4, %rax
movq %rax, %r11
shr $28, %rbp
#v58 rbx for u58
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $24, %rax
add %rax, %r11
shr $28, %rbp
#v59 rbx for u59
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000000fff, %rax
shl $52, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
movq %rbp, %rax
and $0x000000000ffff000, %rax
shr $12, %rax
movq %rax, %r11
shr $28, %rbp
#v60 rbx for u60
vmovq R2xmm, %rbx
valignq $1, R2, %zmm10, R2
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $16, %rax
add %rax, %r11
shr $28, %rbp
#v61 rbx for u61
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000fffff, %rax
shl $44, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
movq %rbp, %rax
and $0x000000000ff00000, %rax
shr $20, %rax
movq %rax, %r11
shr $28, %rbp
#v62 rbx for u62
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $8, %rax
add %rax, %r11
shr $28, %rbp
#v63 rbx for u63
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $36, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
shr $28, %rbp
#v64 rbx for u64
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
movq %rax, %r11
shr $28, %rbp
#v65 rbx for u65
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $28, %rax
add %rax, %r11
shr $28, %rbp
#v66 rbx for u66
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x00000000000000ff, %rax
shl $56, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
movq %rbp, %rax
and $0x000000000fffff00, %rax
shr $8, %rax
movq %rax, %r11
shr $28, %rbp
#v67 rbx for u67
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $20, %rax
add %rax, %r11
shr $28, %rbp
#v68 rbx for u68
vmovq R3xmm, %rbx
valignq $1, R3, %zmm10, R3
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000ffff, %rax
shl $48, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
movq %rbp, %rax
and $0x000000000fff0000, %rax
shr $16, %rax
movq %rax, %rbp
#v69 rbx for u69
vmovq R4xmm, %rbx
valignq $1, R4, %zmm10, R4
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $12, %rax
add %rax, %r11
shr $28, %rbp
#v70 rbx for u70
vmovq R4xmm, %rbx
valignq $1, R4, %zmm10, R4
add %rbx, %rbp
movq %rbp, %rax
and $0x0000000000ffffff, %rax
shl $40, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
movq %rbp, %rax
and $0x000000000f000000, %rax
shr $24, %rax
movq %rax, %r11
shr $28, %rbp
#v71 rbx for u71
vmovq R4xmm, %rbx
valignq $1, R4, %zmm10, R4
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $4, %rax
add %rax, %r11
shr $28, %rbp
#v72 rbx for u72
vmovq R4xmm, %rbx
valignq $1, R4, %zmm10, R4
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000fffffff, %rax
shl $32, %rax
add %rax, %r11
shr $28, %rbp
#v73 rbx for u73
vmovq R4xmm, %rbx
valignq $1, R4, %zmm10, R4
add %rbx, %rbp
movq %rbp, %rax
and $0x000000000000000f, %rax
shl $60, %rax
add %rax, %r11
vmovq %r11, R14xmm
valignq $1, RR13, RR14, RR13
# transfer finish
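# For reference, a minimal C sketch of the repacking performed above: each
# 64-bit source word u[i] holds one 28-bit digit plus pending carries, and
# the digits are packed densely into 64-bit result words (in the code above
# the packed words are pushed into ZMM lanes via vmovq/valignq instead of
# being written to memory). Names u/out/nlimbs are illustrative only.
/*
#include <stdint.h>

void repack28(const uint64_t *u, uint64_t *out, int nlimbs)
{
    uint64_t carry = 0, word = 0;
    int bits = 0, w = 0;
    for (int i = 0; i < nlimbs; i++) {
        uint64_t t = u[i] + carry;       // propagate the pending carry
        uint64_t d = t & 0x0FFFFFFF;     // reduced 28-bit digit
        carry = t >> 28;
        word |= d << bits;               // place digit at its bit offset
        bits += 28;
        if (bits >= 64) {                // word full: emit, keep the spill
            out[w++] = word;
            bits -= 64;
            word = bits ? d >> (28 - bits) : 0;
        }
    }
    if (bits)
        out[w] = word;
}
*/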
/*
# load R into rdi
movq %rbx, (%rdi)
vmovdqu64 R0, 8(%rdi)
vmovdqu64 R1, 72(%rdi)
vmovdqu64 R2, 136(%rdi)
vmovdqu64 R3, 200(%rdi)
vmovdqu64 R4, 264(%rdi)
*/
/*
# load A into rdi
vmovdqu64 A0, (%rdi)
vmovdqu64 A1, 64(%rdi)
vmovdqu64 A2, 128(%rdi)
vmovdqu64 A3, 192(%rdi)
vmovdqu64 A4, 256(%rdi)
movq %rbp, 320(%rdi)
*/
#load RR10 ~ RR13 into rdi
vmovdqu64 RR10, (%rdi)
vmovdqu64 RR11, 64(%rdi)
vmovdqu64 RR12, 128(%rdi)
vmovdqu64 RR13, 192(%rdi)
## movq %rbp, 128(%rdi)
## recovery ##
movq (%rsp), %r12
movq 8(%rsp), %r13
movq 48(%rsp), %rbx
movq 56(%rsp), %r14
movq 64(%rsp), %r15
movq 72(%rsp), %rbp
movq 16(%rsp), %rdi
movq 24(%rsp), %rsi
movq 32(%rsp), %rdx
movq 40(%rsp), %rcx
addq $128, %rsp
ret
.size call_rrmontmul, .-call_rrmontmul
|
LoCryptEn/Key-security | 6,461 | Register-bound/RSAIn_Register/Kernel/VIRSA.S |
//#include "kernel.h"
#include "RSA_function.S"
.file "VIRSA.S"
.text
##################################################################################################
### ###
###                                          VIRSA                                             ###
### ###
### ###
### ###
##################################################################################################
### function prototype ###
# int VIRSA(unsigned long long *R, unsigned long long *Arg) #
# R %rdi
# Arg %rsi
#######################################################################################################################
#########################################################################################################
# #
# P:Arg+0 0-127 128-255 256-383 384-511 512-527 #
# p Cp (CpR) RRp dmp1 p0(128bit cypher) #
# #
#########################################################################################################
# #
# q:Arg+640 0-127 128-255 256-383 384-511 512-527 #
# q Cq (CqR) RRq dmq1 q0(128bit cypher) #
# #
#########################################################################################################
# #
# iqmp:Arg+1280 0-127 #
# iqmp #
# #
#########################################################################################################
#######################################################################################################################
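# For reference, a minimal C sketch of the CRT recombination (Garner's
# method) that steps 1-6 below implement; illustrative only, with toy
# 64-bit operands standing in for the register-resident 1024-bit values:
/*
#include <stdint.h>

static uint64_t mod_mul(uint64_t a, uint64_t b, uint64_t m)
{ return (uint64_t)((unsigned __int128)a * b % m); }

static uint64_t mod_exp(uint64_t b, uint64_t e, uint64_t m)
{
    uint64_t r = 1 % m;
    while (e) {
        if (e & 1) r = mod_mul(r, b, m);
        b = mod_mul(b, b, m);
        e >>= 1;
    }
    return r;
}

uint64_t rsa_crt(uint64_t c, uint64_t p, uint64_t q,
                 uint64_t dmp1, uint64_t dmq1, uint64_t iqmp)
{
    uint64_t rq = mod_exp(c % q, dmq1, q);        // 1. Cq^dmq1 mod q
    uint64_t rp = mod_exp(c % p, dmp1, p);        // 2. Cp^dmp1 mod p
    uint64_t h  = (rp + p - (rq % p)) % p;        // 3. Rp - Rq mod p
    h = mod_mul(h, iqmp, p);                      // 4. h * qinv mod p
    return h * q + rq;                            // 5. h*q + Rq (toy width)
}
*/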
.globl VIRSA
.type VIRSA, @function
.align 64
VIRSA:
#########################################################
### prologue ###
#########################################################
### zero all YMM registers ###
vzeroall
### 1. stack for storing Rp and Rq ###
subq $512, %rsp
### 2. store scalar registers ###
movq %rbx, (%rsp)
movq %rbp, 8(%rsp)
movq %r12, 16(%rsp)
movq %r13, 24(%rsp)
movq %r14, 32(%rsp)
movq %r15, 40(%rsp)
### 3. store the two argument pointers ###
movq %rdi, 48(%rsp)
movq %rsi, 64(%rsp)
vmovq %rdi, %xmm31
#########################################################
### VIRSA computation ###
#########################################################
### inputs and outputs are all ciphertexts encrypted with AES-256 ###
### the AES-256 key is stored in the debug registers ###
### 1. Cq^(dmq1) mod q ###
movq 64(%rsp), %rsi # address of A in %rsi now
addq $640, %rsi # A+640 is address of q in %rsi now
movq %rsi, %mm0 # address of q in %mm0
call montexp1024_AES_q
##store result in zmm register##
valignq $2, %zmm24, %zmm0, %zmm24
valignq $2, %zmm24, %zmm1, %zmm24
valignq $2, %zmm24, %zmm2, %zmm24
valignq $2, %zmm24, %zmm3, %zmm24
valignq $2, %zmm25, %zmm4, %zmm25
valignq $2, %zmm25, %zmm5, %zmm25
valignq $2, %zmm25, %zmm6, %zmm25
valignq $2, %zmm25, %zmm7, %zmm25
vmovdqu64 %zmm24, %zmm21
vmovdqu64 %zmm25, %zmm27
### 2. Cp^(dmp1) mod p ###
movq 64(%rsp), %rsi
movq %rsi, %mm0
call montexp1024_AES_p
### 3. Rp-Rq mod p ###
movq 64(%rsp), %rsi
store_A
vpxorq %zmm26, %zmm26, %zmm26
vmovdqu64 %xmm24, %xmm0
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm24, %xmm1
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm24, %xmm2
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm24, %xmm3
valignq $2, %zmm24, %zmm26, %zmm24
vmovdqu64 %xmm25, %xmm4
valignq $2, %zmm25, %zmm26, %zmm25
vmovdqu64 %xmm25, %xmm5
valignq $2, %zmm25, %zmm26, %zmm25
vmovdqu64 %xmm25, %xmm6
valignq $2, %zmm25, %zmm26, %zmm25
vmovdqu64 %xmm25, %xmm7
valignq $2, %zmm25, %zmm26, %zmm25
vpermq $0x54, A0, A0 #imm=1110
vpermq $0xE0, A0, A0 #imm=3200
vpermq $0x54, A1, A1 #imm=1110
vpermq $0xE0, A1, A1 #imm=3200
vpermq $0x54, A2, A2 #imm=1110
vpermq $0xE0, A2, A2 #imm=3200
vpermq $0x54, A3, A3 #imm=1110
vpermq $0xE0, A3, A3 #imm=3200
vpermq $0x54, B0, B0 #imm=1110
vpermq $0xE0, B0, B0 #imm=3200
vpermq $0x54, B1, B1 #imm=1110
vpermq $0xE0, B1, B1 #imm=3200
vpermq $0x54, B2, B2 #imm=1110
vpermq $0xE0, B2, B2 #imm=3200
vpermq $0x54, B3, B3 #imm=1110
vpermq $0xE0, B3, B3 #imm=3200
restore_A
call sub_mp_mq
vpermq $0x54, A0, A0 #imm=1110
vpermq $0xE0, A0, A0 #imm=3200
vpermq $0x54, A1, A1 #imm=1110
vpermq $0xE0, A1, A1 #imm=3200
vpermq $0x54, A2, A2 #imm=1110
vpermq $0xE0, A2, A2 #imm=3200
vpermq $0x54, A3, A3 #imm=1110
vpermq $0xE0, A3, A3 #imm=3200
vpermq $0x54, B0, B0 #imm=1110
vpermq $0xE0, B0, B0 #imm=3200
vpermq $0x54, B1, B1 #imm=1110
vpermq $0xE0, B1, B1 #imm=3200
vpermq $0x54, B2, B2 #imm=1110
vpermq $0xE0, B2, B2 #imm=3200
vpermq $0x54, B3, B3 #imm=1110
vpermq $0xE0, B3, B3 #imm=3200
store_A
### 4. R*qinv mod p ### (qinv = R * qinv mod p)
movq 64(%rsp), %rsi
call mul_qinv
### 5. h*q+Rq ###
movq 64(%rsp), %rsi
addq $640, %rsi #for q
call mul_h_q_add_Rq
### 6. store result ###
vmovq %xmm31, %rdi
#########################################################
### epilogue ###
#########################################################
### 1. zero YMM registers ###
vzeroall
### 2. zero MMX registers ###
pxor %mm0, %mm0
pxor %mm1, %mm1
pxor %mm2, %mm2
pxor %mm3, %mm3
pxor %mm4, %mm4
pxor %mm5, %mm5
pxor %mm6, %mm6
pxor %mm7, %mm7
### 3. zero scalar registers ###
xorq %rax, %rax
xorq %rbx, %rbx
xorq %rcx, %rcx
xorq %rdx, %rdx
xorq %rdi, %rdi
xorq %rsi, %rsi
xorq %rbp, %rbp
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
### 4. restore scalar registers ###
movq (%rsp), %rbx
movq 8(%rsp), %rbp
movq 16(%rsp), %r12
movq 24(%rsp), %r13
movq 32(%rsp), %r14
movq 40(%rsp), %r15
### 5. stack balance ###
addq $512, %rsp
#########################################################
### End ###
#########################################################
ret
.size VIRSA, .-VIRSA
##################################################################################################
### ###
###                                          VIRSA                                             ###
### ###
### ###
##################################################################################################
|
LoCryptEn/Key-security | 16,395 | Register-bound/RSAIn_Register/Kernel/montexp.S | #include "montmul.S"
#include "montsqu.S"
#include "aesni.S"
//#include "montmul_raw.S"
//#include "montsqu_raw.S"
.file "montexp.S"
.data
my_var: .quad 0x123344
.text
##################################################################################################
### ###
### montexp(A,Exp,n0): ###
### ###
### R=A^Exp mod M ###
### ###
### ###
##################################################################################################
.macro store_B
##################################################################################################
#### store B ####
vshufpd $0x05, A0, A0, T3 #imm=0101
vmovq T3xmm, %rax #q0=B[0] s0=B[8]
movq %rax, q0
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s0
vshufpd $0x05, A1, A1, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q1
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s1
vshufpd $0x05, A2, A2, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q2
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s2
vshufpd $0x05, A3, A3, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q3
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s3
vshufpd $0x05, B0, B0, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q4
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s4
vshufpd $0x05, B1, B1, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q5
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s5
vshufpd $0x05, B2, B2, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q6
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s6
vshufpd $0x05, B3, B3, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q7
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s7
## new add ##
#vpxorq %zmm28, %zmm28, %zmm28
movq q0, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q1, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q2, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q3, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q4, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q5, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q6, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q7, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
vmovq s0, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s1, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s2, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s3, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s4, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s5, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s6, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s7, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
##################################################################################################
.endm
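# The store_B macro above (and the restore_B/store_A/restore_A counterparts
# below) keeps spilled 64-bit words inside ZMM registers rather than memory,
# using VALIGNQ as an 8-lane shift register: each push drops lane 0 and
# appends the new word as lane 7, so after eight pushes the oldest word is
# back in lane 0, which the restore macros rely on when draining
# %zmm28/%zmm29. A minimal intrinsics sketch of one push (push_lane is an
# illustrative name, not part of this code base):
/*
#include <immintrin.h>
#include <stdint.h>

static inline __m512i push_lane(__m512i stack, uint64_t x)
{
    // x into lane 0 of a fresh vector, upper lanes zeroed
    __m512i nv = _mm512_zextsi128_si512(_mm_cvtsi64_si128((long long)x));
    // concat nv:stack, shift right one qword: drops stack lane 0 and
    // inserts x as the new lane 7 -- same effect as
    //   vmovq %rax, %xmm30 ; valignq $1, %zmm28, %zmm30, %zmm28
    return _mm512_alignr_epi64(nv, stack, 1);
}
*/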
.macro restore_B
##################################################################################################
#### restore B ####
vpxorq %zmm30, %zmm30, %zmm30
vmovq %xmm29, s0
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s1
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s2
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s3
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s4
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s5
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s6
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s7
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %rax, %xmm23
vmovq %xmm28, %rax
movq %rax, q0
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q1
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q2
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q3
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q4
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q5
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q6
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q7
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm23, %rax
vpinsrq $1, s0, T3xmm, T3xmm
movq q0, s0 #q0=B[0] s0=B[8]
vpinsrq $0, s0, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A0, A0 #imm=1010
vpinsrq $1, s1, T3xmm, T3xmm
movq q1, s1
vpinsrq $0, s1, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B0, B0 #imm=1010
vpinsrq $1, s2, T3xmm, T3xmm
movq q2, s2
vpinsrq $0, s2, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A1, A1 #imm=1010
vpinsrq $1, s3, T3xmm, T3xmm
movq q3, s3
vpinsrq $0, s3, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B1, B1 #imm=1010
vpinsrq $1, s4, T3xmm, T3xmm
movq q4, s4
vpinsrq $0, s4, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A2, A2 #imm=1010
vpinsrq $1, s5, T3xmm, T3xmm
movq q5, s5
vpinsrq $0, s5, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B2, B2 #imm=1010
vpinsrq $1, s6, T3xmm, T3xmm
movq q6, s6
vpinsrq $0, s6, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A3, A3 #imm=1010
vpinsrq $1, s7, T3xmm, T3xmm
movq q7, s7
vpinsrq $0, s7, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B3, B3 #imm=1010
/*
vpinsrq $1, s0, T3xmm, T3xmm
movq q0, s0 #q0=B[0] s0=B[8]
vpinsrq $0, s0, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A0, A0 #imm=1010
vpinsrq $1, s1, T3xmm, T3xmm
movq q1, s1
vpinsrq $0, s1, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A1, A1 #imm=1010
vpinsrq $1, s2, T3xmm, T3xmm
movq q2, s2
vpinsrq $0, s2, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A2, A2 #imm=1010
vpinsrq $1, s3, T3xmm, T3xmm
movq q3, s3
vpinsrq $0, s3, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A3, A3 #imm=1010
vpinsrq $1, s4, T3xmm, T3xmm
movq q4, s4
vpinsrq $0, s4, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B0, B0 #imm=1010
vpinsrq $1, s5, T3xmm, T3xmm
movq q5, s5
vpinsrq $0, s5, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B1, B1 #imm=1010
vpinsrq $1, s6, T3xmm, T3xmm
movq q6, s6
vpinsrq $0, s6, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B2, B2 #imm=1010
vpinsrq $1, s7, T3xmm, T3xmm
movq q7, s7
vpinsrq $0, s7, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B3, B3 #imm=1010
*/
##################################################################################################
.endm
.macro store_A
##################################################################################################
#### store A ####
vmovq A0xmm, %rax #q0=A[0] s0=A[8]
movq %rax, q0
vperm2i128 $0x01, A0, A0, A0
vmovq A0xmm, s0
vperm2i128 $0x01, A0, A0, A0
vmovq B0xmm, %rax
movq %rax, q1
vperm2i128 $0x01, B0, B0, B0
vmovq B0xmm, s1
vperm2i128 $0x01, B0, B0, B0
vmovq A1xmm, %rax
movq %rax, q2
vperm2i128 $0x01, A1, A1, A1
vmovq A1xmm, s2
vperm2i128 $0x01, A1, A1, A1
vmovq B1xmm, %rax
movq %rax, q3
vperm2i128 $0x01, B1, B1, B1
vmovq B1xmm, s3
vperm2i128 $0x01, B1, B1, B1
vmovq A2xmm, %rax
movq %rax, q4
vperm2i128 $0x01, A2, A2, A2
vmovq A2xmm, s4
vperm2i128 $0x01, A2, A2, A2
vmovq B2xmm, %rax
movq %rax, q5
vperm2i128 $0x01, B2, B2, B2
vmovq B2xmm, s5
vperm2i128 $0x01, B2, B2, B2
vmovq A3xmm, %rax
movq %rax, q6
vperm2i128 $0x01, A3, A3, A3
vmovq A3xmm, s6
vperm2i128 $0x01, A3, A3, A3
vmovq B3xmm, %rax
movq %rax, q7
vperm2i128 $0x01, B3, B3, B3
vmovq B3xmm, s7
vperm2i128 $0x01, B3, B3, B3
## new add ##
#vpxorq %zmm28, %zmm28, %zmm28
movq q0, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q1, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q2, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q3, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q4, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q5, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q6, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q7, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
vmovq s0, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s1, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s2, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s3, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s4, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s5, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s6, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s7, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
##################################################################################################
.endm
.macro restore_A
##################################################################################################
#### restore A ####
vpxorq %zmm30, %zmm30, %zmm30
vmovq %xmm29, s0
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s1
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s2
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s3
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s4
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s5
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s6
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s7
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %rax, %xmm23
vmovq %xmm28, %rax
movq %rax, q0
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q1
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q2
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q3
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q4
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q5
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q6
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q7
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm23, %rax
vpinsrq $1, s0, T3xmm, T3xmm #q0=A[0] s0=A[8]
movq q0, s0
vpinsrq $0, s0, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A0, A0 #imm=0101
vpinsrq $1, s1, T3xmm, T3xmm
movq q1, s1
vpinsrq $0, s1, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B0, B0 #imm=0101
vpinsrq $1, s2, T3xmm, T3xmm
movq q2, s2
vpinsrq $0, s2, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A1, A1 #imm=0101
vpinsrq $1, s3, T3xmm, T3xmm
movq q3, s3
vpinsrq $0, s3, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B1, B1 #imm=0101
vpinsrq $1, s4, T3xmm, T3xmm
movq q4, s4
vpinsrq $0, s4, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A2, A2 #imm=0101
vpinsrq $1, s5, T3xmm, T3xmm
movq q5, s5
vpinsrq $0, s5, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B2, B2 #imm=0101
vpinsrq $1, s6, T3xmm, T3xmm
movq q6, s6
vpinsrq $0, s6, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A3, A3 #imm=0101
vpinsrq $1, s7, T3xmm, T3xmm
movq q7, s7
vpinsrq $0, s7, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B3, B3 #imm=0101
##################################################################################################
.endm
### exp inverse order, for square and multiplication algorithm ###
##################################################################
#########################################
# T0 T1 T2 T3 #
# #
# T[i] 9 11 13 15 #
# D[i] 8 10 12 14 #
# 1 3 5 7 #
# 0 2 4 6 #
#########################################
#########################################################
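# For reference, a minimal C sketch of the right-to-left (LSB-first)
# square-and-multiply schedule montexp1024 follows; mont_mul/mont_squ stand
# in for montmul1024/montsqu1024, all values are in Montgomery form, and
# the fe1024 type and prototypes are illustrative assumptions:
/*
#include <stdint.h>

typedef struct { uint64_t w[16]; } fe1024;

void mont_mul(fe1024 *r, const fe1024 *a, const fe1024 *b);  // assumed
void mont_squ(fe1024 *a);                                    // assumed

void mont_exp(fe1024 *r, fe1024 base, const uint64_t e[16])
{
    // *r must start as 1 in Montgomery form (R mod M)
    for (int i = 0; i < 1024; i++) {
        if ((e[i / 64] >> (i % 64)) & 1)
            mont_mul(r, r, &base);   // R = R * A mod M
        mont_squ(&base);             // A = A^2   mod M
    }
}
*/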
.globl montexp1024
.type montexp1024, @function
.align 64
montexp1024:
#.macro montexp1024
##################################################################################################
### ###
### montexp1024 ###
### ###
### sum ###
### ###
##################################################################################################
movq %mm0, %rsi
vmovq %xmm31, %rcx
vmovdqu64 (%rcx), A0
vmovdqu64 32(%rcx), A1
vmovdqu64 64(%rcx), A2
vmovdqu64 96(%rcx), A3
vmovdqu64 128(%rcx), B0
vmovdqu64 160(%rcx), B1
vmovdqu64 192(%rcx), B2
vmovdqu64 224(%rcx), B3
store_A
vmovq %xmm31, %rcx
addq $256, %rcx
vmovdqu64 (%rcx), A0
vmovdqu64 32(%rcx), A1
vmovdqu64 64(%rcx), A2
vmovdqu64 96(%rcx), A3
vmovdqu64 128(%rcx), B0
vmovdqu64 160(%rcx), B1
vmovdqu64 192(%rcx), B2
vmovdqu64 224(%rcx), B3
mov %rsi, my_var(%rip)
mov $0x01, %eax # 0000 0001
kmovd %eax, %k1
mov $0x02, %eax # 0000 0010
kmovd %eax, %k2
mov $0x03, %eax # 0000 0011
kmovd %eax, %k3
#mov $0x0F, %eax # 0000 1111
#kmovd %eax, %k4
mov $0xF0, %eax # 1111 0000
kmovd %eax, %k5
mov $0x0F, %eax # 0000 1111
kmovd %eax, %k6
mov $0xC0, %eax # 1100 0000
kmovd %eax, %k7
.rept 8
valignq $0x04,%zmm4,%zmm0, %zmm0{%k5}{z} #shift 4*64
valignq $0x04,%zmm5,%zmm1, %zmm1{%k5}{z} #shift 4*64
valignq $0x04,%zmm6,%zmm2, %zmm2{%k5}{z} #shift 4*64
valignq $0x04,%zmm7,%zmm3, %zmm3{%k5}{z} #shift 4*64
vmovdqu64 %zmm0,%zmm4{%k5}
vmovdqu64 %zmm1,%zmm5{%k5}
vmovdqu64 %zmm2,%zmm6{%k5}
vmovdqu64 %zmm3,%zmm7{%k5}
mov my_var(%rip), %rdx
movq 384(%rdx), %rax
addq $8, %rdx
movq 384(%rdx), %rbx
addq $8, %rdx
mov %rdx, my_var(%rip)
#8*16=128 bits; decrypt 128 bits at a time
vmovq %rax, %xmm16
vmovdqu64 %zmm16,%zmm17{%k1}{z}
vmovq %rbx, %xmm16
vpexpandq %zmm16,%zmm17{%k2}
vmovdqu64 %zmm15,%zmm16 # don't forget!!!
vmovdqu64 %zmm17,%zmm15
###########AES_DEC###########
vpxorq %ymm1, %ymm1, %ymm1
movq $0x0123456789ABCDEF, %rax
vmovq %rax, %xmm1
valignq $1, %ymm0, %ymm1, %ymm0
movq $0xFEDCBA9876543210, %rax
vmovq %rax, %xmm1
valignq $3, %ymm0, %ymm1, %ymm0
aes_dec
vmovdqu64 %zmm15,%zmm17
vmovdqu64 %zmm16,%zmm15
vmovdqu64 %zmm17,%zmm16
valignq $0x04,%zmm4,%zmm0, %zmm0{%k6}{z} #shift 4*64
valignq $0x04,%zmm5,%zmm1, %zmm1{%k6}{z} #shift 4*64
valignq $0x04,%zmm6,%zmm2, %zmm2{%k6}{z} #shift 4*64
valignq $0x04,%zmm7,%zmm3, %zmm3{%k6}{z} #shift 4*64
.rept 128
vmovq %xmm16, %rax
vpermq $0xF1,%ymm16,%ymm16 # 1111 0001: after the permute the low 64-bit lane holds the original bits [64-127]
vmovq %xmm16, %rbx
and $0x1, %rbx
shl $63, %rbx
vpermq $0xF1,%ymm16,%ymm16 # 1111 0001: after the permute the low 64-bit lane holds the original bits [64-127]
vpsrlq $1, %xmm16,%xmm16{%k3}
vmovq %rbx ,%xmm17
vporq %xmm16,%xmm17,%xmm16{%k1}
and $0x1, %rax
subq $1, %rax
jb 7f
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
call montmul1024
store_A
7:
vmovq %xmm31, %rcx
addq $256, %rcx
vmovdqu64 (%rcx), A0
vmovdqu64 32(%rcx), A1
vmovdqu64 64(%rcx), A2
vmovdqu64 96(%rcx), A3
vmovdqu64 128(%rcx), B0
vmovdqu64 160(%rcx), B1
vmovdqu64 192(%rcx), B2
vmovdqu64 224(%rcx), B3
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
call montsqu1024
vmovq %xmm31, %rcx
addq $256, %rcx
vmovdqu64 A0, (%rcx)
vmovdqu64 A1, 32(%rcx)
vmovdqu64 A2, 64(%rcx)
vmovdqu64 A3, 96(%rcx)
vmovdqu64 B0, 128(%rcx)
vmovdqu64 B1, 160(%rcx)
vmovdqu64 B2, 192(%rcx)
vmovdqu64 B3, 224(%rcx)
xorq %rax, %rax
.endr
.endr
restore_A
##################################################################################################
### ###
### montexp1024 END ###
### ###
### result A0 A1 A2 A3 ###
### ###
##################################################################################################
#.endm
ret
.size montexp1024, .-montexp1024
|
LoCryptEn/Key-security | 5,765 | Register-bound/RSAIn_Register/Kernel/aesni.S | .file "aes.S"
.text
#ifndef AES_NI
#define AES_NI
.set rstate, %xmm0 //AES state
.set rhelp, %xmm1 //helping reg 1
.set round_key_i, %xmm2 //round key i
.set round_key_j, %xmm3 //round key j
.set mes, %xmm15 //message
.macro key_shedule r0 r1 rcon
pxor rhelp, rhelp
movdqu \r0, \r1
shufps $0x1f, \r1, rhelp
pxor rhelp, \r1
shufps $0x8c, \r1, rhelp
pxor rhelp, \r1
aeskeygenassist $\rcon, \r0, rhelp
shufps $0xff, rhelp, rhelp
pxor rhelp, \r1
.endm
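# For reference, the key_shedule macro above is the classic AESKEYGENASSIST
# expansion step for AES-128 (the shufps pair against a zeroed helper
# computes the same xor cascade as the textbook pslldq formulation). An
# equivalent intrinsics sketch, illustrative only:
/*
#include <immintrin.h>

static __m128i expand_step(__m128i k, __m128i assist)
{
    assist = _mm_shuffle_epi32(assist, 0xFF);      // broadcast rotated word
    k = _mm_xor_si128(k, _mm_slli_si128(k, 4));    // xor cascade ...
    k = _mm_xor_si128(k, _mm_slli_si128(k, 4));
    k = _mm_xor_si128(k, _mm_slli_si128(k, 4));
    return _mm_xor_si128(k, assist);
}
// usage: rk1 = expand_step(rk0, _mm_aeskeygenassist_si128(rk0, 0x01));
*/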
# ENC uses: round_key_i, round_key_j, rhelp (3 registers, not counting mes, which is needed anyway, and rstate, which is fetched directly from the debug registers)
.macro aes_enc
vmovdqu rstate, round_key_i
pxor round_key_i, mes
key_shedule round_key_i round_key_j 0x1
aesenc round_key_j, mes
key_shedule round_key_j round_key_i 0x2
aesenc round_key_i, mes
key_shedule round_key_i round_key_j 0x4
aesenc round_key_j, mes
key_shedule round_key_j round_key_i 0x8
aesenc round_key_i, mes
key_shedule round_key_i round_key_j 0x10
aesenc round_key_j, mes
key_shedule round_key_j round_key_i 0x20
aesenc round_key_i, mes
key_shedule round_key_i round_key_j 0x40
aesenc round_key_j, mes
key_shedule round_key_j round_key_i 0x80
aesenc round_key_i, mes
key_shedule round_key_i round_key_j 0x1b
aesenc round_key_j, mes
key_shedule round_key_j round_key_i 0x36
aesenclast round_key_i, mes
.endm
#Generates the i-th decryption round key. Assuming 128-bit AES, rounds run 0-10 (11 round keys in total); this routine generates rounds 1-10,
#leaving the result in round_key_i
key_shedule_for_round_ith:
# saving registers - omitted
movq %rdi, %rax #load the key_shedule iteration count into %rax
# check whether it is 0
cmpl $0, %eax
je .L_key_shedule_for_round_end_after_imc
key_shedule round_key_i round_key_j 0x1
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_j round_key_i 0x2
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_i round_key_j 0x4
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_j round_key_i 0x8
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_i round_key_j 0x10
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_j round_key_i 0x20
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_i round_key_j 0x40
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_j round_key_i 0x80
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_i round_key_j 0x1b
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end
key_shedule round_key_j round_key_i 0x36
.L_key_shedule_for_round_end:
decl %eax
cmpl $0, %eax
je .L_key_shedule_for_round_end_after_imc
movq %rdi, %rax
testb $1,%al
jz .L_no_copy_from_round_key_j_to_round_key_i
vmovdqu round_key_j, round_key_i
.L_no_copy_from_round_key_j_to_round_key_i:
movq %rdi, %rax
cmpl $10, %eax
je .L_key_shedule_for_round_end_after_imc
aesimc round_key_i, round_key_i
.L_key_shedule_for_round_end_after_imc: # rounds 0 and 10 do not need aesimc
# restoring registers - omitted
#Recovery
ret
.macro aes_dec
pushq %rax
pushq %rdi
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $10, %rdi # pass the round index
call key_shedule_for_round_ith
pxor round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $9, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $8, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $7, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $6, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $5, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $4, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $3, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $2, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $1, %rdi # pass the round index
call key_shedule_for_round_ith
aesdec round_key_i, mes
vmovdqu rstate, round_key_i
# call key_shedule_for_round_ith
movq $0, %rdi # pass the round index
call key_shedule_for_round_ith
aesdeclast round_key_i, mes
popq %rdi
popq %rax
.endm
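# For reference, an equivalent AES-128 inverse cipher with a precomputed
# schedule (illustrative only): rk[0..10] are the ENCRYPTION round keys;
# aes_dec above recomputes each one on the fly from rstate instead of
# storing them, applying AESIMC inside key_shedule_for_round_ith.
/*
#include <immintrin.h>

static __m128i aes128_dec(__m128i c, const __m128i rk[11])
{
    __m128i m = _mm_xor_si128(c, rk[10]);            // initial AddRoundKey
    for (int i = 9; i >= 1; i--)                     // rounds 9..1
        m = _mm_aesdec_si128(m, _mm_aesimc_si128(rk[i]));
    return _mm_aesdeclast_si128(m, rk[0]);           // final round
}
*/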
#endif |
LoCryptEn/Key-security | 165,226 | Register-bound/RSAIn_Register/Kernel/Comcq.S | #include "aesni.S"
#include "rsa_head.S"
.file "Comcq.S"
.text
.macro store_B
##################################################################################################
#### store B ####
vshufpd $0x05, A0, A0, T3 #imm=0101
vmovq T3xmm, %rax #q0=B[0] s0=B[8]
movq %rax, q0
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s0
vshufpd $0x05, A1, A1, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q1
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s1
vshufpd $0x05, A2, A2, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q2
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s2
vshufpd $0x05, A3, A3, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q3
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s3
vshufpd $0x05, B0, B0, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q4
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s4
vshufpd $0x05, B1, B1, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q5
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s5
vshufpd $0x05, B2, B2, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q6
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s6
vshufpd $0x05, B3, B3, T3 #imm=0101
vmovq T3xmm, %rax
movq %rax, q7
vperm2i128 $0x01, T3, T3, T3
vmovq T3xmm, s7
## new add ##
#vpxorq %zmm28, %zmm28, %zmm28
movq q0, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q1, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q2, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q3, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q4, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q5, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q6, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q7, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
vmovq s0, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s1, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s2, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s3, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s4, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s5, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s6, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s7, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
##################################################################################################
.endm
.macro restore_B
##################################################################################################
#### restore B ####
vpxorq %zmm30, %zmm30, %zmm30
vmovq %xmm29, s0
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s1
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s2
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s3
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s4
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s5
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s6
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s7
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %rax, %xmm23
vmovq %xmm28, %rax
movq %rax, q0
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q1
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q2
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q3
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q4
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q5
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q6
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q7
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm23, %rax
vpinsrq $1, s0, T3xmm, T3xmm
movq q0, s0 #q0=B[0] s0=B[8]
vpinsrq $0, s0, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A0, A0 #imm=1010
vpinsrq $1, s1, T3xmm, T3xmm
movq q1, s1
vpinsrq $0, s1, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B0, B0 #imm=1010
vpinsrq $1, s2, T3xmm, T3xmm
movq q2, s2
vpinsrq $0, s2, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A1, A1 #imm=1010
vpinsrq $1, s3, T3xmm, T3xmm
movq q3, s3
vpinsrq $0, s3, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B1, B1 #imm=1010
vpinsrq $1, s4, T3xmm, T3xmm
movq q4, s4
vpinsrq $0, s4, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A2, A2 #imm=1010
vpinsrq $1, s5, T3xmm, T3xmm
movq q5, s5
vpinsrq $0, s5, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B2, B2 #imm=1010
vpinsrq $1, s6, T3xmm, T3xmm
movq q6, s6
vpinsrq $0, s6, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A3, A3 #imm=1010
vpinsrq $1, s7, T3xmm, T3xmm
movq q7, s7
vpinsrq $0, s7, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B3, B3 #imm=1010
/*
vpinsrq $1, s0, T3xmm, T3xmm
movq q0, s0 #q0=B[0] s0=B[8]
vpinsrq $0, s0, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A0, A0 #imm=1010
vpinsrq $1, s1, T3xmm, T3xmm
movq q1, s1
vpinsrq $0, s1, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A1, A1 #imm=1010
vpinsrq $1, s2, T3xmm, T3xmm
movq q2, s2
vpinsrq $0, s2, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A2, A2 #imm=1010
vpinsrq $1, s3, T3xmm, T3xmm
movq q3, s3
vpinsrq $0, s3, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, A3, A3 #imm=1010
vpinsrq $1, s4, T3xmm, T3xmm
movq q4, s4
vpinsrq $0, s4, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B0, B0 #imm=1010
vpinsrq $1, s5, T3xmm, T3xmm
movq q5, s5
vpinsrq $0, s5, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B1, B1 #imm=1010
vpinsrq $1, s6, T3xmm, T3xmm
movq q6, s6
vpinsrq $0, s6, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B2, B2 #imm=1010
vpinsrq $1, s7, T3xmm, T3xmm
movq q7, s7
vpinsrq $0, s7, T3xmm, T3xmm
vpermq $0x62, T3, T3 #imm=1202
vblendpd $0xA, T3, B3, B3 #imm=1010
*/
##################################################################################################
.endm
.macro store_A
##################################################################################################
#### store A ####
vmovq A0xmm, %rax #q0=A[0] s0=A[8]
movq %rax, q0
vperm2i128 $0x01, A0, A0, A0
vmovq A0xmm, s0
vperm2i128 $0x01, A0, A0, A0
vmovq B0xmm, %rax
movq %rax, q1
vperm2i128 $0x01, B0, B0, B0
vmovq B0xmm, s1
vperm2i128 $0x01, B0, B0, B0
vmovq A1xmm, %rax
movq %rax, q2
vperm2i128 $0x01, A1, A1, A1
vmovq A1xmm, s2
vperm2i128 $0x01, A1, A1, A1
vmovq B1xmm, %rax
movq %rax, q3
vperm2i128 $0x01, B1, B1, B1
vmovq B1xmm, s3
vperm2i128 $0x01, B1, B1, B1
vmovq A2xmm, %rax
movq %rax, q4
vperm2i128 $0x01, A2, A2, A2
vmovq A2xmm, s4
vperm2i128 $0x01, A2, A2, A2
vmovq B2xmm, %rax
movq %rax, q5
vperm2i128 $0x01, B2, B2, B2
vmovq B2xmm, s5
vperm2i128 $0x01, B2, B2, B2
vmovq A3xmm, %rax
movq %rax, q6
vperm2i128 $0x01, A3, A3, A3
vmovq A3xmm, s6
vperm2i128 $0x01, A3, A3, A3
vmovq B3xmm, %rax
movq %rax, q7
vperm2i128 $0x01, B3, B3, B3
vmovq B3xmm, s7
vperm2i128 $0x01, B3, B3, B3
## new add ##
#vpxorq %zmm28, %zmm28, %zmm28
movq q0, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q1, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q2, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q3, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q4, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q5, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q6, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
movq q7, %rax
vmovq %rax, %xmm30
valignq $1, %zmm28, %zmm30, %zmm28
vmovq s0, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s1, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s2, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s3, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s4, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s5, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s6, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
vmovq s7, %xmm30
valignq $1, %zmm29, %zmm30, %zmm29
##################################################################################################
.endm
.macro restore_A
##################################################################################################
#### restore A ####
vpxorq %zmm30, %zmm30, %zmm30
vmovq %xmm29, s0
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s1
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s2
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s3
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s4
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s5
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s6
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %xmm29, s7
valignq $1, %zmm29, %zmm30, %zmm29
vmovq %rax, %xmm23
vmovq %xmm28, %rax
movq %rax, q0
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q1
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q2
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q3
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q4
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q5
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q6
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm28, %rax
movq %rax, q7
valignq $1, %zmm28, %zmm30, %zmm28
vmovq %xmm23, %rax
vpinsrq $1, s0, T3xmm, T3xmm #q0=A[0] s0=A[8]
movq q0, s0
vpinsrq $0, s0, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A0, A0 #imm=0101
vpinsrq $1, s1, T3xmm, T3xmm
movq q1, s1
vpinsrq $0, s1, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B0, B0 #imm=0101
vpinsrq $1, s2, T3xmm, T3xmm
movq q2, s2
vpinsrq $0, s2, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A1, A1 #imm=0101
vpinsrq $1, s3, T3xmm, T3xmm
movq q3, s3
vpinsrq $0, s3, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B1, B1 #imm=0101
vpinsrq $1, s4, T3xmm, T3xmm
movq q4, s4
vpinsrq $0, s4, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A2, A2 #imm=0101
vpinsrq $1, s5, T3xmm, T3xmm
movq q5, s5
vpinsrq $0, s5, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B2, B2 #imm=0101
vpinsrq $1, s6, T3xmm, T3xmm
movq q6, s6
vpinsrq $0, s6, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, A3, A3 #imm=0101
vpinsrq $1, s7, T3xmm, T3xmm
movq q7, s7
vpinsrq $0, s7, T3xmm, T3xmm
vpermq $0x98, T3, T3 #imm=2120
vblendpd $0x5, T3, B3, B3 #imm=0101
##################################################################################################
.endm
.macro montmul_1st_movq
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# B[8] B[10] B[12] B[14] #
# A[8] A[10] A[12] A[14] #
# B[0] B[2] B[4] B[6] #
# A[0] A[2] A[4] A[6] #
#########################################
# B0 B1 B2 B3 #
# #
# B[9] B[11] B[13] B[15] #
# A[9] A[11] A[13] A[15] #
# B[1] B[3] B[5] B[7] #
# A[1] A[3] A[5] A[7] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### 1st part: A[0-7]*B[0-7] + M[0-7]*(q0-q7) ###
### ###
### sum 576=65+73*7 ###
### ###
##################################################################################################
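# For reference, a minimal C sketch of the word-serial Montgomery step that
# the unrolled code below specializes (CIOS flavor): fold in one word of B,
# derive the quotient digit from the low word, fold in q*M so the low word
# cancels, then shift the running sum down one word. Here n0 is the constant
# -M^(-1) mod 2^64; the names and the 18-word accumulator are illustrative.
/*
#include <stdint.h>

void mont_step(uint64_t S[18], const uint64_t A[16],
               const uint64_t M[16], uint64_t b_i, uint64_t n0)
{
    unsigned __int128 t = 0;
    for (int j = 0; j < 16; j++) {                   // S += A * b_i
        t += (unsigned __int128)A[j] * b_i + S[j];
        S[j] = (uint64_t)t;  t >>= 64;
    }
    t += S[16];  S[16] = (uint64_t)t;  S[17] += (uint64_t)(t >> 64);

    uint64_t q = S[0] * n0;                          // quotient digit
    t = 0;
    for (int j = 0; j < 16; j++) {                   // S += M * q  (S[0] -> 0)
        t += (unsigned __int128)M[j] * q + S[j];
        S[j] = (uint64_t)t;  t >>= 64;
    }
    t += S[16];  S[16] = (uint64_t)t;  S[17] += (uint64_t)(t >> 64);

    for (int j = 0; j < 17; j++) S[j] = S[j + 1];    // drop the zero low word
    S[17] = 0;
}
*/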
### ###
### 1st_0: A[0-7]*B[0] + M[0-7]*q0 ###
### sum 65=11+3+17*3 ###
### ###
###########################################################
##### A[0 2 4 6]*B[0] #####
xorq s8, s8
xorq s9, s9
vpextrq $1, A0xmm, bi #B[0]
vmovq A0xmm, ai #A[0]
mulx bi, s0, s1 #A[0]*B[0]
vmovq A1xmm, ai #A[2]
mulx bi, s2, s3 #A[2]*B[0]
vmovq A2xmm, ai #A[4]
mulx bi, s4, s5 #A[4]*B[0]
vmovq A3xmm, ai #A[6]
mulx bi, s6, s7 #A[6]*B[0]
##### q0 #####
movq n0, %rdx
mulx s0, q, rh #q0=s0*n0
movq q, q0 #q0
##### M[0 2 4 6]*q0 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q0
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q0
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q0
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q0
adc rl, s6
adc rh, s7
adc $0, s8
##### A[1 3 5 7]*B[0] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[0]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[0]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[0]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[0]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[1 3 5 7]*q0 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q0
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q0
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q0
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q0
adc rl, s7
adc rh, s8
adc $0, s9
##################################################################################################
### ###
### 1st_1: A[0-7]*B[1] + M[0-7]*q1 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[1] #####
xorq s0, s0
vpextrq $1, B0xmm, bi #B[1]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[1]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[1]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[1]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[1]
adc rl, s7
adc rh, s8
adc $0, s9
##### q1 #####
movq n0, %rdx
mulx s1, q, rh #q1=s1*n0
movq q, q1 #q1
##### M[0 2 4 6]*q1 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q1
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q1
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q1
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q1
adc rl, s7
adc rh, s8
adc $0, s9
##### A[1 3 5 7]*B[1] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[1]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[1]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[1]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[1]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[1 3 5 7]*q1 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q1
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q1
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q1
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q1
adc rl, s8
adc rh, s9
adc $0, s0
##################################################################################################
### ###
### 1st_2: A[0-7]*B[2] + M[0-7]*q2 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[2] #####
xorq s1, s1
vpextrq $1, A1xmm, bi #B[2]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[2]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[2]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[2]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[2]
adc rl, s8
adc rh, s9
adc $0, s0
##### q2 #####
movq n0, %rdx
mulx s2, q, rh #q2=s2*n0
movq q, q2 #q2
##### M[0 2 4 6]*q2 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q2
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q2
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q2
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q2
adc rl, s8
adc rh, s9
adc $0, s0
##### A[1 3 5 7]*B[2] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[2]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[2]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[2]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[2]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[1 3 5 7]*q2 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q2
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q2
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q2
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q2
adc rl, s9
adc rh, s0
adc $0, s1
##################################################################################################
### ###
### 1st_3: A[0-7]*B[3] + M[0-7]*q3 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[3] #####
xorq s2, s2
vpextrq $1, B1xmm, bi #B[3]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[3]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[3]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[3]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[3]
adc rl, s9
adc rh, s0
adc $0, s1
##### q3 #####
movq n0, %rdx
mulx s3, q, rh #q3=s3*n0
movq q, q3 #q3
##### M[0 2 4 6]*q3 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q3
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q3
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q3
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q3
adc rl, s9
adc rh, s0
adc $0, s1
##### A[1 3 5 7]*B[3] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[3]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[3]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[3]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[3]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[1 3 5 7]*q3 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q3
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q3
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q3
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q3
adc rl, s0
adc rh, s1
adc $0, s2
##################################################################################################
### ###
### 1st_4: A[0-7]*B[4] + M[0-7]*q4 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[4] #####
xorq s3, s3
vpextrq $1, A2xmm, bi #B[4]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[4]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[4]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[4]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[4]
adc rl, s0
adc rh, s1
adc $0, s2
##### q4 #####
movq n0, %rdx
mulx s4, q, rh #q4=s4*n0
movq q, q4 #q4
##### M[0 2 4 6]*q4 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q4
add rl, s4
adc rh, s5
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q4
adc rl, s6
adc rh, s7
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q4
adc rl, s8
adc rh, s9
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q4
adc rl, s0
adc rh, s1
adc $0, s2
##### A[1 3 5 7]*B[4] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[4]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[4]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[4]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[4]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[1 3 5 7]*q4 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q4
add rl, s5
adc rh, s6
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q4
adc rl, s7
adc rh, s8
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q4
adc rl, s9
adc rh, s0
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q4
adc rl, s1
adc rh, s2
adc $0, s3
##################################################################################################
### ###
### 1st_5: A[0-7]*B[5] + M[0-7]*q5 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[5] #####
xorq s4, s4
vpextrq $1, B2xmm, bi #B[5]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[5]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[5]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[5]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[5]
adc rl, s1
adc rh, s2
adc $0, s3
##### q5 #####
movq n0, %rdx
mulx s5, q, rh #q5=s5*n0
movq q, q5 #q5
##### M[0 2 4 6]*q5 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q5
add rl, s5
adc rh, s6
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q5
adc rl, s7
adc rh, s8
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q5
adc rl, s9
adc rh, s0
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q5
adc rl, s1
adc rh, s2
adc $0, s3
##### A[1 3 5 7]*B[5] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[5]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[5]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[5]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[5]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[1 3 5 7]*q5 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q5
add rl, s6
adc rh, s7
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q5
adc rl, s8
adc rh, s9
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q5
adc rl, s0
adc rh, s1
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q5
adc rl, s2
adc rh, s3
adc $0, s4
##################################################################################################
### ###
### 1st_6: A[0-7]*B[6] + M[0-7]*q6 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[6] #####
xorq s5, s5
vpextrq $1, A3xmm, bi #B[6]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[6]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[6]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[6]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[6]
adc rl, s2
adc rh, s3
adc $0, s4
##### q6 #####
movq n0, %rdx
mulx s6, q, rh #q6=s6*n0
movq q, q6 #q6
##### M[0 2 4 6]*q6 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q6
add rl, s6
adc rh, s7
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q6
adc rl, s8
adc rh, s9
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q6
adc rl, s0
adc rh, s1
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q6
adc rl, s2
adc rh, s3
adc $0, s4
##### A[1 3 5 7]*B[6] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[6]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[6]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[6]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[6]
adc rl, s3
adc rh, s4
adc $0, s5
##### M[1 3 5 7]*q6 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q6
add rl, s7
adc rh, s8
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q6
adc rl, s9
adc rh, s0
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q6
adc rl, s1
adc rh, s2
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q6
adc rl, s3
adc rh, s4
adc $0, s5
##################################################################################################
### ###
### 1st_7: A[0-7]*B[7] + M[0-7]*q7 ###
### sum 73=2+3+17*4 ###
### ###
###########################################################
##### A[0 2 4 6]*B[7] #####
xorq s6, s6
vpextrq $1, B3xmm, bi #B[7]
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[7]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[7]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[7]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[7]
adc rl, s3
adc rh, s4
adc $0, s5
##### q7 #####
movq n0, %rdx
mulx s7, q, rh #q7=s7*n0
movq q, q7 #q7
##### M[0 2 4 6]*q7 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q7
add rl, s7
adc rh, s8
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q7
adc rl, s9
adc rh, s0
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q7
adc rl, s1
adc rh, s2
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q7
adc rl, s3
adc rh, s4
adc $0, s5
##### A[1 3 5 7]*B[7] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[7]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[7]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[7]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[7]
adc rl, s4
adc rh, s5
adc $0, s6
##### M[1 3 5 7]*q7 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q7
add rl, s8
adc rh, s9
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q7
adc rl, s0
adc rh, s1
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q7
adc rl, s2
adc rh, s3
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q7
adc rl, s4
adc rh, s5
adc $0, s6
##################################################################################################
### ###
### 1st part END ###
### ###
### low high ###
### ###
### s8 s9 s0 s1 s2 s3 s4 s5 s6 ###
### ###
##################################################################################################
.endm
.macro montmul_2nd_movq
##################################################################################################
### ###
### 2nd part: ###
### A[8-15]*B[0-7] + M[8-15]*(q0-q7) + A[0-7]*B[8-15] + M[0-7]*(q8-q15) ###
### ###
### sum 1248=56+149*8 ###
### ###
##################################################################################################
### ###
### 2nd_arrange_vector ###
### sum 8 ###
### ###
###########################################################
vpermq $0x8D, A0, A0 #imm=3120
vpermq $0x8D, A1, A1 #imm=3120
vpermq $0x8D, A2, A2 #imm=3120
vpermq $0x8D, A3, A3 #imm=3120
vpermq $0x8D, B0, B0 #imm=3120
vpermq $0x8D, B1, B1 #imm=3120
vpermq $0x8D, B2, B2 #imm=3120
vpermq $0x8D, B3, B3 #imm=3120
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# B[8] B[10] B[12] B[14] #
# B[0] B[2] B[4] B[6] #
# A[8] A[10] A[12] A[14] #
# A[0] A[2] A[4] A[6] #
#########################################
# B0 B1 B2 B3 #
# #
# B[9] B[11] B[13] B[15] #
# B[1] B[3] B[5] B[7] #
# A[9] A[11] A[13] A[15] #
# A[1] A[3] A[5] A[7] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
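# In formula form, step k of this 2nd part accumulates
#
#   S += A[8..15]*B[k] + M[8..15]*q[k] + A[0..7]*B[k+8] + M[0..7]*q[k+8]
#
# for k = 0..7: the high/cross terms that complete the 16x16-word Montgomery
# product begun in the 1st part, with q[k+8] derived on the fly from the low
# word of the running sum exactly as q0..q7 were.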
##################################################################################################
### ###
### 2nd_0: ###
### A[8-15]*B[0] + M[8-15]*q0 + A[0-7]*B[8] + M[0-7]*q8 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[0] #####
xorq s7, s7
vperm2i128 $1, A0, A0, A0 #imm=01
vmovq A0xmm, bi #B[0]
vperm2i128 $1, A0, A0, A0 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[0]
add rl, s8
adc rh, s9
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[0]
adc rl, s0
adc rh, s1
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[0]
adc rl, s2
adc rh, s3
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[0]
adc rl, s4
adc rh, s5
adc $0, s6
##### q0 #####
movq q0, q
##### M[8 10 12 14]*q0 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q0
add rl, s8
adc rh, s9
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q0
adc rl, s0
adc rh, s1
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q0
adc rl, s2
adc rh, s3
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q0
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[0] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[0]
add rl, s9
adc rh, s0
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[0]
adc rl, s1
adc rh, s2
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[0]
adc rl, s3
adc rh, s4
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[0]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[9 11 13 15]*q0 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q0
add rl, s9
adc rh, s0
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q0
adc rl, s1
adc rh, s2
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q0
adc rl, s3
adc rh, s4
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q0
adc rl, s5
adc rh, s6
adc $0, s7
###################################################################
###################################################################
##### A[0 2 4 6]*B[8] #####
vperm2i128 $1, A0, A0, A0 #imm=01
vpextrq $1, A0xmm, bi #B[8]
vperm2i128 $1, A0, A0, A0 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[8]
add rl, s8
adc rh, s9
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[8]
adc rl, s0
adc rh, s1
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[8]
adc rl, s2
adc rh, s3
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[8]
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### q8 #####
movq n0, %rdx
mulx s8, q, rh #q8=s8*n0
movq q, q8 #q8
##### M[0 2 4 6]*q8 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q8
add rl, s8
adc rh, s9
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q8
adc rl, s0
adc rh, s1
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q8
adc rl, s2
adc rh, s3
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q8
adc rl, s4
adc rh, s5
adc $0, s6
adc $0, s7
##### A[1 3 5 7]*B[8] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[8]
add rl, s9
adc rh, s0
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[8]
adc rl, s1
adc rh, s2
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[8]
adc rl, s3
adc rh, s4
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[8]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[1 3 5 7]*q8 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q8
add rl, s9
adc rh, s0
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q8
adc rl, s1
adc rh, s2
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q8
adc rl, s3
adc rh, s4
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q8
adc rl, s5
adc rh, s6
adc $0, s7
##################################################################################################
### ###
### 2nd_1: ###
### A[8-15]*B[1] + M[8-15]*q1 + A[0-7]*B[9] + M[0-7]*q9 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[1] #####
xorq s8, s8
vperm2i128 $1, B0, B0, B0 #imm=01
vmovq B0xmm, bi #B[1]
vperm2i128 $1, B0, B0, B0 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[1]
add rl, s9
adc rh, s0
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[1]
adc rl, s1
adc rh, s2
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[1]
adc rl, s3
adc rh, s4
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[1]
adc rl, s5
adc rh, s6
adc $0, s7
##### q1 #####
movq q1, q
##### M[8 10 12 14]*q1 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q1
add rl, s9
adc rh, s0
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q1
adc rl, s1
adc rh, s2
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q1
adc rl, s3
adc rh, s4
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q1
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[1] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[1]
add rl, s0
adc rh, s1
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[1]
adc rl, s2
adc rh, s3
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[1]
adc rl, s4
adc rh, s5
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[1]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[9 11 13 15]*q1 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q1
add rl, s0
adc rh, s1
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q1
adc rl, s2
adc rh, s3
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q1
adc rl, s4
adc rh, s5
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q1
adc rl, s6
adc rh, s7
adc $0, s8
###################################################################
###################################################################
##### A[0 2 4 6]*B[9] #####
vperm2i128 $1, B0, B0, B0 #imm=01
vpextrq $1, B0xmm, bi #B[9]
vperm2i128 $1, B0, B0, B0 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[9]
add rl, s9
adc rh, s0
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[9]
adc rl, s1
adc rh, s2
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[9]
adc rl, s3
adc rh, s4
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[9]
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### q9 #####
movq n0, %rdx
mulx s9, q, rh #q9=s9*n0
movq q, q9 #q9
##### M[0 2 4 6]*q9 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q9
add rl, s9
adc rh, s0
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q9
adc rl, s1
adc rh, s2
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q9
adc rl, s3
adc rh, s4
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q9
adc rl, s5
adc rh, s6
adc $0, s7
adc $0, s8
##### A[1 3 5 7]*B[9] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[9]
add rl, s0
adc rh, s1
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[9]
adc rl, s2
adc rh, s3
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[9]
adc rl, s4
adc rh, s5
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[9]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[1 3 5 7]*q9 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q9
add rl, s0
adc rh, s1
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q9
adc rl, s2
adc rh, s3
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q9
adc rl, s4
adc rh, s5
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q9
adc rl, s6
adc rh, s7
adc $0, s8
##################################################################################################
### ###
### 2nd_2: ###
### A[8-15]*B[2] + M[8-15]*q2 + A[0-7]*B[10] + M[0-7]*q10 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[2] #####
xorq s9, s9
vperm2i128 $1, A1, A1, A1 #imm=01
vmovq A1xmm, bi #B[2]
vperm2i128 $1, A1, A1, A1 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[2]
add rl, s0
adc rh, s1
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[2]
adc rl, s2
adc rh, s3
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[2]
adc rl, s4
adc rh, s5
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[2]
adc rl, s6
adc rh, s7
adc $0, s8
##### q2 #####
movq q2, q
##### M[8 10 12 14]*q2 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q2
add rl, s0
adc rh, s1
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q2
adc rl, s2
adc rh, s3
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q2
adc rl, s4
adc rh, s5
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q2
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[2] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[2]
add rl, s1
adc rh, s2
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[2]
adc rl, s3
adc rh, s4
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[2]
adc rl, s5
adc rh, s6
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[2]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[9 11 13 15]*q2 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q2
add rl, s1
adc rh, s2
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q2
adc rl, s3
adc rh, s4
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q2
adc rl, s5
adc rh, s6
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q2
adc rl, s7
adc rh, s8
adc $0, s9
###################################################################
###################################################################
##### A[0 2 4 6]*B[10] #####
vperm2i128 $1, A1, A1, A1 #imm=01
vpextrq $1, A1xmm, bi #B[10]
vperm2i128 $1, A1, A1, A1 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[10]
add rl, s0
adc rh, s1
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[10]
adc rl, s2
adc rh, s3
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[10]
adc rl, s4
adc rh, s5
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[10]
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### q10 #####
movq n0, %rdx
mulx s0, q, rh #q10=s0*n0
movq q, q10 #q10
##### M[0 2 4 6]*q10 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q10
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q10
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q10
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q10
adc rl, s6
adc rh, s7
adc $0, s8
adc $0, s9
##### A[1 3 5 7]*B[10] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[10]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[10]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[10]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[10]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[1 3 5 7]*q10 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q10
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q10
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q10
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q10
adc rl, s7
adc rh, s8
adc $0, s9
##################################################################################################
### ###
### 2nd_3: ###
### A[8-15]*B[3] + M[8-15]*q3 + A[0-7]*B[11] + M[0-7]*q11 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[3] #####
xorq s0, s0
vperm2i128 $1, B1, B1, B1 #imm=01
vmovq B1xmm, bi #B[3]
vperm2i128 $1, B1, B1, B1 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[3]
add rl, s1
adc rh, s2
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[3]
adc rl, s3
adc rh, s4
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[3]
adc rl, s5
adc rh, s6
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[3]
adc rl, s7
adc rh, s8
adc $0, s9
##### q3 #####
movq q3, q
##### M[8 10 12 14]*q3 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q3
add rl, s1
adc rh, s2
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q3
adc rl, s3
adc rh, s4
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q3
adc rl, s5
adc rh, s6
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q3
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[3] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[3]
add rl, s2
adc rh, s3
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[3]
adc rl, s4
adc rh, s5
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[3]
adc rl, s6
adc rh, s7
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[3]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[9 11 13 15]*q3 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q3
add rl, s2
adc rh, s3
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q3
adc rl, s4
adc rh, s5
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q3
adc rl, s6
adc rh, s7
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q3
adc rl, s8
adc rh, s9
adc $0, s0
###################################################################
###################################################################
##### A[0 2 4 6]*B[11] #####
vperm2i128 $1, B1, B1, B1 #imm=01
vpextrq $1, B1xmm, bi #B[11]
vperm2i128 $1, B1, B1, B1 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[11]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[11]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[11]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[11]
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### q11 #####
movq n0, %rdx
mulx s1, q, rh #q11=s1*n0
movq q, q11 #q11
##### M[0 2 4 6]*q11 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q11
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q11
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q11
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q11
adc rl, s7
adc rh, s8
adc $0, s9
adc $0, s0
##### A[1 3 5 7]*B[11] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[11]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[11]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[11]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[11]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[1 3 5 7]*q11 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q11
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q11
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q11
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q11
adc rl, s8
adc rh, s9
adc $0, s0
##################################################################################################
### ###
### 2nd_4: ###
### A[8-15]*B[4] + M[8-15]*q4 + A[0-7]*B[12] + M[0-7]*q12 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[4] #####
xorq s1, s1
vperm2i128 $1, A2, A2, A2 #imm=01
vmovq A2xmm, bi #B[4]
vperm2i128 $1, A2, A2, A2 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[4]
add rl, s2
adc rh, s3
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[4]
adc rl, s4
adc rh, s5
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[4]
adc rl, s6
adc rh, s7
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[4]
adc rl, s8
adc rh, s9
adc $0, s0
##### q4 #####
movq q4, q
##### M[8 10 12 14]*q4 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q4
add rl, s2
adc rh, s3
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q4
adc rl, s4
adc rh, s5
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q4
adc rl, s6
adc rh, s7
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q4
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[4] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[4]
add rl, s3
adc rh, s4
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[4]
adc rl, s5
adc rh, s6
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[4]
adc rl, s7
adc rh, s8
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[4]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[9 11 13 15]*q4 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q4
add rl, s3
adc rh, s4
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q4
adc rl, s5
adc rh, s6
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q4
adc rl, s7
adc rh, s8
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q4
adc rl, s9
adc rh, s0
adc $0, s1
###################################################################
###################################################################
##### A[0 2 4 6]*B[12] #####
vperm2i128 $1, A2, A2, A2 #imm=01
vpextrq $1, A2xmm, bi #B[12]
vperm2i128 $1, A2, A2, A2 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[12]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[12]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[12]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[12]
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### q12 #####
movq n0, %rdx
mulx s2, q, rh #q12=s2*n0
movq q, q12 #q12
##### M[0 2 4 6]*q12 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q12
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q12
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q12
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q12
adc rl, s8
adc rh, s9
adc $0, s0
adc $0, s1
##### A[1 3 5 7]*B[12] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[12]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[12]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[12]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[12]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[1 3 5 7]*q12 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q12
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q12
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q12
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q12
adc rl, s9
adc rh, s0
adc $0, s1
##################################################################################################
### ###
### 2nd_5: ###
### A[8-15]*B[5] + M[8-15]*q5 + A[0-7]*B[13] + M[0-7]*q13 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[5] #####
xorq s2, s2
vperm2i128 $1, B2, B2, B2 #imm=01
vmovq B2xmm, bi #B[5]
vperm2i128 $1, B2, B2, B2 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[5]
add rl, s3
adc rh, s4
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[5]
adc rl, s5
adc rh, s6
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[5]
adc rl, s7
adc rh, s8
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[5]
adc rl, s9
adc rh, s0
adc $0, s1
##### q5 #####
movq q5, q
##### M[8 10 12 14]*q5 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q5
add rl, s3
adc rh, s4
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q5
adc rl, s5
adc rh, s6
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q5
adc rl, s7
adc rh, s8
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q5
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[5] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[5]
add rl, s4
adc rh, s5
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[5]
adc rl, s6
adc rh, s7
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[5]
adc rl, s8
adc rh, s9
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[5]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[9 11 13 15]*q5 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q5
add rl, s4
adc rh, s5
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q5
adc rl, s6
adc rh, s7
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q5
adc rl, s8
adc rh, s9
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q5
adc rl, s0
adc rh, s1
adc $0, s2
###################################################################
###################################################################
##### A[0 2 4 6]*B[13] #####
vperm2i128 $1, B2, B2, B2 #imm=01
vpextrq $1, B2xmm, bi #B[13]
vperm2i128 $1, B2, B2, B2 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[13]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[13]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[13]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[13]
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### q13 #####
movq n0, %rdx
mulx s3, q, rh #q13=s3*n0
movq q, q13 #q13
##### M[0 2 4 6]*q13 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q13
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q13
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q13
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q13
adc rl, s9
adc rh, s0
adc $0, s1
adc $0, s2
##### A[1 3 5 7]*B[13] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[13]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[13]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[13]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[13]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[1 3 5 7]*q13 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q13
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q13
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q13
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q13
adc rl, s0
adc rh, s1
adc $0, s2
##################################################################################################
### ###
### 2nd_6: ###
### A[8-15]*B[6] + M[8-15]*q6 + A[0-7]*B[14] + M[0-7]*q14 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[6] #####
xorq s3, s3
vperm2i128 $1, A3, A3, A3 #imm=01
vmovq A3xmm, bi #B[6]
vperm2i128 $1, A3, A3, A3 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[6]
add rl, s4
adc rh, s5
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[6]
adc rl, s6
adc rh, s7
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[6]
adc rl, s8
adc rh, s9
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[6]
adc rl, s0
adc rh, s1
adc $0, s2
##### q6 #####
movq q6, q
##### M[8 10 12 14]*q6 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q6
add rl, s4
adc rh, s5
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q6
adc rl, s6
adc rh, s7
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q6
adc rl, s8
adc rh, s9
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q6
adc rl, s0
adc rh, s1
adc $0, s2
##### A[9 11 13 15]*B[6] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[6]
add rl, s5
adc rh, s6
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[6]
adc rl, s7
adc rh, s8
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[6]
adc rl, s9
adc rh, s0
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[6]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[9 11 13 15]*q6 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q6
add rl, s5
adc rh, s6
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q6
adc rl, s7
adc rh, s8
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q6
adc rl, s9
adc rh, s0
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q6
adc rl, s1
adc rh, s2
adc $0, s3
###################################################################
###################################################################
##### A[0 2 4 6]*B[14] #####
vperm2i128 $1, A3, A3, A3 #imm=01
vpextrq $1, A3xmm, bi #B[14]
vperm2i128 $1, A3, A3, A3 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[14]
add rl, s4
adc rh, s5
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[14]
adc rl, s6
adc rh, s7
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[14]
adc rl, s8
adc rh, s9
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[14]
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### q14 #####
movq n0, %rdx
mulx s4, q, rh #q14=s4*n0
movq q, q14 #q14
##### M[0 2 4 6]*q14 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q14
add rl, s4
adc rh, s5
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q14
adc rl, s6
adc rh, s7
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q14
adc rl, s8
adc rh, s9
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q14
adc rl, s0
adc rh, s1
adc $0, s2
adc $0, s3
##### A[1 3 5 7]*B[14] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[14]
add rl, s5
adc rh, s6
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[14]
adc rl, s7
adc rh, s8
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[14]
adc rl, s9
adc rh, s0
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[14]
adc rl, s1
adc rh, s2
adc $0, s3
##### M[1 3 5 7]*q14 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q14
add rl, s5
adc rh, s6
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q14
adc rl, s7
adc rh, s8
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q14
adc rl, s9
adc rh, s0
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q14
adc rl, s1
adc rh, s2
adc $0, s3
##################################################################################################
### ###
### 2nd_7: ###
### A[8-15]*B[7] + M[8-15]*q7 + A[0-7]*B[15] + M[0-7]*q15 ###
### ###
### sum 149=21+18+17+17+76 ###
### ###
###################################################################
##### A[8 10 12 14]*B[7] #####
xorq s4, s4
vperm2i128 $1, B3, B3, B3 #imm=01
vmovq B3xmm, bi #B[7]
vperm2i128 $1, B3, B3, B3 #imm=01
vpextrq $1, A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[7]
add rl, s5
adc rh, s6
vpextrq $1, A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[7]
adc rl, s7
adc rh, s8
vpextrq $1, A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[7]
adc rl, s9
adc rh, s0
vpextrq $1, A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[7]
adc rl, s1
adc rh, s2
adc $0, s3
##### q7 #####
movq q7, q
##### M[8 10 12 14]*q7 #####
vpextrq $1, M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q7
add rl, s5
adc rh, s6
vpextrq $1, M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q7
adc rl, s7
adc rh, s8
vpextrq $1, M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q7
adc rl, s9
adc rh, s0
vpextrq $1, M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q7
adc rl, s1
adc rh, s2
adc $0, s3
##### A[9 11 13 15]*B[7] #####
vpextrq $1, B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[7]
add rl, s6
adc rh, s7
vpextrq $1, B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[7]
adc rl, s8
adc rh, s9
vpextrq $1, B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[7]
adc rl, s0
adc rh, s1
vpextrq $1, B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[7]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[9 11 13 15]*q7 #####
vpextrq $1, T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q7
add rl, s6
adc rh, s7
vpextrq $1, T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q7
adc rl, s8
adc rh, s9
vpextrq $1, T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q7
adc rl, s0
adc rh, s1
vpextrq $1, T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q7
adc rl, s2
adc rh, s3
adc $0, s4
###################################################################
###################################################################
##### A[0 2 4 6]*B[15] #####
vperm2i128 $1, B3, B3, B3 #imm=01
vpextrq $1, B3xmm, bi #B[15]
vperm2i128 $1, B3, B3, B3 #imm=01
vmovq A0xmm, ai #A[0]
mulx bi, rl, rh #A[0]*B[15]
add rl, s5
adc rh, s6
vmovq A1xmm, ai #A[2]
mulx bi, rl, rh #A[2]*B[15]
adc rl, s7
adc rh, s8
vmovq A2xmm, ai #A[4]
mulx bi, rl, rh #A[4]*B[15]
adc rl, s9
adc rh, s0
vmovq A3xmm, ai #A[6]
mulx bi, rl, rh #A[6]*B[15]
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### q15 #####
movq n0, %rdx
mulx s5, q, rh #q15=s5*n0
movq q, q15 #q15
##### M[0 2 4 6]*q15 #####
vmovq M0xmm, mi #M[0]
mulx q, rl, rh #M[0]*q15
add rl, s5
adc rh, s6
vmovq M1xmm, mi #M[2]
mulx q, rl, rh #M[2]*q15
adc rl, s7
adc rh, s8
vmovq M2xmm, mi #M[4]
mulx q, rl, rh #M[4]*q15
adc rl, s9
adc rh, s0
vmovq M3xmm, mi #M[6]
mulx q, rl, rh #M[6]*q15
adc rl, s1
adc rh, s2
adc $0, s3
adc $0, s4
##### A[1 3 5 7]*B[15] #####
vmovq B0xmm, ai #A[1]
mulx bi, rl, rh #A[1]*B[15]
add rl, s6
adc rh, s7
vmovq B1xmm, ai #A[3]
mulx bi, rl, rh #A[3]*B[15]
adc rl, s8
adc rh, s9
vmovq B2xmm, ai #A[5]
mulx bi, rl, rh #A[5]*B[15]
adc rl, s0
adc rh, s1
vmovq B3xmm, ai #A[7]
mulx bi, rl, rh #A[7]*B[15]
adc rl, s2
adc rh, s3
adc $0, s4
##### M[1 3 5 7]*q15 #####
vmovq T0xmm, mi #M[1]
mulx q, rl, rh #M[1]*q15
add rl, s6
adc rh, s7
vmovq T1xmm, mi #M[3]
mulx q, rl, rh #M[3]*q15
adc rl, s8
adc rh, s9
vmovq T2xmm, mi #M[5]
mulx q, rl, rh #M[5]*q15
adc rl, s0
adc rh, s1
vmovq T3xmm, mi #M[7]
mulx q, rl, rh #M[7]*q15
adc rl, s2
adc rh, s3
adc $0, s4
##################################################################################################
### ###
### 2nd part END ###
### ###
### low high ###
### ###
### s6 s7 s8 s9 s0 s1 s2 s3 s4 ###
### ###
##################################################################################################
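### The s-registers form a rotating 9-word accumulator: each column 2nd_k
### first zeroes the register that will receive its top carry (xorq s8,
### s9, s0, ... s4 above), which shifts the window one slot per column and
### leaves it at s6..s4 (wrapping through s9/s0) as listed above, without
### any explicit limb moves.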
.endm
.macro montmul_3rd_movq
##################################################################################################
### ###
### 3rd part: A[8-15]*B[8-15] + M[8-15]*(q8-q15) ###
### ###
### sum 628=52+72*8 ###
### ###
##################################################################################################
### ###
### 3rd_arrange_vector ###
### sum 52=7*4+6*4 ###
### ###
###########################################################
vpermq $0x8D, A0, A0 #imm=2031
vpermq $0x8D, A1, A1 #imm=2031
vpermq $0x8D, A2, A2 #imm=2031
vpermq $0x8D, A3, A3 #imm=2031
vpermq $0x8D, B0, B0 #imm=2031
vpermq $0x8D, B1, B1 #imm=2031
vpermq $0x8D, B2, B2 #imm=2031
vpermq $0x8D, B3, B3 #imm=2031
/*
vpermq $0x72, A0, A0 #imm=01 11 00 10
vpermq $0x72, A1, A1 #imm=01 11 00 10
vpermq $0x72, A2, A2 #imm=01 11 00 10
vpermq $0x72, A3, A3 #imm=01 11 00 10
vpermq $0x72, B0, B0 #imm=01 11 00 10
vpermq $0x72, B1, B1 #imm=01 11 00 10
vpermq $0x72, B2, B2 #imm=01 11 00 10
vpermq $0x72, B3, B3 #imm=01 11 00 10
*/
### inverse M ###
vshufpd $0x05, M0, M0, M0 #imm=0101
vshufpd $0x05, M1, M1, M1 #imm=0101
vshufpd $0x05, M2, M2, M2 #imm=0101
vshufpd $0x05, M3, M3, M3 #imm=0101
vshufpd $0x05, T0, T0, T0 #imm=0101
vshufpd $0x05, T1, T1, T1 #imm=0101
vshufpd $0x05, T2, T2, T2 #imm=0101
vshufpd $0x05, T3, T3, T3 #imm=0101
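### vshufpd with imm 0b0101 and identical sources swaps the two qwords
### inside each 128-bit lane, so the vmovq/vpextrq reads in the 3rd part
### pick up the M/T words shown in the map below; the same shuffle is
### undone after 3rd_7 ("reverse M").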
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# B[0] B[2] B[4] B[6] #
# A[0] A[2] A[4] A[6] #
# B[8] B[10] B[12] B[14] #
# A[8] A[10] A[12] A[14] #
#########################################
# B0 B1 B2 B3 #
# #
# B[1] B[3] B[5] B[7] #
# A[1] A[3] A[5] A[7] #
# B[9] B[11] B[13] B[15] #
# A[9] A[11] A[13] A[15] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[0] M[2] M[4] M[6] #
# M[8] M[10] M[12] M[14] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[1] M[3] M[5] M[7] #
# M[9] M[11] M[13] M[15] #
#########################################
#########################################################
##################################################################################################
### ###
### 3rd_0: ###
### A[8-15]*B[8] + M[8-15]*q8 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[8] #####
xorq s5, s5
vpextrq $1, A0xmm, bi #B[8]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[8]
add rl, s6
adc rh, s7
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[8]
adc rl, s8
adc rh, s9
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[8]
adc rl, s0
adc rh, s1
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[8]
adc rl, s2
adc rh, s3
adc $0, s4
##### q8 #####
movq q8, q
##### M[8 10 12 14]*q8 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q8
add rl, s6
adc rh, s7
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q8
adc rl, s8
adc rh, s9
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q8
adc rl, s0
adc rh, s1
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q8
adc rl, s2
adc rh, s3
adc $0, s4
##### A[9 11 13 15]*B[8] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[8]
add rl, s7
adc rh, s8
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[8]
adc rl, s9
adc rh, s0
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[8]
adc rl, s1
adc rh, s2
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[8]
adc rl, s3
adc rh, s4
adc $0, s5
##### M[9 11 13 15]*q8 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q8
add rl, s7
adc rh, s8
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q8
adc rl, s9
adc rh, s0
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q8
adc rl, s1
adc rh, s2
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q8
adc rl, s3
adc rh, s4
adc $0, s5
movq s6, r0 #result[0]
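### After column 3rd_0 the lowest live limb is final (every later term
### lands at least one word higher), so s6 is parked as result[0] and then
### recycled as the next column's carry catcher (xorq s6, s6 below).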
##################################################################################################
### ###
### 3rd_1: ###
### A[8-15]*B[9] + M[8-15]*q9 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[9] #####
xorq s6, s6
vpextrq $1, B0xmm, bi #B[9]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[9]
add rl, s7
adc rh, s8
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[9]
adc rl, s9
adc rh, s0
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[9]
adc rl, s1
adc rh, s2
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[9]
adc rl, s3
adc rh, s4
adc $0, s5
##### q9 #####
movq q9, q
##### M[8 10 12 14]*q9 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q9
add rl, s7
adc rh, s8
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q9
adc rl, s9
adc rh, s0
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q9
adc rl, s1
adc rh, s2
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q9
adc rl, s3
adc rh, s4
adc $0, s5
##### A[9 11 13 15]*B[9] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[9]
add rl, s8
adc rh, s9
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[9]
adc rl, s0
adc rh, s1
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[9]
adc rl, s2
adc rh, s3
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[9]
adc rl, s4
adc rh, s5
adc $0, s6
##### M[9 11 13 15]*q9 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q9
add rl, s8
adc rh, s9
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q9
adc rl, s0
adc rh, s1
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q9
adc rl, s2
adc rh, s3
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q9
adc rl, s4
adc rh, s5
adc $0, s6
movq s7, r1 #result[1]
##################################################################################################
### ###
### 3rd_2: ###
### A[8-15]*B[10] + M[8-15]*q10 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[10] #####
xorq s7, s7
vpextrq $1, A1xmm, bi #B[10]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[10]
add rl, s8
adc rh, s9
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[10]
adc rl, s0
adc rh, s1
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[10]
adc rl, s2
adc rh, s3
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[10]
adc rl, s4
adc rh, s5
adc $0, s6
##### q10 #####
movq q10, q
##### M[8 10 12 14]*q10 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q10
add rl, s8
adc rh, s9
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q10
adc rl, s0
adc rh, s1
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q10
adc rl, s2
adc rh, s3
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q10
adc rl, s4
adc rh, s5
adc $0, s6
##### A[9 11 13 15]*B[10] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[10]
add rl, s9
adc rh, s0
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[10]
adc rl, s1
adc rh, s2
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[10]
adc rl, s3
adc rh, s4
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[10]
adc rl, s5
adc rh, s6
adc $0, s7
##### M[9 11 13 15]*q10 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q10
add rl, s9
adc rh, s0
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q10
adc rl, s1
adc rh, s2
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q10
adc rl, s3
adc rh, s4
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q10
adc rl, s5
adc rh, s6
adc $0, s7
movq s8, r2 #result[2]
##################################################################################################
### ###
### 3rd_3: ###
### A[8-15]*B[11] + M[8-15]*q11 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[11] #####
xorq s8, s8
vpextrq $1, B1xmm, bi #B[11]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[11]
add rl, s9
adc rh, s0
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[11]
adc rl, s1
adc rh, s2
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[11]
adc rl, s3
adc rh, s4
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[11]
adc rl, s5
adc rh, s6
adc $0, s7
##### q11 #####
movq q11, q
##### M[8 10 12 14]*q11 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q11
add rl, s9
adc rh, s0
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q11
adc rl, s1
adc rh, s2
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q11
adc rl, s3
adc rh, s4
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q11
adc rl, s5
adc rh, s6
adc $0, s7
##### A[9 11 13 15]*B[11] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[11]
add rl, s0
adc rh, s1
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[11]
adc rl, s2
adc rh, s3
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[11]
adc rl, s4
adc rh, s5
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[11]
adc rl, s6
adc rh, s7
adc $0, s8
##### M[9 11 13 15]*q11 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q11
add rl, s0
adc rh, s1
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q11
adc rl, s2
adc rh, s3
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q11
adc rl, s4
adc rh, s5
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q11
adc rl, s6
adc rh, s7
adc $0, s8
movq s9, r3 #result[3]
##################################################################################################
### ###
### 3rd_4: ###
### A[8-15]*B[12] + M[8-15]*q12 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[12] #####
xorq s9, s9
vpextrq $1, A2xmm, bi #B[12]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[12]
add rl, s0
adc rh, s1
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[12]
adc rl, s2
adc rh, s3
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[12]
adc rl, s4
adc rh, s5
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[12]
adc rl, s6
adc rh, s7
adc $0, s8
##### q12 #####
movq q12, q
##### M[8 10 12 14]*q12 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q12
add rl, s0
adc rh, s1
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q12
adc rl, s2
adc rh, s3
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q12
adc rl, s4
adc rh, s5
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q12
adc rl, s6
adc rh, s7
adc $0, s8
##### A[9 11 13 15]*B[12] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[12]
add rl, s1
adc rh, s2
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[12]
adc rl, s3
adc rh, s4
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[12]
adc rl, s5
adc rh, s6
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[12]
adc rl, s7
adc rh, s8
adc $0, s9
##### M[9 11 13 15]*q12 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q12
add rl, s1
adc rh, s2
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q12
adc rl, s3
adc rh, s4
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q12
adc rl, s5
adc rh, s6
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q12
adc rl, s7
adc rh, s8
adc $0, s9
movq s0, r4 #result[4]
##################################################################################################
### ###
### 3rd_5: ###
### A[8-15]*B[13] + M[8-15]*q13 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[13] #####
xorq s0, s0
vpextrq $1, B2xmm, bi #B[13]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[13]
add rl, s1
adc rh, s2
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[13]
adc rl, s3
adc rh, s4
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[13]
adc rl, s5
adc rh, s6
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[13]
adc rl, s7
adc rh, s8
adc $0, s9
##### q13 #####
movq q13, q
##### M[8 10 12 14]*q13 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q13
add rl, s1
adc rh, s2
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q13
adc rl, s3
adc rh, s4
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q13
adc rl, s5
adc rh, s6
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q13
adc rl, s7
adc rh, s8
adc $0, s9
##### A[9 11 13 15]*B[13] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[13]
add rl, s2
adc rh, s3
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[13]
adc rl, s4
adc rh, s5
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[13]
adc rl, s6
adc rh, s7
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[13]
adc rl, s8
adc rh, s9
adc $0, s0
##### M[9 11 13 15]*q13 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q13
add rl, s2
adc rh, s3
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q13
adc rl, s4
adc rh, s5
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q13
adc rl, s6
adc rh, s7
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q13
adc rl, s8
adc rh, s9
adc $0, s0
movq s1, r5 #result[5]
##################################################################################################
### ###
### 3rd_6: ###
### A[8-15]*B[14] + M[8-15]*q14 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[14] #####
xorq s1, s1
vpextrq $1, A3xmm, bi #B[14]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[14]
add rl, s2
adc rh, s3
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[14]
adc rl, s4
adc rh, s5
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[14]
adc rl, s6
adc rh, s7
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[14]
adc rl, s8
adc rh, s9
adc $0, s0
##### q14 #####
movq q14, q
##### M[8 10 12 14]*q14 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q14
add rl, s2
adc rh, s3
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q14
adc rl, s4
adc rh, s5
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q14
adc rl, s6
adc rh, s7
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q14
adc rl, s8
adc rh, s9
adc $0, s0
##### A[9 11 13 15]*B[14] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[14]
add rl, s3
adc rh, s4
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[14]
adc rl, s5
adc rh, s6
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[14]
adc rl, s7
adc rh, s8
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[14]
adc rl, s9
adc rh, s0
adc $0, s1
##### M[9 11 13 15]*q14 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q14
add rl, s3
adc rh, s4
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q14
adc rl, s5
adc rh, s6
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q14
adc rl, s7
adc rh, s8
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q14
adc rl, s9
adc rh, s0
adc $0, s1
movq s2, r6 #result[6]
##################################################################################################
### ###
### 3rd_7: ###
### A[8-15]*B[15] + M[8-15]*q15 ###
### ###
### sum 72=19+18+17+18 ###
### ###
###################################################################
##### A[8 10 12 14]*B[15] #####
xorq s2, s2
vpextrq $1, B3xmm, bi #B[15]
vmovq A0xmm, ai #A[8]
mulx bi, rl, rh #A[8]*B[15]
add rl, s3
adc rh, s4
vmovq A1xmm, ai #A[10]
mulx bi, rl, rh #A[10]*B[15]
adc rl, s5
adc rh, s6
vmovq A2xmm, ai #A[12]
mulx bi, rl, rh #A[12]*B[15]
adc rl, s7
adc rh, s8
vmovq A3xmm, ai #A[14]
mulx bi, rl, rh #A[14]*B[15]
adc rl, s9
adc rh, s0
adc $0, s1
##### q15 #####
movq q15, q
##### M[8 10 12 14]*q15 #####
vmovq M0xmm, mi #M[8]
mulx q, rl, rh #M[8]*q15
add rl, s3
adc rh, s4
vmovq M1xmm, mi #M[10]
mulx q, rl, rh #M[10]*q15
adc rl, s5
adc rh, s6
vmovq M2xmm, mi #M[12]
mulx q, rl, rh #M[12]*q15
adc rl, s7
adc rh, s8
vmovq M3xmm, mi #M[14]
mulx q, rl, rh #M[14]*q15
adc rl, s9
adc rh, s0
adc $0, s1
##### A[9 11 13 15]*B[15] #####
vmovq B0xmm, ai #A[9]
mulx bi, rl, rh #A[9]*B[15]
add rl, s4
adc rh, s5
vmovq B1xmm, ai #A[11]
mulx bi, rl, rh #A[11]*B[15]
adc rl, s6
adc rh, s7
vmovq B2xmm, ai #A[13]
mulx bi, rl, rh #A[13]*B[15]
adc rl, s8
adc rh, s9
vmovq B3xmm, ai #A[15]
mulx bi, rl, rh #A[15]*B[15]
adc rl, s0
adc rh, s1
adc $0, s2
##### M[9 11 13 15]*q15 #####
vmovq T0xmm, mi #M[9]
mulx q, rl, rh #M[9]*q15
add rl, s4
adc rh, s5
vmovq T1xmm, mi #M[11]
mulx q, rl, rh #M[11]*q15
adc rl, s6
adc rh, s7
vmovq T2xmm, mi #M[13]
mulx q, rl, rh #M[13]*q15
adc rl, s8
adc rh, s9
vmovq T3xmm, mi #M[15]
mulx q, rl, rh #M[15]*q15
adc rl, s0
adc rh, s1
adc $0, s2
movq s3, r7 #result[7]
##################################################################################################
### reverse M ###
vshufpd $0x05, M0, M0, M0 #imm=0101
vshufpd $0x05, M1, M1, M1 #imm=0101
vshufpd $0x05, M2, M2, M2 #imm=0101
vshufpd $0x05, M3, M3, M3 #imm=0101
vshufpd $0x05, T0, T0, T0 #imm=0101
vshufpd $0x05, T1, T1, T1 #imm=0101
vshufpd $0x05, T2, T2, T2 #imm=0101
vshufpd $0x05, T3, T3, T3 #imm=0101
/* 16 256bit vector registers */
#########################################################
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### 3rd part END ###
### ###
### low high ###
### ###
### s4 s5 s6 s7 s8 s9 s0 s1 s2 ###
### ###
##################################################################################################
.endm
.macro montmul_last_movq
##################################################################################################
### ###
### last part: reduce and store result ###
### ###
### sum 102=8+94 ###
### ###
##################################################################################################
### ###
### reduce ###
### sum 94=4+62+28 ###
### ###
###########################################################
xorq rh, rh
movq s2, rh
subq $1, rh
#jb .montmul_last_end
jb 1f
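#### Reduction sketch: s2 is the carry word above R[15]; subq $1 borrows
#### exactly when s2 was 0, so each jb 1f skips a further subtraction of
#### M once the value fits in 16 limbs (the subtract-M pass is unrolled
#### twice, then a final trial subtraction follows at 1:).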
#.montmul_last_sub_1:
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
sbbq $0, s2
xorq rh, rh
movq s2, rh
subq $1, rh
jb 1f
#.montmul_last_sub_2:
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
sbbq $0, s2
#.montmul_last_end:
1:
vpxorq %zmm18, %zmm18, %zmm18
vpxorq %zmm19, %zmm19, %zmm19
vpxorq %zmm20, %zmm20, %zmm20
movq r0, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r1, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r2, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r3, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r4, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r5, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r6, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
movq r7, rl
vmovq rl, %xmm20
valignq $1, %zmm19, %zmm20, %zmm19
vmovq r8, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r9, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r10, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r11, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r12, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r13, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r14, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
vmovq r15, %xmm20
valignq $1, %zmm18, %zmm20, %zmm18
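### r0-r15 were just snapshotted into %zmm19 (low eight limbs) and %zmm18
### (high eight) one qword at a time via valignq, so the trial subtraction
### below can be rolled back at 3: without spilling to memory.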
### r0-r7 ###
movq r0, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
movq r1, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
movq r2, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
movq r3, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
movq r4, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
movq r5, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
movq r6, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
movq r7, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
### r8-r15 ###
vpextrq $1, M0xmm, rh #R[8]
sbbq rh, r8
vpextrq $1, T0xmm, rh #R[9]
sbbq rh, r9
vpextrq $1, M1xmm, rh #R[10]
sbbq rh, r10
vpextrq $1, T1xmm, rh #R[11]
sbbq rh, r11
vpextrq $1, M2xmm, rh #R[12]
sbbq rh, r12
vpextrq $1, T2xmm, rh #R[13]
sbbq rh, r13
vpextrq $1, M3xmm, rh #R[14]
sbbq rh, r14
vpextrq $1, T3xmm, rh #R[15]
sbbq rh, r15
jb 3f
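### No borrow: R - M is the reduced result, so r0-r15 are repacked into
### the ymm registers below. Borrow (3f): the saved limbs are restored
### from %zmm19/%zmm18 first, then repacked the same way.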
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
vblendpd $0x3, T3, T2, T3 #imm=0011
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
jmp 4f
3:
vpxorq %zmm20, %zmm20, %zmm20
vmovq %xmm19, rl
movq rl, r0
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r1
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r2
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r3
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r4
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r5
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r6
valignq $1, %zmm19, %zmm20, %zmm19
vmovq %xmm19, rl
movq rl, r7
valignq $1, %zmm19, %zmm20, %zmm19
vpxorq %zmm20, %zmm20, %zmm20
vmovq %xmm18, r8
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r9
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r10
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r11
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r12
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r13
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r14
valignq $1, %zmm18, %zmm20, %zmm18
vmovq %xmm18, r15
valignq $1, %zmm18, %zmm20, %zmm18
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
vblendpd $0x3, T3, T2, T3 #imm=0011
vperm2i128 $1, T3, T3, T3 #imm=01
vblendpd $0x3, T2, T3, T2 #imm=0011
4:
/* 16 256bit vector registers */
#########################################################
#########################################
# A0 A1 A2 A3 #
# #
# x x x x #
# R[8] R[10] R[12] R[14] #
# x x x x #
# R[0] R[2] R[4] R[6] #
#########################################
# B0 B1 B2 B3 #
# #
# x x x x #
# R[9] R[11] R[13] R[15] #
# x x x x #
# R[1] R[3] R[5] R[7] #
#########################################
# M0 M1 M2 M3 #
# #
# x x x x #
# x x x x #
# M[8] M[10] M[12] M[14] #
# M[0] M[2] M[4] M[6] #
#########################################
# T0 T1 T2 T3 #
# #
# x x x x #
# x x x x #
# M[9] M[11] M[13] M[15] #
# M[1] M[3] M[5] M[7] #
#########################################
#########################################################
##################################################################################################
### ###
### last part END ###
### ###
### result A0 A1 A2 A3 B0 B1 B2 B3 ###
### ###
##################################################################################################
.endm
.globl Comcq
.type Comcq, @function
.align 64
Comcq:
subq $512, %rsp
movq %rbx, (%rsp)
movq %rbp, 8(%rsp)
movq %r12, 16(%rsp)
movq %r13, 24(%rsp)
movq %r14, 32(%rsp)
movq %r15, 40(%rsp)
movq %rdi, 48(%rsp)
movq %rsi, 64(%rsp)
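### 512-byte scratch frame: callee-saved GPRs at 0-40(%rsp), the result
### pointer (%rdi) at 48(%rsp) and the input pointer (%rsi) at 64(%rsp),
### both reloaded after the Montgomery passes below; the remaining slots
### presumably back the q0-q15/r0-r7 spill names used by the macros.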
##q(0-127) c2(128-255) r3r(256-383) c1(384-511) r2r(512-639) q0(640)##
/*
vmovdqu (%rsi), A0xmm #M[0] M[1]
vmovdqu 16(%rsi), A1xmm #M[2] M[3]
vmovdqu 32(%rsi), A2xmm #M[4] M[5]
vmovdqu 48(%rsi), A3xmm #M[6] M[7]
vmovdqu 64(%rsi), B0xmm #M[8] M[9]
vmovdqu 80(%rsi), B1xmm #M[10] M[11]
vmovdqu 96(%rsi), B2xmm #M[12] M[13]
vmovdqu 112(%rsi), B3xmm #M[14] M[15]
*/
#movq 640(%rsi), %rax
#movq %rax, n0
### newly added: recover the stored constants via aes_dec ###
vpxorq %ymm1, %ymm1, %ymm1
movq $0x0123456789ABCDEF, %rax
vmovq %rax, %xmm1
valignq $1, %ymm0, %ymm1, %ymm0
movq $0xFEDCBA9876543210, %rax
vmovq %rax, %xmm1
valignq $3, %ymm0, %ymm1, %ymm0
vmovdqu 640(%rsi), %xmm15
aes_dec
vmovq %xmm15,%rax
movq %rax, n0
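### n0 is stored encrypted at 640(%rsi) (see the commented-out plain loads
### above); aes_dec is assumed to decrypt %xmm15 in place under the
### constant built in %ymm0, recovering the Montgomery constant in n0.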
vmovdqu 16(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm16
vmovdqu 32(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm17
vmovdqu 48(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm18
vmovdqu 64(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm19
vmovdqu 80(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm20
vmovdqu 96(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm21
vmovdqu 112(%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, %xmm22
vmovdqu (%rsi), %xmm15
aes_dec
vmovdqu64 %xmm15, A0xmm
vmovdqu64 %xmm16, A1xmm
vmovdqu64 %xmm17, A2xmm
vmovdqu64 %xmm18, A3xmm
vmovdqu64 %xmm19, B0xmm
vmovdqu64 %xmm20, B1xmm
vmovdqu64 %xmm21, B2xmm
vmovdqu64 %xmm22, B3xmm
### rerange q to M ###
vperm2i128 $0x20, B0, A0, M0
vperm2i128 $0x20, B1, A1, M1
vperm2i128 $0x20, B2, A2, M2
vperm2i128 $0x20, B3, A3, M3
vpermq $0xD8, M0, M0 #imm=3120
vpermq $0xD8, M1, M1 #imm=3120
vpermq $0xD8, M2, M2 #imm=3120
vpermq $0xD8, M3, M3 #imm=3120
### load r3r ###
movq 64(%rsp), %rsi
addq $256, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
subq $256, %rsi
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
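### Even/odd split: vshufpd $0x00 keeps the low qword of each 128-bit lane
### (even-indexed limbs -> A*), vshufpd $0x05 moves the high qword down
### (odd-indexed limbs -> B*); the other qword of each lane is zero-filled
### from T3.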
store_A
### load c2 ###
movq 64(%rsp), %rsi
addq $128, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
subq $128, %rsi
vmovdqu64 A0, %ymm18
vmovdqu64 A1, %ymm19
vmovdqu64 A2, %ymm20
vmovdqu64 A3, %ymm21
vmovdqu64 B0, %ymm22
vmovdqu64 B1, %ymm23
vmovdqu64 B2, %ymm24
vmovdqu64 B3, %ymm25
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
## c2 mod q ##
vmovq A0xmm, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
vpextrq $1, A0xmm, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
vmovq A1xmm, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
vpextrq $1, A1xmm, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
vmovq A2xmm, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
vpextrq $1, A2xmm, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
vmovq A3xmm, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
vpextrq $1, A3xmm, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
vmovq B0xmm, rl #R[8]
vpextrq $1, M0xmm, rh
sbbq rh, rl
movq rl, r8
vpextrq $1, B0xmm, rl #R[9]
vpextrq $1, T0xmm, rh
sbbq rh, rl
movq rl, r9
vmovq B1xmm, rl #R[10]
vpextrq $1, M1xmm, rh
sbbq rh, rl
movq rl, r10
vpextrq $1, B1xmm, rl #R[11]
vpextrq $1, T1xmm, rh
sbbq rh, rl
movq rl, r11
vmovq B2xmm, rl #R[12]
vpextrq $1, M2xmm, rh
sbbq rh, rl
movq rl, r12
vpextrq $1, B2xmm, rl #R[13]
vpextrq $1, T2xmm, rh
sbbq rh, rl
movq rl, r13
vmovq B3xmm, rl #R[14]
vpextrq $1, M3xmm, rh
sbbq rh, rl
movq rl, r14
vpextrq $1, B3xmm, rl #R[15]
vpextrq $1, T3xmm, rh
sbbq rh, rl
movq rl, r15
jb 8f
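#### c2 mod q: the sbbq chain above computed R = c2 - M; a final borrow
#### (jb 8f) means c2 < M, so the untouched copy saved in %ymm18-%ymm25
#### is restored at 8:, otherwise the reduced limbs r0-r15 are repacked
#### below.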
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
jmp 9f
8:
vmovdqu64 %ymm18, A0
vmovdqu64 %ymm19, A1
vmovdqu64 %ymm20, A2
vmovdqu64 %ymm21, A3
vmovdqu64 %ymm22, B0
vmovdqu64 %ymm23, B1
vmovdqu64 %ymm24, B2
vmovdqu64 %ymm25, B3
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
vpxor T3, T3, T3
9:
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
##call montmul1024
montmul_1st_movq
montmul_2nd_movq
montmul_3rd_movq
montmul_last_movq
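### The four macros together form one 1024-bit Montgomery product
### A*B*2^-1024 mod M (per their banners), with M held in M0-M3/T0-T3 and
### the result packed in A0-A3/B0-B3, stored to the output buffer below.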
movq 48(%rsp), %rdi
vmovdqu64 A0, (%rdi)
vmovdqu64 B0, 32(%rdi)
vmovdqu64 A1, 64(%rdi)
vmovdqu64 B1, 96(%rdi)
vmovdqu64 A2, 128(%rdi)
vmovdqu64 B2, 160(%rdi)
vmovdqu64 A3, 192(%rdi)
vmovdqu64 B3, 224(%rdi)
### load r2r ###
movq 64(%rsp), %rsi
addq $512, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
subq $512, %rsi
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
store_A
### load c1 ###
movq 64(%rsp), %rsi
addq $384, %rsi
vmovdqu (%rsi), A0
vmovdqu 16(%rsi), A1
vmovdqu 32(%rsi), A2
vmovdqu 48(%rsi), A3
vmovdqu 64(%rsi), B0
vmovdqu 80(%rsi), B1
vmovdqu 96(%rsi), B2
vmovdqu 112(%rsi), B3
subq $384, %rsi
vmovdqu64 A0, %ymm18
vmovdqu64 A1, %ymm19
vmovdqu64 A2, %ymm20
vmovdqu64 A3, %ymm21
vmovdqu64 B0, %ymm22
vmovdqu64 B1, %ymm23
vmovdqu64 B2, %ymm24
vmovdqu64 B3, %ymm25
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
## c1 mod q ##
vmovq A0xmm, rl #R[0]
vmovq M0xmm, rh
subq rh, rl
movq rl, r0
vpextrq $1, A0xmm, rl #R[1]
vmovq T0xmm, rh
sbbq rh, rl
movq rl, r1
vmovq A1xmm, rl #R[2]
vmovq M1xmm, rh
sbbq rh, rl
movq rl, r2
vpextrq $1, A1xmm, rl #R[3]
vmovq T1xmm, rh
sbbq rh, rl
movq rl, r3
vmovq A2xmm, rl #R[4]
vmovq M2xmm, rh
sbbq rh, rl
movq rl, r4
vpextrq $1, A2xmm, rl #R[5]
vmovq T2xmm, rh
sbbq rh, rl
movq rl, r5
vmovq A3xmm, rl #R[6]
vmovq M3xmm, rh
sbbq rh, rl
movq rl, r6
vpextrq $1, A3xmm, rl #R[7]
vmovq T3xmm, rh
sbbq rh, rl
movq rl, r7
vmovq B0xmm, rl #R[8]
vpextrq $1, M0xmm, rh
sbbq rh, rl
movq rl, r8
vpextrq $1, B0xmm, rl #R[9]
vpextrq $1, T0xmm, rh
sbbq rh, rl
movq rl, r9
vmovq B1xmm, rl #R[10]
vpextrq $1, M1xmm, rh
sbbq rh, rl
movq rl, r10
vpextrq $1, B1xmm, rl #R[11]
vpextrq $1, T1xmm, rh
sbbq rh, rl
movq rl, r11
vmovq B2xmm, rl #R[12]
vpextrq $1, M2xmm, rh
sbbq rh, rl
movq rl, r12
vpextrq $1, B2xmm, rl #R[13]
vpextrq $1, T2xmm, rh
sbbq rh, rl
movq rl, r13
vmovq B3xmm, rl #R[14]
vpextrq $1, M3xmm, rh
sbbq rh, rl
movq rl, r14
vpextrq $1, B3xmm, rl #R[15]
vpextrq $1, T3xmm, rh
sbbq rh, rl
movq rl, r15
jb 6f
vpxor T3, T3, T3
vpxor A0, A0, A0
vmovq r8, T3xmm #R[8]
vblendpd $0x1, T3, A0, A0 #imm=0001
vperm2i128 $0x1, A0, A0, A0 #imm=1
movq r0, rl #R[0]
vmovq rl, T3xmm #R[0]
vblendpd $0x1, T3, A0, A0 #imm=0001
vpxor B0, B0, B0
vmovq r9, T3xmm #R[9]
vblendpd $0x1, T3, B0, B0 #imm=0001
vperm2i128 $0x1, B0, B0, B0 #imm=1
movq r1, rl #R[1]
vmovq rl, T3xmm #R[1]
vblendpd $0x1, T3, B0, B0 #imm=0001
vpxor A1, A1, A1
vmovq r10, T3xmm #R[10]
vblendpd $0x1, T3, A1, A1 #imm=0001
vperm2i128 $0x1, A1, A1, A1 #imm=1
movq r2, rl #R[2]
vmovq rl, T3xmm #R[2]
vblendpd $0x1, T3, A1, A1 #imm=0001
vpxor B1, B1, B1
vmovq r11, T3xmm #R[11]
vblendpd $0x1, T3, B1, B1 #imm=0001
vperm2i128 $0x1, B1, B1, B1 #imm=1
movq r3, rl #R[3]
vmovq rl, T3xmm #R[3]
vblendpd $0x1, T3, B1, B1 #imm=0001
vpxor A2, A2, A2
vmovq r12, T3xmm #R[12]
vblendpd $0x1, T3, A2, A2 #imm=0001
vperm2i128 $0x1, A2, A2, A2 #imm=1
movq r4, rl #R[4]
vmovq rl, T3xmm #R[4]
vblendpd $0x1, T3, A2, A2 #imm=0001
vpxor B2, B2, B2
vmovq r13, T3xmm #R[13]
vblendpd $0x1, T3, B2, B2 #imm=0001
vperm2i128 $0x1, B2, B2, B2 #imm=1
movq r5, rl #R[5]
vmovq rl, T3xmm #R[5]
vblendpd $0x1, T3, B2, B2 #imm=0001
vpxor A3, A3, A3
vmovq r14, T3xmm #R[14]
vblendpd $0x1, T3, A3, A3 #imm=0001
vperm2i128 $0x1, A3, A3, A3 #imm=1
movq r6, rl #R[6]
vmovq rl, T3xmm #R[6]
vblendpd $0x1, T3, A3, A3 #imm=0001
vpxor B3, B3, B3
vmovq r15, T3xmm #R[15]
vblendpd $0x1, T3, B3, B3 #imm=0001
vperm2i128 $0x1, B3, B3, B3 #imm=1
movq r7, rl #R[7]
vmovq rl, T3xmm #R[7]
vblendpd $0x1, T3, B3, B3 #imm=0001
vpxor T3, T3, T3
jmp 7f
6:
vmovdqu64 %ymm18, A0
vmovdqu64 %ymm19, A1
vmovdqu64 %ymm20, A2
vmovdqu64 %ymm21, A3
vmovdqu64 %ymm22, B0
vmovdqu64 %ymm23, B1
vmovdqu64 %ymm24, B2
vmovdqu64 %ymm25, B3
vperm2i128 $0x20, B0, A0, A0 #B0 B1 B8 B9
vperm2i128 $0x20, B1, A1, A1
vperm2i128 $0x20, B2, A2, A2
vperm2i128 $0x20, B3, A3, A3
vpxor T3, T3, T3
vshufpd $0x05, T3, A0, B0 #imm=0101
vshufpd $0x00, T3, A0, A0 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A1, B1 #imm=0101
vshufpd $0x00, T3, A1, A1 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A2, B2 #imm=0101
vshufpd $0x00, T3, A2, A2 #imm=0000
vpxor T3, T3, T3
vshufpd $0x05, T3, A3, B3 #imm=0101
vshufpd $0x00, T3, A3, A3 #imm=0000
vpxor T3, T3, T3
7:
#### restore B ####
restore_B
#### prepare M ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
##call montmul1024
montmul_1st_movq
montmul_2nd_movq
montmul_3rd_movq
montmul_last_movq
store_A
movq 48(%rsp), %rdi
vmovdqu64 (%rdi), A0
vmovdqu64 32(%rdi), B0
vmovdqu64 64(%rdi), A1
vmovdqu64 96(%rdi), B1
vmovdqu64 128(%rdi), A2
vmovdqu64 160(%rdi), B2
vmovdqu64 192(%rdi), A3
vmovdqu64 224(%rdi), B3
restore_B
### add & mod q ###
xorq bi, bi
vpextrq $0, A0xmm, rh
vpextrq $1, A0xmm, rl
addq rl, rh
movq rh, r0 #R[0]
vpextrq $0, B0xmm, rh
vpextrq $1, B0xmm, rl
adcq rl, rh
movq rh, r1 #R[1]
vpextrq $0, A1xmm, rh
vpextrq $1, A1xmm, rl
adcq rl, rh
movq rh, r2 #R[2]
vpextrq $0, B1xmm, rh
vpextrq $1, B1xmm, rl
adcq rl, rh
movq rh, r3 #R[3]
vpextrq $0, A2xmm, rh
vpextrq $1, A2xmm, rl
adcq rl, rh
movq rh, r4 #R[4]
vpextrq $0, B2xmm, rh
vpextrq $1, B2xmm, rl
adcq rl, rh
movq rh, r5 #R[5]
vpextrq $0, A3xmm, rh
vpextrq $1, A3xmm, rl
adcq rl, rh
movq rh, r6 #R[6]
vpextrq $0, B3xmm, rh
vpextrq $1, B3xmm, rl
adcq rl, rh
movq rh, r7 #R[7]
vperm2i128 $0x1, A0, A0, A0
vperm2i128 $0x1, A1, A1, A1
vperm2i128 $0x1, A2, A2, A2
vperm2i128 $0x1, A3, A3, A3
vperm2i128 $0x1, B0, B0, B0
vperm2i128 $0x1, B1, B1, B1
vperm2i128 $0x1, B2, B2, B2
vperm2i128 $0x1, B3, B3, B3
vpextrq $0, A0xmm, rh
vpextrq $1, A0xmm, rl
adcq rl, rh
movq rh, r8 #R[8]
vpextrq $0, B0xmm, rh
vpextrq $1, B0xmm, rl
adcq rl, rh
movq rh, r9 #R[9]
vpextrq $0, A1xmm, rh
vpextrq $1, A1xmm, rl
adcq rl, rh
movq rh, r10 #R[10]
vpextrq $0, B1xmm, rh
vpextrq $1, B1xmm, rl
adcq rl, rh
movq rh, r11 #R[11]
vpextrq $0, A2xmm, rh
vpextrq $1, A2xmm, rl
adcq rl, rh
movq rh, r12 #R[12]
vpextrq $0, B2xmm, rh
vpextrq $1, B2xmm, rl
adcq rl, rh
movq rh, r13 #R[13]
vpextrq $0, A3xmm, rh
vpextrq $1, A3xmm, rl
adcq rl, rh
movq rh, r14 #R[14]
vpextrq $0, B3xmm, rh
vpextrq $1, B3xmm, rl
adcq rl, rh
movq rh, r15 #R[15]
adcq $0, bi
movq r12, %rbx
movq 48(%rsp), %rdi
movq r0, rl
movq rl, (%rdi)
movq r1, rl
movq rl, 8(%rdi)
movq r2, rl
movq rl, 16(%rdi)
movq r3, rl
movq rl, 24(%rdi)
movq r4, rl
movq rl, 32(%rdi)
movq r5, rl
movq rl, 40(%rdi)
movq r6, rl
movq rl, 48(%rdi)
movq r7, rl
movq rl, 56(%rdi)
movq r8, 64(%rdi)
movq r9, 72(%rdi)
movq r10, 80(%rdi)
movq r11, 88(%rdi)
movq %rbx, 96(%rdi)
movq r13, 104(%rdi)
movq r14, 112(%rdi)
movq r15, 120(%rdi)
#### mod q ####
vperm2i128 $0x21, T0, M0, T0
vperm2i128 $0x21, T1, M1, T1
vperm2i128 $0x21, T2, M2, T2
vperm2i128 $0x21, T3, M3, T3
movq %rbx, r12
vpextrq $0, M0xmm, rl
movq r0, rh
subq rl, rh
movq rh, r0
vpextrq $0, T0xmm, rl
movq r1, rh
sbbq rl, rh
movq rh, r1
vpextrq $0, M1xmm, rl
movq r2, rh
sbbq rl, rh
movq rh, r2
vpextrq $0, T1xmm, rl
movq r3, rh
sbbq rl, rh
movq rh, r3
vpextrq $0, M2xmm, rl
movq r4, rh
sbbq rl, rh
movq rh, r4
vpextrq $0, T2xmm, rl
movq r5, rh
sbbq rl, rh
movq rh, r5
vpextrq $0, M3xmm, rl
movq r6, rh
sbbq rl, rh
movq rh, r6
vpextrq $0, T3xmm, rl
movq r7, rh
sbbq rl, rh
movq rh, r7
vpextrq $1, M0xmm, rl
sbbq rl, r8
vpextrq $1, T0xmm, rl
sbbq rl, r9
vpextrq $1, M1xmm, rl
sbbq rl, r10
vpextrq $1, T1xmm, rl
sbbq rl, r11
vpextrq $1, M2xmm, rl
sbbq rl, r12
vpextrq $1, T2xmm, rl
sbbq rl, r13
vpextrq $1, M3xmm, rl
sbbq rl, r14
vpextrq $1, T3xmm, rl
sbbq rl, r15
sbbq $0, bi
jb 5f
movq r12, %rbx
movq 48(%rsp), %rdi
movq r0, rl
movq rl, (%rdi)
movq r1, rl
movq rl, 8(%rdi)
movq r2, rl
movq rl, 16(%rdi)
movq r3, rl
movq rl, 24(%rdi)
movq r4, rl
movq rl, 32(%rdi)
movq r5, rl
movq rl, 40(%rdi)
movq r6, rl
movq rl, 48(%rdi)
movq r7, rl
movq rl, 56(%rdi)
movq r8, 64(%rdi)
movq r9, 72(%rdi)
movq r10, 80(%rdi)
movq r11, 88(%rdi)
movq %rbx, 96(%rdi)
movq r13, 104(%rdi)
movq r14, 112(%rdi)
movq r15, 120(%rdi)
jmp 2f
5:
2:
movq (%rsp), %rbx
movq 8(%rsp), %rbp
movq 16(%rsp), %r12
movq 24(%rsp), %r13
movq 32(%rsp), %r14
movq 40(%rsp), %r15
movq 48(%rsp), %rdi
movq 64(%rsp), %rsi
addq $512, %rsp
/*
vmovdqu64 A0, (%rdi)
vmovdqu64 B0, 32(%rdi)
vmovdqu64 A1, 64(%rdi)
vmovdqu64 B1, 96(%rdi)
vmovdqu64 A2, 128(%rdi)
vmovdqu64 B2, 160(%rdi)
vmovdqu64 A3, 192(%rdi)
vmovdqu64 B3, 224(%rdi)
*/
ret
.size Comcq, .-Comcq
|
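The routine above closes with the add-and-conditionally-subtract pattern that recurs throughout these files: the limbs are summed through an adcq carry chain, the modulus is subtracted through an sbbq borrow chain, and the jb branch keeps whichever result did not underflow. A minimal C sketch of the same selection logic, shown for 4 limbs instead of the 16 the assembly handles; the function and parameter names are illustrative, not part of this repository.

#include <stdint.h>

/* r = (a + b) mod N via add-then-conditionally-subtract, 4 x 64-bit limbs. */
static void add_mod(uint64_t r[4], const uint64_t a[4],
                    const uint64_t b[4], const uint64_t N[4]) {
    uint64_t t[4], s[4];
    unsigned carry = 0, borrow = 0;
    for (int i = 0; i < 4; i++) {               /* t = a + b (adcq chain) */
        __uint128_t w = (__uint128_t)a[i] + b[i] + carry;
        t[i] = (uint64_t)w;
        carry = (unsigned)(w >> 64);
    }
    for (int i = 0; i < 4; i++) {               /* s = t - N (sbbq chain) */
        __uint128_t w = (__uint128_t)t[i] - N[i] - borrow;
        s[i] = (uint64_t)w;
        borrow = (unsigned)((w >> 64) & 1);
    }
    /* The full sum is t + carry*2^256; it lies below N exactly when the
       subtraction borrows past the carry-out (the jb case in the assembly). */
    int keep_sum = borrow > carry;
    for (int i = 0; i < 4; i++)
        r[i] = keep_sum ? t[i] : s[i];
}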
LoCryptEn/Key-security | 3,064 | Register-bound/ECCIn_Register/User/aes.S | .file "aes.S"
.text
// Register assignments
.set rstate, %xmm0 // AES state (message)
.set rk0, %xmm1 // Round key 0 (initial key)
.set rk1, %xmm2 // Round key 1
.set rk2, %xmm3 // Round key 2
.set rk3, %xmm4 // Round key 3
.set rk4, %xmm5 // Round key 4
.set rk5, %xmm6 // Round key 5
.set rk6, %xmm7 // Round key 6
.set rk7, %xmm8 // Round key 7
.set rk8, %xmm9 // Round key 8
.set rk9, %xmm10 // Round key 9
.set rk10, %xmm11 // Round key 10
.set rhelp, %xmm12 // Helping register for key schedule
// Key schedule macro (reusable)
.macro key_schedule r0, r1, rcon
pxor rhelp, rhelp
movdqu \r0, \r1
shufps $0x1f, \r1, rhelp
pxor rhelp, \r1
shufps $0x8c, \r1, rhelp
pxor rhelp, \r1
aeskeygenassist $\rcon, \r0, rhelp
shufps $0xff, rhelp, rhelp
pxor rhelp, \r1
.endm
// AES_ENC function: Encrypt the message using the provided key
// Arguments:
// - key (first argument): passed in %xmm1
// - message (second argument): passed in %xmm0
// Output:
// - encrypted message in %xmm0
.globl AES_ENC
AES_ENC:
// Generate the round keys using the key_schedule macro
key_schedule rk0, rk1, 0x1
key_schedule rk1, rk2, 0x2
key_schedule rk2, rk3, 0x4
key_schedule rk3, rk4, 0x8
key_schedule rk4, rk5, 0x10
key_schedule rk5, rk6, 0x20
key_schedule rk6, rk7, 0x40
key_schedule rk7, rk8, 0x80
key_schedule rk8, rk9, 0x1b
key_schedule rk9, rk10, 0x36
// Encryption rounds
pxor rk0, rstate // Initial XOR with first round key
aesenc rk1, rstate // Encryption rounds
aesenc rk2, rstate
aesenc rk3, rstate
aesenc rk4, rstate
aesenc rk5, rstate
aesenc rk6, rstate
aesenc rk7, rstate
aesenc rk8, rstate
aesenc rk9, rstate
aesenclast rk10, rstate // Last round of AES
// Return encrypted message in %xmm0 (rstate)
ret
.global AES_DEC
AES_DEC:
// Generate the round keys using the key_schedule macro
key_schedule rk0, rk1, 0x1
key_schedule rk1, rk2, 0x2
key_schedule rk2, rk3, 0x4
key_schedule rk3, rk4, 0x8
key_schedule rk4, rk5, 0x10
key_schedule rk5, rk6, 0x20
key_schedule rk6, rk7, 0x40
key_schedule rk7, rk8, 0x80
key_schedule rk8, rk9, 0x1b
key_schedule rk9, rk10, 0x36
// Apply aesimc to the round keys for the equivalent inverse cipher
aesimc rk1, rk1
aesimc rk2, rk2
aesimc rk3, rk3
aesimc rk4, rk4
aesimc rk5, rk5
aesimc rk6, rk6
aesimc rk7, rk7
aesimc rk8, rk8
aesimc rk9, rk9
// Initial XOR operation
pxor rk10, rstate // Initial XOR with last round key
// Decryption rounds
aesdec rk9, rstate
aesdec rk8, rstate
aesdec rk7, rstate
aesdec rk6, rstate
aesdec rk5, rstate
aesdec rk4, rstate
aesdec rk3, rstate
aesdec rk2, rstate
aesdec rk1, rstate
// Final decryption round
aesdeclast rk0, rstate
// Return decrypted message in %xmm0 (rstate)
ret
|
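The key_schedule macro in the file above is the shufps/pxor formulation of standard AES-128 key expansion: aeskeygenassist places SubWord(RotWord(w3)) ^ rcon in its top 32-bit lane, the shufps $0xff broadcast spreads it to all four lanes, and the two shufps/pxor pairs fold each previous word into the next. A rough C equivalent using AES-NI intrinsics (compile with -maes; the function names here are illustrative, not part of this repository):

#include <immintrin.h>

/* One expansion step: fold the previous round key, then mix in the
   broadcast aeskeygenassist word, matching the key_schedule macro. */
static __m128i expand_step(__m128i key, __m128i assist) {
    assist = _mm_shuffle_epi32(assist, 0xff);          /* broadcast lane 3 */
    key = _mm_xor_si128(key, _mm_slli_si128(key, 4));  /* applying this fold  */
    key = _mm_xor_si128(key, _mm_slli_si128(key, 4));  /* three times yields  */
    key = _mm_xor_si128(key, _mm_slli_si128(key, 4));  /* w ^ w<<32 ^ w<<64 ^ w<<96 */
    return _mm_xor_si128(key, assist);
}

#define EXPAND(i, rcon) \
    rk[i] = expand_step(rk[i - 1], _mm_aeskeygenassist_si128(rk[i - 1], rcon))

/* AES-128 encryption of one block, mirroring AES_ENC. */
static __m128i aes128_encrypt(__m128i block, __m128i key) {
    __m128i rk[11];
    rk[0] = key;
    EXPAND(1, 0x01); EXPAND(2, 0x02); EXPAND(3, 0x04); EXPAND(4, 0x08);
    EXPAND(5, 0x10); EXPAND(6, 0x20); EXPAND(7, 0x40); EXPAND(8, 0x80);
    EXPAND(9, 0x1b); EXPAND(10, 0x36);
    block = _mm_xor_si128(block, rk[0]);        /* initial AddRoundKey */
    for (int i = 1; i < 10; i++)
        block = _mm_aesenc_si128(block, rk[i]); /* nine full rounds */
    return _mm_aesenclast_si128(block, rk[10]); /* final round */
}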
LoCryptEn/Key-security | 5,552 | Register-bound/ECCIn_Register/Kernel/montmul_n256_xmm.S | // Montmul n256
// n256: 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
// Input: x(256 bits in [xmm28 | xmm29]), y(256 bits in [xmm30 | xmm31])
// Output: x * y * R^-1 store in [xmm26 | xmm27]
// Macro "mulpadd i x" adds %rdx * x to the (i,i+1) position of
// the rotating register window %r15,...,%r8 maintaining consistent
// double-carrying using ADCX and ADOX and using %rbx/%rax as temps
.macro mulpadd i, x
mulx \x, %rax, %rbx
.if (\i % 8) == 0
adcx %rax, %r8
adox %rbx, %r9
.elseif (\i % 8) == 1
adcx %rax, %r9
adox %rbx, %r10
.elseif (\i % 8) == 2
adcx %rax, %r10
adox %rbx, %r11
.elseif (\i % 8) == 3
adcx %rax, %r11
adox %rbx, %r12
.elseif (\i % 8) == 4
adcx %rax, %r12
adox %rbx, %r13
.elseif (\i % 8) == 5
adcx %rax, %r13
adox %rbx, %r14
.elseif (\i % 8) == 6
adcx %rax, %r14
adox %rbx, %r15
.elseif (\i % 8) == 7
adcx %rax, %r15
adox %rbx, %r8
.endif
.endm
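One mulpadd invocation adds the 128-bit product %rdx * x into two adjacent limbs of the window: ADCX propagates its carry only through CF and ADOX only through OF, so the low-half and high-half accumulations run as two independent carry chains that never serialize on a single flags bit. A C model of a single step follows as a sketch, with explicit variables standing in for CF and OF (names are illustrative):

#include <stdint.h>

/* Model of one mulpadd step (a sketch, not the implementation above). */
static void mulpadd_model(uint64_t acc[], int i, uint64_t rdx, uint64_t x,
                          unsigned *cf, unsigned *of) {
    __uint128_t p = (__uint128_t)rdx * x;       /* mulx: full 128-bit product */
    uint64_t lo = (uint64_t)p, hi = (uint64_t)(p >> 64);

    __uint128_t t = (__uint128_t)acc[i] + lo + *cf;      /* adcx into limb i   */
    acc[i] = (uint64_t)t;
    *cf = (unsigned)(t >> 64);

    t = (__uint128_t)acc[i + 1] + hi + *of;              /* adox into limb i+1 */
    acc[i + 1] = (uint64_t)t;
    *of = (unsigned)(t >> 64);
}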
.global bignum_montmul_n256
.section .text
bignum_montmul_n256:
// CIOS round 0
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
vmovq %xmm31, %rdx
vmovq %xmm29, %rax
mulx %rax, %r8, %r9
vpextrq $1, %xmm29, %rax
mulx %rax, %rbx, %r10
adcx %rbx, %r9
vmovq %xmm28, %rax
mulx %rax, %rbx, %r11
adcx %rbx, %r10
vpextrq $1, %xmm28, %rax
mulx %rax, %rbx, %r12
adcx %rbx, %r11
adcx %r13, %r12
movq $0xccd1c8aaee00bc4f, %rdx
mulx %r8, %rdx, %rbx
movq $0xf3b9cac2fc632551, %rax
mulpadd 0, %rax
movq $0xbce6faada7179e84, %rax
mulpadd 1, %rax
movq $0xffffffffffffffff, %rax
mulpadd 2, %rax
movq $0xffffffff00000000, %rax
mulpadd 3, %rax
adcx %r13, %r12
adox %r14, %r13
// CIOS round 1
vpextrq $1, %xmm31, %rdx
xorq %r8, %r8
vmovq %xmm29, %rax
mulpadd 1, %rax
vpextrq $1, %xmm29, %rax
mulpadd 2, %rax
vmovq %xmm28, %rax
mulpadd 3, %rax
vpextrq $1, %xmm28, %rax
mulpadd 4, %rax
adcx %r14, %r13
adox %r15, %r14
movq $0xccd1c8aaee00bc4f, %rdx
mulx %r9, %rdx, %rbx
movq $0xf3b9cac2fc632551, %rax
mulpadd 1, %rax
movq $0xbce6faada7179e84, %rax
mulpadd 2, %rax
movq $0xffffffffffffffff, %rax
mulpadd 3, %rax
movq $0xffffffff00000000, %rax
mulpadd 4, %rax
adcx %r14, %r13
adox %r15, %r14
// CIOS round 2
vmovq %xmm30, %rdx
xorq %r8, %r8
vmovq %xmm29, %rax
mulpadd 2, %rax
vpextrq $1, %xmm29, %rax
mulpadd 3, %rax
vmovq %xmm28, %rax
mulpadd 4, %rax
vpextrq $1, %xmm28, %rax
mulpadd 5, %rax
adcx %r15, %r14
adox %r8, %r15
movq $0xccd1c8aaee00bc4f, %rdx
mulx %r10, %rdx, %rbx
movq $0xf3b9cac2fc632551, %rax
mulpadd 2, %rax
movq $0xbce6faada7179e84, %rax
mulpadd 3, %rax
movq $0xffffffffffffffff, %rax
mulpadd 4, %rax
movq $0xffffffff00000000, %rax
mulpadd 5, %rax
adcx %r15, %r14
adox %r8, %r15
// Add row 3
vpextrq $1, %xmm30, %rdx
xorq %r8, %r8
vmovq %xmm29, %rax
mulpadd 3, %rax
vpextrq $1, %xmm29, %rax
mulpadd 4, %rax
vmovq %xmm28, %rax
mulpadd 5, %rax
vpextrq $1, %xmm28, %rax
mulpadd 6, %rax
adcx %r8, %r15
adox %r9, %r8
movq $0xccd1c8aaee00bc4f, %rdx
mulx %r11, %rdx, %rbx
movq $0xf3b9cac2fc632551, %rax
mulpadd 3, %rax
movq $0xbce6faada7179e84, %rax
mulpadd 4, %rax
movq $0xffffffffffffffff, %rax
mulpadd 5, %rax
movq $0xffffffff00000000, %rax
mulpadd 6, %rax
adcx %r8, %r15
adox %r9, %r8
// compare and sub
xorq %rcx, %rcx
xorq %rdx, %rdx
xorq %rax, %rax
xorq %r9, %r9
xorq %r10, %r10
movq $0x0c46353d039cdaaf, %rcx
addq %r12, %rcx
movq $0x4319055258e8617b, %rdx
adcq %r13, %rdx
adcq %r14, %rax
movq $0x00000000ffffffff, %r9
adcq %r15, %r9
decq %r10
adcq %r8, %r10
cmovc %rcx, %r12
cmovc %rdx, %r13
cmovc %rax, %r14
cmovc %r9, %r15
vpinsrq $1, %r15, %xmm26, %xmm26
vpinsrq $0, %r14, %xmm26, %xmm26
vpinsrq $1, %r13, %xmm27, %xmm27
vpinsrq $0, %r12, %xmm27, %xmm27
ret
|
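Each CIOS reduction round above multiplies the bottom limb of the running sum by the constant 0xccd1c8aaee00bc4f, which is -n^{-1} mod 2^64 for the P-256 group order n (lowest limb 0xf3b9cac2fc632551). A short C check that derives the constant by Newton iteration, assuming the usual wraparound semantics of unsigned 64-bit arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Compute -n^{-1} mod 2^64 from the lowest limb of an odd modulus n. */
static uint64_t mont_n0(uint64_t n_low) {
    uint64_t x = n_low;              /* an odd n is its own inverse mod 8 */
    for (int i = 0; i < 5; i++)
        x *= 2 - n_low * x;          /* Newton step mod 2^64 */
    return 0 - x;                    /* negate mod 2^64 */
}

int main(void) {
    uint64_t n_low = 0xf3b9cac2fc632551ULL;   /* low limb of the P-256 order */
    printf("%016llx\n", (unsigned long long)mont_n0(n_low));
    /* prints ccd1c8aaee00bc4f, the constant used in the rounds above */
    return 0;
}

Five iterations suffice because each Newton step doubles the number of correct low bits: 3, 6, 12, 24, 48, 96 >= 64.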
LoCryptEn/Key-security | 3,064 | Register-bound/ECCIn_Register/Kernel/aes.S | .file "aes.S"
.text
// Register assignments
.set rstate, %xmm0 // AES state (message)
.set rk0, %xmm1 // Round key 0 (initial key)
.set rk1, %xmm2 // Round key 1
.set rk2, %xmm3 // Round key 2
.set rk3, %xmm4 // Round key 3
.set rk4, %xmm5 // Round key 4
.set rk5, %xmm6 // Round key 5
.set rk6, %xmm7 // Round key 6
.set rk7, %xmm8 // Round key 7
.set rk8, %xmm9 // Round key 8
.set rk9, %xmm10 // Round key 9
.set rk10, %xmm11 // Round key 10
.set rhelp, %xmm12 // Helping register for key schedule
// Key schedule macro (reusable)
.macro key_schedule r0, r1, rcon
pxor rhelp, rhelp
movdqu \r0, \r1
shufps $0x1f, \r1, rhelp
pxor rhelp, \r1
shufps $0x8c, \r1, rhelp
pxor rhelp, \r1
aeskeygenassist $\rcon, \r0, rhelp
shufps $0xff, rhelp, rhelp
pxor rhelp, \r1
.endm
// AES_ENC function: Encrypt the message using the provided key
// Arguments:
// - key (first argument): passed in %xmm1
// - message (second argument): passed in %xmm0
// Output:
// - encrypted message in %xmm0
.globl AES_ENC
AES_ENC:
// Generate the round keys using the key_schedule macro
key_schedule rk0, rk1, 0x1
key_schedule rk1, rk2, 0x2
key_schedule rk2, rk3, 0x4
key_schedule rk3, rk4, 0x8
key_schedule rk4, rk5, 0x10
key_schedule rk5, rk6, 0x20
key_schedule rk6, rk7, 0x40
key_schedule rk7, rk8, 0x80
key_schedule rk8, rk9, 0x1b
key_schedule rk9, rk10, 0x36
// Encryption rounds
pxor rk0, rstate // Initial XOR with first round key
aesenc rk1, rstate // Encryption rounds
aesenc rk2, rstate
aesenc rk3, rstate
aesenc rk4, rstate
aesenc rk5, rstate
aesenc rk6, rstate
aesenc rk7, rstate
aesenc rk8, rstate
aesenc rk9, rstate
aesenclast rk10, rstate // Last round of AES
// Return encrypted message in %xmm0 (rstate)
ret
.global AES_DEC
AES_DEC:
// Generate the round keys using the key_schedule macro
key_schedule rk0, rk1, 0x1
key_schedule rk1, rk2, 0x2
key_schedule rk2, rk3, 0x4
key_schedule rk3, rk4, 0x8
key_schedule rk4, rk5, 0x10
key_schedule rk5, rk6, 0x20
key_schedule rk6, rk7, 0x40
key_schedule rk7, rk8, 0x80
key_schedule rk8, rk9, 0x1b
key_schedule rk9, rk10, 0x36
// Apply aesimc to the round keys for the equivalent inverse cipher
aesimc rk1, rk1
aesimc rk2, rk2
aesimc rk3, rk3
aesimc rk4, rk4
aesimc rk5, rk5
aesimc rk6, rk6
aesimc rk7, rk7
aesimc rk8, rk8
aesimc rk9, rk9
// Initial XOR operation
pxor rk10, rstate // Initial XOR with last round key
// Decryption rounds
aesdec rk9, rstate
aesdec rk8, rstate
aesdec rk7, rstate
aesdec rk6, rstate
aesdec rk5, rstate
aesdec rk4, rstate
aesdec rk3, rstate
aesdec rk2, rstate
aesdec rk1, rstate
// Final decryption round
aesdeclast rk0, rstate
// Return decrypted message in %xmm0 (rstate)
ret
|
LoCryptEn/Key-security | 5,146 | Register-bound/ECCIn_Register/Kernel/SecSig.S | .file "SecSig.S"
.section .data
.align 16
SHUFFLE_MASK:
.quad 0x08090a0b0c0d0e0f
.quad 0x0001020304050607
.section .text
// [r8 | r9 | r10 | r11] + [r12 | r13 | r14 | r15] mod n256 & save to [xmm30 | xmm31]
.macro addmod
add %r11, %r15
adc %r10, %r14
adc %r9, %r13
adc %r8, %r12
adc $0, %rax
movq %r12, %r8
movq %r13, %r9
movq %r14, %r10
movq %r15, %r11
// c = (a + b) mod n & save to [r12 | r13 | r14 | r15]
movq $0xf3b9cac2fc632551, %rbx
subq %rbx, %r15
movq $0xbce6faada7179e84, %rbx
sbbq %rbx, %r14
movq $0xffffffffffffffff, %rbx
sbbq %rbx, %r13
movq $0xffffffff00000000, %rbx
sbbq %rbx, %r12
sbbq $0, %rax
jb 1f
jmp 2f
1:
movq %r8, %r12
movq %r9, %r13
movq %r10, %r14
movq %r11, %r15
2:
vpinsrq $1, %r12, %xmm30, %xmm30
vpinsrq $0, %r13, %xmm30, %xmm30
vpinsrq $1, %r14, %xmm31, %xmm31
vpinsrq $0, %r15, %xmm31, %xmm31
.endm
.global dosecsig
.type dosecsig, @function
dosecsig:
subq $128, %rsp
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %r14, 24(%rsp)
movq %r15, 32(%rsp)
movq %rax, 40(%rsp)
movq %rbx, 48(%rsp)
movq %r8, 56(%rsp)
movq %r9, 64(%rsp)
movq %r10, 72(%rsp)
movq %r11, 80(%rsp)
movq %r12, 88(%rsp)
movq %r13, 96(%rsp)
movq %rcx, 104(%rsp)
movq %dr0, %rax
movq %dr1, %rbx
bswap %rax
bswap %rbx
vpinsrq $0, %rax, %xmm1, %xmm1
vpinsrq $1, %rbx, %xmm1, %xmm1
vmovdqu SHUFFLE_MASK(%rip), %xmm15 # Load the byte-swap shuffle mask into xmm15
xorq %rax, %rax
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
// Dec k1
vmovdqa64 (%rdi), %xmm0
call AES_DEC
vpshufb %xmm15, %xmm0, %xmm0
pextrq $1, %xmm0, %r8
vmovq %xmm0, %r9
vmovdqa64 16(%rdi), %xmm0
call AES_DEC
vpshufb %xmm15, %xmm0, %xmm0
pextrq $1, %xmm0, %r10
vmovq %xmm0, %r11
// read k2
movq 32(%rdi), %r15
movq 40(%rdi), %r14
movq 48(%rdi), %r13
movq 56(%rdi), %r12
addmod
// save k to [xmm30 | xmm31]
// Dec a
vmovdqa64 64(%rdi), %xmm0
call AES_DEC
vpshufb %xmm15, %xmm0, %xmm0
vmovdqa64 %xmm0, %xmm28
vmovdqa64 80(%rdi), %xmm0
call AES_DEC
vpshufb %xmm15, %xmm0, %xmm0
vmovdqa64 %xmm0, %xmm29
// Copy a to [xmm24 | xmm25]
vmovdqa64 %xmm28, %xmm24
vmovdqa64 %xmm29, %xmm25
call bignum_montmul_n256
// store a * k * R^{-1} to [xmm13 | xmm14]
vmovdqa64 %xmm26, %xmm13
vmovdqa64 %xmm27, %xmm14
// Dec d
vmovdqa64 96(%rdi), %xmm0
call AES_DEC
vpshufb %xmm15, %xmm0, %xmm0
vmovdqa64 %xmm0, %xmm28
vmovdqa64 112(%rdi), %xmm0
call AES_DEC
vpshufb %xmm15, %xmm0, %xmm0
vmovdqa64 %xmm0, %xmm29
// Load r * R^{-1}
vmovdqa64 128(%rdi), %xmm30
vpshufb %xmm15, %xmm30, %xmm30
vmovdqa64 144(%rdi), %xmm31
vpshufb %xmm15, %xmm31, %xmm31
call bignum_montmul_n256
// Done r * d
xorq %rax, %rax
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
// Load d * r to [r8 | r9 | r10 | r11]
vpextrq $1, %xmm26, %r8
vmovq %xmm26, %r9
vpextrq $1, %xmm27, %r10
vmovq %xmm27, %r11
// Load H(m) to [r12 | r13 | r14 | r15]
movq 160(%rdi), %r12
movq 168(%rdi), %r13
movq 176(%rdi), %r14
movq 184(%rdi), %r15
bswap %r12
bswap %r13
bswap %r14
bswap %r15
addmod
vmovdqa64 %xmm24, %xmm28
vmovdqa64 %xmm25, %xmm29
call bignum_montmul_n256
vmovdqa64 %xmm14, (%rsi)
vmovdqa64 %xmm13, 16(%rsi)
vmovdqa64 %xmm27, 32(%rsi)
vmovdqa64 %xmm26, 48(%rsi)
movq (%rsp), %rdi
movq 8(%rsp), %rsi
movq 16(%rsp), %rdx
movq 24(%rsp), %r14
movq 32(%rsp), %r15
movq 40(%rsp), %rax
movq 48(%rsp), %rbx
movq 56(%rsp), %r8
movq 64(%rsp), %r9
movq 72(%rsp), %r10
movq 80(%rsp), %r11
movq 88(%rsp), %r12
movq 96(%rsp), %r13
movq 104(%rsp), %rcx
addq $128, %rsp
ret
|
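Tracing the register flow in dosecsig above: the nonce k is rebuilt from two additive shares (k = k1 + k2 mod n), a multiplicative mask a is folded into both k and H(m) + r*d via bignum_montmul_n256, and the routine writes out the blinded Montgomery pair a*k*R^-1 and a*(H(m) + r*d)*R^-1. That is a reading of the code rather than anything the source states, but it is consistent with the ECDSA signing equation, in which the mask cancels when the caller forms the quotient:

$$ s \;\equiv\; k^{-1}\,\bigl(H(m) + r\,d\bigr) \;\equiv\; \frac{a\,\bigl(H(m) + r\,d\bigr)}{a\,k} \pmod{n}, \qquad k = k_1 + k_2 \bmod n. $$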
lucadentella/esp32-tutorial | 10,220 | 18_u8g2/components/u8g2/sys/arm/stm32l031x6/stm32l0xx/src/startup_stm32l031xx.s | /*
Source: en.stm32cubel0.zip
STM32Cube_FW_L0_V1.8.0/Drivers/CMSIS/Device/ST/STM32L0xx/Source/Templates/gcc
*/
/**
******************************************************************************
* @file startup_stm32l031xx.s
* @author MCD Application Team
* @version V1.7.1
* @date 25-November-2016
* @brief STM32L031xx Devices vector table for gcc.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0+ processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0plus
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word 0 /* Reserved */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word LPTIM1_IRQHandler /* LPTIM1 */
.word 0 /* Reserved */
.word TIM2_IRQHandler /* TIM2 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word TIM21_IRQHandler /* TIM21 */
.word 0 /* Reserved */
.word TIM22_IRQHandler /* TIM22 */
.word I2C1_IRQHandler /* I2C1 */
.word 0 /* Reserved */
.word SPI1_IRQHandler /* SPI1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word USART2_IRQHandler /* USART2 */
.word LPUART1_IRQHandler /* LPUART1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM21_IRQHandler
.thumb_set TIM21_IRQHandler,Default_Handler
.weak TIM22_IRQHandler
.thumb_set TIM22_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
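In C terms, the Reset_Handler in the startup file above performs roughly the following sequence (the linker symbols are the ones declared in the file; the TIM2 override at the end is hypothetical application code illustrating the weak-alias mechanism):

#include <stdint.h>
#include <string.h>

extern uint32_t _sidata, _sdata, _edata, _sbss, _ebss;  /* from the linker script */
extern void SystemInit(void);
extern void __libc_init_array(void);
extern int main(void);

void reset_handler_equivalent(void) {
    /* copy the .data initializers from flash to SRAM */
    memcpy(&_sdata, &_sidata, (size_t)((char *)&_edata - (char *)&_sdata));
    /* zero-fill the .bss segment */
    memset(&_sbss, 0, (size_t)((char *)&_ebss - (char *)&_sbss));
    SystemInit();            /* clock system initialization */
    __libc_init_array();     /* C static constructors */
    main();
    for (;;) ;               /* LoopForever */
}

/* Every IRQ entry is a weak alias to Default_Handler, so defining a
   function with the matching name replaces it at link time: */
void TIM2_IRQHandler(void) {
    /* application-specific TIM2 handling goes here */
}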
lucadentella/esp32-tutorial | 10,220 | 22_deep_sleep/components/u8g2/sys/arm/stm32l031x6/stm32l0xx/src/startup_stm32l031xx.s | /*
Source: en.stm32cubel0.zip
STM32Cube_FW_L0_V1.8.0/Drivers/CMSIS/Device/ST/STM32L0xx/Source/Templates/gcc
*/
/**
******************************************************************************
* @file startup_stm32l031xx.s
* @author MCD Application Team
* @version V1.7.1
* @date 25-November-2016
* @brief STM32L031xx Devices vector table for gcc.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M0+ processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m0plus
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1]
str r3, [r0, r1]
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M0. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word 0
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word RTC_IRQHandler /* RTC through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_1_IRQHandler /* EXTI Line 0 and 1 */
.word EXTI2_3_IRQHandler /* EXTI Line 2 and 3 */
.word EXTI4_15_IRQHandler /* EXTI Line 4 to 15 */
.word 0 /* Reserved */
.word DMA1_Channel1_IRQHandler /* DMA1 Channel 1 */
.word DMA1_Channel2_3_IRQHandler /* DMA1 Channel 2 and Channel 3 */
.word DMA1_Channel4_5_6_7_IRQHandler /* DMA1 Channel 4, Channel 5, Channel 6 and Channel 7*/
.word ADC1_COMP_IRQHandler /* ADC1, COMP1 and COMP2 */
.word LPTIM1_IRQHandler /* LPTIM1 */
.word 0 /* Reserved */
.word TIM2_IRQHandler /* TIM2 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word TIM21_IRQHandler /* TIM21 */
.word 0 /* Reserved */
.word TIM22_IRQHandler /* TIM22 */
.word I2C1_IRQHandler /* I2C1 */
.word 0 /* Reserved */
.word SPI1_IRQHandler /* SPI1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word USART2_IRQHandler /* USART2 */
.word LPUART1_IRQHandler /* LPUART1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_1_IRQHandler
.thumb_set EXTI0_1_IRQHandler,Default_Handler
.weak EXTI2_3_IRQHandler
.thumb_set EXTI2_3_IRQHandler,Default_Handler
.weak EXTI4_15_IRQHandler
.thumb_set EXTI4_15_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_3_IRQHandler
.thumb_set DMA1_Channel2_3_IRQHandler,Default_Handler
.weak DMA1_Channel4_5_6_7_IRQHandler
.thumb_set DMA1_Channel4_5_6_7_IRQHandler,Default_Handler
.weak ADC1_COMP_IRQHandler
.thumb_set ADC1_COMP_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM21_IRQHandler
.thumb_set TIM21_IRQHandler,Default_Handler
.weak TIM22_IRQHandler
.thumb_set TIM22_IRQHandler,Default_Handler
.weak I2C1_IRQHandler
.thumb_set I2C1_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|