| repo_id (string) | size (int64) | file_path (string) | content (string) |
|---|---|---|---|
xiao-tai/ics2021 | 1,025 | abstract-machine/am/src/riscv/mycpu/trap.S |
#define concat_temp(x, y) x ## y
#define concat(x, y) concat_temp(x, y)
#define MAP(c, f) c(f)
#define REGS(f) \
f( 1) f( 3) f( 4) f( 5) f( 6) f( 7) f( 8) f( 9) \
f(10) f(11) f(12) f(13) f(14) f(15) f(16) f(17) f(18) f(19) \
f(20) f(21) f(22) f(23) f(24) f(25) f(26) f(27) f(28) f(29) \
f(30) f(31)
#define PUSH(n) sd concat(x, n), (n * 8)(sp);
#define POP(n) ld concat(x, n), (n * 8)(sp);
#define CONTEXT_SIZE ((32 + 3) * 8)
#define OFFSET_SP ( 2 * 8)
#define OFFSET_CAUSE (32 * 8)
#define OFFSET_STATUS (33 * 8)
#define OFFSET_EPC (34 * 8)
.globl __am_asm_trap
__am_asm_trap:
addi sp, sp, -CONTEXT_SIZE
MAP(REGS, PUSH)
mv t0, sp
addi t0, t0, CONTEXT_SIZE
sd t0, OFFSET_SP(sp)
csrr t0, mcause
csrr t1, mstatus
csrr t2, mepc
sd t0, OFFSET_CAUSE(sp)
sd t1, OFFSET_STATUS(sp)
sd t2, OFFSET_EPC(sp)
mv a0, sp
jal __am_irq_handle
ld t1, OFFSET_STATUS(sp)
ld t2, OFFSET_EPC(sp)
csrw mstatus, t1
csrw mepc, t2
MAP(REGS, POP)
addi sp, sp, CONTEXT_SIZE
mret
|
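For reference, the frame that `__am_asm_trap` builds can be pictured as a C struct whose fields line up with the `OFFSET_*` macros above. This is a minimal sketch assuming RV64 (8-byte slots); the struct and field names are illustrative, not taken from the project's headers.

```c
#include <stdint.h>

/* Stack frame laid out by __am_asm_trap, matching the OFFSET_* macros above.
 * Illustrative names only. */
typedef struct {
    uint64_t gpr[32];   /* x0..x31; the x2 slot (OFFSET_SP = 2 * 8) holds the pre-trap sp */
    uint64_t mcause;    /* OFFSET_CAUSE  = 32 * 8 */
    uint64_t mstatus;   /* OFFSET_STATUS = 33 * 8 */
    uint64_t mepc;      /* OFFSET_EPC    = 34 * 8 */
} TrapFrame;            /* sizeof(TrapFrame) == CONTEXT_SIZE == (32 + 3) * 8 */
```

The `mv a0, sp` before the call means `__am_irq_handle` receives a pointer to this frame as its argument.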
xiao-tai/ics2021 | 2,775 | abstract-machine/am/src/riscv/mycpu/libgcc/div.S | #define FUNC_TYPE(X) .type X,@function
#define FUNC_SIZE(X) .size X,.-X
#define FUNC_BEGIN(X) \
.globl X; \
FUNC_TYPE (X); \
X:
#define FUNC_END(X) \
FUNC_SIZE(X)
#define FUNC_ALIAS(X,Y) \
.globl X; \
X = Y
#define __riscv_xlen 64
.text
.align 2
#if __riscv_xlen == 32
/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
# define __udivdi3 __udivsi3
# define __umoddi3 __umodsi3
# define __divdi3 __divsi3
# define __moddi3 __modsi3
#else
FUNC_BEGIN (__udivsi3)
/* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
sll a0, a0, 32
sll a1, a1, 32
move t0, ra
jal __udivdi3
sext.w a0, a0
jr t0
FUNC_END (__udivsi3)
FUNC_BEGIN (__umodsi3)
/* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */
sll a0, a0, 32
sll a1, a1, 32
srl a0, a0, 32
srl a1, a1, 32
move t0, ra
jal __udivdi3
sext.w a0, a1
jr t0
FUNC_END (__umodsi3)
FUNC_ALIAS (__modsi3, __moddi3)
FUNC_BEGIN (__divsi3)
/* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
li t0, -1
beq a1, t0, .L20
#endif
FUNC_BEGIN (__divdi3)
bltz a0, .L10
bltz a1, .L11
/* Since the quotient is positive, fall into __udivdi3. */
FUNC_BEGIN (__udivdi3)
mv a2, a1
mv a1, a0
li a0, -1
beqz a2, .L5
li a3, 1
bgeu a2, a1, .L2
.L1:
blez a2, .L2
slli a2, a2, 1
slli a3, a3, 1
bgtu a1, a2, .L1
.L2:
li a0, 0
.L3:
bltu a1, a2, .L4
sub a1, a1, a2
or a0, a0, a3
.L4:
srli a3, a3, 1
srli a2, a2, 1
bnez a3, .L3
.L5:
ret
FUNC_END (__udivdi3)
FUNC_BEGIN (__umoddi3)
/* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
move t0, ra
jal __udivdi3
move a0, a1
jr t0
FUNC_END (__umoddi3)
/* Handle negative arguments to __divdi3. */
.L10:
neg a0, a0
/* Zero is handled as a negative so that the result will not be inverted. */
bgtz a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
neg a1, a1
j __udivdi3 /* Compute __udivdi3(-a0, -a1). */
.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
neg a1, a1
.L12:
move t0, ra
jal __udivdi3
neg a0, a0
jr t0
FUNC_END (__divdi3)
FUNC_BEGIN (__moddi3)
move t0, ra
bltz a1, .L31
bltz a0, .L32
.L30:
jal __udivdi3 /* The dividend is not negative. */
move a0, a1
jr t0
.L31:
neg a1, a1
bgez a0, .L30
.L32:
neg a0, a0
jal __udivdi3 /* The dividend is hella negative. */
neg a0, a1
jr t0
FUNC_END (__moddi3)
#if __riscv_xlen == 64
/* continuation of __divsi3 */
.L20:
sll t0, t0, 31
bne a0, t0, __divdi3
ret
FUNC_END (__divsi3)
#endif
|
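The `__udivdi3` core above is a classic shift-and-subtract divider: it left-aligns the divisor under the dividend, then repeatedly subtracts and shifts back down, collecting quotient bits. The quotient comes back in `a0` and the remainder in `a1`, which is why `__umoddi3` and `__moddi3` just call it and copy `a1`. A rough C sketch of the same loop follows (a sketch of the algorithm, not the routine itself); like the assembly, a zero divisor yields an all-ones quotient and leaves the dividend as the remainder.

```c
#include <stdint.h>

/* Shift-and-subtract division, sketching the loop in __udivdi3 above. */
static uint64_t udiv64(uint64_t num, uint64_t den, uint64_t *rem) {
    uint64_t bit = 1, quot = 0;
    if (den == 0) {
        if (rem) *rem = num;
        return UINT64_MAX;
    }
    /* Left-align the divisor under the dividend (stop if its MSB is set). */
    while (den < num && !(den >> 63)) {
        den <<= 1;
        bit <<= 1;
    }
    /* Subtract and shift back down, collecting quotient bits. */
    while (bit != 0) {
        if (num >= den) {
            num -= den;
            quot |= bit;
        }
        den >>= 1;
        bit >>= 1;
    }
    if (rem) *rem = num;   /* the remainder, which the asm leaves in a1 */
    return quot;
}
```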
xiao-tai/ics2021 | 1,717 | abstract-machine/am/src/x86/qemu/trap32.S | #include "x86-qemu.h"
.globl __am_kcontext_start
__am_kcontext_start:
// eax = arg, ebx = entry
pushl %eax
pushl $__am_panic_on_return
jmpl *%ebx
trap:
cli
subl $20, %esp
pushl %ebp
pushl %edi
pushl %esi
pushl $0
pushl %edx
pushl %ecx
pushl %ebx
pushl %eax
movw %ds, %ax
pushl %eax
pushl $0
movw $KSEL(SEG_KDATA), %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
pushl %esp
call __am_irq_handle
.globl __am_iret
__am_iret:
addl $4, %esp
popl %eax
movl %eax, %esp
addl $4, %esp
popl %eax
movw %ax, %ds
movw %ax, %es
cmpw $KSEL(SEG_KCODE), 36(%esp)
je .kernel_iret
.user_iret:
popl %eax
popl %ebx
popl %ecx
popl %edx
addl $4, %esp
popl %esi
popl %edi
popl %ebp
iret
.kernel_iret:
popl %eax
popl %ebx
popl %ecx
popl %edx
addl $4, %esp
/* stack frame:
28 ss
24 esp (not popped by iret when returning to ring0)
20 eflags ---> move to new-esp
16 cs
12 eip
8 ebp
4 edi
0 esi <--- %esp
*/
movl %esp, %ebp
movl 24(%ebp), %edi // %edi is new-esp
movl 20(%ebp), %esi; movl %esi, -4(%edi)
movl 16(%ebp), %esi; movl %esi, -8(%edi)
movl 12(%ebp), %esi; movl %esi, -12(%edi)
movl 8(%ebp), %esi; movl %esi, -16(%edi)
movl 4(%ebp), %esi; movl %esi, -20(%edi)
movl 0(%ebp), %esi; movl %esi, -24(%edi)
leal -24(%edi), %esp
popl %esi
popl %edi
popl %ebp
iret
#define NOERR push $0
#define ERR
#define IRQ_DEF(id, dpl, err) \
.globl __am_irq##id; __am_irq##id: cli; err; push $id; jmp trap;
IRQS(IRQ_DEF)
.globl __am_irqall; __am_irqall: cli; push $0; push $-1; jmp trap;
|
xiao-tai/ics2021 | 1,562 | abstract-machine/am/src/x86/qemu/start64.S | #include <x86/x86.h>
#include "x86-qemu.h"
.code32
.globl _start
_start:
movl $(PDPT_ADDR | PTE_P | PTE_W), %eax
cmpl (PML4_ADDR), %eax
je .long_mode_init
movl $(PDPT_ADDR | PTE_P | PTE_W), %eax
movl %eax, (PML4_ADDR)
movl $0, %ecx
movl $512, %esi // 512 pages
.loop: // fill all 512 PDPT entries
movl %ecx, %eax
shll $30, %eax // low 32 bits of (index << 30)
orl $(PTE_P | PTE_W | PTE_PS), %eax // 1 GiB page
movl %eax, PDPT_ADDR(, %ecx, 8)
movl %ecx, %eax
shrl $2, %eax // high 32 bits of (index << 30)
movl %eax, PDPT_ADDR + 4(, %ecx, 8)
inc %ecx
cmp %esi, %ecx
jne .loop
.long_mode_init:
movl $PML4_ADDR, %eax
movl %eax, %cr3 // %cr3 = PML4 base
movl $CR4_PAE, %eax
movl %eax, %cr4 // %cr4.PAE = 1
movl $0xc0000080, %ecx
rdmsr
orl $0x100, %eax
wrmsr // %EFER.LME = 1
movl %cr0, %eax
orl $CR0_PG, %eax
movl %eax, %cr0 // %cr0.PG = 1
lgdt gdt_ptr // bootstrap GDT
ljmp $8, $_start64 // should not return
.code64
_start64:
movw $0, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
movw %ax, %fs
movw %ax, %gs
movq $MAINARG_ADDR, %rdi
pushq $0
jmp _start_c
.align 16
gdt_ptr:
.word gdt64_end - gdt64_begin - 1
.quad gdt64_begin
gdt64_begin:
.long 0x00000000 // 0: null desc
.long 0x00000000
.long 0x00000000 // 1: code
.long 0x00209800
gdt64_end:
|
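The 32-bit `.loop` above identity-maps the first 512 GiB with 1 GiB pages, writing each 64-bit PDPT entry as two 32-bit stores because long mode is not enabled yet. A hedged C sketch of the same computation; the flag values are the standard x86 PTE bits, and whether they match this project's `x86.h` definitions is an assumption.

```c
#include <stdint.h>

/* Standard x86 PTE bits; assumed to match the project's x86.h. */
#define PTE_P  0x001u  /* present */
#define PTE_W  0x002u  /* writable */
#define PTE_PS 0x080u  /* page size (1 GiB entry in a PDPT) */

/* What the .loop writes: 512 PDPT entries, each 64-bit entry stored as two halves. */
static void fill_pdpt(volatile uint32_t *pdpt /* located at PDPT_ADDR */) {
    for (uint32_t i = 0; i < 512; i++) {
        uint64_t entry = ((uint64_t)i << 30) | PTE_P | PTE_W | PTE_PS;
        pdpt[2 * i]     = (uint32_t)entry;          /* low half:  (i << 30) | flags */
        pdpt[2 * i + 1] = (uint32_t)(entry >> 32);  /* high half: i >> 2 */
    }
}
```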
xiao-tai/ics2021 | 1,094 | abstract-machine/am/src/x86/qemu/boot/start.S | #define CR0_PE 0x00000001
#define GDT_ENTRY(n) \
((n) << 3)
#define SEG_NULLASM \
.word 0, 0; \
.byte 0, 0, 0, 0
#define SEG_ASM(type, base, lim) \
.word (((lim) >> 12) & 0xffff), ((base) & 0xffff); \
.byte (((base) >> 16) & 0xff), (0x90 | (type)), \
(0xC0 | (((lim) >> 28) & 0xf)), (((base) >> 24) & 0xff)
.code16
.globl _start
_start:
cli
xorw %ax, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
# Set a 640 x 480 x 32 video mode
mov $0x4f01, %ax
mov $0x0112, %cx
mov $0x4000, %di
int $0x10
mov $0x4f02, %ax
mov $0x4112, %bx
int $0x10
lgdt gdtdesc
movl %cr0, %eax
orl $CR0_PE, %eax
movl %eax, %cr0
ljmp $GDT_ENTRY(1), $start32
.code32
start32:
movw $GDT_ENTRY(2), %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
movl $0xa000, %esp
call load_kernel
# GDT
.p2align 2
gdt:
SEG_NULLASM
SEG_ASM(0xA, 0x0, 0xffffffff)
SEG_ASM(0x2, 0x0, 0xffffffff)
gdtdesc:
.word (gdtdesc - gdt - 1)
.long gdt
|
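`SEG_ASM` packs a flat segment descriptor with 4 KiB granularity and a 32-bit default operand size. A C mirror of the same bit packing, following the `.word`/`.byte` layout of the macro above (the function name is illustrative only):

```c
#include <stdint.h>

/* Sketch of the 8-byte GDT descriptor SEG_ASM emits. */
static uint64_t seg_desc(uint8_t type, uint32_t base, uint32_t lim) {
    uint64_t d = 0;
    d |= (uint64_t)((lim >> 12) & 0xffff);              /* limit 15:0 (4 KiB units) */
    d |= (uint64_t)(base & 0xffff) << 16;               /* base 15:0 */
    d |= (uint64_t)((base >> 16) & 0xff) << 32;         /* base 23:16 */
    d |= (uint64_t)(0x90 | type) << 40;                 /* present, ring 0, type */
    d |= (uint64_t)(0xC0 | ((lim >> 28) & 0xf)) << 48;  /* G=1, D=1, limit 19:16 */
    d |= (uint64_t)((base >> 24) & 0xff) << 56;         /* base 31:24 */
    return d;
}
```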
xiao-tai/ics2021 | 1,402 | abstract-machine/am/src/mips/nemu/trap.S |
#define MAP(c, f) c(f)
#define REGS(f) \
f( 1) f( 2) f( 3) f( 4) f( 5) f( 6) f( 7) f( 8) f( 9) \
f(10) f(11) f(12) f(13) f(14) f(15) f(16) f(17) f(18) f(19) \
f(20) f(21) f(22) f(23) f(24) f(25) f(28) \
f(30) f(31)
#define PUSH(n) sw $n, (n * 4)($sp);
#define POP(n) lw $n, (n * 4)($sp);
#define CONTEXT_SIZE ((31 + 6) * 4)
#define OFFSET_SP (29 * 4)
#define OFFSET_LO (32 * 4)
#define OFFSET_HI (33 * 4)
#define OFFSET_CAUSE (34 * 4)
#define OFFSET_STATUS (35 * 4)
#define OFFSET_EPC (36 * 4)
#define CP0_STATUS 12
#define CP0_CAUSE 13
#define CP0_EPC 14
.set noat
.globl __am_asm_trap
__am_asm_trap:
move $k1, $sp
addiu $sp, $sp, -CONTEXT_SIZE
MAP(REGS, PUSH)
sw $k1, OFFSET_SP($sp)
mflo $t0
mfhi $t1
mfc0 $t2, $CP0_CAUSE
mfc0 $t3, $CP0_STATUS
mfc0 $t4, $CP0_EPC
sw $t0, OFFSET_LO($sp)
sw $t1, OFFSET_HI($sp)
sw $t2, OFFSET_CAUSE($sp)
sw $t3, OFFSET_STATUS($sp)
sw $t4, OFFSET_EPC($sp)
# allow nested exception
li $a0, ~0x3
and $t3, $t3, $a0 # clear status.exl and status.ie
mtc0 $t3, $CP0_STATUS
move $a0, $sp
jal __am_irq_handle
lw $t0, OFFSET_LO($sp)
lw $t1, OFFSET_HI($sp)
lw $t3, OFFSET_STATUS($sp)
lw $t4, OFFSET_EPC($sp)
# set status.exl
ori $t3, $t3, 0x2
mtlo $t0
mthi $t1
mtc0 $t3, $CP0_STATUS
mtc0 $t4, $CP0_EPC
MAP(REGS, POP)
addiu $sp, $sp, CONTEXT_SIZE
eret
|
xiaowuzxc/SparrowRV | 1,207 | bsp/lib/start.S | .section .init; /*place this code in the .init section*/
.globl _start; /*declare _start as a global symbol*/
.type _start,@function /*declare _start as a function*/
_start:
.option push /*save the current assembler options*/
.option norelax /*disable linker relaxation*/
nop /*no-op*/
la gp, __global_pointer$ /*set the gp global pointer; __global_pointer$ comes from the linker script, is tied to the data section and points at global variables*/
.option pop
la sp, _sp /*set the sp stack pointer; _sp comes from the linker script and serves ordinary local variables*/
/*copy the data section, which holds global and static variables that need initialization*/
la a0, _data_lma /*start address of the data section in program memory -> a0*/
la a1, _data /*start address of the data section in data memory -> a1*/
la a2, _edata /*end address of the data section in data memory -> a2*/
bgeu a1, a2, 2f /*if a1 >= a2, jump forward to the next label 2; otherwise fall through*/
1:
lw t0, (a0) /*load the word at the address in a0 into t0*/
sw t0, (a1) /*store t0 to the address in a1*/
addi a0, a0, 4 /*a0 += 4*/
addi a1, a1, 4 /*a1 += 4*/
bltu a1, a2, 1b /*if a1 < a2, jump back to the previous label 1; otherwise fall through*/
/*end of data-section copy*/
2:
/*clear the bss section, which holds global and static variables that need no initialization*/
la a0, __bss_start /*start address of the bss section -> a0*/
la a1, _end /*end address of the bss section -> a1*/
bgeu a0, a1, 2f /*if a0 >= a1, jump forward to the next label 2; otherwise fall through*/
1:
sw zero, (a0) /*store 0 to the address in a0*/
addi a0, a0, 4 /*a0 += 4*/
bltu a0, a1, 1b /*if a0 < a1, jump back to the previous label 1; otherwise fall through*/
/*end of bss clear*/
2:
call _init /*initialization function*/
call main /*main function*/
csrwi 0x347,1 /*simulation only: exit the simulation*/
loop: /*infinite loop*/
j loop
trap_vector_base: /*fallback trap vector table*/
j _start
.word 0
.word 0
.word 0
|
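The two loops above are the usual C-runtime startup work. A C equivalent, assuming the linker-script symbols mark 4-byte-aligned boundaries as the word-sized strides in the assembly imply (the function name is illustrative):

```c
#include <stdint.h>

/* Linker-script symbols referenced by start.S, treated as word arrays. */
extern uint32_t _data_lma[], _data[], _edata[];
extern uint32_t __bss_start[], _end[];

/* C equivalent of the two startup loops above. */
static void crt0_init_memory(void) {
    /* Copy initialized data from program (load) memory into data memory. */
    for (uint32_t *src = _data_lma, *dst = _data; dst < _edata; src++, dst++)
        *dst = *src;
    /* Zero the bss. */
    for (uint32_t *p = __bss_start; p < _end; p++)
        *p = 0;
}
```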
xiaowuzxc/SparrowRV | 1,250 | bsp/lib/trap/trap_entry.S | .section .text.entry
.align 2
.global trap_entry
trap_entry:
addi sp, sp, -16*4
sw x1 , 0*4(sp)
sw x5 , 1*4(sp)
sw x6 , 2*4(sp)
sw x7 , 3*4(sp)
sw x10, 4*4(sp)
sw x11, 5*4(sp)
sw x12, 6*4(sp)
sw x13, 7*4(sp)
sw x14, 8*4(sp)
sw x15, 9*4(sp)
sw x16, 10*4(sp)
sw x17, 11*4(sp)
sw x28, 12*4(sp)
sw x29, 13*4(sp)
sw x30, 14*4(sp)
sw x31, 15*4(sp)
csrr a0, mcause
csrr a1, mepc
test_if_asynchronous:
srli a2, a0, 31 /* MSB of mcause is 1 if handling an asynchronous interrupt - shift to LSB to clear other bits. */
beq a2, x0, handle_synchronous /* Branch past interrupt handling if not asynchronous. */
call trap_handler
j asynchronous_return
handle_synchronous:
addi a1, a1, 4
csrw mepc, a1
asynchronous_return:
lw x1 , 0*4(sp)
lw x5 , 1*4(sp)
lw x6 , 2*4(sp)
lw x7 , 3*4(sp)
lw x10, 4*4(sp)
lw x11, 5*4(sp)
lw x12, 6*4(sp)
lw x13, 7*4(sp)
lw x14, 8*4(sp)
lw x15, 9*4(sp)
lw x16, 10*4(sp)
lw x17, 11*4(sp)
lw x28, 12*4(sp)
lw x29, 13*4(sp)
lw x30, 14*4(sp)
lw x31, 15*4(sp)
addi sp, sp, 16*4
mret
.weak trap_handler
trap_handler:
1:
j 1b
|
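The stub above saves only the caller-saved registers, passes `mcause` in `a0` and `mepc` in `a1`, calls `trap_handler` only for asynchronous interrupts, and for synchronous traps skips the trapping instruction by adding 4 to `mepc`. Overriding the weak `trap_handler` from C might look like the sketch below; the case labels are the standard RISC-V machine interrupt codes, and the handler body is purely illustrative.

```c
#include <stdint.h>

/* Example override of the weak trap_handler; called by trap_entry only for
 * asynchronous interrupts, with mcause in a0 and mepc in a1. */
void trap_handler(uint32_t mcause, uint32_t mepc) {
    uint32_t code = mcause & 0x7fffffffu;  /* drop the interrupt bit (MSB) */
    (void)mepc;
    switch (code) {
        case 7:  /* machine timer interrupt    */ break;
        case 11: /* machine external interrupt */ break;
        default: break;
    }
}
```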
xiaowuzxc/SparrowRV | 2,651 | tb/tools/isa/rv32ui/xor.S | # See LICENSE for license details.
#*****************************************************************************
# xor.S
#-----------------------------------------------------------------------------
#
# Test xor instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_RR_OP( 2, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_OP( 3, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_OP( 4, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_OP( 5, xor, 0x00ff00ff, 0xf00ff00f, 0xf0f0f0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 6, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC2_EQ_DEST( 7, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_EQ_DEST( 8, xor, 0x00000000, 0xff00ff00 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 9, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_DEST_BYPASS( 10, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_DEST_BYPASS( 11, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 12, 0, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 13, 0, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 14, 0, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 15, 1, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 16, 1, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 17, 2, 0, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 18, 0, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 19, 0, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 20, 0, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 21, 1, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 22, 1, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 23, 2, 0, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_ZEROSRC1( 24, xor, 0xff00ff00, 0xff00ff00 );
TEST_RR_ZEROSRC2( 25, xor, 0x00ff00ff, 0x00ff00ff );
TEST_RR_ZEROSRC12( 26, xor, 0 );
TEST_RR_ZERODEST( 27, xor, 0x11111111, 0x22222222 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
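Each `TEST_RR_OP(n, xor, result, val1, val2)` case checks that `val1 xor val2` produces `result`. The expected constants can be sanity-checked on the host; a small standalone C check of the first four cases (not part of the riscv-tests suite):

```c
#include <assert.h>

/* Host-side check of the expected constants in xor tests 2-5 above. */
int main(void) {
    assert((0xff00ff00u ^ 0x0f0f0f0fu) == 0xf00ff00fu);  /* test 2 */
    assert((0x0ff00ff0u ^ 0xf0f0f0f0u) == 0xff00ff00u);  /* test 3 */
    assert((0x00ff00ffu ^ 0x0f0f0f0fu) == 0x0ff00ff0u);  /* test 4 */
    assert((0xf00ff00fu ^ 0xf0f0f0f0u) == 0x00ff00ffu);  /* test 5 */
    return 0;
}
```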
xiaowuzxc/SparrowRV | 3,122 | tb/tools/isa/rv32ui/sub.S | # See LICENSE for license details.
#*****************************************************************************
# sub.S
#-----------------------------------------------------------------------------
#
# Test sub instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, sub, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 );
TEST_RR_OP( 3, sub, 0x0000000000000000, 0x0000000000000001, 0x0000000000000001 );
TEST_RR_OP( 4, sub, 0xfffffffffffffffc, 0x0000000000000003, 0x0000000000000007 );
TEST_RR_OP( 5, sub, 0x0000000000008000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, sub, 0xffffffff80000000, 0xffffffff80000000, 0x0000000000000000 );
TEST_RR_OP( 7, sub, 0xffffffff80008000, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 8, sub, 0xffffffffffff8001, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 9, sub, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 10, sub, 0x000000007fff8000, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 11, sub, 0xffffffff7fff8001, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 12, sub, 0x0000000080007fff, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 13, sub, 0x0000000000000001, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 14, sub, 0xfffffffffffffffe, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 15, sub, 0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 16, sub, 2, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 17, sub, 3, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 18, sub, 0, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 19, 0, sub, 2, 13, 11 );
TEST_RR_DEST_BYPASS( 20, 1, sub, 3, 14, 11 );
TEST_RR_DEST_BYPASS( 21, 2, sub, 4, 15, 11 );
TEST_RR_SRC12_BYPASS( 22, 0, 0, sub, 2, 13, 11 );
TEST_RR_SRC12_BYPASS( 23, 0, 1, sub, 3, 14, 11 );
TEST_RR_SRC12_BYPASS( 24, 0, 2, sub, 4, 15, 11 );
TEST_RR_SRC12_BYPASS( 25, 1, 0, sub, 2, 13, 11 );
TEST_RR_SRC12_BYPASS( 26, 1, 1, sub, 3, 14, 11 );
TEST_RR_SRC12_BYPASS( 27, 2, 0, sub, 4, 15, 11 );
TEST_RR_SRC21_BYPASS( 28, 0, 0, sub, 2, 13, 11 );
TEST_RR_SRC21_BYPASS( 29, 0, 1, sub, 3, 14, 11 );
TEST_RR_SRC21_BYPASS( 30, 0, 2, sub, 4, 15, 11 );
TEST_RR_SRC21_BYPASS( 31, 1, 0, sub, 2, 13, 11 );
TEST_RR_SRC21_BYPASS( 32, 1, 1, sub, 3, 14, 11 );
TEST_RR_SRC21_BYPASS( 33, 2, 0, sub, 4, 15, 11 );
TEST_RR_ZEROSRC1( 34, sub, 15, -15 );
TEST_RR_ZEROSRC2( 35, sub, 32, 32 );
TEST_RR_ZEROSRC12( 36, sub, 0 );
TEST_RR_ZERODEST( 37, sub, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,308 | tb/tools/isa/rv32ui/lw.S | # See LICENSE for license details.
#*****************************************************************************
# lw.S
#-----------------------------------------------------------------------------
#
# Test lw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lw, 0x0000000000ff00ff, 0, tdat );
TEST_LD_OP( 3, lw, 0xffffffffff00ff00, 4, tdat );
TEST_LD_OP( 4, lw, 0x000000000ff00ff0, 8, tdat );
TEST_LD_OP( 5, lw, 0xfffffffff00ff00f, 12, tdat );
# Test with negative offset
TEST_LD_OP( 6, lw, 0x0000000000ff00ff, -12, tdat4 );
TEST_LD_OP( 7, lw, 0xffffffffff00ff00, -8, tdat4 );
TEST_LD_OP( 8, lw, 0x000000000ff00ff0, -4, tdat4 );
TEST_LD_OP( 9, lw, 0xfffffffff00ff00f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x0000000000ff00ff, \
la x1, tdat; \
addi x1, x1, -32; \
lw x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0xffffffffff00ff00, \
la x1, tdat; \
addi x1, x1, -3; \
lw x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lw, 0x000000000ff00ff0, 4, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lw, 0xfffffffff00ff00f, 4, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lw, 0xffffffffff00ff00, 4, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lw, 0x000000000ff00ff0, 4, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lw, 0xfffffffff00ff00f, 4, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lw, 0xffffffffff00ff00, 4, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lw x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lw x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .word 0x00ff00ff
tdat2: .word 0xff00ff00
tdat3: .word 0x0ff00ff0
tdat4: .word 0xf00ff00f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,748 | tb/tools/isa/rv32ui/sltu.S | # See LICENSE for license details.
#*****************************************************************************
# sltu.S
#-----------------------------------------------------------------------------
#
# Test sltu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, sltu, 0, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, sltu, 0, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, sltu, 1, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, sltu, 0, 0x00000007, 0x00000003 );
TEST_RR_OP( 6, sltu, 1, 0x00000000, 0xffff8000 );
TEST_RR_OP( 7, sltu, 0, 0x80000000, 0x00000000 );
TEST_RR_OP( 8, sltu, 1, 0x80000000, 0xffff8000 );
TEST_RR_OP( 9, sltu, 1, 0x00000000, 0x00007fff );
TEST_RR_OP( 10, sltu, 0, 0x7fffffff, 0x00000000 );
TEST_RR_OP( 11, sltu, 0, 0x7fffffff, 0x00007fff );
TEST_RR_OP( 12, sltu, 0, 0x80000000, 0x00007fff );
TEST_RR_OP( 13, sltu, 1, 0x7fffffff, 0xffff8000 );
TEST_RR_OP( 14, sltu, 1, 0x00000000, 0xffffffff );
TEST_RR_OP( 15, sltu, 0, 0xffffffff, 0x00000001 );
TEST_RR_OP( 16, sltu, 0, 0xffffffff, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 17, sltu, 0, 14, 13 );
TEST_RR_SRC2_EQ_DEST( 18, sltu, 1, 11, 13 );
TEST_RR_SRC12_EQ_DEST( 19, sltu, 0, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 20, 0, sltu, 1, 11, 13 );
TEST_RR_DEST_BYPASS( 21, 1, sltu, 0, 14, 13 );
TEST_RR_DEST_BYPASS( 22, 2, sltu, 1, 12, 13 );
TEST_RR_SRC12_BYPASS( 23, 0, 0, sltu, 0, 14, 13 );
TEST_RR_SRC12_BYPASS( 24, 0, 1, sltu, 1, 11, 13 );
TEST_RR_SRC12_BYPASS( 25, 0, 2, sltu, 0, 15, 13 );
TEST_RR_SRC12_BYPASS( 26, 1, 0, sltu, 1, 10, 13 );
TEST_RR_SRC12_BYPASS( 27, 1, 1, sltu, 0, 16, 13 );
TEST_RR_SRC12_BYPASS( 28, 2, 0, sltu, 1, 9, 13 );
TEST_RR_SRC21_BYPASS( 29, 0, 0, sltu, 0, 17, 13 );
TEST_RR_SRC21_BYPASS( 30, 0, 1, sltu, 1, 8, 13 );
TEST_RR_SRC21_BYPASS( 31, 0, 2, sltu, 0, 18, 13 );
TEST_RR_SRC21_BYPASS( 32, 1, 0, sltu, 1, 7, 13 );
TEST_RR_SRC21_BYPASS( 33, 1, 1, sltu, 0, 19, 13 );
TEST_RR_SRC21_BYPASS( 34, 2, 0, sltu, 1, 6, 13 );
TEST_RR_ZEROSRC1( 35, sltu, 1, -1 );
TEST_RR_ZEROSRC2( 36, sltu, 0, -1 );
TEST_RR_ZEROSRC12( 37, sltu, 0 );
TEST_RR_ZERODEST( 38, sltu, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,538 | tb/tools/isa/rv32ui/bgeu.S | # See LICENSE for license details.
#*****************************************************************************
# bgeu.S
#-----------------------------------------------------------------------------
#
# Test bgeu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, bgeu, 0x00000000, 0x00000000 );
TEST_BR2_OP_TAKEN( 3, bgeu, 0x00000001, 0x00000001 );
TEST_BR2_OP_TAKEN( 4, bgeu, 0xffffffff, 0xffffffff );
TEST_BR2_OP_TAKEN( 5, bgeu, 0x00000001, 0x00000000 );
TEST_BR2_OP_TAKEN( 6, bgeu, 0xffffffff, 0xfffffffe );
TEST_BR2_OP_TAKEN( 7, bgeu, 0xffffffff, 0x00000000 );
TEST_BR2_OP_NOTTAKEN( 8, bgeu, 0x00000000, 0x00000001 );
TEST_BR2_OP_NOTTAKEN( 9, bgeu, 0xfffffffe, 0xffffffff );
TEST_BR2_OP_NOTTAKEN( 10, bgeu, 0x00000000, 0xffffffff );
TEST_BR2_OP_NOTTAKEN( 11, bgeu, 0x7fffffff, 0x80000000 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 12, 0, 0, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 13, 0, 1, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 14, 0, 2, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 15, 1, 0, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 16, 1, 1, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 17, 2, 0, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 18, 0, 0, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 19, 0, 1, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 20, 0, 2, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 21, 1, 0, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 22, 1, 1, bgeu, 0xefffffff, 0xf0000000 );
TEST_BR2_SRC12_BYPASS( 23, 2, 0, bgeu, 0xefffffff, 0xf0000000 );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 24, x1, 3, \
li x1, 1; \
bgeu x1, x0, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,175 | tb/tools/isa/rv32ui/slti.S | # See LICENSE for license details.
#*****************************************************************************
# slti.S
#-----------------------------------------------------------------------------
#
# Test slti instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, slti, 0, 0x0000000000000000, 0x000 );
TEST_IMM_OP( 3, slti, 0, 0x0000000000000001, 0x001 );
TEST_IMM_OP( 4, slti, 1, 0x0000000000000003, 0x007 );
TEST_IMM_OP( 5, slti, 0, 0x0000000000000007, 0x003 );
TEST_IMM_OP( 6, slti, 0, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 7, slti, 1, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 8, slti, 1, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 9, slti, 1, 0x0000000000000000, 0x7ff );
TEST_IMM_OP( 10, slti, 0, 0x000000007fffffff, 0x000 );
TEST_IMM_OP( 11, slti, 0, 0x000000007fffffff, 0x7ff );
TEST_IMM_OP( 12, slti, 1, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 13, slti, 0, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 14, slti, 0, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 15, slti, 1, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 16, slti, 0, 0xffffffffffffffff, 0xfff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, slti, 1, 11, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, slti, 0, 15, 10 );
TEST_IMM_DEST_BYPASS( 19, 1, slti, 1, 10, 16 );
TEST_IMM_DEST_BYPASS( 20, 2, slti, 0, 16, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, slti, 1, 11, 15 );
TEST_IMM_SRC1_BYPASS( 22, 1, slti, 0, 17, 8 );
TEST_IMM_SRC1_BYPASS( 23, 2, slti, 1, 12, 14 );
TEST_IMM_ZEROSRC1( 24, slti, 0, 0xfff );
TEST_IMM_ZERODEST( 25, slti, 0x00ff00ff, 0xfff );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 4,209 | tb/tools/isa/rv32ui/sll.S | # See LICENSE for license details.
#*****************************************************************************
# sll.S
#-----------------------------------------------------------------------------
#
# Test sll instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, sll, 0x0000000000000001, 0x0000000000000001, 0 );
TEST_RR_OP( 3, sll, 0x0000000000000002, 0x0000000000000001, 1 );
TEST_RR_OP( 4, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_OP( 5, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_OP( 6, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_OP( 7, sll, 0xffffffffffffffff, 0xffffffffffffffff, 0 );
TEST_RR_OP( 8, sll, 0xfffffffffffffffe, 0xffffffffffffffff, 1 );
TEST_RR_OP( 9, sll, 0xffffffffffffff80, 0xffffffffffffffff, 7 );
TEST_RR_OP( 10, sll, 0xffffffffffffc000, 0xffffffffffffffff, 14 );
TEST_RR_OP( 11, sll, 0xffffffff80000000, 0xffffffffffffffff, 31 );
TEST_RR_OP( 12, sll, 0x0000000021212121, 0x0000000021212121, 0 );
TEST_RR_OP( 13, sll, 0x0000000042424242, 0x0000000021212121, 1 );
TEST_RR_OP( 14, sll, 0x0000001090909080, 0x0000000021212121, 7 );
TEST_RR_OP( 15, sll, 0x0000084848484000, 0x0000000021212121, 14 );
TEST_RR_OP( 16, sll, 0x1090909080000000, 0x0000000021212121, 31 );
# Verify that shifts only use bottom six bits
TEST_RR_OP( 17, sll, 0x0000000021212121, 0x0000000021212121, 0xffffffffffffffc0 );
TEST_RR_OP( 18, sll, 0x0000000042424242, 0x0000000021212121, 0xffffffffffffffc1 );
TEST_RR_OP( 19, sll, 0x0000001090909080, 0x0000000021212121, 0xffffffffffffffc7 );
TEST_RR_OP( 20, sll, 0x0000084848484000, 0x0000000021212121, 0xffffffffffffffce );
#if __riscv_xlen == 64
TEST_RR_OP( 21, sll, 0x8000000000000000, 0x0000000021212121, 0xffffffffffffffff );
TEST_RR_OP( 50, sll, 0x8000000000000000, 0x0000000000000001, 63 );
TEST_RR_OP( 51, sll, 0xffffff8000000000, 0xffffffffffffffff, 39 );
TEST_RR_OP( 52, sll, 0x0909080000000000, 0x0000000021212121, 43 );
#endif
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 22, sll, 0x00000080, 0x00000001, 7 );
TEST_RR_SRC2_EQ_DEST( 23, sll, 0x00004000, 0x00000001, 14 );
TEST_RR_SRC12_EQ_DEST( 24, sll, 24, 3 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 25, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_DEST_BYPASS( 26, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_DEST_BYPASS( 27, 2, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_SRC12_BYPASS( 28, 0, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC12_BYPASS( 29, 0, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC12_BYPASS( 30, 0, 2, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_SRC12_BYPASS( 31, 1, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC12_BYPASS( 32, 1, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC12_BYPASS( 33, 2, 0, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_SRC21_BYPASS( 34, 0, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC21_BYPASS( 35, 0, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC21_BYPASS( 36, 0, 2, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_SRC21_BYPASS( 37, 1, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC21_BYPASS( 38, 1, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC21_BYPASS( 39, 2, 0, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_ZEROSRC1( 40, sll, 0, 15 );
TEST_RR_ZEROSRC2( 41, sll, 32, 32 );
TEST_RR_ZEROSRC12( 42, sll, 0 );
TEST_RR_ZERODEST( 43, sll, 1024, 2048 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,345 | tb/tools/isa/rv32ui/addi.S | # See LICENSE for license details.
#*****************************************************************************
# addi.S
#-----------------------------------------------------------------------------
#
# Test addi instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, addi, 0x00000000, 0x00000000, 0x000 );
TEST_IMM_OP( 3, addi, 0x00000002, 0x00000001, 0x001 );
TEST_IMM_OP( 4, addi, 0x0000000a, 0x00000003, 0x007 );
TEST_IMM_OP( 5, addi, 0xfffffffffffff800, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 6, addi, 0xffffffff80000000, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 7, addi, 0xffffffff7ffff800, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 8, addi, 0x00000000000007ff, 0x00000000, 0x7ff );
TEST_IMM_OP( 9, addi, 0x000000007fffffff, 0x7fffffff, 0x000 );
TEST_IMM_OP( 10, addi, 0x00000000800007fe, 0x7fffffff, 0x7ff );
TEST_IMM_OP( 11, addi, 0xffffffff800007ff, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 12, addi, 0x000000007ffff7ff, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 13, addi, 0xffffffffffffffff, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 14, addi, 0x0000000000000000, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 15, addi, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfff );
TEST_IMM_OP( 16, addi, 0x0000000080000000, 0x7fffffff, 0x001 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, addi, 24, 13, 11 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, addi, 24, 13, 11 );
TEST_IMM_DEST_BYPASS( 19, 1, addi, 23, 13, 10 );
TEST_IMM_DEST_BYPASS( 20, 2, addi, 22, 13, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, addi, 24, 13, 11 );
TEST_IMM_SRC1_BYPASS( 22, 1, addi, 23, 13, 10 );
TEST_IMM_SRC1_BYPASS( 23, 2, addi, 22, 13, 9 );
TEST_IMM_ZEROSRC1( 24, addi, 32, 32 );
TEST_IMM_ZERODEST( 25, addi, 33, 50 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,345 | tb/tools/isa/rv32ui/ld.S | # See LICENSE for license details.
#*****************************************************************************
# ld.S
#-----------------------------------------------------------------------------
#
# Test ld instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, ld, 0x00ff00ff00ff00ff, 0, tdat );
TEST_LD_OP( 3, ld, 0xff00ff00ff00ff00, 8, tdat );
TEST_LD_OP( 4, ld, 0x0ff00ff00ff00ff0, 16, tdat );
TEST_LD_OP( 5, ld, 0xf00ff00ff00ff00f, 24, tdat );
# Test with negative offset
TEST_LD_OP( 6, ld, 0x00ff00ff00ff00ff, -24, tdat4 );
TEST_LD_OP( 7, ld, 0xff00ff00ff00ff00, -16, tdat4 );
TEST_LD_OP( 8, ld, 0x0ff00ff00ff00ff0, -8, tdat4 );
TEST_LD_OP( 9, ld, 0xf00ff00ff00ff00f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x00ff00ff00ff00ff, \
la x1, tdat; \
addi x1, x1, -32; \
ld x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0xff00ff00ff00ff00, \
la x1, tdat; \
addi x1, x1, -3; \
ld x5, 11(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, ld, 0x0ff00ff00ff00ff0, 8, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, ld, 0xf00ff00ff00ff00f, 8, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, ld, 0xff00ff00ff00ff00, 8, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, ld, 0x0ff00ff00ff00ff0, 8, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, ld, 0xf00ff00ff00ff00f, 8, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, ld, 0xff00ff00ff00ff00, 8, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
ld x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
ld x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .dword 0x00ff00ff00ff00ff
tdat2: .dword 0xff00ff00ff00ff00
tdat3: .dword 0x0ff00ff00ff00ff0
tdat4: .dword 0xf00ff00ff00ff00f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 1,680 | tb/tools/isa/rv32ui/andi.S | # See LICENSE for license details.
#*****************************************************************************
# andi.S
#-----------------------------------------------------------------------------
#
# Test andi instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, andi, 0xff00ff00, 0xff00ff00, 0xf0f );
TEST_IMM_OP( 3, andi, 0x000000f0, 0x0ff00ff0, 0x0f0 );
TEST_IMM_OP( 4, andi, 0x0000000f, 0x00ff00ff, 0x70f );
TEST_IMM_OP( 5, andi, 0x00000000, 0xf00ff00f, 0x0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 6, andi, 0x00000000, 0xff00ff00, 0x0f0 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 7, 0, andi, 0x00000700, 0x0ff00ff0, 0x70f );
TEST_IMM_DEST_BYPASS( 8, 1, andi, 0x000000f0, 0x00ff00ff, 0x0f0 );
TEST_IMM_DEST_BYPASS( 9, 2, andi, 0xf00ff00f, 0xf00ff00f, 0xf0f );
TEST_IMM_SRC1_BYPASS( 10, 0, andi, 0x00000700, 0x0ff00ff0, 0x70f );
TEST_IMM_SRC1_BYPASS( 11, 1, andi, 0x000000f0, 0x00ff00ff, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 12, 2, andi, 0x0000000f, 0xf00ff00f, 0x70f );
TEST_IMM_ZEROSRC1( 13, andi, 0, 0x0f0 );
TEST_IMM_ZERODEST( 14, andi, 0x00ff00ff, 0x70f );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,310 | tb/tools/isa/rv32ui/lhu.S | # See LICENSE for license details.
#*****************************************************************************
# lhu.S
#-----------------------------------------------------------------------------
#
# Test lhu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lhu, 0x00000000000000ff, 0, tdat );
TEST_LD_OP( 3, lhu, 0x000000000000ff00, 2, tdat );
TEST_LD_OP( 4, lhu, 0x0000000000000ff0, 4, tdat );
TEST_LD_OP( 5, lhu, 0x000000000000f00f, 6, tdat );
# Test with negative offset
TEST_LD_OP( 6, lhu, 0x00000000000000ff, -6, tdat4 );
TEST_LD_OP( 7, lhu, 0x000000000000ff00, -4, tdat4 );
TEST_LD_OP( 8, lhu, 0x0000000000000ff0, -2, tdat4 );
TEST_LD_OP( 9, lhu, 0x000000000000f00f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x00000000000000ff, \
la x1, tdat; \
addi x1, x1, -32; \
lhu x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x000000000000ff00, \
la x1, tdat; \
addi x1, x1, -5; \
lhu x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lhu, 0x0000000000000ff0, 2, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lhu, 0x000000000000f00f, 2, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lhu, 0x000000000000ff00, 2, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lhu, 0x0000000000000ff0, 2, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lhu, 0x000000000000f00f, 2, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lhu, 0x000000000000ff00, 2, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lhu x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lhu x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .half 0x00ff
tdat2: .half 0xff00
tdat3: .half 0x0ff0
tdat4: .half 0xf00f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,633 | tb/tools/isa/rv32ui/and.S | # See LICENSE for license details.
#*****************************************************************************
# and.S
#-----------------------------------------------------------------------------
#
# Test and instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_RR_OP( 2, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_OP( 3, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_OP( 4, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_OP( 5, and, 0xf000f000, 0xf00ff00f, 0xf0f0f0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 6, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC2_EQ_DEST( 7, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_EQ_DEST( 8, and, 0xff00ff00, 0xff00ff00 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 9, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_DEST_BYPASS( 10, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_DEST_BYPASS( 11, 2, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 12, 0, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 13, 0, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 14, 0, 2, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 15, 1, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 16, 1, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 17, 2, 0, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 18, 0, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 19, 0, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 20, 0, 2, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 21, 1, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 22, 1, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 23, 2, 0, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_ZEROSRC1( 24, and, 0, 0xff00ff00 );
TEST_RR_ZEROSRC2( 25, and, 0, 0x00ff00ff );
TEST_RR_ZEROSRC12( 26, and, 0 );
TEST_RR_ZERODEST( 27, and, 0x11111111, 0x22222222 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,366 | tb/tools/isa/rv32ui/bltu.S | # See LICENSE for license details.
#*****************************************************************************
# bltu.S
#-----------------------------------------------------------------------------
#
# Test bltu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, bltu, 0x00000000, 0x00000001 );
TEST_BR2_OP_TAKEN( 3, bltu, 0xfffffffe, 0xffffffff );
TEST_BR2_OP_TAKEN( 4, bltu, 0x00000000, 0xffffffff );
TEST_BR2_OP_NOTTAKEN( 5, bltu, 0x00000001, 0x00000000 );
TEST_BR2_OP_NOTTAKEN( 6, bltu, 0xffffffff, 0xfffffffe );
TEST_BR2_OP_NOTTAKEN( 7, bltu, 0xffffffff, 0x00000000 );
TEST_BR2_OP_NOTTAKEN( 8, bltu, 0x80000000, 0x7fffffff );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 9, 0, 0, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 15, 0, 0, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, bltu, 0xf0000000, 0xefffffff );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, bltu, 0xf0000000, 0xefffffff );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 21, x1, 3, \
li x1, 1; \
bltu x0, x1, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,642 | tb/tools/isa/rv32ui/sh.S | # See LICENSE for license details.
#*****************************************************************************
# sh.S
#-----------------------------------------------------------------------------
#
# Test sh instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_ST_OP( 2, lh, sh, 0x00000000000000aa, 0, tdat );
TEST_ST_OP( 3, lh, sh, 0xffffffffffffaa00, 2, tdat );
TEST_ST_OP( 4, lw, sh, 0xffffffffbeef0aa0, 4, tdat );
TEST_ST_OP( 5, lh, sh, 0xffffffffffffa00a, 6, tdat );
# Test with negative offset
TEST_ST_OP( 6, lh, sh, 0x00000000000000aa, -6, tdat8 );
TEST_ST_OP( 7, lh, sh, 0xffffffffffffaa00, -4, tdat8 );
TEST_ST_OP( 8, lh, sh, 0x0000000000000aa0, -2, tdat8 );
TEST_ST_OP( 9, lh, sh, 0xffffffffffffa00a, 0, tdat8 );
# Test with a negative base
TEST_CASE( 10, x5, 0x5678, \
la x1, tdat9; \
li x2, 0x12345678; \
addi x4, x1, -32; \
sh x2, 32(x4); \
lh x5, 0(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x3098, \
la x1, tdat9; \
li x2, 0x00003098; \
addi x1, x1, -5; \
sh x2, 7(x1); \
la x4, tdat10; \
lh x5, 0(x4); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_ST_SRC12_BYPASS( 12, 0, 0, lh, sh, 0xffffffffffffccdd, 0, tdat );
TEST_ST_SRC12_BYPASS( 13, 0, 1, lh, sh, 0xffffffffffffbccd, 2, tdat );
TEST_ST_SRC12_BYPASS( 14, 0, 2, lh, sh, 0xffffffffffffbbcc, 4, tdat );
TEST_ST_SRC12_BYPASS( 15, 1, 0, lh, sh, 0xffffffffffffabbc, 6, tdat );
TEST_ST_SRC12_BYPASS( 16, 1, 1, lh, sh, 0xffffffffffffaabb, 8, tdat );
TEST_ST_SRC12_BYPASS( 17, 2, 0, lh, sh, 0xffffffffffffdaab, 10, tdat );
TEST_ST_SRC21_BYPASS( 18, 0, 0, lh, sh, 0x2233, 0, tdat );
TEST_ST_SRC21_BYPASS( 19, 0, 1, lh, sh, 0x1223, 2, tdat );
TEST_ST_SRC21_BYPASS( 20, 0, 2, lh, sh, 0x1122, 4, tdat );
TEST_ST_SRC21_BYPASS( 21, 1, 0, lh, sh, 0x0112, 6, tdat );
TEST_ST_SRC21_BYPASS( 22, 1, 1, lh, sh, 0x0011, 8, tdat );
TEST_ST_SRC21_BYPASS( 23, 2, 0, lh, sh, 0x3001, 10, tdat );
li a0, 0xbeef
la a1, tdat
sh a0, 6(a1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .half 0xbeef
tdat2: .half 0xbeef
tdat3: .half 0xbeef
tdat4: .half 0xbeef
tdat5: .half 0xbeef
tdat6: .half 0xbeef
tdat7: .half 0xbeef
tdat8: .half 0xbeef
tdat9: .half 0xbeef
tdat10: .half 0xbeef
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 3,433 | tb/tools/isa/rv32ui/srl.S | # See LICENSE for license details.
#*****************************************************************************
# srl.S
#-----------------------------------------------------------------------------
#
# Test srl instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
#define TEST_SRL(n, v, a) \
TEST_RR_OP(n, srl, ((v) & ((1 << (__riscv_xlen-1) << 1) - 1)) >> (a), v, a)
TEST_SRL( 2, 0xffffffff80000000, 0 );
TEST_SRL( 3, 0xffffffff80000000, 1 );
TEST_SRL( 4, 0xffffffff80000000, 7 );
TEST_SRL( 5, 0xffffffff80000000, 14 );
TEST_SRL( 6, 0xffffffff80000001, 31 );
TEST_SRL( 7, 0xffffffffffffffff, 0 );
TEST_SRL( 8, 0xffffffffffffffff, 1 );
TEST_SRL( 9, 0xffffffffffffffff, 7 );
TEST_SRL( 10, 0xffffffffffffffff, 14 );
TEST_SRL( 11, 0xffffffffffffffff, 31 );
TEST_SRL( 12, 0x0000000021212121, 0 );
TEST_SRL( 13, 0x0000000021212121, 1 );
TEST_SRL( 14, 0x0000000021212121, 7 );
TEST_SRL( 15, 0x0000000021212121, 14 );
TEST_SRL( 16, 0x0000000021212121, 31 );
# Verify that shifts only use bottom five bits
TEST_RR_OP( 17, srl, 0x0000000021212121, 0x0000000021212121, 0xffffffffffffffc0 );
TEST_RR_OP( 18, srl, 0x0000000010909090, 0x0000000021212121, 0xffffffffffffffc1 );
TEST_RR_OP( 19, srl, 0x0000000000424242, 0x0000000021212121, 0xffffffffffffffc7 );
TEST_RR_OP( 20, srl, 0x0000000000008484, 0x0000000021212121, 0xffffffffffffffce );
TEST_RR_OP( 21, srl, 0x0000000000000000, 0x0000000021212121, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 22, srl, 0x01000000, 0x80000000, 7 );
TEST_RR_SRC2_EQ_DEST( 23, srl, 0x00020000, 0x80000000, 14 );
TEST_RR_SRC12_EQ_DEST( 24, srl, 0, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 25, 0, srl, 0x01000000, 0x80000000, 7 );
TEST_RR_DEST_BYPASS( 26, 1, srl, 0x00020000, 0x80000000, 14 );
TEST_RR_DEST_BYPASS( 27, 2, srl, 0x00000001, 0x80000000, 31 );
TEST_RR_SRC12_BYPASS( 28, 0, 0, srl, 0x01000000, 0x80000000, 7 );
TEST_RR_SRC12_BYPASS( 29, 0, 1, srl, 0x00020000, 0x80000000, 14 );
TEST_RR_SRC12_BYPASS( 30, 0, 2, srl, 0x00000001, 0x80000000, 31 );
TEST_RR_SRC12_BYPASS( 31, 1, 0, srl, 0x01000000, 0x80000000, 7 );
TEST_RR_SRC12_BYPASS( 32, 1, 1, srl, 0x00020000, 0x80000000, 14 );
TEST_RR_SRC12_BYPASS( 33, 2, 0, srl, 0x00000001, 0x80000000, 31 );
TEST_RR_SRC21_BYPASS( 34, 0, 0, srl, 0x01000000, 0x80000000, 7 );
TEST_RR_SRC21_BYPASS( 35, 0, 1, srl, 0x00020000, 0x80000000, 14 );
TEST_RR_SRC21_BYPASS( 36, 0, 2, srl, 0x00000001, 0x80000000, 31 );
TEST_RR_SRC21_BYPASS( 37, 1, 0, srl, 0x01000000, 0x80000000, 7 );
TEST_RR_SRC21_BYPASS( 38, 1, 1, srl, 0x00020000, 0x80000000, 14 );
TEST_RR_SRC21_BYPASS( 39, 2, 0, srl, 0x00000001, 0x80000000, 31 );
TEST_RR_ZEROSRC1( 40, srl, 0, 15 );
TEST_RR_ZEROSRC2( 41, srl, 32, 32 );
TEST_RR_ZEROSRC12( 42, srl, 0 );
TEST_RR_ZERODEST( 43, srl, 1024, 2048 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,610 | tb/tools/isa/rv32ui/sb.S | # See LICENSE for license details.
#*****************************************************************************
# sb.S
#-----------------------------------------------------------------------------
#
# Test sb instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_ST_OP( 2, lb, sb, 0xffffffffffffffaa, 0, tdat );
TEST_ST_OP( 3, lb, sb, 0x0000000000000000, 1, tdat );
TEST_ST_OP( 4, lh, sb, 0xffffffffffffefa0, 2, tdat );
TEST_ST_OP( 5, lb, sb, 0x000000000000000a, 3, tdat );
# Test with negative offset
TEST_ST_OP( 6, lb, sb, 0xffffffffffffffaa, -3, tdat8 );
TEST_ST_OP( 7, lb, sb, 0x0000000000000000, -2, tdat8 );
TEST_ST_OP( 8, lb, sb, 0xffffffffffffffa0, -1, tdat8 );
TEST_ST_OP( 9, lb, sb, 0x000000000000000a, 0, tdat8 );
# Test with a negative base
TEST_CASE( 10, x5, 0x78, \
la x1, tdat9; \
li x2, 0x12345678; \
addi x4, x1, -32; \
sb x2, 32(x4); \
lb x5, 0(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0xffffffffffffff98, \
la x1, tdat9; \
li x2, 0x00003098; \
addi x1, x1, -6; \
sb x2, 7(x1); \
la x4, tdat10; \
lb x5, 0(x4); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_ST_SRC12_BYPASS( 12, 0, 0, lb, sb, 0xffffffffffffffdd, 0, tdat );
TEST_ST_SRC12_BYPASS( 13, 0, 1, lb, sb, 0xffffffffffffffcd, 1, tdat );
TEST_ST_SRC12_BYPASS( 14, 0, 2, lb, sb, 0xffffffffffffffcc, 2, tdat );
TEST_ST_SRC12_BYPASS( 15, 1, 0, lb, sb, 0xffffffffffffffbc, 3, tdat );
TEST_ST_SRC12_BYPASS( 16, 1, 1, lb, sb, 0xffffffffffffffbb, 4, tdat );
TEST_ST_SRC12_BYPASS( 17, 2, 0, lb, sb, 0xffffffffffffffab, 5, tdat );
TEST_ST_SRC21_BYPASS( 18, 0, 0, lb, sb, 0x33, 0, tdat );
TEST_ST_SRC21_BYPASS( 19, 0, 1, lb, sb, 0x23, 1, tdat );
TEST_ST_SRC21_BYPASS( 20, 0, 2, lb, sb, 0x22, 2, tdat );
TEST_ST_SRC21_BYPASS( 21, 1, 0, lb, sb, 0x12, 3, tdat );
TEST_ST_SRC21_BYPASS( 22, 1, 1, lb, sb, 0x11, 4, tdat );
TEST_ST_SRC21_BYPASS( 23, 2, 0, lb, sb, 0x01, 5, tdat );
li a0, 0xef
la a1, tdat
sb a0, 3(a1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .byte 0xef
tdat2: .byte 0xef
tdat3: .byte 0xef
tdat4: .byte 0xef
tdat5: .byte 0xef
tdat6: .byte 0xef
tdat7: .byte 0xef
tdat8: .byte 0xef
tdat9: .byte 0xef
tdat10: .byte 0xef
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,028 | tb/tools/isa/rv32ui/beq.S | # See LICENSE for license details.
#*****************************************************************************
# beq.S
#-----------------------------------------------------------------------------
#
# Test beq instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, beq, 0, 0 );
TEST_BR2_OP_TAKEN( 3, beq, 1, 1 );
TEST_BR2_OP_TAKEN( 4, beq, -1, -1 );
TEST_BR2_OP_NOTTAKEN( 5, beq, 0, 1 );
TEST_BR2_OP_NOTTAKEN( 6, beq, 1, 0 );
TEST_BR2_OP_NOTTAKEN( 7, beq, -1, 1 );
TEST_BR2_OP_NOTTAKEN( 8, beq, 1, -1 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 9, 0, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 15, 0, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, beq, 0, -1 );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 21, x1, 3, \
li x1, 1; \
beq x0, x0, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,750 | tb/tools/isa/rv32ui/sd.S | # See LICENSE for license details.
#*****************************************************************************
# sd.S
#-----------------------------------------------------------------------------
#
# Test sd instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_ST_OP( 2, ld, sd, 0x00aa00aa00aa00aa, 0, tdat );
TEST_ST_OP( 3, ld, sd, 0xaa00aa00aa00aa00, 8, tdat );
TEST_ST_OP( 4, ld, sd, 0x0aa00aa00aa00aa0, 16, tdat );
TEST_ST_OP( 5, ld, sd, 0xa00aa00aa00aa00a, 24, tdat );
# Test with negative offset
TEST_ST_OP( 6, ld, sd, 0x00aa00aa00aa00aa, -24, tdat8 );
TEST_ST_OP( 7, ld, sd, 0xaa00aa00aa00aa00, -16, tdat8 );
TEST_ST_OP( 8, ld, sd, 0x0aa00aa00aa00aa0, -8, tdat8 );
TEST_ST_OP( 9, ld, sd, 0xa00aa00aa00aa00a, 0, tdat8 );
# Test with a negative base
TEST_CASE( 10, x5, 0x1234567812345678, \
la x1, tdat9; \
li x2, 0x1234567812345678; \
addi x4, x1, -32; \
sd x2, 32(x4); \
ld x5, 0(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x5821309858213098, \
la x1, tdat9; \
li x2, 0x5821309858213098; \
addi x1, x1, -3; \
sd x2, 11(x1); \
la x4, tdat10; \
ld x5, 0(x4); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_ST_SRC12_BYPASS( 12, 0, 0, ld, sd, 0xabbccdd, 0, tdat );
TEST_ST_SRC12_BYPASS( 13, 0, 1, ld, sd, 0xaabbccd, 8, tdat );
TEST_ST_SRC12_BYPASS( 14, 0, 2, ld, sd, 0xdaabbcc, 16, tdat );
TEST_ST_SRC12_BYPASS( 15, 1, 0, ld, sd, 0xddaabbc, 24, tdat );
TEST_ST_SRC12_BYPASS( 16, 1, 1, ld, sd, 0xcddaabb, 32, tdat );
TEST_ST_SRC12_BYPASS( 17, 2, 0, ld, sd, 0xccddaab, 40, tdat );
TEST_ST_SRC21_BYPASS( 18, 0, 0, ld, sd, 0x00112233, 0, tdat );
TEST_ST_SRC21_BYPASS( 19, 0, 1, ld, sd, 0x30011223, 8, tdat );
TEST_ST_SRC21_BYPASS( 20, 0, 2, ld, sd, 0x33001122, 16, tdat );
TEST_ST_SRC21_BYPASS( 21, 1, 0, ld, sd, 0x23300112, 24, tdat );
TEST_ST_SRC21_BYPASS( 22, 1, 1, ld, sd, 0x22330011, 32, tdat );
TEST_ST_SRC21_BYPASS( 23, 2, 0, ld, sd, 0x12233001, 40, tdat );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .dword 0xdeadbeefdeadbeef
tdat2: .dword 0xdeadbeefdeadbeef
tdat3: .dword 0xdeadbeefdeadbeef
tdat4: .dword 0xdeadbeefdeadbeef
tdat5: .dword 0xdeadbeefdeadbeef
tdat6: .dword 0xdeadbeefdeadbeef
tdat7: .dword 0xdeadbeefdeadbeef
tdat8: .dword 0xdeadbeefdeadbeef
tdat9: .dword 0xdeadbeefdeadbeef
tdat10: .dword 0xdeadbeefdeadbeef
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 1,829 | tb/tools/isa/rv32ui/ori.S | # See LICENSE for license details.
#*****************************************************************************
# ori.S
#-----------------------------------------------------------------------------
#
# Test ori instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, ori, 0xffffffffffffff0f, 0xffffffffff00ff00, 0xf0f );
TEST_IMM_OP( 3, ori, 0x000000000ff00ff0, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_OP( 4, ori, 0x0000000000ff07ff, 0x0000000000ff00ff, 0x70f );
TEST_IMM_OP( 5, ori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 6, ori, 0xff00fff0, 0xff00ff00, 0x0f0 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 7, 0, ori, 0x000000000ff00ff0, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_DEST_BYPASS( 8, 1, ori, 0x0000000000ff07ff, 0x0000000000ff00ff, 0x70f );
TEST_IMM_DEST_BYPASS( 9, 2, ori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 10, 0, ori, 0x000000000ff00ff0, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 11, 1, ori, 0xffffffffffffffff, 0x0000000000ff00ff, 0xf0f );
TEST_IMM_SRC1_BYPASS( 12, 2, ori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
TEST_IMM_ZEROSRC1( 13, ori, 0x0f0, 0x0f0 );
TEST_IMM_ZERODEST( 14, ori, 0x00ff00ff, 0x70f );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,302 | tb/tools/isa/rv32ui/lbu.S | # See LICENSE for license details.
#*****************************************************************************
# lbu.S
#-----------------------------------------------------------------------------
#
# Test lbu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lbu, 0x00000000000000ff, 0, tdat );
TEST_LD_OP( 3, lbu, 0x0000000000000000, 1, tdat );
TEST_LD_OP( 4, lbu, 0x00000000000000f0, 2, tdat );
TEST_LD_OP( 5, lbu, 0x000000000000000f, 3, tdat );
# Test with negative offset
TEST_LD_OP( 6, lbu, 0x00000000000000ff, -3, tdat4 );
TEST_LD_OP( 7, lbu, 0x0000000000000000, -2, tdat4 );
TEST_LD_OP( 8, lbu, 0x00000000000000f0, -1, tdat4 );
TEST_LD_OP( 9, lbu, 0x000000000000000f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x00000000000000ff, \
la x1, tdat; \
addi x1, x1, -32; \
lbu x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x0000000000000000, \
la x1, tdat; \
addi x1, x1, -6; \
lbu x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lbu, 0x00000000000000f0, 1, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lbu, 0x000000000000000f, 1, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lbu, 0x0000000000000000, 1, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lbu, 0x00000000000000f0, 1, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lbu, 0x000000000000000f, 1, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lbu, 0x0000000000000000, 1, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lbu x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lbu x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .byte 0xff
tdat2: .byte 0x00
tdat3: .byte 0xf0
tdat4: .byte 0x0f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,326 | tb/tools/isa/rv32ui/lwu.S | # See LICENSE for license details.
#*****************************************************************************
# lwu.S
#-----------------------------------------------------------------------------
#
# Test lwu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lwu, 0x0000000000ff00ff, 0, tdat );
TEST_LD_OP( 3, lwu, 0x00000000ff00ff00, 4, tdat );
TEST_LD_OP( 4, lwu, 0x000000000ff00ff0, 8, tdat );
TEST_LD_OP( 5, lwu, 0x00000000f00ff00f, 12, tdat );
# Test with negative offset
TEST_LD_OP( 6, lwu, 0x0000000000ff00ff, -12, tdat4 );
TEST_LD_OP( 7, lwu, 0x00000000ff00ff00, -8, tdat4 );
TEST_LD_OP( 8, lwu, 0x000000000ff00ff0, -4, tdat4 );
TEST_LD_OP( 9, lwu, 0x00000000f00ff00f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x0000000000ff00ff, \
la x1, tdat; \
addi x1, x1, -32; \
lwu x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x00000000ff00ff00, \
la x1, tdat; \
addi x1, x1, -3; \
lwu x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lwu, 0x000000000ff00ff0, 4, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lwu, 0x00000000f00ff00f, 4, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lwu, 0x00000000ff00ff00, 4, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lwu, 0x000000000ff00ff0, 4, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lwu, 0x00000000f00ff00f, 4, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lwu, 0x00000000ff00ff00, 4, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lwu x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lwu x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .word 0x00ff00ff
tdat2: .word 0xff00ff00
tdat3: .word 0x0ff00ff0
tdat4: .word 0xf00ff00f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,680 | tb/tools/isa/rv32ui/sw.S | # See LICENSE for license details.
#*****************************************************************************
# sw.S
#-----------------------------------------------------------------------------
#
# Test sw instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_ST_OP( 2, lw, sw, 0x0000000000aa00aa, 0, tdat );
TEST_ST_OP( 3, lw, sw, 0xffffffffaa00aa00, 4, tdat );
TEST_ST_OP( 4, lw, sw, 0x000000000aa00aa0, 8, tdat );
TEST_ST_OP( 5, lw, sw, 0xffffffffa00aa00a, 12, tdat );
# Test with negative offset
TEST_ST_OP( 6, lw, sw, 0x0000000000aa00aa, -12, tdat8 );
TEST_ST_OP( 7, lw, sw, 0xffffffffaa00aa00, -8, tdat8 );
TEST_ST_OP( 8, lw, sw, 0x000000000aa00aa0, -4, tdat8 );
TEST_ST_OP( 9, lw, sw, 0xffffffffa00aa00a, 0, tdat8 );
# Test with a negative base
TEST_CASE( 10, x5, 0x12345678, \
la x1, tdat9; \
li x2, 0x12345678; \
addi x4, x1, -32; \
sw x2, 32(x4); \
lw x5, 0(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x58213098, \
la x1, tdat9; \
li x2, 0x58213098; \
addi x1, x1, -3; \
sw x2, 7(x1); \
la x4, tdat10; \
lw x5, 0(x4); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_ST_SRC12_BYPASS( 12, 0, 0, lw, sw, 0xffffffffaabbccdd, 0, tdat );
TEST_ST_SRC12_BYPASS( 13, 0, 1, lw, sw, 0xffffffffdaabbccd, 4, tdat );
TEST_ST_SRC12_BYPASS( 14, 0, 2, lw, sw, 0xffffffffddaabbcc, 8, tdat );
TEST_ST_SRC12_BYPASS( 15, 1, 0, lw, sw, 0xffffffffcddaabbc, 12, tdat );
TEST_ST_SRC12_BYPASS( 16, 1, 1, lw, sw, 0xffffffffccddaabb, 16, tdat );
TEST_ST_SRC12_BYPASS( 17, 2, 0, lw, sw, 0xffffffffbccddaab, 20, tdat );
TEST_ST_SRC21_BYPASS( 18, 0, 0, lw, sw, 0x00112233, 0, tdat );
TEST_ST_SRC21_BYPASS( 19, 0, 1, lw, sw, 0x30011223, 4, tdat );
TEST_ST_SRC21_BYPASS( 20, 0, 2, lw, sw, 0x33001122, 8, tdat );
TEST_ST_SRC21_BYPASS( 21, 1, 0, lw, sw, 0x23300112, 12, tdat );
TEST_ST_SRC21_BYPASS( 22, 1, 1, lw, sw, 0x22330011, 16, tdat );
TEST_ST_SRC21_BYPASS( 23, 2, 0, lw, sw, 0x12233001, 20, tdat );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .word 0xdeadbeef
tdat2: .word 0xdeadbeef
tdat3: .word 0xdeadbeef
tdat4: .word 0xdeadbeef
tdat5: .word 0xdeadbeef
tdat6: .word 0xdeadbeef
tdat7: .word 0xdeadbeef
tdat8: .word 0xdeadbeef
tdat9: .word 0xdeadbeef
tdat10: .word 0xdeadbeef
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 4,003 | tb/tools/isa/rv32ui/sra.S | # See LICENSE for license details.
#*****************************************************************************
# sra.S
#-----------------------------------------------------------------------------
#
# Test sra instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, sra, 0xffffffff80000000, 0xffffffff80000000, 0 );
TEST_RR_OP( 3, sra, 0xffffffffc0000000, 0xffffffff80000000, 1 );
TEST_RR_OP( 4, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_OP( 5, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_OP( 6, sra, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_RR_OP( 7, sra, 0x000000007fffffff, 0x000000007fffffff, 0 );
TEST_RR_OP( 8, sra, 0x000000003fffffff, 0x000000007fffffff, 1 );
TEST_RR_OP( 9, sra, 0x0000000000ffffff, 0x000000007fffffff, 7 );
TEST_RR_OP( 10, sra, 0x000000000001ffff, 0x000000007fffffff, 14 );
TEST_RR_OP( 11, sra, 0x0000000000000000, 0x000000007fffffff, 31 );
TEST_RR_OP( 12, sra, 0xffffffff81818181, 0xffffffff81818181, 0 );
TEST_RR_OP( 13, sra, 0xffffffffc0c0c0c0, 0xffffffff81818181, 1 );
TEST_RR_OP( 14, sra, 0xffffffffff030303, 0xffffffff81818181, 7 );
TEST_RR_OP( 15, sra, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_RR_OP( 16, sra, 0xffffffffffffffff, 0xffffffff81818181, 31 );
# Verify that shifts only use bottom five bits
TEST_RR_OP( 17, sra, 0xffffffff81818181, 0xffffffff81818181, 0xffffffffffffffc0 );
TEST_RR_OP( 18, sra, 0xffffffffc0c0c0c0, 0xffffffff81818181, 0xffffffffffffffc1 );
TEST_RR_OP( 19, sra, 0xffffffffff030303, 0xffffffff81818181, 0xffffffffffffffc7 );
TEST_RR_OP( 20, sra, 0xfffffffffffe0606, 0xffffffff81818181, 0xffffffffffffffce );
TEST_RR_OP( 21, sra, 0xffffffffffffffff, 0xffffffff81818181, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 22, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC2_EQ_DEST( 23, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC12_EQ_DEST( 24, sra, 0, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 25, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_DEST_BYPASS( 26, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_DEST_BYPASS( 27, 2, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC12_BYPASS( 28, 0, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC12_BYPASS( 29, 0, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC12_BYPASS( 30, 0, 2, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC12_BYPASS( 31, 1, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC12_BYPASS( 32, 1, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC12_BYPASS( 33, 2, 0, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC21_BYPASS( 34, 0, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC21_BYPASS( 35, 0, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC21_BYPASS( 36, 0, 2, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_SRC21_BYPASS( 37, 1, 0, sra, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_RR_SRC21_BYPASS( 38, 1, 1, sra, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_RR_SRC21_BYPASS( 39, 2, 0, sra, 0xffffffffffffffff, 0xffffffff80000000, 31 );
TEST_RR_ZEROSRC1( 40, sra, 0, 15 );
TEST_RR_ZEROSRC2( 41, sra, 32, 32 );
TEST_RR_ZEROSRC12( 42, sra, 0 );
TEST_RR_ZERODEST( 43, sra, 1024, 2048 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,623 | tb/tools/isa/rv32ui/or.S | # See LICENSE for license details.
#*****************************************************************************
# or.S
#-----------------------------------------------------------------------------
#
# Test or instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_RR_OP( 2, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_OP( 3, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_OP( 4, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_OP( 5, or, 0xf0fff0ff, 0xf00ff00f, 0xf0f0f0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 6, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC2_EQ_DEST( 7, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_EQ_DEST( 8, or, 0xff00ff00, 0xff00ff00 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 9, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_DEST_BYPASS( 10, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_DEST_BYPASS( 11, 2, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 12, 0, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 13, 0, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 14, 0, 2, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 15, 1, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 16, 1, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 17, 2, 0, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 18, 0, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 19, 0, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 20, 0, 2, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 21, 1, 0, or, 0xff0fff0f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 22, 1, 1, or, 0xfff0fff0, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 23, 2, 0, or, 0x0fff0fff, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_ZEROSRC1( 24, or, 0xff00ff00, 0xff00ff00 );
TEST_RR_ZEROSRC2( 25, or, 0x00ff00ff, 0x00ff00ff );
TEST_RR_ZEROSRC12( 26, or, 0 );
TEST_RR_ZERODEST( 27, or, 0x11111111, 0x22222222 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,597 | tb/tools/isa/rv32ui/srai.S | # See LICENSE for license details.
#*****************************************************************************
# srai.S
#-----------------------------------------------------------------------------
#
# Test srai instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, srai, 0xffffff8000000000, 0xffffff8000000000, 0 );
TEST_IMM_OP( 3, srai, 0xffffffffc0000000, 0xffffffff80000000, 1 );
TEST_IMM_OP( 4, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_OP( 5, srai, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_OP( 6, srai, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_OP( 7, srai, 0x000000007fffffff, 0x000000007fffffff, 0 );
TEST_IMM_OP( 8, srai, 0x000000003fffffff, 0x000000007fffffff, 1 );
TEST_IMM_OP( 9, srai, 0x0000000000ffffff, 0x000000007fffffff, 7 );
TEST_IMM_OP( 10, srai, 0x000000000001ffff, 0x000000007fffffff, 14 );
TEST_IMM_OP( 11, srai, 0x0000000000000000, 0x000000007fffffff, 31 );
TEST_IMM_OP( 12, srai, 0xffffffff81818181, 0xffffffff81818181, 0 );
TEST_IMM_OP( 13, srai, 0xffffffffc0c0c0c0, 0xffffffff81818181, 1 );
TEST_IMM_OP( 14, srai, 0xffffffffff030303, 0xffffffff81818181, 7 );
TEST_IMM_OP( 15, srai, 0xfffffffffffe0606, 0xffffffff81818181, 14 );
TEST_IMM_OP( 16, srai, 0xffffffffffffffff, 0xffffffff81818181, 31 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, srai, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, srai, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, srai, 0xffffffffff000000, 0xffffffff80000000, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, srai, 0xfffffffffffe0000, 0xffffffff80000000, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, srai, 0xffffffffffffffff, 0xffffffff80000001, 31 );
TEST_IMM_ZEROSRC1( 24, srai, 0, 4 );
TEST_IMM_ZERODEST( 25, srai, 33, 10 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,013 | tb/tools/isa/rv32ui/bne.S | # See LICENSE for license details.
#*****************************************************************************
# bne.S
#-----------------------------------------------------------------------------
#
# Test bne instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, bne, 0, 1 );
TEST_BR2_OP_TAKEN( 3, bne, 1, 0 );
TEST_BR2_OP_TAKEN( 4, bne, -1, 1 );
TEST_BR2_OP_TAKEN( 5, bne, 1, -1 );
TEST_BR2_OP_NOTTAKEN( 6, bne, 0, 0 );
TEST_BR2_OP_NOTTAKEN( 7, bne, 1, 1 );
TEST_BR2_OP_NOTTAKEN( 8, bne, -1, -1 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 9, 0, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 15, 0, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, bne, 0, 0 );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, bne, 0, 0 );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 21, x1, 3, \
li x1, 1; \
bne x1, x0, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,201 | tb/tools/isa/rv32ui/sltiu.S | # See LICENSE for license details.
#*****************************************************************************
# sltiu.S
#-----------------------------------------------------------------------------
#
# Test sltiu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, sltiu, 0, 0x0000000000000000, 0x000 );
TEST_IMM_OP( 3, sltiu, 0, 0x0000000000000001, 0x001 );
TEST_IMM_OP( 4, sltiu, 1, 0x0000000000000003, 0x007 );
TEST_IMM_OP( 5, sltiu, 0, 0x0000000000000007, 0x003 );
TEST_IMM_OP( 6, sltiu, 1, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 7, sltiu, 0, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 8, sltiu, 1, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 9, sltiu, 1, 0x0000000000000000, 0x7ff );
TEST_IMM_OP( 10, sltiu, 0, 0x000000007fffffff, 0x000 );
TEST_IMM_OP( 11, sltiu, 0, 0x000000007fffffff, 0x7ff );
TEST_IMM_OP( 12, sltiu, 0, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 13, sltiu, 1, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 14, sltiu, 1, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 15, sltiu, 0, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 16, sltiu, 0, 0xffffffffffffffff, 0xfff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, sltiu, 1, 11, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, sltiu, 0, 15, 10 );
TEST_IMM_DEST_BYPASS( 19, 1, sltiu, 1, 10, 16 );
TEST_IMM_DEST_BYPASS( 20, 2, sltiu, 0, 16, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, sltiu, 1, 11, 15 );
TEST_IMM_SRC1_BYPASS( 22, 1, sltiu, 0, 17, 8 );
TEST_IMM_SRC1_BYPASS( 23, 2, sltiu, 1, 12, 14 );
TEST_IMM_ZEROSRC1( 24, sltiu, 1, 0xfff );
TEST_IMM_ZERODEST( 25, sltiu, 0x00ff00ff, 0xfff );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 1,087 | tb/tools/isa/rv32ui/jal.S | # See LICENSE for license details.
#*****************************************************************************
# jal.S
#-----------------------------------------------------------------------------
#
# Test jal instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Test 2: Basic test
#-------------------------------------------------------------
test_2:
li TESTNUM, 2
li ra, 0
jal x4, target_2
linkaddr_2:
nop
nop
j fail
target_2:
la x2, linkaddr_2
bne x2, x4, fail
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 3, ra, 3, \
li ra, 1; \
jal x0, 1f; \
addi ra, ra, 1; \
addi ra, ra, 1; \
addi ra, ra, 1; \
addi ra, ra, 1; \
1: addi ra, ra, 1; \
addi ra, ra, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 1,425 | tb/tools/isa/rv32ui/jalr.S | # See LICENSE for license details.
#*****************************************************************************
# jalr.S
#-----------------------------------------------------------------------------
#
# Test jalr instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Test 2: Basic test
#-------------------------------------------------------------
test_2:
li TESTNUM, 2
li t0, 0
la t1, target_2
jalr t0, t1, 0
linkaddr_2:
j fail
target_2:
la t1, linkaddr_2
bne t0, t1, fail
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_JALR_SRC1_BYPASS( 4, 0, jalr );
TEST_JALR_SRC1_BYPASS( 5, 1, jalr );
TEST_JALR_SRC1_BYPASS( 6, 2, jalr );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
.option push
.option norvc
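# norvc keeps every instruction in this case 4 bytes wide, so "jr t1, -4" below lands on the addi immediately before label 1.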
TEST_CASE( 7, t0, 4, \
li t0, 1; \
la t1, 1f; \
jr t1, -4; \
addi t0, t0, 1; \
addi t0, t0, 1; \
addi t0, t0, 1; \
addi t0, t0, 1; \
1: addi t0, t0, 1; \
addi t0, t0, 1; \
)
.option pop
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,160 | tb/tools/isa/rv32ui/srli.S | # See LICENSE for license details.
#*****************************************************************************
# srli.S
#-----------------------------------------------------------------------------
#
# Test srli instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
#define TEST_SRL(n, v, a) \
TEST_IMM_OP(n, srli, ((v) & ((1 << (__riscv_xlen-1) << 1) - 1)) >> (a), v, a)
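# ((1 << (__riscv_xlen-1) << 1) - 1) is an all-ones mask of __riscv_xlen bits (built without a full-width shift), so the expected value is the logical, zero-filling right shift of v.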
TEST_SRL( 2, 0xffffffff80000000, 0 );
TEST_SRL( 3, 0xffffffff80000000, 1 );
TEST_SRL( 4, 0xffffffff80000000, 7 );
TEST_SRL( 5, 0xffffffff80000000, 14 );
TEST_SRL( 6, 0xffffffff80000001, 31 );
TEST_SRL( 7, 0xffffffffffffffff, 0 );
TEST_SRL( 8, 0xffffffffffffffff, 1 );
TEST_SRL( 9, 0xffffffffffffffff, 7 );
TEST_SRL( 10, 0xffffffffffffffff, 14 );
TEST_SRL( 11, 0xffffffffffffffff, 31 );
TEST_SRL( 12, 0x0000000021212121, 0 );
TEST_SRL( 13, 0x0000000021212121, 1 );
TEST_SRL( 14, 0x0000000021212121, 7 );
TEST_SRL( 15, 0x0000000021212121, 14 );
TEST_SRL( 16, 0x0000000021212121, 31 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, srli, 0x01000000, 0x80000000, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, srli, 0x01000000, 0x80000000, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, srli, 0x00020000, 0x80000000, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, srli, 0x00000001, 0x80000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, srli, 0x01000000, 0x80000000, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, srli, 0x00020000, 0x80000000, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, srli, 0x00000001, 0x80000001, 31 );
TEST_IMM_ZEROSRC1( 24, srli, 0, 4 );
TEST_IMM_ZERODEST( 25, srli, 33, 10 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,821 | tb/tools/isa/rv32ui/slli.S | # See LICENSE for license details.
#*****************************************************************************
# slli.S
#-----------------------------------------------------------------------------
#
# Test slli instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, slli, 0x0000000000000001, 0x0000000000000001, 0 );
TEST_IMM_OP( 3, slli, 0x0000000000000002, 0x0000000000000001, 1 );
TEST_IMM_OP( 4, slli, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_IMM_OP( 5, slli, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_IMM_OP( 6, slli, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_IMM_OP( 7, slli, 0xffffffffffffffff, 0xffffffffffffffff, 0 );
TEST_IMM_OP( 8, slli, 0xfffffffffffffffe, 0xffffffffffffffff, 1 );
TEST_IMM_OP( 9, slli, 0xffffffffffffff80, 0xffffffffffffffff, 7 );
TEST_IMM_OP( 10, slli, 0xffffffffffffc000, 0xffffffffffffffff, 14 );
TEST_IMM_OP( 11, slli, 0xffffffff80000000, 0xffffffffffffffff, 31 );
TEST_IMM_OP( 12, slli, 0x0000000021212121, 0x0000000021212121, 0 );
TEST_IMM_OP( 13, slli, 0x0000000042424242, 0x0000000021212121, 1 );
TEST_IMM_OP( 14, slli, 0x0000001090909080, 0x0000000021212121, 7 );
TEST_IMM_OP( 15, slli, 0x0000084848484000, 0x0000000021212121, 14 );
TEST_IMM_OP( 16, slli, 0x1090909080000000, 0x0000000021212121, 31 );
#if __riscv_xlen == 64
TEST_IMM_OP( 50, slli, 0x8000000000000000, 0x0000000000000001, 63 );
TEST_IMM_OP( 51, slli, 0xffffff8000000000, 0xffffffffffffffff, 39 );
TEST_IMM_OP( 52, slli, 0x0909080000000000, 0x0000000021212121, 43 );
#endif
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, slli, 0x00000080, 0x00000001, 7 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, slli, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_IMM_DEST_BYPASS( 19, 1, slli, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_IMM_DEST_BYPASS( 20, 2, slli, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_IMM_SRC1_BYPASS( 21, 0, slli, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_IMM_SRC1_BYPASS( 22, 1, slli, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_IMM_SRC1_BYPASS( 23, 2, slli, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_IMM_ZEROSRC1( 24, slli, 0, 31 );
TEST_IMM_ZERODEST( 25, slli, 33, 20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,290 | tb/tools/isa/rv32ui/lh.S | # See LICENSE for license details.
#*****************************************************************************
# lh.S
#-----------------------------------------------------------------------------
#
# Test lh instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lh, 0x00000000000000ff, 0, tdat );
TEST_LD_OP( 3, lh, 0xffffffffffffff00, 2, tdat );
TEST_LD_OP( 4, lh, 0x0000000000000ff0, 4, tdat );
TEST_LD_OP( 5, lh, 0xfffffffffffff00f, 6, tdat );
# Test with negative offset
TEST_LD_OP( 6, lh, 0x00000000000000ff, -6, tdat4 );
TEST_LD_OP( 7, lh, 0xffffffffffffff00, -4, tdat4 );
TEST_LD_OP( 8, lh, 0x0000000000000ff0, -2, tdat4 );
TEST_LD_OP( 9, lh, 0xfffffffffffff00f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0x00000000000000ff, \
la x1, tdat; \
addi x1, x1, -32; \
lh x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0xffffffffffffff00, \
la x1, tdat; \
addi x1, x1, -5; \
lh x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lh, 0x0000000000000ff0, 2, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lh, 0xfffffffffffff00f, 2, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lh, 0xffffffffffffff00, 2, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lh, 0x0000000000000ff0, 2, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lh, 0xfffffffffffff00f, 2, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lh, 0xffffffffffffff00, 2, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lh x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lh x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .half 0x00ff
tdat2: .half 0xff00
tdat3: .half 0x0ff0
tdat4: .half 0xf00f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 3,145 | tb/tools/isa/rv32ui/add.S | # See LICENSE for license details.
#*****************************************************************************
# add.S
#-----------------------------------------------------------------------------
#
# Test add instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, add, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, add, 0x00000002, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, add, 0x0000000a, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, add, 0xffffffffffff8000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, add, 0xffffffff80000000, 0xffffffff80000000, 0x00000000 );
TEST_RR_OP( 7, add, 0xffffffff7fff8000, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 8, add, 0x0000000000007fff, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 9, add, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 10, add, 0x0000000080007ffe, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 11, add, 0xffffffff80007fff, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 12, add, 0x000000007fff7fff, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 13, add, 0xffffffffffffffff, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 14, add, 0x0000000000000000, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 15, add, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff );
TEST_RR_OP( 16, add, 0x0000000080000000, 0x0000000000000001, 0x000000007fffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 17, add, 24, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 18, add, 25, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 19, add, 26, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 20, 0, add, 24, 13, 11 );
TEST_RR_DEST_BYPASS( 21, 1, add, 25, 14, 11 );
TEST_RR_DEST_BYPASS( 22, 2, add, 26, 15, 11 );
TEST_RR_SRC12_BYPASS( 23, 0, 0, add, 24, 13, 11 );
TEST_RR_SRC12_BYPASS( 24, 0, 1, add, 25, 14, 11 );
TEST_RR_SRC12_BYPASS( 25, 0, 2, add, 26, 15, 11 );
TEST_RR_SRC12_BYPASS( 26, 1, 0, add, 24, 13, 11 );
TEST_RR_SRC12_BYPASS( 27, 1, 1, add, 25, 14, 11 );
TEST_RR_SRC12_BYPASS( 28, 2, 0, add, 26, 15, 11 );
TEST_RR_SRC21_BYPASS( 29, 0, 0, add, 24, 13, 11 );
TEST_RR_SRC21_BYPASS( 30, 0, 1, add, 25, 14, 11 );
TEST_RR_SRC21_BYPASS( 31, 0, 2, add, 26, 15, 11 );
TEST_RR_SRC21_BYPASS( 32, 1, 0, add, 24, 13, 11 );
TEST_RR_SRC21_BYPASS( 33, 1, 1, add, 25, 14, 11 );
TEST_RR_SRC21_BYPASS( 34, 2, 0, add, 26, 15, 11 );
TEST_RR_ZEROSRC1( 35, add, 15, 15 );
TEST_RR_ZEROSRC2( 36, add, 32, 32 );
TEST_RR_ZEROSRC12( 37, add, 0 );
TEST_RR_ZERODEST( 38, add, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,282 | tb/tools/isa/rv32ui/lb.S | # See LICENSE for license details.
#*****************************************************************************
# lb.S
#-----------------------------------------------------------------------------
#
# Test lb instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lb, 0xffffffffffffffff, 0, tdat );
TEST_LD_OP( 3, lb, 0x0000000000000000, 1, tdat );
TEST_LD_OP( 4, lb, 0xfffffffffffffff0, 2, tdat );
TEST_LD_OP( 5, lb, 0x000000000000000f, 3, tdat );
# Test with negative offset
TEST_LD_OP( 6, lb, 0xffffffffffffffff, -3, tdat4 );
TEST_LD_OP( 7, lb, 0x0000000000000000, -2, tdat4 );
TEST_LD_OP( 8, lb, 0xfffffffffffffff0, -1, tdat4 );
TEST_LD_OP( 9, lb, 0x000000000000000f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0xffffffffffffffff, \
la x1, tdat; \
addi x1, x1, -32; \
lb x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x0000000000000000, \
la x1, tdat; \
addi x1, x1, -6; \
lb x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lb, 0xfffffffffffffff0, 1, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lb, 0x000000000000000f, 1, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lb, 0x0000000000000000, 1, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lb, 0xfffffffffffffff0, 1, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lb, 0x000000000000000f, 1, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lb, 0x0000000000000000, 1, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lb x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lb x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .byte 0xff
tdat2: .byte 0x00
tdat3: .byte 0xf0
tdat4: .byte 0x0f
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 1,861 | tb/tools/isa/rv32ui/xori.S | # See LICENSE for license details.
#*****************************************************************************
# xori.S
#-----------------------------------------------------------------------------
#
# Test xori instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, xori, 0xffffffffff00f00f, 0x0000000000ff0f00, 0xf0f );
TEST_IMM_OP( 3, xori, 0x000000000ff00f00, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_OP( 4, xori, 0x0000000000ff0ff0, 0x0000000000ff08ff, 0x70f );
TEST_IMM_OP( 5, xori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 6, xori, 0xffffffffff00f00f, 0xffffffffff00f700, 0x70f );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 7, 0, xori, 0x000000000ff00f00, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_DEST_BYPASS( 8, 1, xori, 0x0000000000ff0ff0, 0x0000000000ff08ff, 0x70f );
TEST_IMM_DEST_BYPASS( 9, 2, xori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 10, 0, xori, 0x000000000ff00f00, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 11, 1, xori, 0x0000000000ff0ff0, 0x0000000000ff0fff, 0x00f );
TEST_IMM_SRC1_BYPASS( 12, 2, xori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );
TEST_IMM_ZEROSRC1( 13, xori, 0x0f0, 0x0f0 );
TEST_IMM_ZERODEST( 14, xori, 0x00ff00ff, 0x70f );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,949 | tb/tools/isa/rv32ui/slt.S | # See LICENSE for license details.
#*****************************************************************************
# slt.S
#-----------------------------------------------------------------------------
#
# Test slt instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, slt, 0, 0x0000000000000000, 0x0000000000000000 );
TEST_RR_OP( 3, slt, 0, 0x0000000000000001, 0x0000000000000001 );
TEST_RR_OP( 4, slt, 1, 0x0000000000000003, 0x0000000000000007 );
TEST_RR_OP( 5, slt, 0, 0x0000000000000007, 0x0000000000000003 );
TEST_RR_OP( 6, slt, 0, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 7, slt, 1, 0xffffffff80000000, 0x0000000000000000 );
TEST_RR_OP( 8, slt, 1, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 9, slt, 1, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 10, slt, 0, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 11, slt, 0, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 12, slt, 1, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 13, slt, 0, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 14, slt, 0, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 15, slt, 1, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 16, slt, 0, 0xffffffffffffffff, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 17, slt, 0, 14, 13 );
TEST_RR_SRC2_EQ_DEST( 18, slt, 1, 11, 13 );
TEST_RR_SRC12_EQ_DEST( 19, slt, 0, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 20, 0, slt, 1, 11, 13 );
TEST_RR_DEST_BYPASS( 21, 1, slt, 0, 14, 13 );
TEST_RR_DEST_BYPASS( 22, 2, slt, 1, 12, 13 );
TEST_RR_SRC12_BYPASS( 23, 0, 0, slt, 0, 14, 13 );
TEST_RR_SRC12_BYPASS( 24, 0, 1, slt, 1, 11, 13 );
TEST_RR_SRC12_BYPASS( 25, 0, 2, slt, 0, 15, 13 );
TEST_RR_SRC12_BYPASS( 26, 1, 0, slt, 1, 10, 13 );
TEST_RR_SRC12_BYPASS( 27, 1, 1, slt, 0, 16, 13 );
TEST_RR_SRC12_BYPASS( 28, 2, 0, slt, 1, 9, 13 );
TEST_RR_SRC21_BYPASS( 29, 0, 0, slt, 0, 17, 13 );
TEST_RR_SRC21_BYPASS( 30, 0, 1, slt, 1, 8, 13 );
TEST_RR_SRC21_BYPASS( 31, 0, 2, slt, 0, 18, 13 );
TEST_RR_SRC21_BYPASS( 32, 1, 0, slt, 1, 7, 13 );
TEST_RR_SRC21_BYPASS( 33, 1, 1, slt, 0, 19, 13 );
TEST_RR_SRC21_BYPASS( 34, 2, 0, slt, 1, 6, 13 );
TEST_RR_ZEROSRC1( 35, slt, 0, -1 );
TEST_RR_ZEROSRC2( 36, slt, 1, -1 );
TEST_RR_ZEROSRC12( 37, slt, 0 );
TEST_RR_ZERODEST( 38, slt, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,149 | tb/tools/isa/rv32ui/bge.S | # See LICENSE for license details.
#*****************************************************************************
# bge.S
#-----------------------------------------------------------------------------
#
# Test bge instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, bge, 0, 0 );
TEST_BR2_OP_TAKEN( 3, bge, 1, 1 );
TEST_BR2_OP_TAKEN( 4, bge, -1, -1 );
TEST_BR2_OP_TAKEN( 5, bge, 1, 0 );
TEST_BR2_OP_TAKEN( 6, bge, 1, -1 );
TEST_BR2_OP_TAKEN( 7, bge, -1, -2 );
TEST_BR2_OP_NOTTAKEN( 8, bge, 0, 1 );
TEST_BR2_OP_NOTTAKEN( 9, bge, -1, 1 );
TEST_BR2_OP_NOTTAKEN( 10, bge, -2, -1 );
TEST_BR2_OP_NOTTAKEN( 11, bge, -2, 1 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 12, 0, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 13, 0, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 14, 0, 2, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 15, 1, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 16, 1, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 17, 2, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 18, 0, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 19, 0, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 20, 0, 2, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 21, 1, 0, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 22, 1, 1, bge, -1, 0 );
TEST_BR2_SRC12_BYPASS( 23, 2, 0, bge, -1, 0 );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 24, x1, 3, \
li x1, 1; \
bge x1, x0, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,028 | tb/tools/isa/rv32ui/blt.S | # See LICENSE for license details.
#*****************************************************************************
# blt.S
#-----------------------------------------------------------------------------
#
# Test blt instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------
# Each test checks both forward and backward branches
TEST_BR2_OP_TAKEN( 2, blt, 0, 1 );
TEST_BR2_OP_TAKEN( 3, blt, -1, 1 );
TEST_BR2_OP_TAKEN( 4, blt, -2, -1 );
TEST_BR2_OP_NOTTAKEN( 5, blt, 1, 0 );
TEST_BR2_OP_NOTTAKEN( 6, blt, 1, -1 );
TEST_BR2_OP_NOTTAKEN( 7, blt, -1, -2 );
TEST_BR2_OP_NOTTAKEN( 8, blt, 1, -2 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_BR2_SRC12_BYPASS( 9, 0, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 15, 0, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, blt, 0, -1 );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, blt, 0, -1 );
#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------
TEST_CASE( 21, x1, 3, \
li x1, 1; \
blt x0, x1, 1f; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
addi x1, x1, 1; \
1: addi x1, x1, 1; \
addi x1, x1, 1; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,960 | tb/tools/isa/rv32um/mulhu.S | # See LICENSE for license details.
#*****************************************************************************
# mulhu.S
#-----------------------------------------------------------------------------
#
# Test mulhu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulhu, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulhu, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulhu, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulhu, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mulhu, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mulhu, 0x7fffc000, 0x80000000, 0xffff8000 );
TEST_RR_OP(30, mulhu, 0x0001fefe, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mulhu, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(32, mulhu, 0xfe010000, 0xff000000, 0xff000000 );
TEST_RR_OP(33, mulhu, 0xfffffffe, 0xffffffff, 0xffffffff );
TEST_RR_OP(34, mulhu, 0x00000000, 0xffffffff, 0x00000001 );
TEST_RR_OP(35, mulhu, 0x00000000, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
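# (13<<20)*(11<<20) = 143<<40, whose upper 32 bits are 143*256 = 36608; likewise 39424 = 154*256 and 43264 = 169*256.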
TEST_RR_SRC1_EQ_DEST( 8, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC2_EQ_DEST( 9, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_EQ_DEST( 10, mulhu, 43264, 13<<20 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 12, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 13, 2, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulhu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulhu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulhu, 42240, 15<<20, 11<<20 );
TEST_RR_ZEROSRC1( 26, mulhu, 0, 31<<26 );
TEST_RR_ZEROSRC2( 27, mulhu, 0, 32<<26 );
TEST_RR_ZEROSRC12( 28, mulhu, 0 );
TEST_RR_ZERODEST( 29, mulhu, 33<<20, 34<<20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,923 | tb/tools/isa/rv32um/mulh.S | # See LICENSE for license details.
#*****************************************************************************
# mulh.S
#-----------------------------------------------------------------------------
#
# Test mulh instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulh, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulh, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulh, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulh, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mulh, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mulh, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP(30, mulh, 0xffff0081, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mulh, 0xffff0081, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(32, mulh, 0x00010000, 0xff000000, 0xff000000 );
TEST_RR_OP(33, mulh, 0x00000000, 0xffffffff, 0xffffffff );
TEST_RR_OP(34, mulh, 0xffffffff, 0xffffffff, 0x00000001 );
TEST_RR_OP(35, mulh, 0xffffffff, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC2_EQ_DEST( 9, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_EQ_DEST( 10, mulh, 43264, 13<<20 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 12, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 13, 2, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulh, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulh, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulh, 42240, 15<<20, 11<<20 );
TEST_RR_ZEROSRC1( 26, mulh, 0, 31<<26 );
TEST_RR_ZEROSRC2( 27, mulh, 0, 32<<26 );
TEST_RR_ZEROSRC12( 28, mulh, 0 );
TEST_RR_ZERODEST( 29, mulh, 33<<20, 34<<20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,997 | tb/tools/isa/rv32um/mulhsu.S | # See LICENSE for license details.
#*****************************************************************************
# mulhsu.S
#-----------------------------------------------------------------------------
#
# Test mulhsu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, mulhsu, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mulhsu, 0x00000000, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mulhsu, 0x00000000, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mulhsu, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mulhsu, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mulhsu, 0x80004000, 0x80000000, 0xffff8000 );
TEST_RR_OP(30, mulhsu, 0xffff0081, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mulhsu, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(32, mulhsu, 0xff010000, 0xff000000, 0xff000000 );
TEST_RR_OP(33, mulhsu, 0xffffffff, 0xffffffff, 0xffffffff );
TEST_RR_OP(34, mulhsu, 0xffffffff, 0xffffffff, 0x00000001 );
TEST_RR_OP(35, mulhsu, 0x00000000, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC2_EQ_DEST( 9, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_EQ_DEST( 10, mulhsu, 43264, 13<<20 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 12, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_DEST_BYPASS( 13, 2, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mulhsu, 36608, 13<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mulhsu, 39424, 14<<20, 11<<20 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mulhsu, 42240, 15<<20, 11<<20 );
TEST_RR_ZEROSRC1( 26, mulhsu, 0, 31<<26 );
TEST_RR_ZEROSRC2( 27, mulhsu, 0, 32<<26 );
TEST_RR_ZEROSRC12( 28, mulhsu, 0 );
TEST_RR_ZERODEST( 29, mulhsu, 33<<20, 34<<20 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 2,818 | tb/tools/isa/rv32um/mul.S | # See LICENSE for license details.
#*****************************************************************************
# mul.S
#-----------------------------------------------------------------------------
#
# Test mul instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV32U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
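# 0xb6db6db7 is the multiplicative inverse of 7 mod 2^32 (7 * 0xb6db6db7 = 0x500000001), so the low word of 0x00007e00 * 0xb6db6db7 is 0x7e00/7 = 0x1200.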
TEST_RR_OP(32, mul, 0x00001200, 0x00007e00, 0xb6db6db7 );
TEST_RR_OP(33, mul, 0x00001240, 0x00007fc0, 0xb6db6db7 );
TEST_RR_OP( 2, mul, 0x00000000, 0x00000000, 0x00000000 );
TEST_RR_OP( 3, mul, 0x00000001, 0x00000001, 0x00000001 );
TEST_RR_OP( 4, mul, 0x00000015, 0x00000003, 0x00000007 );
TEST_RR_OP( 5, mul, 0x00000000, 0x00000000, 0xffff8000 );
TEST_RR_OP( 6, mul, 0x00000000, 0x80000000, 0x00000000 );
TEST_RR_OP( 7, mul, 0x00000000, 0x80000000, 0xffff8000 );
TEST_RR_OP(30, mul, 0x0000ff7f, 0xaaaaaaab, 0x0002fe7d );
TEST_RR_OP(31, mul, 0x0000ff7f, 0x0002fe7d, 0xaaaaaaab );
TEST_RR_OP(34, mul, 0x00000000, 0xff000000, 0xff000000 );
TEST_RR_OP(35, mul, 0x00000001, 0xffffffff, 0xffffffff );
TEST_RR_OP(36, mul, 0xffffffff, 0xffffffff, 0x00000001 );
TEST_RR_OP(37, mul, 0xffffffff, 0x00000001, 0xffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 8, mul, 143, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 9, mul, 154, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 10, mul, 169, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 11, 0, mul, 143, 13, 11 );
TEST_RR_DEST_BYPASS( 12, 1, mul, 154, 14, 11 );
TEST_RR_DEST_BYPASS( 13, 2, mul, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 14, 0, 0, mul, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 15, 0, 1, mul, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 16, 0, 2, mul, 165, 15, 11 );
TEST_RR_SRC12_BYPASS( 17, 1, 0, mul, 143, 13, 11 );
TEST_RR_SRC12_BYPASS( 18, 1, 1, mul, 154, 14, 11 );
TEST_RR_SRC12_BYPASS( 19, 2, 0, mul, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 20, 0, 0, mul, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 21, 0, 1, mul, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 22, 0, 2, mul, 165, 15, 11 );
TEST_RR_SRC21_BYPASS( 23, 1, 0, mul, 143, 13, 11 );
TEST_RR_SRC21_BYPASS( 24, 1, 1, mul, 154, 14, 11 );
TEST_RR_SRC21_BYPASS( 25, 2, 0, mul, 165, 15, 11 );
TEST_RR_ZEROSRC1( 26, mul, 0, 31 );
TEST_RR_ZEROSRC2( 27, mul, 0, 32 );
TEST_RR_ZEROSRC12( 28, mul, 0 );
TEST_RR_ZERODEST( 29, mul, 33, 34 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
xiaowuzxc/SparrowRV | 1,207 | tb/tools/bootrom/lib/start.S | .section .init; /* place this code in the .init section */
.globl _start; /* declare _start as global */
.type _start,@function /* declare _start as a function */
_start:
.option push /* save the current assembler options */
.option norelax /* disable linker relaxation */
nop /* no operation */
la gp, __global_pointer$ /* set the gp global pointer; __global_pointer$ comes from the linker script, is tied to the data section, and points at the global variables */
.option pop
la sp, _sp /* set the sp stack pointer; _sp comes from the linker script and serves ordinary local variables */
/* Copy the data section, which holds initialized global and static variables */
la a0, _data_lma /* start address of the data section in program memory -> a0 */
la a1, _data /* start address of the data section in data memory -> a1 */
la a2, _edata /* end address of the data section in data memory -> a2 */
bgeu a1, a2, 2f /* if a1 >= a2, jump forward to the first "2" label; otherwise fall through */
1:
lw t0, (a0) /* load the word at the address in a0 into t0 */
sw t0, (a1) /* store t0 to the address in a1 */
addi a0, a0, 4 /* a0 += 4 */
addi a1, a1, 4 /* a1 += 4 */
bltu a1, a2, 1b /* if a1 < a2, jump back to the first "1" label; otherwise fall through */
/* data section copied */
2:
/* Clear the bss section, which holds uninitialized (zeroed) global and static variables */
la a0, __bss_start /* start address of the bss section -> a0 */
la a1, _end /* end address of the bss section -> a1 */
bgeu a0, a1, 2f /* if a0 >= a1, jump forward to the first "2" label; otherwise fall through */
1:
sw zero, (a0) /* store 0 to the address in a0 */
addi a0, a0, 4 /* a0 += 4 */
bltu a0, a1, 1b /* if a0 < a1, jump back to the first "1" label; otherwise fall through */
/* bss section cleared */
2:
call _init /* run the initialization function */
call main /* run main */
csrwi 0x347,1 /* simulation only: terminate the simulation */
loop: /* infinite loop */
j loop
trap_vector_base: /* fallback trap vector table */
j _start
.word 0
.word 0
.word 0
|
xiaowuzxc/SparrowRV | 1,250 | tb/tools/bootrom/lib/trap/trap_entry.S | .section .text.entry
.align 2
.global trap_entry
trap_entry:
addi sp, sp, -16*4
sw x1 , 0*4(sp)
sw x5 , 1*4(sp)
sw x6 , 2*4(sp)
sw x7 , 3*4(sp)
sw x10, 4*4(sp)
sw x11, 5*4(sp)
sw x12, 6*4(sp)
sw x13, 7*4(sp)
sw x14, 8*4(sp)
sw x15, 9*4(sp)
sw x16, 10*4(sp)
sw x17, 11*4(sp)
sw x28, 12*4(sp)
sw x29, 13*4(sp)
sw x30, 14*4(sp)
sw x31, 15*4(sp)
csrr a0, mcause
csrr a1, mepc
test_if_asynchronous:
srli a2, a0, 31 /* MSB of mcause is 1 if handling an asynchronous interrupt - shift to LSB to clear other bits. */
beq a2, x0, handle_synchronous /* Branch past interrupt handling if not asynchronous. */
call trap_handler
j asynchronous_return
handle_synchronous:
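/* Synchronous trap (exception or ecall): no handler is called here; simply advance
   mepc past the trapping instruction (assumes a 4-byte instruction) so that mret
   resumes execution after it. */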
addi a1, a1, 4
csrw mepc, a1
asynchronous_return:
lw x1 , 0*4(sp)
lw x5 , 1*4(sp)
lw x6 , 2*4(sp)
lw x7 , 3*4(sp)
lw x10, 4*4(sp)
lw x11, 5*4(sp)
lw x12, 6*4(sp)
lw x13, 7*4(sp)
lw x14, 8*4(sp)
lw x15, 9*4(sp)
lw x16, 10*4(sp)
lw x17, 11*4(sp)
lw x28, 12*4(sp)
lw x29, 13*4(sp)
lw x30, 14*4(sp)
lw x31, 15*4(sp)
addi sp, sp, 16*4
mret
.weak trap_handler
trap_handler:
1:
j 1b
|
xiaowuzxc/SparrowRV | 1,207 | doc/小教程/start.S | .section .init; /* declare this section as .init */
.globl _start; /* declare _start as global */
.type _start,@function /* declare _start as a function */
_start:
.option push /* save the current assembler options */
.option norelax /* disable linker relaxation */
nop /* no operation */
la gp, __global_pointer$ /* set the gp global pointer; __global_pointer$ comes from the linker script, is tied to the data section and points at the global variables */
.option pop
la sp, _sp /* set the sp stack pointer; _sp comes from the linker script and points at the stack used for ordinary local variables */
/* copy the data section, which holds initialized global and static variables */
la a0, _data_lma /* start address of the data section in program memory, loaded into a0 */
la a1, _data /* start address of the data section in data memory, loaded into a1 */
la a2, _edata /* end address of the data section in data memory, loaded into a2 */
bgeu a1, a2, 2f /* if a1 >= a2, jump forward to the next label 2; otherwise fall through */
1:
lw t0, (a0) /* load the word at the address in a0 into t0 */
sw t0, (a1) /* store t0 to the address in a1 */
addi a0, a0, 4 /* a0 += 4 */
addi a1, a1, 4 /* a1 += 4 */
bltu a1, a2, 1b /* if a1 < a2, jump back to the previous label 1; otherwise fall through */
/* data section copied */
2:
/* clear the bss section, which holds uninitialized global and static variables */
la a0, __bss_start /* start address of the bss section, loaded into a0 */
la a1, _end /* end address of the bss section, loaded into a1 */
bgeu a0, a1, 2f /* if a0 >= a1, jump forward to the next label 2; otherwise fall through */
1:
sw zero, (a0) /* store 0 to the address in a0 */
addi a0, a0, 4 /* a0 += 4 */
bltu a0, a1, 1b /* if a0 < a1, jump back to the previous label 1; otherwise fall through */
/* bss section cleared */
2:
call _init /* initialization function */
call main /* main function */
csrwi 0x347,1 /* simulation only: exit the simulation */
loop: /* infinite loop */
j loop
trap_vector_base:/* fallback trap vector table */
j _start
.word 0
.word 0
.word 0
|
xiazhuo/nscc2022_personal | 1,820 | asm/user-sample.s | .org 0x0
.set noreorder
.set noat
.text
.global _start
_start:
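# Fill a 64K-entry table of squares (i*i) at 0x80600000, then for each of the
# 0x40000 input words at 0x80400000 binary-search that table for the value and
# store the resulting index at 0x80500000. Branch delay slots are used throughout.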
lui $sp,0x807E
lui $a3,0x8060
move $v0,$zero
lui $a1,0x1
move $v1,$a3
.L1:
mul $a0,$v0,$v0
addiu $v1,$v1,4
addiu $v0,$v0,1
bne $v0,$a1,.L1
sw $a0,-4($v1)
lui $t2,0x8040
lui $t1,0x8050
move $t0,$zero
lui $t3,0x10
.L6:
addu $v0,$t2,$t0
move $v1,$zero
li $a1,0xffff
lw $a2,0($v0)
addu $a0,$a1,$v1
.L7:
addiu $a0,$a0,1
sra $a0,$a0,0x1
sll $v0,$a0,0x2
addu $v0,$a3,$v0
lw $v0,0($v0)
sltu $v0,$a2,$v0
beqz $v0,.L2
nop
b .L3
move $v0,$a0
.L5:
sra $v0,$v0,0x1
sll $v1,$v0,0x2
addu $v1,$a3,$v1
lw $v1,0($v1)
sltu $v1,$a2,$v1
bnez $v1,.L4
move $v1,$a0
move $a0,$v0
.L2:
addu $v0,$a1,$a0
slt $v1,$a0,$a1
bnez $v1,.L5
addiu $v0,$v0,1
addu $v0,$t1,$t0
addiu $t0,$t0,4
bne $t0,$t3,.L6
sw $a0,0($v0)
.L8:
jr $ra
move $v0,$zero
.L4:
addiu $a1,$v0,-1
.L9:
slt $v0,$v1,$a1
bnez $v0,.L7
addu $a0,$a1,$v1
addu $v0,$t1,$t0
addiu $t0,$t0,4
move $a0,$v1
bne $t0,$t3,.L6
sw $a0,0($v0)
b .L8
nop
.L3:
b .L9
addiu $a1,$v0,-1
bsearchr:
move $v0,$a0
lui $t1,0x8060
slt $v1,$v0,$a1
beqz $v1,.L10
addu $v1,$a1,$v0
.L13:
addiu $v1,$v1,1
sra $v1,$v1,0x1
sll $a3,$v1,0x2
addu $a3,$t1,$a3
lw $a3,0($a3)
sltu $a3,$a2,$a3
bnez $a3,.L11
nop
.L12:
addu $t0,$a1,$v1
slt $a3,$v1,$a1
move $v0,$v1
beqz $a3,.L10
addiu $v1,$t0,1
sra $v1,$v1,0x1
sll $a0,$v1,0x2
addu $a0,$t1,$a0
lw $a0,0($a0)
sltu $a0,$a2,$a0
beqz $a0,.L12
nop
.L11:
addiu $a1,$v1,-1
slt $v1,$v0,$a1
bnez $v1,.L13
addu $v1,$a1,$v0
.L10:
jr $ra
nop
|
xiazhuo/nscc2022_personal | 5,748 | thinpad_top.srcs/sim_1/testbin/lab2/lab2.S | #include <inst_test.h>
#include <regdef.h>
.org 0x0
.set noreorder
.set noat
.text
.global _start
_start:
b fib
nop
info:
.asciz "Fib Finish."
.p2align 2
feed:
.asciz "All PASS!"
.p2align 2
fib:
ori t0, zero, 0x1 # t0 = 1
ori t1, zero, 0x1 # t1 = 1
lui a0, 0x8040 # a0 = 0x80400000
addiu t4, a0, 0x100 # t4 = 0x80400100
loop0:
addu t2, t0, t1 # t2 = t0+t1
ori t0, t1, 0x0 # t0 = t1
ori t1, t2, 0x0 # t1 = t2
sw t1, 0(a0)
lw t3, 0(a0)
bne t1, t3, end
nop
addiu a0, a0, 0x4 # a0 += 4
bne a0, t4, loop0
nop
WRITESERIAL:
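# Send the NUL-terminated string at `info` over the memory-mapped serial port:
# poll the status register at 0xBFD003FC until the TX-ready bit (bit 0) is set,
# then write each byte to the data register at 0xBFD003F8.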
lui s0, %hi(info)
addiu s0, %lo(info)
lb a0, 0(s0)
lui s1, 0xBFD0
loop1:
addiu s0, s0, 0x1
.TESTW:
lb t0, 0x03FC(s1)
andi t0, t0, 0x0001
beqz t0, .TESTW
nop
sb a0, 0x03F8(s1)
lb a0, 0(s0)
bne a0, zero, loop1
nop
READSERIAL:
# lui s1, 0xBFD0
.TESTR:
lb t0, 0x03FC(s1)
andi t0, t0, 0x0002
beqz t0, .TESTR
nop
lb a0, 0x03F8(s1)
ori t0, zero, 0x0054 # char 'T'
bne a0, t0, READSERIAL
nop
RANDOMTEST:
li s0, 0x80400100
li s1, 0
lw s2, 4(s0) # selection
lw s3, 8(s0) # random
add:
andi t0, s2, 0x1
beqz t0, add_next # skip test
nop
TEST_ADD(0xbccd54c8, 0x0a2c3b14, 0xc6f98fdc)
TEST_ADD(0x95f0c7b0, 0x00000000, 0x95f0c7b0)
TEST_ADD(0x00000000, 0x6e701f70, 0x6e701f70)
TEST_ADD(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x1
add_next:
addi:
andi t0, s2, 0x2
beqz t0, addi_next # skip test
nop
TEST_ADDI(0xbcbfdbe0, 0x0000f6c0, 0xbcbfd2a0)
TEST_ADDI(0xd33b01eb, 0x00000000, 0xd33b01eb)
TEST_ADDI(0x00000000, 0x00003e0c, 0x00003e0c)
TEST_ADDI(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x2
addi_next:
sub:
andi t0, s2, 0x4
beqz t0, sub_next # skip test
nop
TEST_SUB(0x08635494, 0x9eb51e6c, 0x69ae3628)
TEST_SUB(0x93bd45a8, 0x00000000, 0x93bd45a8)
TEST_SUB(0x00000000, 0xd2e54c39, 0x2d1ab3c7)
TEST_SUB(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x4
sub_next:
slt:
andi t0, s2, 0x8
beqz t0, slt_next # skip test
nop
TEST_SLT(0x77ea82d8, 0x273da080, 0x00000000)
TEST_SLT(0x5d1eb494, 0x00000000, 0x00000000)
TEST_SLT(0x00000000, 0x1eaac5e3, 0x00000001)
TEST_SLT(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 8
slt_next:
sllv:
andi t0, s2, 0x10
beqz t0, sllv_next # skip test
nop
TEST_SLLV(0x1b9690c3, 0x0000001b, 0x18000000)
TEST_SLLV(0x999ff140, 0x00000000, 0x999ff140)
TEST_SLLV(0x00000000, 0x00000001, 0x00000000)
TEST_SLLV(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x10
sllv_next:
srav:
andi t0, s2, 0x20
beqz t0, srav_next # skip test
nop
TEST_SRAV(0xdbbad0b0, 0x00000005, 0xfeddd685)
TEST_SRAV(0x0b24d560, 0x00000000, 0x0b24d560)
TEST_SRAV(0x00000000, 0x00000012, 0x00000000)
TEST_SRAV(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x20
srav_next:
sra:
andi t0, s2, 0x40
beqz t0, sra_next # skip test
nop
TEST_SRA(0xb3bf7ef8, 0x0000001c, 0xfffffffb)
TEST_SRA(0x7f797f19, 0x00000000, 0x7f797f19)
TEST_SRA(0x00000000, 0x00000016, 0x00000000)
TEST_SRA(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x40
sra_next:
srlv:
andi t0, s2, 0x80
beqz t0, srlv_next # skip test
nop
TEST_SRLV(0xcfe54c57, 0x00000006, 0x033f9531)
TEST_SRLV(0x16f9a9d8, 0x00000000, 0x16f9a9d8)
TEST_SRLV(0x00000000, 0x0000000c, 0x00000000)
TEST_SRLV(0x00000000, 0x00000000, 0x00000000)
ori s1, s1, 0x80
srlv_next:
jalr:
andi t0, s2, 0x100
beqz t0, jalr_next # skip test
nop
TEST_JALR(0x6e6b362a, 0x240baf26, 0x6e6b362a, 0x240baf26)
TEST_JALR(0x5d0fcbc0, 0xdba3ac64, 0x5d0fcbc0, 0xdba3ac64)
TEST_JALR(0xa832bb00, 0xf43fa620, 0xa832bb00, 0xf43fa620)
TEST_JALR(0x1e2aa5ca, 0x4782fd22, 0x1e2aa5ca, 0x4782fd22)
ori s1, s1, 0x100
jalr_next:
bgez:
andi t0, s2, 0x200
beqz t0, bgez_next # skip test
nop
TEST_BGEZ(0xbb00130a, 0x1b151f5e, 0x0e6929ba, 0x00000000, 0x00000000)
TEST_BGEZ(0x44f2bff4, 0x4020bf52, 0x4efb06d3, 0x4020bf52, 0x4efb06d3)
TEST_BGEZ(0x12bd8028, 0xab215888, 0x55aca678, 0xab215888, 0x55aca678)
TEST_BGEZ(0x00000000, 0x1f139900, 0xaf6531c4, 0x1f139900, 0xaf6531c4)
ori s1, s1, 0x200
bgez_next:
blez:
andi t0, s2, 0x400
beqz t0, blez_next # skip test
nop
TEST_BLEZ(0x95b9206f, 0x6ad0a0b8, 0x8f08db03, 0x6ad0a0b8, 0x8f08db03)
TEST_BLEZ(0x2c09b044, 0xc7826200, 0x13ea2762, 0x00000000, 0x00000000)
TEST_BLEZ(0xdab562ac, 0xea867f00, 0x6d7650e5, 0xea867f00, 0x6d7650e5)
TEST_BLEZ(0x00000000, 0xbb9734a0, 0xff0af084, 0xbb9734a0, 0xff0af084)
ori s1, s1, 0x400
blez_next:
bltz:
andi t0, s2, 0x800
beqz t0, bltz_next # skip test
nop
TEST_BLTZ(0xfff90290, 0x6a2f7d7c, 0xc275fd82, 0x6a2f7d7c, 0xc275fd82)
TEST_BLTZ(0x35ae8150, 0x291a3374, 0x60d28110, 0x00000000, 0x00000000)
TEST_BLTZ(0x00000000, 0x5f7d6c5f, 0x632c31aa, 0x00000000, 0x00000000)
TEST_BLTZ(0xf5969546, 0x18389fc2, 0xeb9542c8, 0x18389fc2, 0xeb9542c8)
ori s1, s1, 0x800
bltz_next:
li t0, 0xfeed0000
or t0, t0, s1
xor t0, t0, s3
sw t0, 0(s0) # *s0 = t0
bne s1, s2, end
nop
FEEDBACK:
lui s0, %hi(feed)
addiu s0, %lo(feed)
lb a0, 0(s0)
lui s1, 0xBFD0
loop2:
addiu s0, s0, 0x1
.TEST:
lb t0, 0x03FC(s1)
andi t0, t0, 0x0001
beqz t0, .TEST
nop
sb a0, 0x03F8(s1)
lb a0, 0(s0)
bne a0, zero, loop2
nop
end:
b end
nop
|
xiazhuo/nscc2022_personal | 2,995 | thinpad_top.srcs/sim_1/testbin/self_test/test.S | .org 0x0
.set noreorder
.set noat
.text
.global _start
_start:
lui $sp,0x807E
addiu $sp,$sp,-24
move $s8,$sp
move $a0,$zero # pass argument 0
jal QUICKSORT
li $a1,8 # pass argument 8
move $sp,$s8
addiu $sp,$sp,24
READ:
lui $t0,0x8040
loop:
lw $t1,0($t0)
bne $t1,$zero,loop
addiu $t0,$t0,4
j WRITESERIAL
nop
QUICKSORT:
addiu $sp,$sp,-40
sw $ra,36($sp)
sw $s8,32($sp)
move $s8,$sp
sw $a0,40($s8)
sw $a1,44($s8)
move $v1,$a0
move $v0,$a1
slt $v0,$v1,$v0
bnez $v0,.L2
nop
b .L1
nop
.L2:
lw $v1,40($s8)
lw $v0,44($s8)
addu $v0,$v1,$v0
sra $v1,$v0,0x1
lui $v0,0x8040
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v0,0($v0)
sw $v0,24($s8) # x
lw $v0,40($s8)
addiu $v0,$v0,-1
sw $v0,16($s8) # i
lw $v0,44($s8)
addiu $v0,$v0,1
sw $v0,20($s8) # j
b .L4
nop
.L5:
lw $v0,16($s8)
addiu $v0,$v0,1
sw $v0,16($s8) # i = i+1
lui $v0,0x8040
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v1,0($v0)
lw $v0,24($s8)
slt $v0,$v1,$v0
bnez $v0,.L5
nop
.L6:
lw $v0,20($s8)
addiu $v0,$v0,-1 # j = j-1
sw $v0,20($s8)
lui $v0,0x8040
lw $v1,20($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v1,0($v0)
lw $v0,24($s8)
slt $v0,$v0,$v1
bnez $v0,.L6
nop
lw $v1,16($s8)
lw $v0,20($s8)
slt $v0,$v1,$v0
beqz $v0,.L4
nop
SWAP:
lui $t0,0x8040
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $t0,$v1,$t0
lw $t1,0($t0) # a[i]
lui $t2,0x8040
lw $v1,20($s8)
sll $v1,$v1,0x2
addu $t2,$v1,$t2
lw $t3,0($t2) # a[j]
sw $t1,0($t2)
sw $t3,0($t0)
# lui $v0,0x8040
# lw $v1,16($s8)
# sll $v1,$v1,0x2
# addu $v0,$v1,$v0
# lw $v0,0($v0) # a[i]
# sw $v0,28($s8)
# lui $v0,0x8040
# lw $v1,20($s8)
# sll $v1,$v1,0x2
# addu $v0,$v1,$v0
# lw $v1,0($v0) # a[j]
# lui $v0,0x8040
# lw $a0,16($s8)
# sll $a0,$a0,0x2
# addu $v0,$a0,$v0
# sw $v1,0($v0) # a[i] = a[j]
# lui $v0,0x8040
# lw $v1,20($s8)
# sll $v1,$v1,0x2
# addu $v0,$v1,$v0
# lw $v1,28($s8)
# sw $v1,0($v0) # a[j] = t
.L4:
lw $v1,16($s8) # i
lw $v0,20($s8) # j
slt $v0,$v1,$v0
bnez $v0,.L5
nop
lw $a0,40($s8)
jal QUICKSORT
lw $a1,20($s8)
lw $v0,20($s8)
addiu $v0,$v0,1
move $a0,$v0
jal QUICKSORT
lw $a1,44($s8)
.L1:
move $sp,$s8
lw $ra,36($sp)
lw $s8,32($sp)
addiu $sp,$sp,40
jr $ra
nop
WRITESERIAL:
lui $s1,0xBFD0
la $s0,feed
lb $a0,0($s0)
addiu $s0,$s0,1
.TESTW:
lb $t0, 0x03FC($s1)
andi $t0, $t0, 0x0001
beqz $t0, .TESTW
nop
sb $a0, 0x03F8($s1)
lb $a0, 0($s0)
addiu $s0,$s0,1
bnez $a0, .TESTW
nop
end:
b end
nop
feed:
.asciz "finished!"
.p2align 2
|
xiazhuo/nscc2022_personal | 1,899 | add2windows_env/testbin/test_.S | .org 0x0
.set noreorder
.set noat
.text
.global _start
_start:
lui $sp,0x807E
addiu $sp,$sp,-32
sw $ra,28($sp)
sw $s8,24($sp)
move $s8,$sp
sw $zero,16($s8)
b .L4
nop
.L3:
lw $v1,16($s8)
lw $v0,16($s8)
mul $v0,$v1,$v0
move $a0,$v0
lui $v0,0x8062 #b [i]
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
sw $a0,0($v0)
lw $v0,16($s8)
addiu $v0,$v0,1
sw $v0,16($s8)
.L4:
lw $v1,16($s8)
lui $v0,0x1
slt $v0,$v1,$v0
bnez $v0,.L3
nop
sw $zero,16($s8)
b .L2
nop
.L1:
lui $v0,0x8040 # a[i]
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v0,0($v0)
move $a0,$zero
li $a1,0xffff
move $a2,$v0
jal bsearchr
nop
move $a0,$v0
lui $v0,0x8050 # c[i]
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
sw $a0,0($v0)
lw $v0,16($s8)
addiu $v0,$v0,1
sw $v0,16($s8)
.L2:
lw $v0,16($s8)
lui $t0,0x4
slt $v0,$v0,$t0
bnez $v0,.L1
nop
move $v0,$zero
move $sp,$s8
lw $ra,28($sp)
lw $s8,24($sp)
addiu $sp,$sp,32
jr $ra
nop
bsearchr:
addiu $sp,$sp,-16
sw $s8,12($sp)
move $s8,$sp
sw $a0,16($s8)
sw $a1,20($s8)
sw $a2,24($s8)
b .L6
nop
.L5:
lw $v1,16($s8)
lw $v0,20($s8)
addu $v0,$v1,$v0
addiu $v0,$v0,1
sra $v0,$v0,0x1
sw $v0,0($s8)
lui $v0,0x8062
lw $v1,0($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v1,0($v0)
lw $v0,24($s8)
sltu $v0,$v0,$v1
bnez $v0,.L7
nop
lw $v0,0($s8)
sw $v0,16($s8)
b .L6
nop
.L7:
lw $v0,0($s8)
addiu $v0,$v0,-1
sw $v0,20($s8)
.L6:
lw $v1,16($s8)
lw $v0,20($s8)
slt $v0,$v1,$v0
bnez $v0,.L5
nop
lw $v0,16($s8)
move $sp,$s8
lw $s8,12($sp)
addiu $sp,$sp,16
jr $ra
nop
|
xiazhuo/nscc2022_personal | 1,820 | add2windows_env/testbin/test.S | .org 0x0
.set noreorder
.set noat
.text
.global _start
_start:
lui $sp,0x807E
lui $a3,0x8060
move $v0,$zero
lui $a1,0x1
move $v1,$a3
.L1:
mul $a0,$v0,$v0
addiu $v1,$v1,4
addiu $v0,$v0,1
bne $v0,$a1,.L1
sw $a0,-4($v1)
lui $t2,0x8040
lui $t1,0x8050
move $t0,$zero
lui $t3,0x10
.L6:
addu $v0,$t2,$t0
move $v1,$zero
li $a1,0xffff
lw $a2,0($v0)
addu $a0,$a1,$v1
.L7:
addiu $a0,$a0,1
sra $a0,$a0,0x1
sll $v0,$a0,0x2
addu $v0,$a3,$v0
lw $v0,0($v0)
sltu $v0,$a2,$v0
beqz $v0,.L2
nop
b .L3
move $v0,$a0
.L5:
sra $v0,$v0,0x1
sll $v1,$v0,0x2
addu $v1,$a3,$v1
lw $v1,0($v1)
sltu $v1,$a2,$v1
bnez $v1,.L4
move $v1,$a0
move $a0,$v0
.L2:
addu $v0,$a1,$a0
slt $v1,$a0,$a1
bnez $v1,.L5
addiu $v0,$v0,1
addu $v0,$t1,$t0
addiu $t0,$t0,4
bne $t0,$t3,.L6
sw $a0,0($v0)
.L8:
jr $ra
move $v0,$zero
.L4:
addiu $a1,$v0,-1
.L9:
slt $v0,$v1,$a1
bnez $v0,.L7
addu $a0,$a1,$v1
addu $v0,$t1,$t0
addiu $t0,$t0,4
move $a0,$v1
bne $t0,$t3,.L6
sw $a0,0($v0)
b .L8
nop
.L3:
b .L9
addiu $a1,$v0,-1
bsearchr:
move $v0,$a0
lui $t1,0x8060
slt $v1,$v0,$a1
beqz $v1,.L10
addu $v1,$a1,$v0
.L13:
addiu $v1,$v1,1
sra $v1,$v1,0x1
sll $a3,$v1,0x2
addu $a3,$t1,$a3
lw $a3,0($a3)
sltu $a3,$a2,$a3
bnez $a3,.L11
nop
.L12:
addu $t0,$a1,$v1
slt $a3,$v1,$a1
move $v0,$v1
beqz $a3,.L10
addiu $v1,$t0,1
sra $v1,$v1,0x1
sll $a0,$v1,0x2
addu $a0,$t1,$a0
lw $a0,0($a0)
sltu $a0,$a2,$a0
beqz $a0,.L12
nop
.L11:
addiu $a1,$v1,-1
slt $v1,$v0,$a1
bnez $v1,.L13
addu $v1,$a1,$v0
.L10:
jr $ra
nop
|
xiazhuo/nscc2022_personal | 2,383 | add2windows_env/C_code/test.s | .file 1 "test.c"
.section .mdebug.abi32
.previous
.nan legacy
.module fp=xx
.module nooddspreg
.globl a
.data
.align 2
.type a, @object
.size a, 20
a:
.word 9
.word 8
.word 10
.word 0
.word -1
.comm b,262180,4
.comm c,20,4
.text
.align 2
.globl bsearchr
.set nomips16
.set nomicromips
.ent bsearchr
.type bsearchr, @function
bsearchr:
.frame $fp,16,$31 # vars= 8, regs= 1/0, args= 0, gp= 0
.mask 0x40000000,-4
.fmask 0x00000000,0
.set noreorder
.set nomacro
addiu $sp,$sp,-16
sw $fp,12($sp)
move $fp,$sp
sw $4,16($fp)
sw $5,20($fp)
sw $6,24($fp)
b .L2
nop
.L4:
lw $3,16($fp)
lw $2,20($fp)
addu $2,$3,$2
addiu $2,$2,1
sra $2,$2,1
sw $2,0($fp)
lui $2,%hi(b)
lw $3,0($fp)
sll $3,$3,2
addiu $2,$2,%lo(b)
addu $2,$3,$2
lw $3,0($2)
lw $2,24($fp)
sltu $2,$2,$3
bne $2,$0,.L3
nop
lw $2,0($fp)
sw $2,16($fp)
b .L2
nop
.L3:
lw $2,0($fp)
addiu $2,$2,-1
sw $2,20($fp)
.L2:
lw $3,16($fp)
lw $2,20($fp)
slt $2,$3,$2
bne $2,$0,.L4
nop
lw $2,16($fp)
move $sp,$fp
lw $fp,12($sp)
addiu $sp,$sp,16
jr $31
nop
.set macro
.set reorder
.end bsearchr
.size bsearchr, .-bsearchr
.align 2
.globl main
.set nomips16
.set nomicromips
.ent main
.type main, @function
main:
.frame $fp,32,$31 # vars= 8, regs= 2/0, args= 16, gp= 0
.mask 0xc0000000,-4
.fmask 0x00000000,0
.set noreorder
.set nomacro
addiu $sp,$sp,-32
sw $31,28($sp)
sw $fp,24($sp)
move $fp,$sp
sw $0,16($fp)
b .L7
nop
.L8:
lw $3,16($fp)
lw $2,16($fp)
mul $2,$3,$2
move $4,$2
lui $2,%hi(b)
lw $3,16($fp)
sll $3,$3,2
addiu $2,$2,%lo(b)
addu $2,$3,$2
sw $4,0($2)
lw $2,16($fp)
addiu $2,$2,1
sw $2,16($fp)
.L7:
lw $3,16($fp)
li $2,65536 # 0x10000
slt $2,$3,$2
bne $2,$0,.L8
nop
sw $0,16($fp)
b .L9
nop
.L10:
lui $2,%hi(a)
lw $3,16($fp)
sll $3,$3,2
addiu $2,$2,%lo(a)
addu $2,$3,$2
lw $2,0($2)
move $4,$0
li $5,65535 # 0xffff
move $6,$2
jal bsearchr
nop
move $4,$2
lui $2,%hi(c)
lw $3,16($fp)
sll $3,$3,2
addiu $2,$2,%lo(c)
addu $2,$3,$2
sw $4,0($2)
lw $2,16($fp)
addiu $2,$2,1
sw $2,16($fp)
.L9:
lw $3,16($fp)
li $2,262144 # 0x40000
slt $2,$3,$2
bne $2,$0,.L10
nop
move $2,$0
move $sp,$fp
lw $31,28($sp)
lw $fp,24($sp)
addiu $sp,$sp,32
jr $31
nop
.set macro
.set reorder
.end main
.size main, .-main
.ident "GCC: (Codescape GNU Tools 2016.05-03 for MIPS MTI Bare Metal) 4.9.2"
|
xiazhuo/nscc2022_personal | 2,995 | add2windows_env/testbin/快速排序/test.S | .org 0x0
.set noreorder
.set noat
.text
.global _start
_start:
lui $sp,0x807E
addiu $sp,$sp,-24
move $s8,$sp
move $a0,$zero # pass argument 0
jal QUICKSORT
li $a1,8 # pass argument 8
move $sp,$s8
addiu $sp,$sp,24
READ:
lui $t0,0x8040
loop:
lw $t1,0($t0)
bne $t1,$zero,loop
addiu $t0,$t0,4
j WRITESERIAL
nop
QUICKSORT:
addiu $sp,$sp,-40
sw $ra,36($sp)
sw $s8,32($sp)
move $s8,$sp
sw $a0,40($s8)
sw $a1,44($s8)
move $v1,$a0
move $v0,$a1
slt $v0,$v1,$v0
bnez $v0,.L2
nop
b .L1
nop
.L2:
lw $v1,40($s8)
lw $v0,44($s8)
addu $v0,$v1,$v0
sra $v1,$v0,0x1
lui $v0,0x8040
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v0,0($v0)
sw $v0,24($s8) # x
lw $v0,40($s8)
addiu $v0,$v0,-1
sw $v0,16($s8) # i
lw $v0,44($s8)
addiu $v0,$v0,1
sw $v0,20($s8) # j
b .L4
nop
.L5:
lw $v0,16($s8)
addiu $v0,$v0,1
sw $v0,16($s8) # i = i+1
lui $v0,0x8040
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v1,0($v0)
lw $v0,24($s8)
slt $v0,$v1,$v0
bnez $v0,.L5
nop
.L6:
lw $v0,20($s8)
addiu $v0,$v0,-1 # j = j-1
sw $v0,20($s8)
lui $v0,0x8040
lw $v1,20($s8)
sll $v1,$v1,0x2
addu $v0,$v1,$v0
lw $v1,0($v0)
lw $v0,24($s8)
slt $v0,$v0,$v1
bnez $v0,.L6
nop
lw $v1,16($s8)
lw $v0,20($s8)
slt $v0,$v1,$v0
beqz $v0,.L4
nop
SWAP:
lui $t0,0x8040
lw $v1,16($s8)
sll $v1,$v1,0x2
addu $t0,$v1,$t0
lw $t1,0($t0) # a[i]
lui $t2,0x8040
lw $v1,20($s8)
sll $v1,$v1,0x2
addu $t2,$v1,$t2
lw $t3,0($t2) # a[j]
sw $t1,0($t2)
sw $t3,0($t0)
# lui $v0,0x8040
# lw $v1,16($s8)
# sll $v1,$v1,0x2
# addu $v0,$v1,$v0
# lw $v0,0($v0) # a[i]
# sw $v0,28($s8)
# lui $v0,0x8040
# lw $v1,20($s8)
# sll $v1,$v1,0x2
# addu $v0,$v1,$v0
# lw $v1,0($v0) # a[j]
# lui $v0,0x8040
# lw $a0,16($s8)
# sll $a0,$a0,0x2
# addu $v0,$a0,$v0
# sw $v1,0($v0) # a[i] = a[j]
# lui $v0,0x8040
# lw $v1,20($s8)
# sll $v1,$v1,0x2
# addu $v0,$v1,$v0
# lw $v1,28($s8)
# sw $v1,0($v0) # a[j] = t
.L4:
lw $v1,16($s8) # i
lw $v0,20($s8) # j
slt $v0,$v1,$v0
bnez $v0,.L5
nop
lw $a0,40($s8)
jal QUICKSORT
lw $a1,20($s8)
lw $v0,20($s8)
addiu $v0,$v0,1
move $a0,$v0
jal QUICKSORT
lw $a1,44($s8)
.L1:
move $sp,$s8
lw $ra,36($sp)
lw $s8,32($sp)
addiu $sp,$sp,40
jr $ra
nop
WRITESERIAL:
lui $s1,0xBFD0
la $s0,feed
lb $a0,0($s0)
addiu $s0,$s0,1
.TESTW:
lb $t0, 0x03FC($s1)
andi $t0, $t0, 0x0001
beqz $t0, .TESTW
nop
sb $a0, 0x03F8($s1)
lb $a0, 0($s0)
addiu $s0,$s0,1
bnez $a0, .TESTW
nop
end:
b end
nop
feed:
.asciz "finished!"
.p2align 2
|
linkease/lcdsimple-be3600 | 21,041 | gl-be3600-lcd/src-glinet-be3600/lvgl/src/draw/sw/blend/helium/lv_blend_helium.S | /**
* @file lv_blend_helium.S
*
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "lv_blend_helium.h"
#if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM && defined(__ARM_FEATURE_MVE) && __ARM_FEATURE_MVE && LV_USE_NATIVE_HELIUM_ASM
.data
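@ Initial 8-bit reciprocal estimates (~2048/n for n = 8..15), indexed by the top
@ nibble of the normalized divisor and refined below with two Newton-Raphson
@ steps when dividing by the blended alpha on the argb8888 destination path.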
reciprocal:
.byte 0xFF, 0xE2, 0xCC, 0xB9, 0xAA, 0x9C, 0x91, 0x88
.text
.syntax unified
.p2align 2
TMP .req r0
DST_ADDR .req r1
DST_W .req r2
DST_H .req r3
DST_STRIDE .req r4
SRC_ADDR .req r5
SRC_STRIDE .req r6
MASK_ADDR .req r7
MASK_STRIDE .req r8
H .req r9
OPA .req r10
RCP .req r11
S_B .req q0
S_G .req q1
S_R .req q2
S_A .req q3
D_B .req q4
D_G .req q5
D_R .req q6
D_A .req q7
N .req q0
V .req q1
R .req q2
L .req q4
S_565 .req q0
D_565 .req q1
S_L .req q2
D_L .req q4
D_T .req q5
BITMASK .req q6
.macro ldst st, op, bpp, mem, reg, areg, cvt, alt_index, wb, aligned
.if \bpp == 0
.if \cvt
ldr TMP, [\mem\()_ADDR]
bfi TMP, TMP, #2, #8
bfi TMP, TMP, #3, #16
lsr TMP, TMP, #8
vdup.16 \reg\()_565, TMP
.else
ldr TMP, [\mem\()_ADDR]
vdup.8 \reg\()_B, TMP
lsr TMP, #8
vdup.8 \reg\()_G, TMP
lsr TMP, #8
vdup.8 \reg\()_R, TMP
.endif
.elseif \bpp == 8
.if \cvt
v\op\()rb.u16 \reg\()_A, [\mem\()_ADDR], #8
.else
v\op\()rb.8 \reg\()_A, [\mem\()_ADDR], #16
.endif
.elseif \bpp == 16
.if \cvt
.if \st
vsri.8 \reg\()_R, \reg\()_G, #5
vshr.u8 \reg\()_G, \reg\()_G, #2
vshr.u8 \reg\()_B, \reg\()_B, #3
vsli.8 \reg\()_B, \reg\()_G, #5
.endif
.if \alt_index
v\op\()rb.8 \reg\()_B, [\mem\()_ADDR, S_B]
v\op\()rb.8 \reg\()_R, [\mem\()_ADDR, S_G]
.else
v\op\()rb.8 \reg\()_B, [\mem\()_ADDR, \reg\()_A]
add \mem\()_ADDR, #1
v\op\()rb.8 \reg\()_R, [\mem\()_ADDR, \reg\()_A]
.endif
.if \st == 0
vshl.u8 \reg\()_G, \reg\()_R, #5
vsri.u8 \reg\()_G, \reg\()_B, #3
vshl.u8 \reg\()_B, \reg\()_B, #3
vsri.u8 \reg\()_R, \reg\()_R, #5
vsri.u8 \reg\()_G, \reg\()_G, #6
vsri.u8 \reg\()_B, \reg\()_B, #5
.endif
.ifc \wb, !
.if \alt_index
add \mem\()_ADDR, #32
.else
add \mem\()_ADDR, #31
.endif
.elseif \alt_index == 0
sub \mem\()_ADDR, #1
.endif
.else @ cvt
.ifc \wb, !
v\op\()rh.16 \reg\()_565, [\mem\()_ADDR], #16
.else
v\op\()rh.16 \reg\()_565, [\mem\()_ADDR]
.endif
.endif
.elseif \bpp == 24
.if \alt_index == 1
v\op\()rb.8 \reg\()_B, [\mem\()_ADDR, S_B]
v\op\()rb.8 \reg\()_G, [\mem\()_ADDR, S_G]
v\op\()rb.8 \reg\()_R, [\mem\()_ADDR, S_R]
.elseif \alt_index == 2
v\op\()rb.8 \reg\()_B, [\mem\()_ADDR, S_R]
v\op\()rb.8 \reg\()_G, [\mem\()_ADDR, S_A]
v\op\()rb.8 \reg\()_R, [\mem\()_ADDR, D_A]
.else
v\op\()rb.8 \reg\()_B, [\mem\()_ADDR, \reg\()_A]
add \mem\()_ADDR, #1
v\op\()rb.8 \reg\()_G, [\mem\()_ADDR, \reg\()_A]
add \mem\()_ADDR, #1
v\op\()rb.8 \reg\()_R, [\mem\()_ADDR, \reg\()_A]
.endif
.ifc \wb, !
.if \alt_index
add \mem\()_ADDR, #48
.else
add \mem\()_ADDR, #46
.endif
.elseif \alt_index == 0
sub \mem\()_ADDR, #2
.endif
.elseif \aligned
v\op\()40.8 {\reg\()_B, \reg\()_G, \reg\()_R, \reg\()_A}, [\mem\()_ADDR]
v\op\()41.8 {\reg\()_B, \reg\()_G, \reg\()_R, \reg\()_A}, [\mem\()_ADDR]
v\op\()42.8 {\reg\()_B, \reg\()_G, \reg\()_R, \reg\()_A}, [\mem\()_ADDR]
v\op\()43.8 {\reg\()_B, \reg\()_G, \reg\()_R, \reg\()_A}, [\mem\()_ADDR]\wb
.else
v\op\()rb.8 \reg\()_B, [\mem\()_ADDR, \areg\()_A]
add \mem\()_ADDR, #1
v\op\()rb.8 \reg\()_G, [\mem\()_ADDR, \areg\()_A]
add \mem\()_ADDR, #1
v\op\()rb.8 \reg\()_R, [\mem\()_ADDR, \areg\()_A]
.if (\bpp == 32) || (\bpp == 31) && \st
add \mem\()_ADDR, #1
v\op\()rb.8 \reg\()_A, [\mem\()_ADDR, \areg\()_A]
.endif
.ifc \wb, !
.if (\bpp == 32) || (\bpp == 31) && \st
add \mem\()_ADDR, #61
.else
add \mem\()_ADDR, #62
.endif
.else
.if (\bpp == 32) || (\bpp == 31) && \st
sub \mem\()_ADDR, #3
.else
sub \mem\()_ADDR, #2
.endif
.endif
.endif
.endm
.macro load_index bpp, reg, areg, aligned
.if (\bpp > 0) && ((\bpp < 31) || (\aligned == 0))
mov TMP, #0
.if \bpp == 8
vidup.u8 \reg\()_A, TMP, #1
.elseif \bpp == 16
vidup.u8 \reg\()_A, TMP, #2
.elseif \bpp == 24
vidup.u8 \reg\()_A, TMP, #1
mov TMP, #3
vmul.u8 \reg\()_A, \reg\()_A, TMP
.else
vidup.u8 \areg\()_A, TMP, #4
.endif
.endif
.endm
.macro init src_bpp, dst_bpp, mask, opa
ldr DST_ADDR, [r0, #4]
ldr DST_W, [r0, #8]
ldr DST_H, [r0, #12]
ldr DST_STRIDE, [r0, #16]
ldr SRC_ADDR, [r0, #20]
.if \src_bpp > 0
ldr SRC_STRIDE, [r0, #24]
.endif
.if \mask
ldr MASK_ADDR, [r0, #28]
ldr MASK_STRIDE, [r0, #32]
.endif
.if \opa
ldr OPA, [r0]
.endif
.if (\src_bpp <= 16) && (\dst_bpp == 16)
.if \opa || \mask
mov TMP, #0xF81F
movt TMP, #0x7E0
vdup.32 BITMASK, TMP
.endif
add TMP, DST_W, #0x7
bic TMP, TMP, #0x7
.else
add TMP, DST_W, #0xF
bic TMP, TMP, #0xF
.endif
.if \dst_bpp == 32
ldr RCP, =(reciprocal - 8)
.endif
.if \dst_bpp == 16
sub DST_STRIDE, DST_STRIDE, TMP, lsl #1
.elseif \dst_bpp == 24
sub DST_STRIDE, DST_STRIDE, TMP
sub DST_STRIDE, DST_STRIDE, TMP, lsl #1
.elseif \dst_bpp >= 31
sub DST_STRIDE, DST_STRIDE, TMP, lsl #2
.endif
.if \mask
sub MASK_STRIDE, MASK_STRIDE, TMP
.endif
.if \src_bpp == 0
.if \mask || \opa
.if \dst_bpp > 16
ldst 0, ld, \src_bpp, SRC, S, D, 0, 0
vmov.u8 S_A, #0xFF
.else
ldst 0, ld, \src_bpp, SRC, S, D, 1, 0
vmovlb.u16 S_L, S_565
vsli.32 S_L, S_L, #16
vand S_L, S_L, BITMASK
.endif
.else
.if \dst_bpp > 16
ldst 0, ld, \src_bpp, SRC, D, S, 0, 0
.else
ldst 0, ld, \src_bpp, SRC, D, S, 1, 0
.endif
.endif
.else
.if \src_bpp == 16
sub SRC_STRIDE, SRC_STRIDE, TMP, lsl #1
.elseif \src_bpp == 24
sub SRC_STRIDE, SRC_STRIDE, TMP
sub SRC_STRIDE, SRC_STRIDE, TMP, lsl #1
.elseif \src_bpp >= 31
sub SRC_STRIDE, SRC_STRIDE, TMP, lsl #2
.endif
.endif
.if (\src_bpp < 32) && (\mask == 0) && (\opa == 0) && !((\src_bpp <= 16) && (\dst_bpp == 16))
@ 16 to 31/32 or reverse: index @ q0, q1
@ 24 to 31/32 or reverse: index @ q0, q1, q2
@ 16 to 24 or reverse: 16 index @ q0, q1, 24 index @ q2, q3, q7
@ 31 to 31/32: index @ q3 (tail only)
mov TMP, #0
.if (\src_bpp == 16) || (\dst_bpp == 16)
vidup.u8 S_B, TMP, #2
mov TMP, #1
vadd.u8 S_G, S_B, TMP
.if (\src_bpp == 24) || (\dst_bpp == 24)
vshl.u8 S_R, S_B, #1
vadd.u8 S_R, S_R, S_B
vshr.u8 S_R, S_R, #1
vadd.u8 S_A, S_R, TMP
vadd.u8 D_A, S_A, TMP
.endif
.elseif (\src_bpp == 24) || (\dst_bpp == 24)
vidup.u8 S_B, TMP, #1
mov TMP, #3
vmul.u8 S_B, S_B, TMP
mov TMP, #1
vadd.u8 S_G, S_B, TMP
vadd.u8 S_R, S_G, TMP
.endif
.if \dst_bpp >= 31
load_index \dst_bpp, D, S, 0
vmov.u8 D_A, #0xFF
.endif
.endif
.endm
.macro vqrdmulh_u8 Qd, Qn, Qm @ 1 bit precision loss
vmulh.u8 \Qd, \Qn, \Qm
vqshl.u8 \Qd, \Qd, #1
.endm
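@ Scale the B, G and R lanes of \mem by \alpha with a rounding multiply-high
@ (each channel becomes approximately channel * alpha / 256).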
.macro premult mem, alpha
vrmulh.u8 \mem\()_B, \mem\()_B, \alpha
vrmulh.u8 \mem\()_G, \mem\()_G, \alpha
vrmulh.u8 \mem\()_R, \mem\()_R, \alpha
.endm
.macro blend_565 p
vmovl\p\().u16 D_L, D_565
vsli.32 D_L, D_L, #16
vand D_L, D_L, BITMASK
vsub.u32 D_T, S_L, D_L
vmovl\p\().u16 D_A, S_A
vmul.u32 D_T, D_T, D_A
vshr.u32 D_T, D_T, #5
vadd.u32 D_L, D_L, D_T
vand D_L, D_L, BITMASK
vshr.u32 D_T, D_L, #16
vorr D_L, D_L, D_T
vmovn\p\().u32 D_565, D_L
.endm
.macro late_init src_bpp, dst_bpp, mask, opa, mode
.if (\src_bpp <= 16) && (\dst_bpp == 16) && (\mask == 0)
.if \opa == 2
mov TMP, #0x7BEF
vdup.16 BITMASK, TMP
.if \src_bpp == 0
vshr.u16 S_L, S_565, #1
vand S_L, S_L, BITMASK
.endif
.elseif \opa == 1
vdup.16 S_A, OPA
mov TMP, #4
vadd.u16 S_A, S_A, TMP
vshr.u16 S_A, S_A, #3
.endif
.endif
.endm
.macro blend src_bpp, dst_bpp, mask, opa, mode
.if (\mask == 0) && (\opa == 2)
.if (\src_bpp <= 16) && (\dst_bpp == 16)
.if \src_bpp > 0
vshr.u16 S_L, S_565, #1
vand S_L, S_L, BITMASK
.endif
vshr.u16 D_L, D_565, #1
vand D_L, D_L, BITMASK
vadd.u16 D_565, S_L, D_L
.else
vhadd.u8 D_B, D_B, S_B
vhadd.u8 D_G, D_G, S_G
vhadd.u8 D_R, D_R, S_R
.endif
.elseif (\src_bpp <= 16) && (\dst_bpp == 16)
lsl lr, #1
.if \src_bpp > 0
vmovlb.u16 S_L, S_565
vsli.32 S_L, S_L, #16
vand S_L, S_L, BITMASK
.endif
blend_565 b
.if \src_bpp > 0
vmovlt.u16 S_L, S_565
vsli.32 S_L, S_L, #16
vand S_L, S_L, BITMASK
.endif
blend_565 t
lsr lr, #1
.else
.if \dst_bpp < 32
.if (\opa == 0) && (\mask == 0)
vmov.u8 D_A, #0xFF
mov TMP, #0
vabav.u8 TMP, S_A, D_A
cbnz TMP, 91f
vmov D_B, S_B
vmov D_G, S_G
vmov D_R, S_R
b 88f
91:
.endif
vmvn D_A, S_A
premult S, S_A
premult D, D_A
.else
vpush {d0-d5}
vmov.u8 S_B, #0xFF
vmov.u8 S_G, #0
mov TMP, #0
vabav.u8 TMP, S_A, S_B
cbz TMP, 91f @ if(fg.alpha == 255
mov TMP, #0
vabav.u8 TMP, D_A, S_G
cbnz TMP, 90f @ || bg.alpha == 0)
91:
vpop {d8-d13} @ return fg;
vmov.u8 D_A, #0xFF
b 88f
90:
mov TMP, #0
vabav.u8 TMP, S_A, S_G
cmp TMP, #2 @ if(fg.alpha <= LV_OPA_MIN)
itt le @ return bg;
vpople {d0-d5}
ble 88f
mov TMP, #0
vabav.u8 TMP, D_A, S_B @ if (bg.alpha == 255)
cbnz TMP, 89f @ return lv_color_mix32(fg, bg);
vpop {d0-d5}
vmvn D_A, S_A
premult S, S_A
premult D, D_A
vqadd.u8 D_B, D_B, S_B
vqadd.u8 D_G, D_G, S_G
vqadd.u8 D_R, D_R, S_R
vmov.u8 D_A, #0xFF
b 88f
89:
vmvn N, S_A
vmvn D_A, D_A
vrmulh.u8 D_A, N, D_A
vmvn D_A, D_A @ D_A = 255 - LV_OPA_MIX2(255 - fg.alpha, 255 - bg.alpha)
vclz.i8 N, D_A @ n = clz(D_A)
vshl.u8 V, D_A, N @ v = D_A << n
vshl.u8 S_A, S_A, N
vshr.u8 N, V, #4 @ N is used as tmp from now on
vldrb.u8 R, [RCP, N] @ r = reciprocal[(v >> 4) - 8]
vrmulh.u8 N, V, R @ r = newton(v,r)
vmvn N, N @ = vqrdmulh.u8(vmvn(vrmulh(v, r)), r)
vqrdmulh_u8 R, N, R @ but vqrdmulh does not support u8, so we implement one
vrmulh.u8 N, V, R @ and do it twice
vmvn N, N
vqrdmulh_u8 R, N, R
vqrdmulh_u8 S_A, S_A, R @ S_A' = S_A * 255 / D_A = vrdmulh(S_A << n, r)
vpop {d0-d5}
premult S, S_A
vmvn S_A, S_A
premult D, S_A
.endif
vqadd.u8 D_B, D_B, S_B
vqadd.u8 D_G, D_G, S_G
vqadd.u8 D_R, D_R, S_R
.endif
.if \dst_bpp == 31
vmov.u8 D_A, #0xFF
.endif
88:
.endm
.macro blend_line src_bpp, dst_bpp, mask, opa, mode
.if (\src_bpp < 31) && (\dst_bpp < 31)
blend_block \src_bpp, \dst_bpp, \mask, \opa, \mode, DST_W, 0
.else
bics TMP, DST_W, #0xF
beq 87f
blend_block \src_bpp, \dst_bpp, \mask, \opa, \mode, TMP, 1
87:
ands TMP, DST_W, #0xF
beq 86f
blend_block \src_bpp, \dst_bpp, \mask, \opa, \mode, TMP, 0
86:
.endif
.endm
.macro blend_block src_bpp, dst_bpp, mask, opa, mode, w, aligned
.if (\src_bpp <= 16) && (\dst_bpp == 16)
wlstp.16 lr, \w, 1f
.else
wlstp.8 lr, \w, 1f
.endif
2:
.if (\src_bpp < 32) && (\mask == 0) && (\opa == 0)
@ no blend
.if \src_bpp == 0
ldst 1, st, \dst_bpp, DST, D, S, 0, 1, !, \aligned
.elseif (\src_bpp == \dst_bpp) || (\src_bpp == 31) && (\dst_bpp == 32)
.if \dst_bpp < 31
.if \src_bpp < 31
ldst 0, ld, \src_bpp, SRC, D, S, 0, 1, !, \aligned
.else
ldst 0, ld, \src_bpp, SRC, D, S, 0, 1, !, \aligned
.endif
ldst 1, st, \dst_bpp, DST, D, S, 0, 1, !, \aligned
.else
ldst 0, ld, \src_bpp, SRC, D, S, 0, 1, !, \aligned
ldst 1, st, \dst_bpp, DST, D, S, 0, 1, !, \aligned
.endif
.else
.if (\dst_bpp < 31) && (\src_bpp < 31)
ldst 0, ld, \src_bpp, SRC, D, S, 1, 2, !, \aligned
ldst 1, st, \dst_bpp, DST, D, S, 1, 2, !, \aligned
.else
ldst 0, ld, \src_bpp, SRC, D, S, 1, 1, !, \aligned
ldst 1, st, \dst_bpp, DST, D, S, 1, 1, !, \aligned
.endif
.endif
.elseif (\src_bpp <= 16) && (\dst_bpp == 16)
.if \src_bpp > 0
ldst 0, ld, \src_bpp, SRC, S, D, 0, 0, !, \aligned
.endif
ldst 0, ld, \dst_bpp, DST, D, S, 0, 0, , \aligned
.if \mask
ldst 0, ld, 8, MASK, S, D, 1, 0, !
.if \opa == 2
vshr.u16 S_A, S_A, #1
.elseif \opa == 1
vmul.u16 S_A, S_A, OPA
vshr.u16 S_A, S_A, #8
.endif
mov TMP, #4
vadd.u16 S_A, S_A, TMP
vshr.u16 S_A, S_A, #3
.endif
blend \src_bpp, \dst_bpp, \mask, \opa, \mode
ldst 1, st, \dst_bpp, DST, D, S, 0, 0, !, \aligned
.elseif \src_bpp < 32
@ no src_a
.if \src_bpp > 0
load_index \src_bpp, S, D, \aligned
ldst 0, ld, \src_bpp, SRC, S, D, 1, 0, !, \aligned
.elseif (\opa == 1) || \mask
vpush {d0-d5}
.endif
load_index \dst_bpp, D, S, \aligned
ldst 0, ld, \dst_bpp, DST, D, S, 1, 0, , \aligned
.if \mask
ldst 0, ld, 8, MASK, S, D, 0, 0, !, \aligned
.if \opa == 2
vshr.u8 S_A, S_A, #1
.elseif \opa == 1
.if \dst_bpp == 32
vpush {d14-d15}
.endif
vdup.8 D_A, OPA
vrmulh.u8 S_A, S_A, D_A
.if \dst_bpp == 32
vpop {d14-d15}
.endif
.endif
.elseif \opa == 1
vdup.8 S_A, OPA
.endif
blend \src_bpp, \dst_bpp, \mask, \opa, \mode
.if (\src_bpp == 0) && ((\opa == 1) || \mask)
vpop {d0-d5}
.endif
.if (\dst_bpp == 32) || \mask || (\opa == 1)
load_index \dst_bpp, D, S, \aligned
.endif
ldst 1, st, \dst_bpp, DST, D, S, 1, 0, !, \aligned
.else
@ src_a (+\mask) (+\opa)
load_index \dst_bpp, D, S, \aligned
ldst 0, ld, \dst_bpp, DST, D, S, 1, 0, , \aligned
.if (\dst_bpp == 32) && (\mask || \opa || (\aligned == 0))
vpush {d14-d15}
.endif
load_index \src_bpp, S, D, \aligned
ldst 0, ld, \src_bpp, SRC, S, D, 1, 0, !, \aligned
.if \mask == 0
.if \opa
vdup.8 D_A, OPA
vrmulh.u8 S_A, S_A, D_A
.endif
.else
ldst 0, ld, 8, MASK, D, S, 0, 0, !, \aligned
vrmulh.u8 S_A, S_A, D_A
.if \opa
vdup.8 D_A, OPA
vrmulh.u8 S_A, S_A, D_A
.endif
.endif
.if (\dst_bpp == 32) && (\mask || \opa || (\aligned == 0))
vpop {d14-d15}
.endif
blend \src_bpp, \dst_bpp, \mask, \opa, \mode
load_index \dst_bpp, D, S, \aligned
ldst 1, st, \dst_bpp, DST, D, S, 1, 0, !, \aligned
.endif
letp lr, 2b
1:
.endm
.macro enter complex
push {r4-r11, lr}
.if \complex
vpush {d8-d15}
.endif
.endm
.macro exit complex
.if \complex
vpop {d8-d15}
.endif
pop {r4-r11, pc}
.endm
.macro preload mem, bpp
.if \bpp >= 31
pld [\mem\()_ADDR, DST_W, lsl #2]
.elseif \bpp == 24
add TMP, DST_W, DST_W, lsl #1
pld [\mem\()_ADDR, TMP]
.elseif \bpp == 16
pld [\mem\()_ADDR, DST_W, lsl #1]
.elseif \bpp == 8
pld [\mem\()_ADDR, DST_W]
.endif
.endm
.macro next src_bpp, mask
add DST_ADDR, DST_ADDR, DST_STRIDE
.if \src_bpp > 0
add SRC_ADDR, SRC_ADDR, SRC_STRIDE
.endif
.if \mask
add MASK_ADDR, MASK_ADDR, MASK_STRIDE
.endif
.endm
.macro blender src_bpp, dst_bpp, mask, opa, mode
.if (\src_bpp <= 16) && (\dst_bpp == 16) && (\opa == 0) && (\mask == 0)
enter 0
.else
enter 1
.endif
init \src_bpp, \dst_bpp, \mask, \opa
movs H, DST_H
beq 0f
preload SRC, \src_bpp
.if \mask || \opa || (\src_bpp == 32)
preload DST, \dst_bpp
.endif
.if \opa && (\src_bpp < 32) && (\dst_bpp < 32)
4:
@ 50% OPA can be accelerated (OPA == 0x7F/0x80)
add TMP, OPA, #1
tst TMP, #0x7E
bne 3f
late_init \src_bpp, \dst_bpp, \mask, 2, \mode
blend_line \src_bpp, \dst_bpp, \mask, 2, \mode
next \src_bpp, \mask
subs H, #1
bne 4b
b 0f
.endif
3:
late_init \src_bpp, \dst_bpp, \mask, \opa, \mode
blend_line \src_bpp, \dst_bpp, \mask, \opa, \mode
next \src_bpp, \mask
subs H, #1
bne 3b
0:
.if (\src_bpp <= 16) && (\dst_bpp == 16) && (\opa == 0) && (\mask == 0)
exit 0
.else
exit 1
.endif
.ltorg
.endm
.macro export name, src_bpp, dst_bpp, mask, opa, mode
.thumb_func
.global \name
\name\():
blender \src_bpp, \dst_bpp, \mask, \opa, \mode
.endm
.macro export_set src, dst, src_bpp, dst_bpp, mode
.ifc \src, color
export lv_\src\()_blend_to_\dst\()_helium, \src_bpp, \dst_bpp, 0, 0, \mode
export lv_\src\()_blend_to_\dst\()_with_opa_helium, \src_bpp, \dst_bpp, 0, 1, \mode
export lv_\src\()_blend_to_\dst\()_with_mask_helium, \src_bpp, \dst_bpp, 1, 0, \mode
export lv_\src\()_blend_to_\dst\()_mix_mask_opa_helium, \src_bpp, \dst_bpp, 1, 1, \mode
.else
export lv_\src\()_blend_\mode\()_to_\dst\()_helium, \src_bpp, \dst_bpp, 0, 0, \mode
export lv_\src\()_blend_\mode\()_to_\dst\()_with_opa_helium, \src_bpp, \dst_bpp, 0, 1, \mode
export lv_\src\()_blend_\mode\()_to_\dst\()_with_mask_helium, \src_bpp, \dst_bpp, 1, 0, \mode
export lv_\src\()_blend_\mode\()_to_\dst\()_mix_mask_opa_helium, \src_bpp, \dst_bpp, 1, 1, \mode
.endif
.endm
export_set color, rgb565, 0, 16, normal
export_set rgb565, rgb565, 16, 16, normal
export_set rgb888, rgb565, 24, 16, normal
export_set xrgb8888, rgb565, 31, 16, normal
export_set argb8888, rgb565, 32, 16, normal
export_set color, rgb888, 0, 24, normal
export_set rgb565, rgb888, 16, 24, normal
export_set rgb888, rgb888, 24, 24, normal
export_set xrgb8888, rgb888, 31, 24, normal
export_set argb8888, rgb888, 32, 24, normal
export_set color, xrgb8888, 0, 31, normal
export_set rgb565, xrgb8888, 16, 31, normal
export_set rgb888, xrgb8888, 24, 31, normal
export_set xrgb8888, xrgb8888, 31, 31, normal
export_set argb8888, xrgb8888, 32, 31, normal
export_set color, argb8888, 0, 32, normal
export_set rgb565, argb8888, 16, 32, normal
export_set rgb888, argb8888, 24, 32, normal
export_set xrgb8888, argb8888, 31, 32, normal
export_set argb8888, argb8888, 32, 32, normal
#endif /*LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_HELIUM && defined(__ARM_FEATURE_MVE) && __ARM_FEATURE_MVE && LV_USE_NATIVE_HELIUM_ASM*/
|
linkease/lcdsimple-be3600 | 21,316 | gl-be3600-lcd/src-glinet-be3600/lvgl/src/draw/sw/blend/neon/lv_blend_neon.S | /**
* @file lv_blend_neon.S
*
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "lv_blend_neon.h"
#if LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON
.text
.fpu neon
.arch armv7a
.syntax unified
.altmacro
.p2align 2
@ d0 ~ d3 : src B,G,R,A
@ d4 ~ d7 : dst B,G,R,A
@ q8 : src RGB565 raw
@ q9 : dst RGB565 raw
@ q10 ~ q12: pre-multiplied src
@ d26~29 : temp
@ d30 : mask
@ d31 : opa
FG_MASK .req r0
BG_MASK .req r1
DST_ADDR .req r2
DST_W .req r3
DST_H .req r4
DST_STRIDE .req r5
SRC_ADDR .req r6
SRC_STRIDE .req r7
MASK_ADDR .req r8
MASK_STRIDE .req r9
W .req r10
H .req r11
S_8888_L .qn q0
S_8888_H .qn q1
D_8888_L .qn q2
D_8888_H .qn q3
S_B .dn d0
S_G .dn d1
S_R .dn d2
S_A .dn d3
D_B .dn d4
D_G .dn d5
D_R .dn d6
D_A .dn d7
S_565 .qn q8
D_565 .qn q9
S_565_L .dn d16
S_565_H .dn d17
D_565_L .dn d18
D_565_H .dn d19
PREMULT_B .qn q10
PREMULT_G .qn q11
PREMULT_R .qn q12
TMP_Q0 .qn q13
TMP_D0 .dn d26
TMP_D1 .dn d27
TMP_Q1 .qn q14
TMP_D2 .dn d28
TMP_D3 .dn d29
M_A .dn d30
OPA .dn d31
.macro convert reg, bpp, intlv
.if bpp >= 31
.if intlv
vzip.8 reg&_B, reg&_R @ BRBRBRBR GGGGGGGG BRBRBRBR AAAAAAAA
vzip.8 reg&_G, reg&_A @ BRBRBRBR GAGAGAGA BRBRBRBR GAGAGAGA
vzip.8 reg&_R, reg&_A @ BRBRBRBR GAGAGAGA BGRABGRA BGRABGRA
vzip.8 reg&_B, reg&_G @ BGRABGRA BGRABGRA BGRABGRA BGRABGRA
.else
vuzp.8 reg&_B, reg&_G @ BRBRBRBR GAGAGAGA BGRABGRA BGRABGRA
vuzp.8 reg&_R, reg&_A @ BRBRBRBR GAGAGAGA BRBRBRBR GAGAGAGA
vuzp.8 reg&_G, reg&_A @ BRBRBRBR GGGGGGGG BRBRBRBR AAAAAAAA
vuzp.8 reg&_B, reg&_R @ BBBBBBBB GGGGGGGG RRRRRRRR AAAAAAAA
.endif
.elseif bpp == 24
.if intlv @ for init only (same B,G,R for all channels)
vzip.8 reg&_B, reg&_G @ BGBGBGBG BGBGBGBG RRRRRRRR
vzip.16 reg&_B, reg&_R @ BGRRBGRR BGBGBGBG BGRRBGRR
vsli.64 reg&_8888_L, reg&_8888_L, #24 @ BGRBGRRB BGBBGBGB
vsli.64 reg&_B, reg&_G, #48 @ BGRBGRBG
vsri.64 reg&_R, reg&_B, #8 @ GRBGRBGR
vsri.64 reg&_G, reg&_R, #8 @ RBGRBGRB
.endif
.elseif bpp == 16
.if intlv
vshll.u8 reg&_565, reg&_R, #8 @ RRRrrRRR 00000000
vshll.u8 TMP_Q0, reg&_G, #8 @ GGGgggGG 00000000
vshll.u8 TMP_Q1, reg&_B, #8 @ BBBbbBBB 00000000
vsri.16 reg&_565, TMP_Q0, #5 @ RRRrrGGG gggGG000
vsri.16 reg&_565, TMP_Q1, #11 @ RRRrrGGG gggBBBbb
.else
vshr.u8 TMP_Q0, reg&_565, #3 @ 000RRRrr 000gggBB
vshrn.i16 reg&_G, reg&_565, #5 @ rrGGGggg
vshrn.i16 reg&_R, TMP_Q0, #5 @ RRRrr000
vshl.i8 reg&_G, reg&_G, #2 @ GGGggg00
vshl.i16 TMP_Q1, reg&_565, #3 @ rrGGGggg BBBbb000
vsri.8 reg&_R, reg&_R, #5 @ RRRrrRRR
vmovn.i16 reg&_B, TMP_Q1 @ BBBbb000
vsri.8 reg&_G, reg&_G, #6 @ GGGgggGG
vsri.8 reg&_B, reg&_B, #5 @ BBBbbBBB
.endif
.endif
.endm
.macro ldst op, bpp, len, mem, reg, cvt, wb
.if bpp >= 31
.if len == 8
.if cvt
v&op&4.8 {reg&_B, reg&_G, reg&_R, reg&_A}, [mem&_ADDR]&wb
.else
v&op&1.32 {reg&_8888_L, reg&_8888_H}, [mem&_ADDR]&wb
.endif
.else
.if (op == st) && cvt
convert reg, bpp, 1
.endif
.if len == 7
v&op&1.32 {reg&_8888_L}, [mem&_ADDR]!
v&op&1.32 {reg&_R}, [mem&_ADDR]!
v&op&1.32 {reg&_A[0]}, [mem&_ADDR]!
.elseif len == 6
v&op&1.32 {reg&_8888_L}, [mem&_ADDR]!
v&op&1.32 {reg&_R}, [mem&_ADDR]!
.elseif len == 5
v&op&1.32 {reg&_8888_L}, [mem&_ADDR]!
v&op&1.32 {reg&_R[0]}, [mem&_ADDR]!
.elseif len == 4
v&op&1.32 {reg&_8888_L}, [mem&_ADDR]&wb
.elseif len == 3
v&op&1.32 {reg&_B}, [mem&_ADDR]!
v&op&1.32 {reg&_G[0]}, [mem&_ADDR]!
.elseif len == 2
v&op&1.32 {reg&_B}, [mem&_ADDR]&wb
.elseif len == 1
v&op&1.32 {reg&_B[0]}, [mem&_ADDR]&wb
.else
.error "[32bpp]len should be 1~8"
.endif
.if (op == ld) && cvt
convert reg, bpp, 0
.endif
.if (wb&1) && (len != 4) && (len != 2) && (len != 1)
sub mem&_ADDR, #4*len
.endif
.endif
.elseif bpp == 24
.if len == 8
.if cvt
v&op&3.8 {reg&_B, reg&_G, reg&_R}, [mem&_ADDR]&wb
.else
v&op&1.8 {reg&_B, reg&_G, reg&_R}, [mem&_ADDR]&wb
.endif
.elseif (len < 8) && (len > 0)
.if cvt
v&op&3.8 {reg&_B[0], reg&_G[0], reg&_R[0]}, [mem&_ADDR]!
.if len > 1
v&op&3.8 {reg&_B[1], reg&_G[1], reg&_R[1]}, [mem&_ADDR]!
.endif
.if len > 2
v&op&3.8 {reg&_B[2], reg&_G[2], reg&_R[2]}, [mem&_ADDR]!
.endif
.if len > 3
v&op&3.8 {reg&_B[3], reg&_G[3], reg&_R[3]}, [mem&_ADDR]!
.endif
.if len > 4
v&op&3.8 {reg&_B[4], reg&_G[4], reg&_R[4]}, [mem&_ADDR]!
.endif
.if len > 5
v&op&3.8 {reg&_B[5], reg&_G[5], reg&_R[5]}, [mem&_ADDR]!
.endif
.if len > 6
v&op&3.8 {reg&_B[6], reg&_G[6], reg&_R[6]}, [mem&_ADDR]!
.endif
.if wb&1
sub mem&_ADDR, #3*len
.endif
.else
.if len == 7
v&op&1.32 {reg&_8888_L}, [mem&_ADDR]!
v&op&1.32 {reg&_R[0]}, [mem&_ADDR]!
v&op&1.8 {reg&_R[4]}, [mem&_ADDR]!
.elseif len == 6
v&op&1.32 {reg&_8888_L}, [mem&_ADDR]!
v&op&1.16 {reg&_R[0]}, [mem&_ADDR]!
.elseif len == 5
v&op&1.32 {reg&_B}, [mem&_ADDR]!
v&op&1.32 {reg&_G[0]}, [mem&_ADDR]!
v&op&1.16 {reg&_G[2]}, [mem&_ADDR]!
v&op&1.8 {reg&_G[6]}, [mem&_ADDR]!
.elseif len == 4
v&op&1.32 {reg&_B}, [mem&_ADDR]!
v&op&1.32 {reg&_G[0]}, [mem&_ADDR]!
.elseif len == 3
v&op&1.32 {reg&_B}, [mem&_ADDR]!
v&op&1.8 {reg&_G[0]}, [mem&_ADDR]!
.elseif len == 2
v&op&1.32 {reg&_B[0]}, [mem&_ADDR]!
v&op&1.16 {reg&_B[2]}, [mem&_ADDR]!
.elseif len == 1
v&op&1.16 {reg&_B[0]}, [mem&_ADDR]!
v&op&1.8 {reg&_B[2]}, [mem&_ADDR]!
.endif
.if wb&1
sub mem&_ADDR, #3*len
.endif
.endif
.else
.error "[24bpp]len should be 1~8"
.endif
.elseif bpp == 16
.if (op == st) && cvt
convert reg, bpp, 1
.endif
.if len == 8
v&op&1.16 {reg&_565}, [mem&_ADDR]&wb
.elseif len == 7
v&op&1.16 {reg&_565_L}, [mem&_ADDR]!
v&op&1.32 {reg&_565_H[0]}, [mem&_ADDR]!
v&op&1.16 {reg&_565_H[2]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #14
.endif
.elseif len == 6
v&op&1.16 {reg&_565_L}, [mem&_ADDR]!
v&op&1.32 {reg&_565_H[0]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #12
.endif
.elseif len == 5
v&op&1.16 {reg&_565_L}, [mem&_ADDR]!
v&op&1.16 {reg&_565_H[0]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #10
.endif
.elseif len == 4
v&op&1.16 {reg&_565_L}, [mem&_ADDR]&wb
.elseif len == 3
v&op&1.32 {reg&_565_L[0]}, [mem&_ADDR]!
v&op&1.16 {reg&_565_L[2]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #6
.endif
.elseif len == 2
v&op&1.32 {reg&_565_L[0]}, [mem&_ADDR]&wb
.elseif len == 1
v&op&1.16 {reg&_565_L[0]}, [mem&_ADDR]&wb
.else
.error "[16bpp]len should be 1~8"
.endif
.if (op == ld) && cvt
convert reg, bpp, 0
.endif
.elseif bpp == 8
.if len == 8
v&op&1.8 {reg&_A}, [mem&_ADDR]&wb
.elseif len == 7
v&op&1.32 {reg&_A[0]}, [mem&_ADDR]!
v&op&1.16 {reg&_A[2]}, [mem&_ADDR]!
v&op&1.8 {reg&_A[6]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #7
.endif
.elseif len == 6
v&op&1.32 {reg&_A[0]}, [mem&_ADDR]!
v&op&1.16 {reg&_A[2]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #6
.endif
.elseif len == 5
v&op&1.32 {reg&_A[0]}, [mem&_ADDR]!
v&op&1.8 {reg&_A[4]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #5
.endif
.elseif len == 4
v&op&1.32 {reg&_A[0]}, [mem&_ADDR]&wb
.elseif len == 3
v&op&1.16 {reg&_A[0]}, [mem&_ADDR]!
v&op&1.8 {reg&_A[2]}, [mem&_ADDR]!
.if wb&1
sub mem&_ADDR, #3
.endif
.elseif len == 2
v&op&1.16 {reg&_A[0]}, [mem&_ADDR]&wb
.elseif len == 1
v&op&1.8 {reg&_A[0]}, [mem&_ADDR]&wb
.else
.error "[8bpp]len should be 1~8"
.endif
.elseif (bpp == 0) && wb&1
.if len == 8
v&op&3.8 {reg&_B[], reg&_G[], reg&_R[]}, [mem&_ADDR]
.else
.error "[color]len should be 8"
.endif
.endif
.if (op == ld) && cvt && (bpp > 8) && (bpp < 32)
vmov.u8 reg&_A, #0xFF
.endif
.endm
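@ Widening multiply of the source B, G and R lanes by alpha into the 16-bit
@ PREMULT_* accumulators; the blend step later accumulates dst * (255 - alpha)
@ and narrows the result back to 8 bits.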
.macro premult alpha
vmull.u8 PREMULT_B, S_B, alpha
vmull.u8 PREMULT_G, S_G, alpha
vmull.u8 PREMULT_R, S_R, alpha
.endm
.macro init src_bpp, dst_bpp, mask, opa
ldr DST_ADDR, [r0, #4]
ldr DST_W, [r0, #8]
ldr DST_H, [r0, #12]
ldr DST_STRIDE, [r0, #16]
ldr SRC_ADDR, [r0, #20]
.if src_bpp > 0
ldr SRC_STRIDE, [r0, #24]
.endif
.if mask
ldr MASK_ADDR, [r0, #28]
ldr MASK_STRIDE, [r0, #32]
sub MASK_STRIDE, MASK_STRIDE, DST_W
.endif
.if opa
vld1.8 {OPA[]}, [r0]
.else
vmov.u8 OPA, #0xFF
.endif
vmvn D_A, OPA
.if dst_bpp == 16
sub DST_STRIDE, DST_STRIDE, DST_W, lsl #1
.elseif dst_bpp == 24
sub DST_STRIDE, DST_STRIDE, DST_W
sub DST_STRIDE, DST_STRIDE, DST_W, lsl #1
.elseif dst_bpp >= 31
sub DST_STRIDE, DST_STRIDE, DST_W, lsl #2
.endif
.if src_bpp == 0
.if mask || opa
ldst ld, src_bpp, 8, SRC, S, 1
vmov.u8 S_A, #0xFF
premult OPA
.else
ldst ld, src_bpp, 8, SRC, D, 1
vmov.u8 D_A, #0xFF
convert D, dst_bpp, 1
.endif
.else
.if src_bpp == 16
sub SRC_STRIDE, SRC_STRIDE, DST_W, lsl #1
.elseif src_bpp == 24
sub SRC_STRIDE, SRC_STRIDE, DST_W
sub SRC_STRIDE, SRC_STRIDE, DST_W, lsl #1
.elseif src_bpp >= 31
sub SRC_STRIDE, SRC_STRIDE, DST_W, lsl #2
.endif
.endif
mvn FG_MASK, #0
mvn BG_MASK, #0
.endm
@ input: M_A = 255 - fg.alpha
.macro calc_alpha len
vmov.u8 TMP_D0, #0xFD
vmvn D_A, D_A
vcge.u8 TMP_D1, S_A, TMP_D0 @ if (fg.alpha >= LV_OPA_MAX
vcge.u8 TMP_D2, D_A, TMP_D0 @ || bg.alpha <= LV_OPA_MIN)
vorr TMP_D2, TMP_D1
vcge.u8 TMP_D3, M_A, TMP_D0 @ elseif (fg.alpha <= LV_OPA_MIN)
vmvn TMP_Q1, TMP_Q1
vshrn.i16 TMP_D0, TMP_Q1, #4
vmov FG_MASK, BG_MASK, TMP_D0
cbz FG_MASK, 99f @ return fg;
vmull.u8 TMP_Q0, M_A, D_A @ D_A = 255 - LV_OPA_MIX2(255 - fg.alpha, 255 - bg.alpha)
vqrshrn.u16 M_A, TMP_Q0, #8
vbif M_A, D_A, TMP_D3 @ insert original D_A when fg.alpha <= LV_OPA_MIN
vmvn D_A, M_A
cbz BG_MASK, 99f @ return bg;
vmov.u8 TMP_D2, #0xFF
vmovl.u8 TMP_Q0, D_A
.if len > 4
vmovl.u16 S_565, TMP_D1
.endif
vmovl.u16 TMP_Q0, TMP_D0
vmull.u8 TMP_Q1, S_A, TMP_D2
vcvt.f32.u32 TMP_Q0, TMP_Q0
.if len > 4
vmovl.u16 D_565, TMP_D3
vcvt.f32.u32 S_565, S_565
.endif
vmovl.u16 TMP_Q1, TMP_D2
vrecpe.f32 TMP_Q0, TMP_Q0
vcvt.f32.u32 TMP_Q1, TMP_Q1
.if len > 4
vcvt.f32.u32 D_565, D_565
vrecpe.f32 S_565, S_565
.endif
vmul.f32 TMP_Q0, TMP_Q0, TMP_Q1
.if len > 4
vmul.f32 S_565, S_565, D_565
.endif
vcvt.u32.f32 TMP_Q0, TMP_Q0
.if len > 4
vcvt.u32.f32 S_565, S_565
.endif
vmovn.u32 TMP_D0, TMP_Q0
.if len > 4
vmovn.u32 TMP_D1, S_565
.endif
vmovn.u16 TMP_D0, TMP_Q0
premult TMP_D0
vmvn M_A, TMP_D0
99:
.endm
.macro blend mode, dst_bpp
.if dst_bpp == 32
vmov TMP_D0, FG_MASK, BG_MASK
vmovl.s8 TMP_Q0, TMP_D0
vsli.8 TMP_Q0, TMP_Q0, #4
cbz FG_MASK, 98f
.endif
.if mode == normal
.if dst_bpp == 32
cbz BG_MASK, 97f
mvns BG_MASK, BG_MASK
beq 96f
vmov S_565_L, D_B
vmov S_565_H, D_G
vmov D_565_L, D_R
.endif
96:
vmlal.u8 PREMULT_B, D_B, M_A
vmlal.u8 PREMULT_G, D_G, M_A
vmlal.u8 PREMULT_R, D_R, M_A
vqrshrn.u16 D_B, PREMULT_B, #8
vqrshrn.u16 D_G, PREMULT_G, #8
vqrshrn.u16 D_R, PREMULT_R, #8
.if dst_bpp == 32
beq 97f
vbif D_B, S_565_L, TMP_D1
vbif D_G, S_565_H, TMP_D1
vbif D_R, D_565_L, TMP_D1
97:
mvns FG_MASK, FG_MASK
beq 99f
.endif
.else
.error "blend mode is unsupported"
.endif
.if dst_bpp == 32
98:
vbif D_B, S_B, TMP_D0
vbif D_G, S_G, TMP_D0
vbif D_R, S_R, TMP_D0
vbif D_A, S_A, TMP_D0
99:
.endif
.endm
.macro process len, src_bpp, dst_bpp, mask, opa, mode
.if (src_bpp < 32) && (mask == 0) && (opa == 0)
@ no blend
.if src_bpp == 0 || src_bpp == dst_bpp
ldst ld, src_bpp, len, SRC, D, 0, !
ldst st, dst_bpp, len, DST, D, 0, !
.else
ldst ld, src_bpp, len, SRC, D, 1, !
ldst st, dst_bpp, len, DST, D, 1, !
.endif
.elseif src_bpp < 32
@ no src_a
.if src_bpp > 0
ldst ld, src_bpp, len, SRC, S, 1, !
.endif
ldst ld, dst_bpp, len, DST, D, 1
.if mask
ldst ld, 8, len, MASK, S, 1, !
.if opa
vmull.u8 TMP_Q0, S_A, OPA
vqrshrn.u16 S_A, TMP_Q0, #8
.endif
vmvn M_A, S_A
.if dst_bpp < 32
premult S_A
.else
calc_alpha len
.endif
.else
vmvn M_A, OPA
.if dst_bpp < 32
premult OPA
.else
vmov S_A, OPA
calc_alpha len
.endif
.endif
blend mode, dst_bpp
ldst st, dst_bpp, len, DST, D, 1, !
.else
@ src_a (+mask) (+opa)
ldst ld, src_bpp, len, SRC, S, 1, !
ldst ld, dst_bpp, len, DST, D, 1
.if mask == 0
.if opa
vmull.u8 TMP_Q0, S_A, OPA
vqrshrn.u16 S_A, TMP_Q0, #8
.endif
.else
ldst ld, 8, len, MASK, M, 1, !
vmull.u8 TMP_Q0, S_A, M_A
vqrshrn.u16 S_A, TMP_Q0, #8
.if opa
vmull.u8 TMP_Q0, S_A, OPA
vqrshrn.u16 S_A, TMP_Q0, #8
.endif
.endif
vmvn M_A, S_A
.if dst_bpp < 32
premult S_A
.else
calc_alpha len
.endif
blend mode, dst_bpp
ldst st, dst_bpp, len, DST, D, 1, !
.endif
.endm
.macro tail src_bpp, dst_bpp, mask, opa, mode
tst DST_W, #4
beq 3f
tst DST_W, #2
beq 5f
tst DST_W, #1
beq 6f
process 7, src_bpp, dst_bpp, mask, opa, mode
b 0f
6:
process 6, src_bpp, dst_bpp, mask, opa, mode
b 0f
5:
tst DST_W, #1
beq 4f
process 5, src_bpp, dst_bpp, mask, opa, mode
b 0f
4:
process 4, src_bpp, dst_bpp, mask, opa, mode
b 0f
3:
tst DST_W, #2
beq 1f
tst DST_W, #1
beq 2f
process 3, src_bpp, dst_bpp, mask, opa, mode
b 0f
2:
process 2, src_bpp, dst_bpp, mask, opa, mode
b 0f
1:
process 1, src_bpp, dst_bpp, mask, opa, mode
0:
.endm
.macro next src_bpp, mask
add DST_ADDR, DST_ADDR, DST_STRIDE
.if src_bpp
add SRC_ADDR, SRC_ADDR, SRC_STRIDE
.endif
.if mask
add MASK_ADDR, MASK_ADDR, MASK_STRIDE
.endif
.endm
.macro enter
push {r4-r11, lr}
.endm
.macro exit
pop {r4-r11, pc}
.endm
.macro preload mem, bpp
.if bpp >= 31
pld [mem&_ADDR, DST_W, lsl #2]
.elseif bpp == 24
add W, DST_W, DST_W, lsl #1
pld [mem&_ADDR, W]
.elseif bpp == 16
pld [mem&_ADDR, DST_W, lsl #1]
.elseif bpp == 8
pld [mem&_ADDR, DST_W]
.endif
.endm
.macro blender src_bpp, dst_bpp, mask, opa, mode
enter
init src_bpp, dst_bpp, mask, opa
movs H, DST_H
beq 0f
preload SRC, src_bpp
.if mask || opa || (src_bpp == 32)
preload DST, dst_bpp
.endif
subs W, DST_W, #8
blt 7f
9:
process 8, src_bpp, dst_bpp, mask, opa, mode
subs W, W, #8
bge 9b
tst DST_W, #7
beq 8f
tail src_bpp, dst_bpp, mask, opa, mode
8:
next src_bpp, mask
preload SRC, src_bpp
.if mask || opa || (src_bpp == 32)
preload DST, dst_bpp
.endif
sub W, DST_W, #8
subs H, H, #1
bgt 9b
exit
7:
tail src_bpp, dst_bpp, mask, opa, mode
next src_bpp, mask
subs H, H, #1
bgt 7b
exit
.endm
.macro export name, src_bpp, dst_bpp, mask, opa, mode
.thumb_func
.func name
.global name
.hidden name
name&:
blender src_bpp, dst_bpp, mask, opa, mode
.endfunc
.endm
.macro export_set src, dst, src_bpp, dst_bpp, mode
.if src == color
export _lv_&src&_blend_to_&dst&_neon, src_bpp, dst_bpp, 0, 0, mode
export _lv_&src&_blend_to_&dst&_with_opa_neon, src_bpp, dst_bpp, 0, 1, mode
export _lv_&src&_blend_to_&dst&_with_mask_neon, src_bpp, dst_bpp, 1, 0, mode
export _lv_&src&_blend_to_&dst&_mix_mask_opa_neon, src_bpp, dst_bpp, 1, 1, mode
.else
export _lv_&src&_blend_&mode&_to_&dst&_neon, src_bpp, dst_bpp, 0, 0, mode
export _lv_&src&_blend_&mode&_to_&dst&_with_opa_neon, src_bpp, dst_bpp, 0, 1, mode
export _lv_&src&_blend_&mode&_to_&dst&_with_mask_neon, src_bpp, dst_bpp, 1, 0, mode
export _lv_&src&_blend_&mode&_to_&dst&_mix_mask_opa_neon, src_bpp, dst_bpp, 1, 1, mode
.endif
.endm
export_set color, rgb565, 0, 16, normal
export_set rgb565, rgb565, 16, 16, normal
export_set rgb888, rgb565, 24, 16, normal
export_set xrgb8888, rgb565, 31, 16, normal
export_set argb8888, rgb565, 32, 16, normal
export_set color, rgb888, 0, 24, normal
export_set rgb565, rgb888, 16, 24, normal
export_set rgb888, rgb888, 24, 24, normal
export_set xrgb8888, rgb888, 31, 24, normal
export_set argb8888, rgb888, 32, 24, normal
export_set color, xrgb8888, 0, 31, normal
export_set rgb565, xrgb8888, 16, 31, normal
export_set rgb888, xrgb8888, 24, 31, normal
export_set xrgb8888, xrgb8888, 31, 31, normal
export_set argb8888, xrgb8888, 32, 31, normal
export_set color, argb8888, 0, 32, normal
export_set rgb565, argb8888, 16, 32, normal
export_set rgb888, argb8888, 24, 32, normal
export_set xrgb8888, argb8888, 31, 32, normal
export_set argb8888, argb8888, 32, 32, normal
#endif /*LV_USE_DRAW_SW_ASM == LV_DRAW_SW_ASM_NEON*/
|
linux-sunxi/cedarx-libs | 1,333 | libcedarv/linux-armhf2/include/include_platform/CHIP_A23/mach/entry-macro.S | /*
* arch/arm/mach-versatile/include/mach/entry-macro.S
*
* Low-level IRQ helper macros for Versatile platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/vic_regs.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =VA_VIC_BASE @IO_ADDRESS(VERSATILE_VIC_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #0x08] @ get pending status
mov \irqnr, #0
teq \irqstat, #0
beq 1003f
1001: tst \irqstat, #15
bne 1002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 1001b
1002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 1002b
1003: /* EQ will be set if no irqs pending */
ldr \irqstat, [\base, #0x0c] @ get pending status
teq \irqstat, #0
beq 3000f
mov \irqnr, #32
2001: tst \irqstat, #15
bne 2002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 2001b
2002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 2002b
3000:
.endm
|
linux-sunxi/cedarx-libs | 1,333 | libcedarv/linux-armhf2/include/include_platform/CHIP_A13/mach/entry-macro.S | /*
* arch/arm/mach-versatile/include/mach/entry-macro.S
*
* Low-level IRQ helper macros for Versatile platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/vic_regs.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =VA_VIC_BASE @IO_ADDRESS(VERSATILE_VIC_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #0x08] @ get pending status
mov \irqnr, #0
teq \irqstat, #0
beq 1003f
1001: tst \irqstat, #15
bne 1002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 1001b
1002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 1002b
1003: /* EQ will be set if no irqs pending */
ldr \irqstat, [\base, #0x0c] @ get pending status
teq \irqstat, #0
beq 3000f
mov \irqnr, #32
2001: tst \irqstat, #15
bne 2002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 2001b
2002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 2002b
3000:
.endm
|
linux-sunxi/cedarx-libs | 1,333 | libcedarv/linux-armhf2/include/include_platform/CHIP_A20/mach/entry-macro.S | /*
* arch/arm/mach-versatile/include/mach/entry-macro.S
*
* Low-level IRQ helper macros for Versatile platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/vic_regs.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =VA_VIC_BASE @IO_ADDRESS(VERSATILE_VIC_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #0x08] @ get pending status
mov \irqnr, #0
teq \irqstat, #0
beq 1003f
1001: tst \irqstat, #15
bne 1002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 1001b
1002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 1002b
1003: /* EQ will be set if no irqs pending */
ldr \irqstat, [\base, #0x0c] @ get pending status
teq \irqstat, #0
beq 3000f
mov \irqnr, #32
2001: tst \irqstat, #15
bne 2002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 2001b
2002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 2002b
3000:
.endm
|
linux-sunxi/cedarx-libs | 1,333 | libcedarv/linux-armhf2/include/include_platform/CHIP_A10/mach/entry-macro.S | /*
* arch/arm/mach-versatile/include/mach/entry-macro.S
*
* Low-level IRQ helper macros for Versatile platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/vic_regs.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =VA_VIC_BASE @IO_ADDRESS(VERSATILE_VIC_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #0x08] @ get pending status
mov \irqnr, #0
teq \irqstat, #0
beq 1003f
1001: tst \irqstat, #15
bne 1002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 1001b
1002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 1002b
1003: /* EQ will be set if no irqs pending */
ldr \irqstat, [\base, #0x0c] @ get pending status
teq \irqstat, #0
beq 3000f
mov \irqnr, #32
2001: tst \irqstat, #15
bne 2002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 2001b
2002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 2002b
3000:
.endm
|
linux-sunxi/cedarx-libs | 1,333 | libcedarv/linux-armhf2/include/include_platform/CHIP_A31/mach/entry-macro.S | /*
* arch/arm/mach-versatile/include/mach/entry-macro.S
*
* Low-level IRQ helper macros for Versatile platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/vic_regs.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =VA_VIC_BASE @IO_ADDRESS(VERSATILE_VIC_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #0x08] @ get pending status
mov \irqnr, #0
teq \irqstat, #0
beq 1003f
1001: tst \irqstat, #15
bne 1002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 1001b
1002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 1002b
1003: /* EQ will be set if no irqs pending */
ldr \irqstat, [\base, #0x0c] @ get pending status
teq \irqstat, #0
beq 3000f
mov \irqnr, #32
2001: tst \irqstat, #15
bne 2002f
add \irqnr, \irqnr, #4
movs \irqstat, \irqstat, lsr #4
bne 2001b
2002: tst \irqstat, #1
bne 3000f
add \irqnr, \irqnr, #1
movs \irqstat, \irqstat, lsr #1
bne 2002b
3000:
.endm
|
liruqi/Mume-iOS | 18,086 | Library/ShadowPath/ShadowPath/shadowsocks-libev/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/stream_salsa20_amd64_xmm6.S | #ifdef HAVE_AMD64_ASM
.text
.p2align 5
.globl crypto_stream_salsa20
.globl _crypto_stream_salsa20
#ifdef __ELF__
.type crypto_stream_salsa20, @function
.type _crypto_stream_salsa20, @function
#endif
crypto_stream_salsa20:
_crypto_stream_salsa20:
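/* crypto_stream_salsa20(c, clen, n, k): write clen bytes of raw keystream
   to c.  After the register save below, the destination is zero-filled with
   rep stosb and control falls into the xor routine with the block counter
   at 0, so xoring the zeroed buffer yields the plain keystream. */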
mov %rsp,%r11
and $31,%r11
add $512,%r11
sub %r11,%rsp
movq %r11,416(%rsp)
movq %r12,424(%rsp)
movq %r13,432(%rsp)
movq %r14,440(%rsp)
movq %r15,448(%rsp)
movq %rbx,456(%rsp)
movq %rbp,464(%rsp)
mov %rsi,%r9
mov %rdi,%rdi
mov %rdi,%rsi
mov %rdx,%rdx
mov %rcx,%r10
cmp $0,%r9
jbe ._done
mov $0,%rax
mov %r9,%rcx
rep stosb
sub %r9,%rdi
movq $0,472(%rsp)
jmp ._start
.text
.p2align 5
.globl crypto_stream_salsa20_xor_ic
.globl _crypto_stream_salsa20_xor_ic
#ifdef __ELF__
.type crypto_stream_salsa20_xor_ic, @function
.type _crypto_stream_salsa20_xor_ic, @function
#endif
crypto_stream_salsa20_xor_ic:
_crypto_stream_salsa20_xor_ic:
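/* crypto_stream_salsa20_xor_ic(c, m, mlen, n, ic, k): c = m xor keystream,
   starting at 64-byte block number ic.  ic is parked at 472(%rsp) and is
   used as the 64-bit Salsa20 block counter. */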
mov %rsp,%r11
and $31,%r11
add $512,%r11
sub %r11,%rsp
movq %r11,416(%rsp)
movq %r12,424(%rsp)
movq %r13,432(%rsp)
movq %r14,440(%rsp)
movq %r15,448(%rsp)
movq %rbx,456(%rsp)
movq %rbp,464(%rsp)
mov %rdi,%rdi
mov %rsi,%rsi
mov %r9,%r10
movq %r8,472(%rsp)
mov %rdx,%r9
mov %rcx,%rdx
cmp $0,%r9
jbe ._done
._start:
movl 20(%r10),%ecx
movl 0(%r10),%r8d
movl 0(%rdx),%eax
movl 16(%r10),%r11d
movl %ecx,64(%rsp)
movl %r8d,4+64(%rsp)
movl %eax,8+64(%rsp)
movl %r11d,12+64(%rsp)
movl 24(%r10),%r8d
movl 4(%r10),%eax
movl 4(%rdx),%edx
movq 472(%rsp),%rcx
movl %ecx,80(%rsp)
movl %r8d,4+80(%rsp)
movl %eax,8+80(%rsp)
movl %edx,12+80(%rsp)
movl 12(%r10),%edx
shr $32,%rcx
movl 28(%r10),%r8d
movl 8(%r10),%eax
movl %edx,96(%rsp)
movl %ecx,4+96(%rsp)
movl %r8d,8+96(%rsp)
movl %eax,12+96(%rsp)
mov $1634760805,%rdx
mov $857760878,%rcx
mov $2036477234,%r8
mov $1797285236,%rax
movl %edx,112(%rsp)
movl %ecx,4+112(%rsp)
movl %r8d,8+112(%rsp)
movl %eax,12+112(%rsp)
cmp $256,%r9
jb ._bytesbetween1and255
movdqa 112(%rsp),%xmm0
pshufd $0x55,%xmm0,%xmm1
pshufd $0xaa,%xmm0,%xmm2
pshufd $0xff,%xmm0,%xmm3
pshufd $0x00,%xmm0,%xmm0
movdqa %xmm1,128(%rsp)
movdqa %xmm2,144(%rsp)
movdqa %xmm3,160(%rsp)
movdqa %xmm0,176(%rsp)
movdqa 64(%rsp),%xmm0
pshufd $0xaa,%xmm0,%xmm1
pshufd $0xff,%xmm0,%xmm2
pshufd $0x00,%xmm0,%xmm3
pshufd $0x55,%xmm0,%xmm0
movdqa %xmm1,192(%rsp)
movdqa %xmm2,208(%rsp)
movdqa %xmm3,224(%rsp)
movdqa %xmm0,240(%rsp)
movdqa 80(%rsp),%xmm0
pshufd $0xff,%xmm0,%xmm1
pshufd $0x55,%xmm0,%xmm2
pshufd $0xaa,%xmm0,%xmm0
movdqa %xmm1,256(%rsp)
movdqa %xmm2,272(%rsp)
movdqa %xmm0,288(%rsp)
movdqa 96(%rsp),%xmm0
pshufd $0x00,%xmm0,%xmm1
pshufd $0xaa,%xmm0,%xmm2
pshufd $0xff,%xmm0,%xmm0
movdqa %xmm1,304(%rsp)
movdqa %xmm2,320(%rsp)
movdqa %xmm0,336(%rsp)
._bytesatleast256:
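/* Fast path: 256 bytes (four consecutive 64-byte blocks) per pass.  The
   invariant state words were broadcast into 128(%rsp)..336(%rsp) above and
   only the per-block counters at 352/368(%rsp) differ per lane, so all four
   blocks advance in lockstep; each pslld/psrld/pxor triple below is one
   x ^= rotl32(y + z, k) step with k in {7, 9, 13, 18}. */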
movq 472(%rsp),%rdx
mov %rdx,%rcx
shr $32,%rcx
movl %edx,352(%rsp)
movl %ecx,368(%rsp)
add $1,%rdx
mov %rdx,%rcx
shr $32,%rcx
movl %edx,4+352(%rsp)
movl %ecx,4+368(%rsp)
add $1,%rdx
mov %rdx,%rcx
shr $32,%rcx
movl %edx,8+352(%rsp)
movl %ecx,8+368(%rsp)
add $1,%rdx
mov %rdx,%rcx
shr $32,%rcx
movl %edx,12+352(%rsp)
movl %ecx,12+368(%rsp)
add $1,%rdx
mov %rdx,%rcx
shr $32,%rcx
movl %edx,80(%rsp)
movl %ecx,4+96(%rsp)
movq %rdx,472(%rsp)
movq %r9,480(%rsp)
mov $20,%rdx
movdqa 128(%rsp),%xmm0
movdqa 144(%rsp),%xmm1
movdqa 160(%rsp),%xmm2
movdqa 320(%rsp),%xmm3
movdqa 336(%rsp),%xmm4
movdqa 192(%rsp),%xmm5
movdqa 208(%rsp),%xmm6
movdqa 240(%rsp),%xmm7
movdqa 256(%rsp),%xmm8
movdqa 272(%rsp),%xmm9
movdqa 288(%rsp),%xmm10
movdqa 368(%rsp),%xmm11
movdqa 176(%rsp),%xmm12
movdqa 224(%rsp),%xmm13
movdqa 304(%rsp),%xmm14
movdqa 352(%rsp),%xmm15
._mainloop1:
movdqa %xmm1,384(%rsp)
movdqa %xmm2,400(%rsp)
movdqa %xmm13,%xmm1
paddd %xmm12,%xmm1
movdqa %xmm1,%xmm2
pslld $7,%xmm1
pxor %xmm1,%xmm14
psrld $25,%xmm2
pxor %xmm2,%xmm14
movdqa %xmm7,%xmm1
paddd %xmm0,%xmm1
movdqa %xmm1,%xmm2
pslld $7,%xmm1
pxor %xmm1,%xmm11
psrld $25,%xmm2
pxor %xmm2,%xmm11
movdqa %xmm12,%xmm1
paddd %xmm14,%xmm1
movdqa %xmm1,%xmm2
pslld $9,%xmm1
pxor %xmm1,%xmm15
psrld $23,%xmm2
pxor %xmm2,%xmm15
movdqa %xmm0,%xmm1
paddd %xmm11,%xmm1
movdqa %xmm1,%xmm2
pslld $9,%xmm1
pxor %xmm1,%xmm9
psrld $23,%xmm2
pxor %xmm2,%xmm9
movdqa %xmm14,%xmm1
paddd %xmm15,%xmm1
movdqa %xmm1,%xmm2
pslld $13,%xmm1
pxor %xmm1,%xmm13
psrld $19,%xmm2
pxor %xmm2,%xmm13
movdqa %xmm11,%xmm1
paddd %xmm9,%xmm1
movdqa %xmm1,%xmm2
pslld $13,%xmm1
pxor %xmm1,%xmm7
psrld $19,%xmm2
pxor %xmm2,%xmm7
movdqa %xmm15,%xmm1
paddd %xmm13,%xmm1
movdqa %xmm1,%xmm2
pslld $18,%xmm1
pxor %xmm1,%xmm12
psrld $14,%xmm2
pxor %xmm2,%xmm12
movdqa 384(%rsp),%xmm1
movdqa %xmm12,384(%rsp)
movdqa %xmm9,%xmm2
paddd %xmm7,%xmm2
movdqa %xmm2,%xmm12
pslld $18,%xmm2
pxor %xmm2,%xmm0
psrld $14,%xmm12
pxor %xmm12,%xmm0
movdqa %xmm5,%xmm2
paddd %xmm1,%xmm2
movdqa %xmm2,%xmm12
pslld $7,%xmm2
pxor %xmm2,%xmm3
psrld $25,%xmm12
pxor %xmm12,%xmm3
movdqa 400(%rsp),%xmm2
movdqa %xmm0,400(%rsp)
movdqa %xmm6,%xmm0
paddd %xmm2,%xmm0
movdqa %xmm0,%xmm12
pslld $7,%xmm0
pxor %xmm0,%xmm4
psrld $25,%xmm12
pxor %xmm12,%xmm4
movdqa %xmm1,%xmm0
paddd %xmm3,%xmm0
movdqa %xmm0,%xmm12
pslld $9,%xmm0
pxor %xmm0,%xmm10
psrld $23,%xmm12
pxor %xmm12,%xmm10
movdqa %xmm2,%xmm0
paddd %xmm4,%xmm0
movdqa %xmm0,%xmm12
pslld $9,%xmm0
pxor %xmm0,%xmm8
psrld $23,%xmm12
pxor %xmm12,%xmm8
movdqa %xmm3,%xmm0
paddd %xmm10,%xmm0
movdqa %xmm0,%xmm12
pslld $13,%xmm0
pxor %xmm0,%xmm5
psrld $19,%xmm12
pxor %xmm12,%xmm5
movdqa %xmm4,%xmm0
paddd %xmm8,%xmm0
movdqa %xmm0,%xmm12
pslld $13,%xmm0
pxor %xmm0,%xmm6
psrld $19,%xmm12
pxor %xmm12,%xmm6
movdqa %xmm10,%xmm0
paddd %xmm5,%xmm0
movdqa %xmm0,%xmm12
pslld $18,%xmm0
pxor %xmm0,%xmm1
psrld $14,%xmm12
pxor %xmm12,%xmm1
movdqa 384(%rsp),%xmm0
movdqa %xmm1,384(%rsp)
movdqa %xmm4,%xmm1
paddd %xmm0,%xmm1
movdqa %xmm1,%xmm12
pslld $7,%xmm1
pxor %xmm1,%xmm7
psrld $25,%xmm12
pxor %xmm12,%xmm7
movdqa %xmm8,%xmm1
paddd %xmm6,%xmm1
movdqa %xmm1,%xmm12
pslld $18,%xmm1
pxor %xmm1,%xmm2
psrld $14,%xmm12
pxor %xmm12,%xmm2
movdqa 400(%rsp),%xmm12
movdqa %xmm2,400(%rsp)
movdqa %xmm14,%xmm1
paddd %xmm12,%xmm1
movdqa %xmm1,%xmm2
pslld $7,%xmm1
pxor %xmm1,%xmm5
psrld $25,%xmm2
pxor %xmm2,%xmm5
movdqa %xmm0,%xmm1
paddd %xmm7,%xmm1
movdqa %xmm1,%xmm2
pslld $9,%xmm1
pxor %xmm1,%xmm10
psrld $23,%xmm2
pxor %xmm2,%xmm10
movdqa %xmm12,%xmm1
paddd %xmm5,%xmm1
movdqa %xmm1,%xmm2
pslld $9,%xmm1
pxor %xmm1,%xmm8
psrld $23,%xmm2
pxor %xmm2,%xmm8
movdqa %xmm7,%xmm1
paddd %xmm10,%xmm1
movdqa %xmm1,%xmm2
pslld $13,%xmm1
pxor %xmm1,%xmm4
psrld $19,%xmm2
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm1
paddd %xmm8,%xmm1
movdqa %xmm1,%xmm2
pslld $13,%xmm1
pxor %xmm1,%xmm14
psrld $19,%xmm2
pxor %xmm2,%xmm14
movdqa %xmm10,%xmm1
paddd %xmm4,%xmm1
movdqa %xmm1,%xmm2
pslld $18,%xmm1
pxor %xmm1,%xmm0
psrld $14,%xmm2
pxor %xmm2,%xmm0
movdqa 384(%rsp),%xmm1
movdqa %xmm0,384(%rsp)
movdqa %xmm8,%xmm0
paddd %xmm14,%xmm0
movdqa %xmm0,%xmm2
pslld $18,%xmm0
pxor %xmm0,%xmm12
psrld $14,%xmm2
pxor %xmm2,%xmm12
movdqa %xmm11,%xmm0
paddd %xmm1,%xmm0
movdqa %xmm0,%xmm2
pslld $7,%xmm0
pxor %xmm0,%xmm6
psrld $25,%xmm2
pxor %xmm2,%xmm6
movdqa 400(%rsp),%xmm2
movdqa %xmm12,400(%rsp)
movdqa %xmm3,%xmm0
paddd %xmm2,%xmm0
movdqa %xmm0,%xmm12
pslld $7,%xmm0
pxor %xmm0,%xmm13
psrld $25,%xmm12
pxor %xmm12,%xmm13
movdqa %xmm1,%xmm0
paddd %xmm6,%xmm0
movdqa %xmm0,%xmm12
pslld $9,%xmm0
pxor %xmm0,%xmm15
psrld $23,%xmm12
pxor %xmm12,%xmm15
movdqa %xmm2,%xmm0
paddd %xmm13,%xmm0
movdqa %xmm0,%xmm12
pslld $9,%xmm0
pxor %xmm0,%xmm9
psrld $23,%xmm12
pxor %xmm12,%xmm9
movdqa %xmm6,%xmm0
paddd %xmm15,%xmm0
movdqa %xmm0,%xmm12
pslld $13,%xmm0
pxor %xmm0,%xmm11
psrld $19,%xmm12
pxor %xmm12,%xmm11
movdqa %xmm13,%xmm0
paddd %xmm9,%xmm0
movdqa %xmm0,%xmm12
pslld $13,%xmm0
pxor %xmm0,%xmm3
psrld $19,%xmm12
pxor %xmm12,%xmm3
movdqa %xmm15,%xmm0
paddd %xmm11,%xmm0
movdqa %xmm0,%xmm12
pslld $18,%xmm0
pxor %xmm0,%xmm1
psrld $14,%xmm12
pxor %xmm12,%xmm1
movdqa %xmm9,%xmm0
paddd %xmm3,%xmm0
movdqa %xmm0,%xmm12
pslld $18,%xmm0
pxor %xmm0,%xmm2
psrld $14,%xmm12
pxor %xmm12,%xmm2
movdqa 384(%rsp),%xmm12
movdqa 400(%rsp),%xmm0
sub $2,%rdx
ja ._mainloop1
paddd 176(%rsp),%xmm12
paddd 240(%rsp),%xmm7
paddd 288(%rsp),%xmm10
paddd 336(%rsp),%xmm4
movd %xmm12,%rdx
movd %xmm7,%rcx
movd %xmm10,%r8
movd %xmm4,%r9
pshufd $0x39,%xmm12,%xmm12
pshufd $0x39,%xmm7,%xmm7
pshufd $0x39,%xmm10,%xmm10
pshufd $0x39,%xmm4,%xmm4
xorl 0(%rsi),%edx
xorl 4(%rsi),%ecx
xorl 8(%rsi),%r8d
xorl 12(%rsi),%r9d
movl %edx,0(%rdi)
movl %ecx,4(%rdi)
movl %r8d,8(%rdi)
movl %r9d,12(%rdi)
movd %xmm12,%rdx
movd %xmm7,%rcx
movd %xmm10,%r8
movd %xmm4,%r9
pshufd $0x39,%xmm12,%xmm12
pshufd $0x39,%xmm7,%xmm7
pshufd $0x39,%xmm10,%xmm10
pshufd $0x39,%xmm4,%xmm4
xorl 64(%rsi),%edx
xorl 68(%rsi),%ecx
xorl 72(%rsi),%r8d
xorl 76(%rsi),%r9d
movl %edx,64(%rdi)
movl %ecx,68(%rdi)
movl %r8d,72(%rdi)
movl %r9d,76(%rdi)
movd %xmm12,%rdx
movd %xmm7,%rcx
movd %xmm10,%r8
movd %xmm4,%r9
pshufd $0x39,%xmm12,%xmm12
pshufd $0x39,%xmm7,%xmm7
pshufd $0x39,%xmm10,%xmm10
pshufd $0x39,%xmm4,%xmm4
xorl 128(%rsi),%edx
xorl 132(%rsi),%ecx
xorl 136(%rsi),%r8d
xorl 140(%rsi),%r9d
movl %edx,128(%rdi)
movl %ecx,132(%rdi)
movl %r8d,136(%rdi)
movl %r9d,140(%rdi)
movd %xmm12,%rdx
movd %xmm7,%rcx
movd %xmm10,%r8
movd %xmm4,%r9
xorl 192(%rsi),%edx
xorl 196(%rsi),%ecx
xorl 200(%rsi),%r8d
xorl 204(%rsi),%r9d
movl %edx,192(%rdi)
movl %ecx,196(%rdi)
movl %r8d,200(%rdi)
movl %r9d,204(%rdi)
paddd 304(%rsp),%xmm14
paddd 128(%rsp),%xmm0
paddd 192(%rsp),%xmm5
paddd 256(%rsp),%xmm8
movd %xmm14,%rdx
movd %xmm0,%rcx
movd %xmm5,%r8
movd %xmm8,%r9
pshufd $0x39,%xmm14,%xmm14
pshufd $0x39,%xmm0,%xmm0
pshufd $0x39,%xmm5,%xmm5
pshufd $0x39,%xmm8,%xmm8
xorl 16(%rsi),%edx
xorl 20(%rsi),%ecx
xorl 24(%rsi),%r8d
xorl 28(%rsi),%r9d
movl %edx,16(%rdi)
movl %ecx,20(%rdi)
movl %r8d,24(%rdi)
movl %r9d,28(%rdi)
movd %xmm14,%rdx
movd %xmm0,%rcx
movd %xmm5,%r8
movd %xmm8,%r9
pshufd $0x39,%xmm14,%xmm14
pshufd $0x39,%xmm0,%xmm0
pshufd $0x39,%xmm5,%xmm5
pshufd $0x39,%xmm8,%xmm8
xorl 80(%rsi),%edx
xorl 84(%rsi),%ecx
xorl 88(%rsi),%r8d
xorl 92(%rsi),%r9d
movl %edx,80(%rdi)
movl %ecx,84(%rdi)
movl %r8d,88(%rdi)
movl %r9d,92(%rdi)
movd %xmm14,%rdx
movd %xmm0,%rcx
movd %xmm5,%r8
movd %xmm8,%r9
pshufd $0x39,%xmm14,%xmm14
pshufd $0x39,%xmm0,%xmm0
pshufd $0x39,%xmm5,%xmm5
pshufd $0x39,%xmm8,%xmm8
xorl 144(%rsi),%edx
xorl 148(%rsi),%ecx
xorl 152(%rsi),%r8d
xorl 156(%rsi),%r9d
movl %edx,144(%rdi)
movl %ecx,148(%rdi)
movl %r8d,152(%rdi)
movl %r9d,156(%rdi)
movd %xmm14,%rdx
movd %xmm0,%rcx
movd %xmm5,%r8
movd %xmm8,%r9
xorl 208(%rsi),%edx
xorl 212(%rsi),%ecx
xorl 216(%rsi),%r8d
xorl 220(%rsi),%r9d
movl %edx,208(%rdi)
movl %ecx,212(%rdi)
movl %r8d,216(%rdi)
movl %r9d,220(%rdi)
paddd 352(%rsp),%xmm15
paddd 368(%rsp),%xmm11
paddd 144(%rsp),%xmm1
paddd 208(%rsp),%xmm6
movd %xmm15,%rdx
movd %xmm11,%rcx
movd %xmm1,%r8
movd %xmm6,%r9
pshufd $0x39,%xmm15,%xmm15
pshufd $0x39,%xmm11,%xmm11
pshufd $0x39,%xmm1,%xmm1
pshufd $0x39,%xmm6,%xmm6
xorl 32(%rsi),%edx
xorl 36(%rsi),%ecx
xorl 40(%rsi),%r8d
xorl 44(%rsi),%r9d
movl %edx,32(%rdi)
movl %ecx,36(%rdi)
movl %r8d,40(%rdi)
movl %r9d,44(%rdi)
movd %xmm15,%rdx
movd %xmm11,%rcx
movd %xmm1,%r8
movd %xmm6,%r9
pshufd $0x39,%xmm15,%xmm15
pshufd $0x39,%xmm11,%xmm11
pshufd $0x39,%xmm1,%xmm1
pshufd $0x39,%xmm6,%xmm6
xorl 96(%rsi),%edx
xorl 100(%rsi),%ecx
xorl 104(%rsi),%r8d
xorl 108(%rsi),%r9d
movl %edx,96(%rdi)
movl %ecx,100(%rdi)
movl %r8d,104(%rdi)
movl %r9d,108(%rdi)
movd %xmm15,%rdx
movd %xmm11,%rcx
movd %xmm1,%r8
movd %xmm6,%r9
pshufd $0x39,%xmm15,%xmm15
pshufd $0x39,%xmm11,%xmm11
pshufd $0x39,%xmm1,%xmm1
pshufd $0x39,%xmm6,%xmm6
xorl 160(%rsi),%edx
xorl 164(%rsi),%ecx
xorl 168(%rsi),%r8d
xorl 172(%rsi),%r9d
movl %edx,160(%rdi)
movl %ecx,164(%rdi)
movl %r8d,168(%rdi)
movl %r9d,172(%rdi)
movd %xmm15,%rdx
movd %xmm11,%rcx
movd %xmm1,%r8
movd %xmm6,%r9
xorl 224(%rsi),%edx
xorl 228(%rsi),%ecx
xorl 232(%rsi),%r8d
xorl 236(%rsi),%r9d
movl %edx,224(%rdi)
movl %ecx,228(%rdi)
movl %r8d,232(%rdi)
movl %r9d,236(%rdi)
paddd 224(%rsp),%xmm13
paddd 272(%rsp),%xmm9
paddd 320(%rsp),%xmm3
paddd 160(%rsp),%xmm2
movd %xmm13,%rdx
movd %xmm9,%rcx
movd %xmm3,%r8
movd %xmm2,%r9
pshufd $0x39,%xmm13,%xmm13
pshufd $0x39,%xmm9,%xmm9
pshufd $0x39,%xmm3,%xmm3
pshufd $0x39,%xmm2,%xmm2
xorl 48(%rsi),%edx
xorl 52(%rsi),%ecx
xorl 56(%rsi),%r8d
xorl 60(%rsi),%r9d
movl %edx,48(%rdi)
movl %ecx,52(%rdi)
movl %r8d,56(%rdi)
movl %r9d,60(%rdi)
movd %xmm13,%rdx
movd %xmm9,%rcx
movd %xmm3,%r8
movd %xmm2,%r9
pshufd $0x39,%xmm13,%xmm13
pshufd $0x39,%xmm9,%xmm9
pshufd $0x39,%xmm3,%xmm3
pshufd $0x39,%xmm2,%xmm2
xorl 112(%rsi),%edx
xorl 116(%rsi),%ecx
xorl 120(%rsi),%r8d
xorl 124(%rsi),%r9d
movl %edx,112(%rdi)
movl %ecx,116(%rdi)
movl %r8d,120(%rdi)
movl %r9d,124(%rdi)
movd %xmm13,%rdx
movd %xmm9,%rcx
movd %xmm3,%r8
movd %xmm2,%r9
pshufd $0x39,%xmm13,%xmm13
pshufd $0x39,%xmm9,%xmm9
pshufd $0x39,%xmm3,%xmm3
pshufd $0x39,%xmm2,%xmm2
xorl 176(%rsi),%edx
xorl 180(%rsi),%ecx
xorl 184(%rsi),%r8d
xorl 188(%rsi),%r9d
movl %edx,176(%rdi)
movl %ecx,180(%rdi)
movl %r8d,184(%rdi)
movl %r9d,188(%rdi)
movd %xmm13,%rdx
movd %xmm9,%rcx
movd %xmm3,%r8
movd %xmm2,%r9
xorl 240(%rsi),%edx
xorl 244(%rsi),%ecx
xorl 248(%rsi),%r8d
xorl 252(%rsi),%r9d
movl %edx,240(%rdi)
movl %ecx,244(%rdi)
movl %r8d,248(%rdi)
movl %r9d,252(%rdi)
movq 480(%rsp),%r9
sub $256,%r9
add $256,%rsi
add $256,%rdi
cmp $256,%r9
jae ._bytesatleast256
cmp $0,%r9
jbe ._done
._bytesbetween1and255:
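/* Tail: fewer than 256 bytes left.  If fewer than 64 remain, the input is
   first copied to the scratch area at 0(%rsp) and a full block is produced
   there; the real destination pointer is saved in %rdx and the short result
   is copied back just before ._done. */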
cmp $64,%r9
jae ._nocopy
mov %rdi,%rdx
leaq 0(%rsp),%rdi
mov %r9,%rcx
rep movsb
leaq 0(%rsp),%rdi
leaq 0(%rsp),%rsi
._nocopy:
movq %r9,480(%rsp)
movdqa 112(%rsp),%xmm0
movdqa 64(%rsp),%xmm1
movdqa 80(%rsp),%xmm2
movdqa 96(%rsp),%xmm3
movdqa %xmm1,%xmm4
mov $20,%rcx
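/* Single-block path: 20 Salsa20 rounds, four per pass of ._mainloop2.  The
   state sits in xmm0-xmm3 in the shuffled (diagonal) layout written to
   64..112(%rsp) at ._start, and the pshufd $0x93/$0x4e/$0x39 shuffles
   realign the rows between the add-rotate-xor steps. */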
._mainloop2:
paddd %xmm0,%xmm4
movdqa %xmm0,%xmm5
movdqa %xmm4,%xmm6
pslld $7,%xmm4
psrld $25,%xmm6
pxor %xmm4,%xmm3
pxor %xmm6,%xmm3
paddd %xmm3,%xmm5
movdqa %xmm3,%xmm4
movdqa %xmm5,%xmm6
pslld $9,%xmm5
psrld $23,%xmm6
pxor %xmm5,%xmm2
pshufd $0x93,%xmm3,%xmm3
pxor %xmm6,%xmm2
paddd %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm4,%xmm6
pslld $13,%xmm4
psrld $19,%xmm6
pxor %xmm4,%xmm1
pshufd $0x4e,%xmm2,%xmm2
pxor %xmm6,%xmm1
paddd %xmm1,%xmm5
movdqa %xmm3,%xmm4
movdqa %xmm5,%xmm6
pslld $18,%xmm5
psrld $14,%xmm6
pxor %xmm5,%xmm0
pshufd $0x39,%xmm1,%xmm1
pxor %xmm6,%xmm0
paddd %xmm0,%xmm4
movdqa %xmm0,%xmm5
movdqa %xmm4,%xmm6
pslld $7,%xmm4
psrld $25,%xmm6
pxor %xmm4,%xmm1
pxor %xmm6,%xmm1
paddd %xmm1,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm5,%xmm6
pslld $9,%xmm5
psrld $23,%xmm6
pxor %xmm5,%xmm2
pshufd $0x93,%xmm1,%xmm1
pxor %xmm6,%xmm2
paddd %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm4,%xmm6
pslld $13,%xmm4
psrld $19,%xmm6
pxor %xmm4,%xmm3
pshufd $0x4e,%xmm2,%xmm2
pxor %xmm6,%xmm3
paddd %xmm3,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm5,%xmm6
pslld $18,%xmm5
psrld $14,%xmm6
pxor %xmm5,%xmm0
pshufd $0x39,%xmm3,%xmm3
pxor %xmm6,%xmm0
paddd %xmm0,%xmm4
movdqa %xmm0,%xmm5
movdqa %xmm4,%xmm6
pslld $7,%xmm4
psrld $25,%xmm6
pxor %xmm4,%xmm3
pxor %xmm6,%xmm3
paddd %xmm3,%xmm5
movdqa %xmm3,%xmm4
movdqa %xmm5,%xmm6
pslld $9,%xmm5
psrld $23,%xmm6
pxor %xmm5,%xmm2
pshufd $0x93,%xmm3,%xmm3
pxor %xmm6,%xmm2
paddd %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm4,%xmm6
pslld $13,%xmm4
psrld $19,%xmm6
pxor %xmm4,%xmm1
pshufd $0x4e,%xmm2,%xmm2
pxor %xmm6,%xmm1
paddd %xmm1,%xmm5
movdqa %xmm3,%xmm4
movdqa %xmm5,%xmm6
pslld $18,%xmm5
psrld $14,%xmm6
pxor %xmm5,%xmm0
pshufd $0x39,%xmm1,%xmm1
pxor %xmm6,%xmm0
paddd %xmm0,%xmm4
movdqa %xmm0,%xmm5
movdqa %xmm4,%xmm6
pslld $7,%xmm4
psrld $25,%xmm6
pxor %xmm4,%xmm1
pxor %xmm6,%xmm1
paddd %xmm1,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm5,%xmm6
pslld $9,%xmm5
psrld $23,%xmm6
pxor %xmm5,%xmm2
pshufd $0x93,%xmm1,%xmm1
pxor %xmm6,%xmm2
paddd %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm4,%xmm6
pslld $13,%xmm4
psrld $19,%xmm6
pxor %xmm4,%xmm3
pshufd $0x4e,%xmm2,%xmm2
pxor %xmm6,%xmm3
sub $4,%rcx
paddd %xmm3,%xmm5
movdqa %xmm1,%xmm4
movdqa %xmm5,%xmm6
pslld $18,%xmm5
pxor %xmm7,%xmm7
psrld $14,%xmm6
pxor %xmm5,%xmm0
pshufd $0x39,%xmm3,%xmm3
pxor %xmm6,%xmm0
ja ._mainloop2
paddd 112(%rsp),%xmm0
paddd 64(%rsp),%xmm1
paddd 80(%rsp),%xmm2
paddd 96(%rsp),%xmm3
movd %xmm0,%rcx
movd %xmm1,%r8
movd %xmm2,%r9
movd %xmm3,%rax
pshufd $0x39,%xmm0,%xmm0
pshufd $0x39,%xmm1,%xmm1
pshufd $0x39,%xmm2,%xmm2
pshufd $0x39,%xmm3,%xmm3
xorl 0(%rsi),%ecx
xorl 48(%rsi),%r8d
xorl 32(%rsi),%r9d
xorl 16(%rsi),%eax
movl %ecx,0(%rdi)
movl %r8d,48(%rdi)
movl %r9d,32(%rdi)
movl %eax,16(%rdi)
movd %xmm0,%rcx
movd %xmm1,%r8
movd %xmm2,%r9
movd %xmm3,%rax
pshufd $0x39,%xmm0,%xmm0
pshufd $0x39,%xmm1,%xmm1
pshufd $0x39,%xmm2,%xmm2
pshufd $0x39,%xmm3,%xmm3
xorl 20(%rsi),%ecx
xorl 4(%rsi),%r8d
xorl 52(%rsi),%r9d
xorl 36(%rsi),%eax
movl %ecx,20(%rdi)
movl %r8d,4(%rdi)
movl %r9d,52(%rdi)
movl %eax,36(%rdi)
movd %xmm0,%rcx
movd %xmm1,%r8
movd %xmm2,%r9
movd %xmm3,%rax
pshufd $0x39,%xmm0,%xmm0
pshufd $0x39,%xmm1,%xmm1
pshufd $0x39,%xmm2,%xmm2
pshufd $0x39,%xmm3,%xmm3
xorl 40(%rsi),%ecx
xorl 24(%rsi),%r8d
xorl 8(%rsi),%r9d
xorl 56(%rsi),%eax
movl %ecx,40(%rdi)
movl %r8d,24(%rdi)
movl %r9d,8(%rdi)
movl %eax,56(%rdi)
movd %xmm0,%rcx
movd %xmm1,%r8
movd %xmm2,%r9
movd %xmm3,%rax
xorl 60(%rsi),%ecx
xorl 44(%rsi),%r8d
xorl 28(%rsi),%r9d
xorl 12(%rsi),%eax
movl %ecx,60(%rdi)
movl %r8d,44(%rdi)
movl %r9d,28(%rdi)
movl %eax,12(%rdi)
movq 480(%rsp),%r9
movq 472(%rsp),%rcx
add $1,%rcx
mov %rcx,%r8
shr $32,%r8
movl %ecx,80(%rsp)
movl %r8d,4+96(%rsp)
movq %rcx,472(%rsp)
cmp $64,%r9
ja ._bytesatleast65
jae ._bytesatleast64
mov %rdi,%rsi
mov %rdx,%rdi
mov %r9,%rcx
rep movsb
._bytesatleast64:
._done:
movq 416(%rsp),%r11
movq 424(%rsp),%r12
movq 432(%rsp),%r13
movq 440(%rsp),%r14
movq 448(%rsp),%r15
movq 456(%rsp),%rbx
movq 464(%rsp),%rbp
add %r11,%rsp
xor %rax,%rax
mov %rsi,%rdx
ret
._bytesatleast65:
sub $64,%r9
add $64,%rdi
add $64,%rsi
jmp ._bytesbetween1and255
#endif
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
liruqi/Mume-iOS | 2,671 | Library/ShadowPath/ShadowPath/shadowsocks-libev/libsodium/src/libsodium/crypto_scalarmult/curve25519/sandy2x/fe51_nsquare.S | #ifdef IN_SANDY2X
/*
This file is adapted from amd64-51/fe25519_square.s,
adding a loop to perform n squares.
*/
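/*
   fe51_nsquare(r, x, n) (argument names illustrative; see fe51.h in sandy2x)
   squares a GF(2^255-19) element n times.  The element is held in five
   radix-2^51 limbs; each squaring folds the terms at 2^255 and above back
   in via *19, and the carry chain below keeps every limb under 2^51 using
   REDMASK51 = 2^51 - 1.
*/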
#include "fe51_namespace.h"
#include "consts_namespace.h"
.p2align 5
.globl fe51_nsquare
.globl _fe51_nsquare
#ifdef __ELF__
.type fe51_nsquare, @function
.type _fe51_nsquare, @function
#endif
fe51_nsquare:
_fe51_nsquare:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
movq %r11,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rbx,40(%rsp)
movq %rbp,48(%rsp)
movq 0(%rsi),%rcx
movq 8(%rsi),%r8
movq 16(%rsi),%r9
movq 24(%rsi),%rax
movq 32(%rsi),%rsi
movq %r9,16(%rdi)
movq %rax,24(%rdi)
movq %rsi,32(%rdi)
mov %rdx,%rsi
._loop:
sub $1,%rsi
mov %rcx,%rax
mul %rcx
add %rcx,%rcx
mov %rax,%r9
mov %rdx,%r10
mov %rcx,%rax
mul %r8
mov %rax,%r11
mov %rdx,%r12
mov %rcx,%rax
mulq 16(%rdi)
mov %rax,%r13
mov %rdx,%r14
mov %rcx,%rax
mulq 24(%rdi)
mov %rax,%r15
mov %rdx,%rbx
mov %rcx,%rax
mulq 32(%rdi)
mov %rax,%rcx
mov %rdx,%rbp
mov %r8,%rax
mul %r8
add %r8,%r8
add %rax,%r13
adc %rdx,%r14
mov %r8,%rax
mulq 16(%rdi)
add %rax,%r15
adc %rdx,%rbx
mov %r8,%rax
imulq $19, %r8,%r8
mulq 24(%rdi)
add %rax,%rcx
adc %rdx,%rbp
mov %r8,%rax
mulq 32(%rdi)
add %rax,%r9
adc %rdx,%r10
movq 16(%rdi),%rax
mulq 16(%rdi)
add %rax,%rcx
adc %rdx,%rbp
shld $13,%rcx,%rbp
movq 16(%rdi),%rax
imulq $38, %rax,%rax
mulq 24(%rdi)
add %rax,%r9
adc %rdx,%r10
shld $13,%r9,%r10
movq 16(%rdi),%rax
imulq $38, %rax,%rax
mulq 32(%rdi)
add %rax,%r11
adc %rdx,%r12
movq 24(%rdi),%rax
imulq $19, %rax,%rax
mulq 24(%rdi)
add %rax,%r11
adc %rdx,%r12
shld $13,%r11,%r12
movq 24(%rdi),%rax
imulq $38, %rax,%rax
mulq 32(%rdi)
add %rax,%r13
adc %rdx,%r14
shld $13,%r13,%r14
movq 32(%rdi),%rax
imulq $19, %rax,%rax
mulq 32(%rdi)
add %rax,%r15
adc %rdx,%rbx
shld $13,%r15,%rbx
movq REDMASK51(%rip),%rdx
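/* %rdx = REDMASK51 = 2^51 - 1: mask and propagate carries so each limb
   drops back below 2^51; the carry out of the top limb is multiplied by 19
   before being folded into limb 0. */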
and %rdx,%rcx
add %rbx,%rcx
and %rdx,%r9
and %rdx,%r11
add %r10,%r11
and %rdx,%r13
add %r12,%r13
and %rdx,%r15
add %r14,%r15
imulq $19, %rbp,%rbp
lea (%r9,%rbp),%r9
mov %r9,%rax
shr $51,%r9
add %r11,%r9
and %rdx,%rax
mov %r9,%r8
shr $51,%r9
add %r13,%r9
and %rdx,%r8
mov %r9,%r10
shr $51,%r9
add %r15,%r9
and %rdx,%r10
movq %r10,16(%rdi)
mov %r9,%r10
shr $51,%r9
add %rcx,%r9
and %rdx,%r10
movq %r10,24(%rdi)
mov %r9,%r10
shr $51,%r9
imulq $19, %r9,%r9
lea (%rax,%r9),%rcx
and %rdx,%r10
movq %r10,32(%rdi)
cmp $0,%rsi
jne ._loop
movq %rcx,0(%rdi)
movq %r8,8(%rdi)
movq 0(%rsp),%r11
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rbx
movq 48(%rsp),%rbp
add %r11,%rsp
ret
#endif
|
liruqi/Mume-iOS | 3,040 | Library/ShadowPath/ShadowPath/shadowsocks-libev/libsodium/src/libsodium/crypto_scalarmult/curve25519/sandy2x/fe51_mul.S | #ifdef IN_SANDY2X
/*
This file is basically amd64-51/fe25519_mul.s.
*/
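/*
   fe51_mul(r, x, y) (argument names illustrative): product of two
   GF(2^255-19) elements in radix-2^51.  The imulq $19 terms pre-scale the
   high limbs of x so partial products landing at 2^255 and above fold
   straight back into the low limbs (2^255 == 19 mod p); the shld $13 /
   REDMASK51 sequence at the end is the carry chain.
*/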
#include "fe51_namespace.h"
#include "consts_namespace.h"
.text
.p2align 5
.globl _fe51_mul
.globl fe51_mul
_fe51_mul:
fe51_mul:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp
movq %r11,0(%rsp)
movq %r12,8(%rsp)
movq %r13,16(%rsp)
movq %r14,24(%rsp)
movq %r15,32(%rsp)
movq %rbx,40(%rsp)
movq %rbp,48(%rsp)
movq %rdi,56(%rsp)
mov %rdx,%rcx
movq 24(%rsi),%rdx
imulq $19,%rdx,%rax
movq %rax,64(%rsp)
mulq 16(%rcx)
mov %rax,%r8
mov %rdx,%r9
movq 32(%rsi),%rdx
imulq $19,%rdx,%rax
movq %rax,72(%rsp)
mulq 8(%rcx)
add %rax,%r8
adc %rdx,%r9
movq 0(%rsi),%rax
mulq 0(%rcx)
add %rax,%r8
adc %rdx,%r9
movq 0(%rsi),%rax
mulq 8(%rcx)
mov %rax,%r10
mov %rdx,%r11
movq 0(%rsi),%rax
mulq 16(%rcx)
mov %rax,%r12
mov %rdx,%r13
movq 0(%rsi),%rax
mulq 24(%rcx)
mov %rax,%r14
mov %rdx,%r15
movq 0(%rsi),%rax
mulq 32(%rcx)
mov %rax,%rbx
mov %rdx,%rbp
movq 8(%rsi),%rax
mulq 0(%rcx)
add %rax,%r10
adc %rdx,%r11
movq 8(%rsi),%rax
mulq 8(%rcx)
add %rax,%r12
adc %rdx,%r13
movq 8(%rsi),%rax
mulq 16(%rcx)
add %rax,%r14
adc %rdx,%r15
movq 8(%rsi),%rax
mulq 24(%rcx)
add %rax,%rbx
adc %rdx,%rbp
movq 8(%rsi),%rdx
imulq $19,%rdx,%rax
mulq 32(%rcx)
add %rax,%r8
adc %rdx,%r9
movq 16(%rsi),%rax
mulq 0(%rcx)
add %rax,%r12
adc %rdx,%r13
movq 16(%rsi),%rax
mulq 8(%rcx)
add %rax,%r14
adc %rdx,%r15
movq 16(%rsi),%rax
mulq 16(%rcx)
add %rax,%rbx
adc %rdx,%rbp
movq 16(%rsi),%rdx
imulq $19,%rdx,%rax
mulq 24(%rcx)
add %rax,%r8
adc %rdx,%r9
movq 16(%rsi),%rdx
imulq $19,%rdx,%rax
mulq 32(%rcx)
add %rax,%r10
adc %rdx,%r11
movq 24(%rsi),%rax
mulq 0(%rcx)
add %rax,%r14
adc %rdx,%r15
movq 24(%rsi),%rax
mulq 8(%rcx)
add %rax,%rbx
adc %rdx,%rbp
movq 64(%rsp),%rax
mulq 24(%rcx)
add %rax,%r10
adc %rdx,%r11
movq 64(%rsp),%rax
mulq 32(%rcx)
add %rax,%r12
adc %rdx,%r13
movq 32(%rsi),%rax
mulq 0(%rcx)
add %rax,%rbx
adc %rdx,%rbp
movq 72(%rsp),%rax
mulq 16(%rcx)
add %rax,%r10
adc %rdx,%r11
movq 72(%rsp),%rax
mulq 24(%rcx)
add %rax,%r12
adc %rdx,%r13
movq 72(%rsp),%rax
mulq 32(%rcx)
add %rax,%r14
adc %rdx,%r15
movq REDMASK51(%rip),%rsi
shld $13,%r8,%r9
and %rsi,%r8
shld $13,%r10,%r11
and %rsi,%r10
add %r9,%r10
shld $13,%r12,%r13
and %rsi,%r12
add %r11,%r12
shld $13,%r14,%r15
and %rsi,%r14
add %r13,%r14
shld $13,%rbx,%rbp
and %rsi,%rbx
add %r15,%rbx
imulq $19,%rbp,%rdx
add %rdx,%r8
mov %r8,%rdx
shr $51,%rdx
add %r10,%rdx
mov %rdx,%rcx
shr $51,%rdx
and %rsi,%r8
add %r12,%rdx
mov %rdx,%r9
shr $51,%rdx
and %rsi,%rcx
add %r14,%rdx
mov %rdx,%rax
shr $51,%rdx
and %rsi,%r9
add %rbx,%rdx
mov %rdx,%r10
shr $51,%rdx
and %rsi,%rax
imulq $19,%rdx,%rdx
add %rdx,%r8
and %rsi,%r10
movq %r8,0(%rdi)
movq %rcx,8(%rdi)
movq %r9,16(%rdi)
movq %rax,24(%rdi)
movq %r10,32(%rdi)
movq 0(%rsp),%r11
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rbx
movq 48(%rsp),%rbp
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
#endif
|
liruqi/Mume-iOS | 33,827 | Library/ShadowPath/ShadowPath/shadowsocks-libev/libsodium/src/libsodium/crypto_scalarmult/curve25519/sandy2x/ladder.S | #ifdef IN_SANDY2X
#include "ladder_namespace.h"
#include "consts_namespace.h"
.p2align 5
.globl ladder
.globl _ladder
#ifdef __ELF__
.type ladder, @function
.type _ladder, @function
#endif
ladder:
_ladder:
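/* X25519 Montgomery ladder (roughly ladder(work, scalar); see ladder.h for
   the exact declaration).  255 iterations of conditional swap followed by a
   combined double-and-differential-add, with two field elements packed per
   XMM register in alternating 26/25-bit limbs; hence the m26/m25 carry
   masks, the *19 folds, and the *121666 multiply from the Montgomery
   doubling formula. */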
mov %rsp,%r11
and $31,%r11
add $1856,%r11
sub %r11,%rsp
movq %r11,1824(%rsp)
movq %r12,1832(%rsp)
movq %r13,1840(%rsp)
movq %r14,1848(%rsp)
movdqa v0_0(%rip),%xmm0
movdqa v1_0(%rip),%xmm1
movdqu 0(%rdi),%xmm2
movdqa %xmm2,0(%rsp)
movdqu 16(%rdi),%xmm2
movdqa %xmm2,16(%rsp)
movdqu 32(%rdi),%xmm2
movdqa %xmm2,32(%rsp)
movdqu 48(%rdi),%xmm2
movdqa %xmm2,48(%rsp)
movdqu 64(%rdi),%xmm2
movdqa %xmm2,64(%rsp)
movdqa %xmm1,80(%rsp)
movdqa %xmm0,96(%rsp)
movdqa %xmm0,112(%rsp)
movdqa %xmm0,128(%rsp)
movdqa %xmm0,144(%rsp)
movdqa %xmm1,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
pxor %xmm8,%xmm8
pxor %xmm9,%xmm9
movdqu 0(%rdi),%xmm10
movdqa %xmm10,160(%rsp)
movdqu 16(%rdi),%xmm10
movdqa %xmm10,176(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,192(%rsp)
movdqu 32(%rdi),%xmm10
movdqa %xmm10,208(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,224(%rsp)
movdqu 48(%rdi),%xmm10
movdqa %xmm10,240(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,256(%rsp)
movdqu 64(%rdi),%xmm10
movdqa %xmm10,272(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,288(%rsp)
movdqu 8(%rdi),%xmm10
pmuludq v2_1(%rip),%xmm10
movdqa %xmm10,304(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,320(%rsp)
movdqu 24(%rdi),%xmm10
pmuludq v2_1(%rip),%xmm10
movdqa %xmm10,336(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,352(%rsp)
movdqu 40(%rdi),%xmm10
pmuludq v2_1(%rip),%xmm10
movdqa %xmm10,368(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,384(%rsp)
movdqu 56(%rdi),%xmm10
pmuludq v2_1(%rip),%xmm10
movdqa %xmm10,400(%rsp)
pmuludq v19_19(%rip),%xmm10
movdqa %xmm10,416(%rsp)
movdqu 0(%rdi),%xmm10
movdqu 64(%rdi),%xmm11
blendps $12, %xmm11, %xmm10
pshufd $2,%xmm10,%xmm10
pmuludq v38_1(%rip),%xmm10
movdqa %xmm10,432(%rsp)
movq 0(%rsi),%rdx
movq 8(%rsi),%rcx
movq 16(%rsi),%r8
movq 24(%rsi),%r9
shrd $1,%rcx,%rdx
shrd $1,%r8,%rcx
shrd $1,%r9,%r8
shr $1,%r9
xorq 0(%rsi),%rdx
xorq 8(%rsi),%rcx
xorq 16(%rsi),%r8
xorq 24(%rsi),%r9
leaq 800(%rsp),%rsi
mov $64,%rax
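/* Expand the scalar into 256 four-byte conditional-swap masks (0 or
   all-ones).  The shrd/xor sequence above turned bit i of the scalar into
   "bit i differs from bit i+1", which is the swap decision the ladder
   needs at step i. */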
._ladder_small_loop:
mov %rdx,%r10
mov %rcx,%r11
mov %r8,%r12
mov %r9,%r13
shr $1,%rdx
shr $1,%rcx
shr $1,%r8
shr $1,%r9
and $1,%r10d
and $1,%r11d
and $1,%r12d
and $1,%r13d
neg %r10
neg %r11
neg %r12
neg %r13
movl %r10d,0(%rsi)
movl %r11d,256(%rsi)
movl %r12d,512(%rsi)
movl %r13d,768(%rsi)
add $4,%rsi
sub $1,%rax
jne ._ladder_small_loop
mov $255,%rdx
add $760,%rsi
._ladder_loop:
sub $1,%rdx
vbroadcastss 0(%rsi),%xmm10
sub $4,%rsi
movdqa 0(%rsp),%xmm11
movdqa 80(%rsp),%xmm12
vpxor %xmm11,%xmm0,%xmm13
pand %xmm10,%xmm13
pxor %xmm13,%xmm0
pxor %xmm13,%xmm11
vpxor %xmm12,%xmm1,%xmm13
pand %xmm10,%xmm13
pxor %xmm13,%xmm1
pxor %xmm13,%xmm12
movdqa 16(%rsp),%xmm13
movdqa 96(%rsp),%xmm14
vpxor %xmm13,%xmm2,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm2
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm3,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm3
pxor %xmm15,%xmm14
movdqa %xmm13,0(%rsp)
movdqa %xmm14,16(%rsp)
movdqa 32(%rsp),%xmm13
movdqa 112(%rsp),%xmm14
vpxor %xmm13,%xmm4,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm4
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm5,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm5
pxor %xmm15,%xmm14
movdqa %xmm13,32(%rsp)
movdqa %xmm14,80(%rsp)
movdqa 48(%rsp),%xmm13
movdqa 128(%rsp),%xmm14
vpxor %xmm13,%xmm6,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm6
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm7,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm7
pxor %xmm15,%xmm14
movdqa %xmm13,48(%rsp)
movdqa %xmm14,96(%rsp)
movdqa 64(%rsp),%xmm13
movdqa 144(%rsp),%xmm14
vpxor %xmm13,%xmm8,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm8
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm9,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm9
pxor %xmm15,%xmm14
movdqa %xmm13,64(%rsp)
movdqa %xmm14,112(%rsp)
vpaddq subc0(%rip),%xmm11,%xmm10
psubq %xmm12,%xmm10
paddq %xmm12,%xmm11
vpunpckhqdq %xmm10,%xmm11,%xmm12
vpunpcklqdq %xmm10,%xmm11,%xmm10
vpaddq %xmm1,%xmm0,%xmm11
paddq subc0(%rip),%xmm0
psubq %xmm1,%xmm0
vpunpckhqdq %xmm11,%xmm0,%xmm1
vpunpcklqdq %xmm11,%xmm0,%xmm0
vpmuludq %xmm0,%xmm10,%xmm11
vpmuludq %xmm1,%xmm10,%xmm13
movdqa %xmm1,128(%rsp)
paddq %xmm1,%xmm1
vpmuludq %xmm0,%xmm12,%xmm14
movdqa %xmm0,144(%rsp)
paddq %xmm14,%xmm13
vpmuludq %xmm1,%xmm12,%xmm0
movdqa %xmm1,448(%rsp)
vpaddq %xmm3,%xmm2,%xmm1
paddq subc2(%rip),%xmm2
psubq %xmm3,%xmm2
vpunpckhqdq %xmm1,%xmm2,%xmm3
vpunpcklqdq %xmm1,%xmm2,%xmm1
vpmuludq %xmm1,%xmm10,%xmm2
paddq %xmm2,%xmm0
vpmuludq %xmm3,%xmm10,%xmm2
movdqa %xmm3,464(%rsp)
paddq %xmm3,%xmm3
vpmuludq %xmm1,%xmm12,%xmm14
movdqa %xmm1,480(%rsp)
paddq %xmm14,%xmm2
vpmuludq %xmm3,%xmm12,%xmm1
movdqa %xmm3,496(%rsp)
vpaddq %xmm5,%xmm4,%xmm3
paddq subc2(%rip),%xmm4
psubq %xmm5,%xmm4
vpunpckhqdq %xmm3,%xmm4,%xmm5
vpunpcklqdq %xmm3,%xmm4,%xmm3
vpmuludq %xmm3,%xmm10,%xmm4
paddq %xmm4,%xmm1
vpmuludq %xmm5,%xmm10,%xmm4
movdqa %xmm5,512(%rsp)
paddq %xmm5,%xmm5
vpmuludq %xmm3,%xmm12,%xmm14
movdqa %xmm3,528(%rsp)
paddq %xmm14,%xmm4
vpaddq %xmm7,%xmm6,%xmm3
paddq subc2(%rip),%xmm6
psubq %xmm7,%xmm6
vpunpckhqdq %xmm3,%xmm6,%xmm7
vpunpcklqdq %xmm3,%xmm6,%xmm3
vpmuludq %xmm3,%xmm10,%xmm6
vpmuludq %xmm5,%xmm12,%xmm14
movdqa %xmm5,544(%rsp)
pmuludq v19_19(%rip),%xmm5
movdqa %xmm5,560(%rsp)
paddq %xmm14,%xmm6
vpmuludq %xmm7,%xmm10,%xmm5
movdqa %xmm7,576(%rsp)
paddq %xmm7,%xmm7
vpmuludq %xmm3,%xmm12,%xmm14
movdqa %xmm3,592(%rsp)
paddq %xmm14,%xmm5
pmuludq v19_19(%rip),%xmm3
movdqa %xmm3,608(%rsp)
vpaddq %xmm9,%xmm8,%xmm3
paddq subc2(%rip),%xmm8
psubq %xmm9,%xmm8
vpunpckhqdq %xmm3,%xmm8,%xmm9
vpunpcklqdq %xmm3,%xmm8,%xmm3
movdqa %xmm3,624(%rsp)
vpmuludq %xmm7,%xmm12,%xmm8
movdqa %xmm7,640(%rsp)
pmuludq v19_19(%rip),%xmm7
movdqa %xmm7,656(%rsp)
vpmuludq %xmm3,%xmm10,%xmm7
paddq %xmm7,%xmm8
vpmuludq %xmm9,%xmm10,%xmm7
movdqa %xmm9,672(%rsp)
paddq %xmm9,%xmm9
vpmuludq %xmm3,%xmm12,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
movdqa %xmm3,688(%rsp)
pmuludq v19_19(%rip),%xmm12
vpmuludq %xmm9,%xmm12,%xmm3
movdqa %xmm9,704(%rsp)
paddq %xmm3,%xmm11
movdqa 0(%rsp),%xmm3
movdqa 16(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm2
vpmuludq 480(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
vpmuludq 464(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm4
vpmuludq 528(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
vpmuludq 512(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm5
vpmuludq 592(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 576(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 624(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
pmuludq 672(%rsp),%xmm3
paddq %xmm3,%xmm13
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpmuludq 448(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm1
vpmuludq 480(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
vpmuludq 496(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm6
vpmuludq 528(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
vpmuludq 544(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm8
vpmuludq 592(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 640(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 624(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
pmuludq 704(%rsp),%xmm9
paddq %xmm9,%xmm0
movdqa 32(%rsp),%xmm3
movdqa 80(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm4
vpmuludq 480(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
vpmuludq 464(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm5
vpmuludq 528(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 512(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 592(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
vpmuludq 576(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm13
vpmuludq 624(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
pmuludq 672(%rsp),%xmm3
paddq %xmm3,%xmm2
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
vpmuludq 448(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm6
vpmuludq 480(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
vpmuludq 496(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm8
vpmuludq 528(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 544(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 592(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
vpmuludq 640(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm0
vpmuludq 624(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
pmuludq 704(%rsp),%xmm9
paddq %xmm9,%xmm1
movdqa 48(%rsp),%xmm3
movdqa 96(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm5
vpmuludq 480(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 464(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 528(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
vpmuludq 512(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm13
vpmuludq 592(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
vpmuludq 576(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm2
vpmuludq 624(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
pmuludq 672(%rsp),%xmm3
paddq %xmm3,%xmm4
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
vpmuludq 448(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm8
vpmuludq 480(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 496(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 528(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
vpmuludq 544(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm0
vpmuludq 592(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpmuludq 640(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm1
vpmuludq 624(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
pmuludq 704(%rsp),%xmm9
paddq %xmm9,%xmm6
movdqa 64(%rsp),%xmm3
movdqa 112(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 480(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
vpmuludq 464(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm13
vpmuludq 528(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
vpmuludq 512(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm2
vpmuludq 592(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
vpmuludq 576(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm4
vpmuludq 624(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
pmuludq 672(%rsp),%xmm3
paddq %xmm3,%xmm5
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 448(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 480(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
vpmuludq 496(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm0
vpmuludq 528(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpmuludq 544(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm1
vpmuludq 592(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
vpmuludq 640(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm6
vpmuludq 624(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
pmuludq 704(%rsp),%xmm9
paddq %xmm9,%xmm8
vpsrlq $25,%xmm4,%xmm3
paddq %xmm3,%xmm6
pand m25(%rip),%xmm4
vpsrlq $26,%xmm11,%xmm3
paddq %xmm3,%xmm13
pand m26(%rip),%xmm11
vpsrlq $26,%xmm6,%xmm3
paddq %xmm3,%xmm5
pand m26(%rip),%xmm6
vpsrlq $25,%xmm13,%xmm3
paddq %xmm3,%xmm0
pand m25(%rip),%xmm13
vpsrlq $25,%xmm5,%xmm3
paddq %xmm3,%xmm8
pand m25(%rip),%xmm5
vpsrlq $26,%xmm0,%xmm3
paddq %xmm3,%xmm2
pand m26(%rip),%xmm0
vpsrlq $26,%xmm8,%xmm3
paddq %xmm3,%xmm7
pand m26(%rip),%xmm8
vpsrlq $25,%xmm2,%xmm3
paddq %xmm3,%xmm1
pand m25(%rip),%xmm2
vpsrlq $25,%xmm7,%xmm3
vpsllq $4,%xmm3,%xmm9
paddq %xmm3,%xmm11
psllq $1,%xmm3
paddq %xmm3,%xmm9
paddq %xmm9,%xmm11
pand m25(%rip),%xmm7
vpsrlq $26,%xmm1,%xmm3
paddq %xmm3,%xmm4
pand m26(%rip),%xmm1
vpsrlq $26,%xmm11,%xmm3
paddq %xmm3,%xmm13
pand m26(%rip),%xmm11
vpsrlq $25,%xmm4,%xmm3
paddq %xmm3,%xmm6
pand m25(%rip),%xmm4
vpunpcklqdq %xmm13,%xmm11,%xmm3
vpunpckhqdq %xmm13,%xmm11,%xmm9
vpaddq subc0(%rip),%xmm9,%xmm10
psubq %xmm3,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm3,%xmm10,%xmm9
punpcklqdq %xmm3,%xmm10
vpmuludq %xmm10,%xmm10,%xmm3
paddq %xmm10,%xmm10
vpmuludq %xmm9,%xmm10,%xmm11
vpunpcklqdq %xmm2,%xmm0,%xmm12
vpunpckhqdq %xmm2,%xmm0,%xmm0
vpaddq subc2(%rip),%xmm0,%xmm2
psubq %xmm12,%xmm2
paddq %xmm0,%xmm12
vpunpckhqdq %xmm12,%xmm2,%xmm0
punpcklqdq %xmm12,%xmm2
vpmuludq %xmm2,%xmm10,%xmm12
vpaddq %xmm9,%xmm9,%xmm13
vpmuludq %xmm13,%xmm9,%xmm9
paddq %xmm9,%xmm12
vpmuludq %xmm0,%xmm10,%xmm9
vpmuludq %xmm2,%xmm13,%xmm14
paddq %xmm14,%xmm9
vpunpcklqdq %xmm4,%xmm1,%xmm14
vpunpckhqdq %xmm4,%xmm1,%xmm1
vpaddq subc2(%rip),%xmm1,%xmm4
psubq %xmm14,%xmm4
paddq %xmm1,%xmm14
vpunpckhqdq %xmm14,%xmm4,%xmm1
punpcklqdq %xmm14,%xmm4
movdqa %xmm1,0(%rsp)
paddq %xmm1,%xmm1
movdqa %xmm1,16(%rsp)
pmuludq v19_19(%rip),%xmm1
movdqa %xmm1,32(%rsp)
vpmuludq %xmm4,%xmm10,%xmm1
vpmuludq %xmm2,%xmm2,%xmm14
paddq %xmm14,%xmm1
vpmuludq 0(%rsp),%xmm10,%xmm14
vpmuludq %xmm4,%xmm13,%xmm15
paddq %xmm15,%xmm14
vpunpcklqdq %xmm5,%xmm6,%xmm15
vpunpckhqdq %xmm5,%xmm6,%xmm5
vpaddq subc2(%rip),%xmm5,%xmm6
psubq %xmm15,%xmm6
paddq %xmm5,%xmm15
vpunpckhqdq %xmm15,%xmm6,%xmm5
punpcklqdq %xmm15,%xmm6
movdqa %xmm6,48(%rsp)
pmuludq v19_19(%rip),%xmm6
movdqa %xmm6,64(%rsp)
movdqa %xmm5,80(%rsp)
pmuludq v38_38(%rip),%xmm5
movdqa %xmm5,96(%rsp)
vpmuludq 48(%rsp),%xmm10,%xmm5
vpaddq %xmm0,%xmm0,%xmm6
vpmuludq %xmm6,%xmm0,%xmm0
paddq %xmm0,%xmm5
vpmuludq 80(%rsp),%xmm10,%xmm0
vpmuludq %xmm4,%xmm6,%xmm15
paddq %xmm15,%xmm0
vpmuludq %xmm6,%xmm13,%xmm15
paddq %xmm15,%xmm1
vpmuludq %xmm6,%xmm2,%xmm15
paddq %xmm15,%xmm14
vpunpcklqdq %xmm7,%xmm8,%xmm15
vpunpckhqdq %xmm7,%xmm8,%xmm7
vpaddq subc2(%rip),%xmm7,%xmm8
psubq %xmm15,%xmm8
paddq %xmm7,%xmm15
vpunpckhqdq %xmm15,%xmm8,%xmm7
punpcklqdq %xmm15,%xmm8
movdqa %xmm8,112(%rsp)
pmuludq v19_19(%rip),%xmm8
movdqa %xmm8,448(%rsp)
vpmuludq 112(%rsp),%xmm10,%xmm8
vpmuludq %xmm7,%xmm10,%xmm10
vpmuludq v38_38(%rip),%xmm7,%xmm15
vpmuludq %xmm15,%xmm7,%xmm7
paddq %xmm7,%xmm8
vpmuludq %xmm15,%xmm13,%xmm7
paddq %xmm7,%xmm3
vpmuludq %xmm15,%xmm2,%xmm7
paddq %xmm7,%xmm11
vpmuludq 80(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm7
paddq %xmm7,%xmm8
vpmuludq 16(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm5
vpmuludq 48(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm0
vpmuludq 112(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm10
vpmuludq %xmm15,%xmm6,%xmm7
paddq %xmm7,%xmm12
vpmuludq %xmm15,%xmm4,%xmm7
paddq %xmm7,%xmm9
vpaddq %xmm2,%xmm2,%xmm2
vpmuludq %xmm4,%xmm2,%xmm7
paddq %xmm7,%xmm5
vpmuludq 448(%rsp),%xmm2,%xmm7
paddq %xmm7,%xmm3
vpmuludq 448(%rsp),%xmm6,%xmm7
paddq %xmm7,%xmm11
vpmuludq 0(%rsp),%xmm2,%xmm7
paddq %xmm7,%xmm0
vpmuludq 48(%rsp),%xmm2,%xmm7
paddq %xmm7,%xmm8
vpmuludq 80(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm10
vpmuludq 96(%rsp),%xmm4,%xmm2
paddq %xmm2,%xmm11
vpmuludq %xmm4,%xmm4,%xmm2
paddq %xmm2,%xmm8
vpaddq %xmm4,%xmm4,%xmm2
vpmuludq 448(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm12
vpmuludq 16(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm1
vpmuludq 48(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm14
vpmuludq 96(%rsp),%xmm6,%xmm4
paddq %xmm4,%xmm3
movdqa 16(%rsp),%xmm4
pmuludq 448(%rsp),%xmm4
paddq %xmm4,%xmm9
vpmuludq 16(%rsp),%xmm6,%xmm4
paddq %xmm4,%xmm8
vpmuludq 48(%rsp),%xmm6,%xmm4
paddq %xmm4,%xmm10
vpmuludq 80(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm4
paddq %xmm4,%xmm5
vpmuludq 112(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm0
movdqa 48(%rsp),%xmm4
paddq %xmm4,%xmm4
pmuludq 448(%rsp),%xmm4
paddq %xmm4,%xmm1
movdqa 80(%rsp),%xmm4
paddq %xmm4,%xmm4
pmuludq 448(%rsp),%xmm4
paddq %xmm4,%xmm14
vpmuludq 64(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm3
movdqa 16(%rsp),%xmm4
pmuludq 64(%rsp),%xmm4
paddq %xmm4,%xmm11
movdqa 16(%rsp),%xmm4
pmuludq 96(%rsp),%xmm4
paddq %xmm4,%xmm12
movdqa 48(%rsp),%xmm4
pmuludq 96(%rsp),%xmm4
paddq %xmm4,%xmm9
vpmuludq 0(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm10
movdqa 32(%rsp),%xmm2
pmuludq 0(%rsp),%xmm2
paddq %xmm2,%xmm3
movdqa 64(%rsp),%xmm2
pmuludq 48(%rsp),%xmm2
paddq %xmm2,%xmm12
movdqa 96(%rsp),%xmm2
pmuludq 80(%rsp),%xmm2
paddq %xmm2,%xmm1
movdqa 448(%rsp),%xmm2
pmuludq 112(%rsp),%xmm2
paddq %xmm2,%xmm5
vpsrlq $26,%xmm3,%xmm2
paddq %xmm2,%xmm11
pand m26(%rip),%xmm3
vpsrlq $25,%xmm14,%xmm2
paddq %xmm2,%xmm5
pand m25(%rip),%xmm14
vpsrlq $25,%xmm11,%xmm2
paddq %xmm2,%xmm12
pand m25(%rip),%xmm11
vpsrlq $26,%xmm5,%xmm2
paddq %xmm2,%xmm0
pand m26(%rip),%xmm5
vpsrlq $26,%xmm12,%xmm2
paddq %xmm2,%xmm9
pand m26(%rip),%xmm12
vpsrlq $25,%xmm0,%xmm2
paddq %xmm2,%xmm8
pand m25(%rip),%xmm0
vpsrlq $25,%xmm9,%xmm2
paddq %xmm2,%xmm1
pand m25(%rip),%xmm9
vpsrlq $26,%xmm8,%xmm2
paddq %xmm2,%xmm10
pand m26(%rip),%xmm8
vpsrlq $26,%xmm1,%xmm2
paddq %xmm2,%xmm14
pand m26(%rip),%xmm1
vpsrlq $25,%xmm10,%xmm2
vpsllq $4,%xmm2,%xmm4
paddq %xmm2,%xmm3
psllq $1,%xmm2
paddq %xmm2,%xmm4
paddq %xmm4,%xmm3
pand m25(%rip),%xmm10
vpsrlq $25,%xmm14,%xmm2
paddq %xmm2,%xmm5
pand m25(%rip),%xmm14
vpsrlq $26,%xmm3,%xmm2
paddq %xmm2,%xmm11
pand m26(%rip),%xmm3
vpunpckhqdq %xmm11,%xmm3,%xmm2
movdqa %xmm2,0(%rsp)
pshufd $0,%xmm3,%xmm2
pshufd $0,%xmm11,%xmm3
vpmuludq 160(%rsp),%xmm2,%xmm4
vpmuludq 432(%rsp),%xmm3,%xmm6
paddq %xmm6,%xmm4
vpmuludq 176(%rsp),%xmm2,%xmm6
vpmuludq 304(%rsp),%xmm3,%xmm7
paddq %xmm7,%xmm6
vpmuludq 208(%rsp),%xmm2,%xmm7
vpmuludq 336(%rsp),%xmm3,%xmm11
paddq %xmm11,%xmm7
vpmuludq 240(%rsp),%xmm2,%xmm11
vpmuludq 368(%rsp),%xmm3,%xmm13
paddq %xmm13,%xmm11
vpmuludq 272(%rsp),%xmm2,%xmm2
vpmuludq 400(%rsp),%xmm3,%xmm3
paddq %xmm3,%xmm2
vpunpckhqdq %xmm9,%xmm12,%xmm3
movdqa %xmm3,16(%rsp)
pshufd $0,%xmm12,%xmm3
pshufd $0,%xmm9,%xmm9
vpmuludq 288(%rsp),%xmm3,%xmm12
paddq %xmm12,%xmm4
vpmuludq 416(%rsp),%xmm9,%xmm12
paddq %xmm12,%xmm4
vpmuludq 160(%rsp),%xmm3,%xmm12
paddq %xmm12,%xmm6
vpmuludq 432(%rsp),%xmm9,%xmm12
paddq %xmm12,%xmm6
vpmuludq 176(%rsp),%xmm3,%xmm12
paddq %xmm12,%xmm7
vpmuludq 304(%rsp),%xmm9,%xmm12
paddq %xmm12,%xmm7
vpmuludq 208(%rsp),%xmm3,%xmm12
paddq %xmm12,%xmm11
vpmuludq 336(%rsp),%xmm9,%xmm12
paddq %xmm12,%xmm11
vpmuludq 240(%rsp),%xmm3,%xmm3
paddq %xmm3,%xmm2
vpmuludq 368(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpunpckhqdq %xmm14,%xmm1,%xmm3
movdqa %xmm3,32(%rsp)
pshufd $0,%xmm1,%xmm1
pshufd $0,%xmm14,%xmm3
vpmuludq 256(%rsp),%xmm1,%xmm9
paddq %xmm9,%xmm4
vpmuludq 384(%rsp),%xmm3,%xmm9
paddq %xmm9,%xmm4
vpmuludq 288(%rsp),%xmm1,%xmm9
paddq %xmm9,%xmm6
vpmuludq 416(%rsp),%xmm3,%xmm9
paddq %xmm9,%xmm6
vpmuludq 160(%rsp),%xmm1,%xmm9
paddq %xmm9,%xmm7
vpmuludq 432(%rsp),%xmm3,%xmm9
paddq %xmm9,%xmm7
vpmuludq 176(%rsp),%xmm1,%xmm9
paddq %xmm9,%xmm11
vpmuludq 304(%rsp),%xmm3,%xmm9
paddq %xmm9,%xmm11
vpmuludq 208(%rsp),%xmm1,%xmm1
paddq %xmm1,%xmm2
vpmuludq 336(%rsp),%xmm3,%xmm1
paddq %xmm1,%xmm2
vpunpckhqdq %xmm0,%xmm5,%xmm1
movdqa %xmm1,48(%rsp)
pshufd $0,%xmm5,%xmm1
pshufd $0,%xmm0,%xmm0
vpmuludq 224(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm4
vpmuludq 352(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm4
vpmuludq 256(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm6
vpmuludq 384(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm6
vpmuludq 288(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm7
vpmuludq 416(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm7
vpmuludq 160(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm11
vpmuludq 432(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm11
vpmuludq 176(%rsp),%xmm1,%xmm1
paddq %xmm1,%xmm2
vpmuludq 304(%rsp),%xmm0,%xmm0
paddq %xmm0,%xmm2
vpunpckhqdq %xmm10,%xmm8,%xmm0
movdqa %xmm0,64(%rsp)
pshufd $0,%xmm8,%xmm0
pshufd $0,%xmm10,%xmm1
vpmuludq 192(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm4
vpmuludq 320(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm4
vpmuludq 224(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm6
vpmuludq 352(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm6
vpmuludq 256(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm7
vpmuludq 384(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm7
vpmuludq 288(%rsp),%xmm0,%xmm3
paddq %xmm3,%xmm11
vpmuludq 416(%rsp),%xmm1,%xmm3
paddq %xmm3,%xmm11
vpmuludq 160(%rsp),%xmm0,%xmm0
paddq %xmm0,%xmm2
vpmuludq 432(%rsp),%xmm1,%xmm0
paddq %xmm0,%xmm2
movdqa %xmm4,80(%rsp)
movdqa %xmm6,96(%rsp)
movdqa %xmm7,112(%rsp)
movdqa %xmm11,448(%rsp)
movdqa %xmm2,496(%rsp)
movdqa 144(%rsp),%xmm0
vpmuludq %xmm0,%xmm0,%xmm1
paddq %xmm0,%xmm0
movdqa 128(%rsp),%xmm2
vpmuludq %xmm2,%xmm0,%xmm3
movdqa 480(%rsp),%xmm4
vpmuludq %xmm4,%xmm0,%xmm5
movdqa 464(%rsp),%xmm6
vpmuludq %xmm6,%xmm0,%xmm7
movdqa 528(%rsp),%xmm8
vpmuludq %xmm8,%xmm0,%xmm9
vpmuludq 512(%rsp),%xmm0,%xmm10
vpmuludq 592(%rsp),%xmm0,%xmm11
vpmuludq 576(%rsp),%xmm0,%xmm12
vpmuludq 624(%rsp),%xmm0,%xmm13
movdqa 672(%rsp),%xmm14
vpmuludq %xmm14,%xmm0,%xmm0
vpmuludq v38_38(%rip),%xmm14,%xmm15
vpmuludq %xmm15,%xmm14,%xmm14
paddq %xmm14,%xmm13
vpaddq %xmm6,%xmm6,%xmm14
vpmuludq %xmm14,%xmm6,%xmm6
paddq %xmm6,%xmm11
vpaddq %xmm2,%xmm2,%xmm6
vpmuludq %xmm6,%xmm2,%xmm2
paddq %xmm2,%xmm5
vpmuludq %xmm15,%xmm6,%xmm2
paddq %xmm2,%xmm1
vpmuludq %xmm15,%xmm4,%xmm2
paddq %xmm2,%xmm3
vpmuludq 544(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm11
vpmuludq 592(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm12
vpmuludq 640(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm13
vpmuludq 624(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm0
vpmuludq %xmm4,%xmm6,%xmm2
paddq %xmm2,%xmm7
vpmuludq %xmm14,%xmm6,%xmm2
paddq %xmm2,%xmm9
vpmuludq %xmm8,%xmm6,%xmm2
paddq %xmm2,%xmm10
vpmuludq %xmm15,%xmm14,%xmm2
paddq %xmm2,%xmm5
vpmuludq %xmm15,%xmm8,%xmm2
paddq %xmm2,%xmm7
vpmuludq %xmm4,%xmm4,%xmm2
paddq %xmm2,%xmm9
vpmuludq %xmm14,%xmm4,%xmm2
paddq %xmm2,%xmm10
vpaddq %xmm4,%xmm4,%xmm2
vpmuludq %xmm8,%xmm2,%xmm4
paddq %xmm4,%xmm11
vpmuludq 688(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm1
vpmuludq 688(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm3
vpmuludq 512(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm12
vpmuludq 592(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm13
vpmuludq 576(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm0
vpmuludq 656(%rsp),%xmm8,%xmm2
paddq %xmm2,%xmm3
vpmuludq %xmm8,%xmm14,%xmm2
paddq %xmm2,%xmm12
vpmuludq %xmm8,%xmm8,%xmm2
paddq %xmm2,%xmm13
vpaddq %xmm8,%xmm8,%xmm2
vpmuludq 688(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm5
vpmuludq 544(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm9
vpmuludq 592(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm10
vpmuludq 656(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm1
movdqa 544(%rsp),%xmm4
pmuludq 688(%rsp),%xmm4
paddq %xmm4,%xmm7
vpmuludq 544(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm13
vpmuludq 592(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm0
vpmuludq 640(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm11
vpmuludq 624(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm12
movdqa 592(%rsp),%xmm4
paddq %xmm4,%xmm4
pmuludq 688(%rsp),%xmm4
paddq %xmm4,%xmm9
vpmuludq 608(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm1
movdqa 544(%rsp),%xmm4
pmuludq 608(%rsp),%xmm4
paddq %xmm4,%xmm3
movdqa 544(%rsp),%xmm4
pmuludq 656(%rsp),%xmm4
paddq %xmm4,%xmm5
movdqa 592(%rsp),%xmm4
pmuludq 656(%rsp),%xmm4
paddq %xmm4,%xmm7
movdqa 640(%rsp),%xmm4
pmuludq 688(%rsp),%xmm4
paddq %xmm4,%xmm10
vpmuludq 512(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm0
movdqa 560(%rsp),%xmm2
pmuludq 512(%rsp),%xmm2
paddq %xmm2,%xmm1
movdqa 608(%rsp),%xmm2
pmuludq 592(%rsp),%xmm2
paddq %xmm2,%xmm5
movdqa 656(%rsp),%xmm2
pmuludq 576(%rsp),%xmm2
paddq %xmm2,%xmm9
movdqa 688(%rsp),%xmm2
pmuludq 624(%rsp),%xmm2
paddq %xmm2,%xmm11
vpsrlq $26,%xmm1,%xmm2
paddq %xmm2,%xmm3
pand m26(%rip),%xmm1
vpsrlq $25,%xmm10,%xmm2
paddq %xmm2,%xmm11
pand m25(%rip),%xmm10
vpsrlq $25,%xmm3,%xmm2
paddq %xmm2,%xmm5
pand m25(%rip),%xmm3
vpsrlq $26,%xmm11,%xmm2
paddq %xmm2,%xmm12
pand m26(%rip),%xmm11
vpsrlq $26,%xmm5,%xmm2
paddq %xmm2,%xmm7
pand m26(%rip),%xmm5
vpsrlq $25,%xmm12,%xmm2
paddq %xmm2,%xmm13
pand m25(%rip),%xmm12
vpsrlq $25,%xmm7,%xmm2
paddq %xmm2,%xmm9
pand m25(%rip),%xmm7
vpsrlq $26,%xmm13,%xmm2
paddq %xmm2,%xmm0
pand m26(%rip),%xmm13
vpsrlq $26,%xmm9,%xmm2
paddq %xmm2,%xmm10
pand m26(%rip),%xmm9
vpsrlq $25,%xmm0,%xmm2
vpsllq $4,%xmm2,%xmm4
paddq %xmm2,%xmm1
psllq $1,%xmm2
paddq %xmm2,%xmm4
paddq %xmm4,%xmm1
pand m25(%rip),%xmm0
vpsrlq $25,%xmm10,%xmm2
paddq %xmm2,%xmm11
pand m25(%rip),%xmm10
vpsrlq $26,%xmm1,%xmm2
paddq %xmm2,%xmm3
pand m26(%rip),%xmm1
vpunpckhqdq %xmm3,%xmm1,%xmm2
vpunpcklqdq %xmm3,%xmm1,%xmm1
movdqa %xmm1,464(%rsp)
vpaddq subc0(%rip),%xmm2,%xmm3
psubq %xmm1,%xmm3
vpunpckhqdq %xmm3,%xmm2,%xmm1
vpunpcklqdq %xmm3,%xmm2,%xmm2
movdqa %xmm2,480(%rsp)
movdqa %xmm1,512(%rsp)
psllq $1,%xmm1
movdqa %xmm1,528(%rsp)
pmuludq v121666_121666(%rip),%xmm3
movdqa 80(%rsp),%xmm1
vpunpcklqdq %xmm1,%xmm3,%xmm2
vpunpckhqdq %xmm1,%xmm3,%xmm1
vpunpckhqdq %xmm7,%xmm5,%xmm3
vpunpcklqdq %xmm7,%xmm5,%xmm4
movdqa %xmm4,544(%rsp)
vpaddq subc2(%rip),%xmm3,%xmm5
psubq %xmm4,%xmm5
vpunpckhqdq %xmm5,%xmm3,%xmm4
vpunpcklqdq %xmm5,%xmm3,%xmm3
movdqa %xmm3,560(%rsp)
movdqa %xmm4,576(%rsp)
psllq $1,%xmm4
movdqa %xmm4,592(%rsp)
pmuludq v121666_121666(%rip),%xmm5
movdqa 96(%rsp),%xmm3
vpunpcklqdq %xmm3,%xmm5,%xmm4
vpunpckhqdq %xmm3,%xmm5,%xmm3
vpunpckhqdq %xmm10,%xmm9,%xmm5
vpunpcklqdq %xmm10,%xmm9,%xmm6
movdqa %xmm6,608(%rsp)
vpaddq subc2(%rip),%xmm5,%xmm7
psubq %xmm6,%xmm7
vpunpckhqdq %xmm7,%xmm5,%xmm6
vpunpcklqdq %xmm7,%xmm5,%xmm5
movdqa %xmm5,624(%rsp)
movdqa %xmm6,640(%rsp)
psllq $1,%xmm6
movdqa %xmm6,656(%rsp)
pmuludq v121666_121666(%rip),%xmm7
movdqa 112(%rsp),%xmm5
vpunpcklqdq %xmm5,%xmm7,%xmm6
vpunpckhqdq %xmm5,%xmm7,%xmm5
vpunpckhqdq %xmm12,%xmm11,%xmm7
vpunpcklqdq %xmm12,%xmm11,%xmm8
movdqa %xmm8,672(%rsp)
vpaddq subc2(%rip),%xmm7,%xmm9
psubq %xmm8,%xmm9
vpunpckhqdq %xmm9,%xmm7,%xmm8
vpunpcklqdq %xmm9,%xmm7,%xmm7
movdqa %xmm7,688(%rsp)
movdqa %xmm8,704(%rsp)
psllq $1,%xmm8
movdqa %xmm8,720(%rsp)
pmuludq v121666_121666(%rip),%xmm9
movdqa 448(%rsp),%xmm7
vpunpcklqdq %xmm7,%xmm9,%xmm8
vpunpckhqdq %xmm7,%xmm9,%xmm7
vpunpckhqdq %xmm0,%xmm13,%xmm9
vpunpcklqdq %xmm0,%xmm13,%xmm0
movdqa %xmm0,448(%rsp)
vpaddq subc2(%rip),%xmm9,%xmm10
psubq %xmm0,%xmm10
vpunpckhqdq %xmm10,%xmm9,%xmm0
vpunpcklqdq %xmm10,%xmm9,%xmm9
movdqa %xmm9,736(%rsp)
movdqa %xmm0,752(%rsp)
psllq $1,%xmm0
movdqa %xmm0,768(%rsp)
pmuludq v121666_121666(%rip),%xmm10
movdqa 496(%rsp),%xmm0
vpunpcklqdq %xmm0,%xmm10,%xmm9
vpunpckhqdq %xmm0,%xmm10,%xmm0
vpsrlq $26,%xmm2,%xmm10
paddq %xmm10,%xmm1
pand m26(%rip),%xmm2
vpsrlq $25,%xmm5,%xmm10
paddq %xmm10,%xmm8
pand m25(%rip),%xmm5
vpsrlq $25,%xmm1,%xmm10
paddq %xmm10,%xmm4
pand m25(%rip),%xmm1
vpsrlq $26,%xmm8,%xmm10
paddq %xmm10,%xmm7
pand m26(%rip),%xmm8
vpsrlq $26,%xmm4,%xmm10
paddq %xmm10,%xmm3
pand m26(%rip),%xmm4
vpsrlq $25,%xmm7,%xmm10
paddq %xmm10,%xmm9
pand m25(%rip),%xmm7
vpsrlq $25,%xmm3,%xmm10
paddq %xmm10,%xmm6
pand m25(%rip),%xmm3
vpsrlq $26,%xmm9,%xmm10
paddq %xmm10,%xmm0
pand m26(%rip),%xmm9
vpsrlq $26,%xmm6,%xmm10
paddq %xmm10,%xmm5
pand m26(%rip),%xmm6
vpsrlq $25,%xmm0,%xmm10
vpsllq $4,%xmm10,%xmm11
paddq %xmm10,%xmm2
psllq $1,%xmm10
paddq %xmm10,%xmm11
paddq %xmm11,%xmm2
pand m25(%rip),%xmm0
vpsrlq $25,%xmm5,%xmm10
paddq %xmm10,%xmm8
pand m25(%rip),%xmm5
vpsrlq $26,%xmm2,%xmm10
paddq %xmm10,%xmm1
pand m26(%rip),%xmm2
vpunpckhqdq %xmm1,%xmm2,%xmm10
movdqa %xmm10,80(%rsp)
vpunpcklqdq %xmm1,%xmm2,%xmm1
vpunpckhqdq %xmm3,%xmm4,%xmm2
movdqa %xmm2,96(%rsp)
vpunpcklqdq %xmm3,%xmm4,%xmm2
vpunpckhqdq %xmm5,%xmm6,%xmm3
movdqa %xmm3,112(%rsp)
vpunpcklqdq %xmm5,%xmm6,%xmm3
vpunpckhqdq %xmm7,%xmm8,%xmm4
movdqa %xmm4,128(%rsp)
vpunpcklqdq %xmm7,%xmm8,%xmm4
vpunpckhqdq %xmm0,%xmm9,%xmm5
movdqa %xmm5,144(%rsp)
vpunpcklqdq %xmm0,%xmm9,%xmm0
movdqa 464(%rsp),%xmm5
paddq %xmm5,%xmm1
vpunpcklqdq %xmm1,%xmm5,%xmm6
vpunpckhqdq %xmm1,%xmm5,%xmm1
vpmuludq 512(%rsp),%xmm6,%xmm5
vpmuludq 480(%rsp),%xmm1,%xmm7
paddq %xmm7,%xmm5
vpmuludq 560(%rsp),%xmm6,%xmm7
vpmuludq 528(%rsp),%xmm1,%xmm8
paddq %xmm8,%xmm7
vpmuludq 576(%rsp),%xmm6,%xmm8
vpmuludq 560(%rsp),%xmm1,%xmm9
paddq %xmm9,%xmm8
vpmuludq 624(%rsp),%xmm6,%xmm9
vpmuludq 592(%rsp),%xmm1,%xmm10
paddq %xmm10,%xmm9
vpmuludq 640(%rsp),%xmm6,%xmm10
vpmuludq 624(%rsp),%xmm1,%xmm11
paddq %xmm11,%xmm10
vpmuludq 688(%rsp),%xmm6,%xmm11
vpmuludq 656(%rsp),%xmm1,%xmm12
paddq %xmm12,%xmm11
vpmuludq 704(%rsp),%xmm6,%xmm12
vpmuludq 688(%rsp),%xmm1,%xmm13
paddq %xmm13,%xmm12
vpmuludq 736(%rsp),%xmm6,%xmm13
vpmuludq 720(%rsp),%xmm1,%xmm14
paddq %xmm14,%xmm13
vpmuludq 752(%rsp),%xmm6,%xmm14
vpmuludq 736(%rsp),%xmm1,%xmm15
paddq %xmm15,%xmm14
vpmuludq 480(%rsp),%xmm6,%xmm6
pmuludq v19_19(%rip),%xmm1
vpmuludq 768(%rsp),%xmm1,%xmm1
paddq %xmm1,%xmm6
movdqa 544(%rsp),%xmm1
paddq %xmm1,%xmm2
vpunpcklqdq %xmm2,%xmm1,%xmm15
vpunpckhqdq %xmm2,%xmm1,%xmm1
vpmuludq 480(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm7
vpmuludq 512(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm8
vpmuludq 560(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm9
vpmuludq 576(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm10
vpmuludq 624(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm11
vpmuludq 640(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm12
vpmuludq 688(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm13
vpmuludq 704(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm15
vpmuludq 736(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm6
pmuludq 752(%rsp),%xmm15
paddq %xmm15,%xmm5
vpmuludq 480(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm8
vpmuludq 528(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm9
vpmuludq 560(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm10
vpmuludq 592(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm11
vpmuludq 624(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm12
vpmuludq 656(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm13
vpmuludq 688(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm1
vpmuludq 720(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm6
vpmuludq 736(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm5
pmuludq 768(%rsp),%xmm1
paddq %xmm1,%xmm7
movdqa 608(%rsp),%xmm1
paddq %xmm1,%xmm3
vpunpcklqdq %xmm3,%xmm1,%xmm2
vpunpckhqdq %xmm3,%xmm1,%xmm1
vpmuludq 480(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm9
vpmuludq 512(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm10
vpmuludq 560(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm11
vpmuludq 576(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm12
vpmuludq 624(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm13
vpmuludq 640(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm14
pmuludq v19_19(%rip),%xmm2
vpmuludq 688(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm6
vpmuludq 704(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm5
vpmuludq 736(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm7
pmuludq 752(%rsp),%xmm2
paddq %xmm2,%xmm8
vpmuludq 480(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm10
vpmuludq 528(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm11
vpmuludq 560(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm12
vpmuludq 592(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm13
vpmuludq 624(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm1
vpmuludq 656(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm6
vpmuludq 688(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm5
vpmuludq 720(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm7
vpmuludq 736(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm8
pmuludq 768(%rsp),%xmm1
paddq %xmm1,%xmm9
movdqa 672(%rsp),%xmm1
paddq %xmm1,%xmm4
vpunpcklqdq %xmm4,%xmm1,%xmm2
vpunpckhqdq %xmm4,%xmm1,%xmm1
vpmuludq 480(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm11
vpmuludq 512(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm12
vpmuludq 560(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm13
vpmuludq 576(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm14
pmuludq v19_19(%rip),%xmm2
vpmuludq 624(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm6
vpmuludq 640(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm5
vpmuludq 688(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm7
vpmuludq 704(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm8
vpmuludq 736(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm9
pmuludq 752(%rsp),%xmm2
paddq %xmm2,%xmm10
vpmuludq 480(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm12
vpmuludq 528(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm13
vpmuludq 560(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm1
vpmuludq 592(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm6
vpmuludq 624(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm5
vpmuludq 656(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm7
vpmuludq 688(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm8
vpmuludq 720(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm9
vpmuludq 736(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm10
pmuludq 768(%rsp),%xmm1
paddq %xmm1,%xmm11
movdqa 448(%rsp),%xmm1
paddq %xmm1,%xmm0
vpunpcklqdq %xmm0,%xmm1,%xmm2
vpunpckhqdq %xmm0,%xmm1,%xmm0
vpmuludq 480(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm13
vpmuludq 512(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm14
pmuludq v19_19(%rip),%xmm2
vpmuludq 560(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm6
vpmuludq 576(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm5
vpmuludq 624(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm7
vpmuludq 640(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm8
vpmuludq 688(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm9
vpmuludq 704(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm10
vpmuludq 736(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm11
pmuludq 752(%rsp),%xmm2
paddq %xmm2,%xmm12
vpmuludq 480(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm14
pmuludq v19_19(%rip),%xmm0
vpmuludq 528(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm6
vpmuludq 560(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm5
vpmuludq 592(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm7
vpmuludq 624(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm8
vpmuludq 656(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm9
vpmuludq 688(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm10
vpmuludq 720(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm11
vpmuludq 736(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm12
pmuludq 768(%rsp),%xmm0
paddq %xmm0,%xmm13
vpsrlq $26,%xmm6,%xmm0
paddq %xmm0,%xmm5
pand m26(%rip),%xmm6
vpsrlq $25,%xmm10,%xmm0
paddq %xmm0,%xmm11
pand m25(%rip),%xmm10
vpsrlq $25,%xmm5,%xmm0
paddq %xmm0,%xmm7
pand m25(%rip),%xmm5
vpsrlq $26,%xmm11,%xmm0
paddq %xmm0,%xmm12
pand m26(%rip),%xmm11
vpsrlq $26,%xmm7,%xmm0
paddq %xmm0,%xmm8
pand m26(%rip),%xmm7
vpsrlq $25,%xmm12,%xmm0
paddq %xmm0,%xmm13
pand m25(%rip),%xmm12
vpsrlq $25,%xmm8,%xmm0
paddq %xmm0,%xmm9
pand m25(%rip),%xmm8
vpsrlq $26,%xmm13,%xmm0
paddq %xmm0,%xmm14
pand m26(%rip),%xmm13
vpsrlq $26,%xmm9,%xmm0
paddq %xmm0,%xmm10
pand m26(%rip),%xmm9
vpsrlq $25,%xmm14,%xmm0
vpsllq $4,%xmm0,%xmm1
paddq %xmm0,%xmm6
psllq $1,%xmm0
paddq %xmm0,%xmm1
paddq %xmm1,%xmm6
pand m25(%rip),%xmm14
vpsrlq $25,%xmm10,%xmm0
paddq %xmm0,%xmm11
pand m25(%rip),%xmm10
vpsrlq $26,%xmm6,%xmm0
paddq %xmm0,%xmm5
pand m26(%rip),%xmm6
vpunpckhqdq %xmm5,%xmm6,%xmm1
vpunpcklqdq %xmm5,%xmm6,%xmm0
vpunpckhqdq %xmm8,%xmm7,%xmm3
vpunpcklqdq %xmm8,%xmm7,%xmm2
vpunpckhqdq %xmm10,%xmm9,%xmm5
vpunpcklqdq %xmm10,%xmm9,%xmm4
vpunpckhqdq %xmm12,%xmm11,%xmm7
vpunpcklqdq %xmm12,%xmm11,%xmm6
vpunpckhqdq %xmm14,%xmm13,%xmm9
vpunpcklqdq %xmm14,%xmm13,%xmm8
cmp $0,%rdx
jne ._ladder_loop
movdqu %xmm1,160(%rdi)
movdqu %xmm0,80(%rdi)
movdqu %xmm3,176(%rdi)
movdqu %xmm2,96(%rdi)
movdqu %xmm5,192(%rdi)
movdqu %xmm4,112(%rdi)
movdqu %xmm7,208(%rdi)
movdqu %xmm6,128(%rdi)
movdqu %xmm9,224(%rdi)
movdqu %xmm8,144(%rdi)
movq 1824(%rsp),%r11
movq 1832(%rsp),%r12
movq 1840(%rsp),%r13
movq 1848(%rsp),%r14
add %r11,%rsp
ret
#endif
|
liruqi/Mume-iOS | 30,292 | Library/ShadowPath/ShadowPath/shadowsocks-libev/libsodium/src/libsodium/crypto_scalarmult/curve25519/sandy2x/ladder_base.S | #ifdef IN_SANDY2X
#include "ladder_base_namespace.h"
#include "consts_namespace.h"
.p2align 5
.globl ladder_base
.globl _ladder_base
#ifdef __ELF__
.type ladder_base, @function
.type _ladder_base, @function
#endif
ladder_base:
_ladder_base:
mov %rsp,%r11
and $31,%r11
add $1568,%r11
sub %r11,%rsp
movq %r11,1536(%rsp)
movq %r12,1544(%rsp)
movq %r13,1552(%rsp)
movdqa v0_0(%rip),%xmm0
movdqa v1_0(%rip),%xmm1
movdqa v9_0(%rip),%xmm2
movdqa %xmm2,0(%rsp)
movdqa %xmm0,16(%rsp)
movdqa %xmm0,32(%rsp)
movdqa %xmm0,48(%rsp)
movdqa %xmm0,64(%rsp)
movdqa %xmm1,80(%rsp)
movdqa %xmm0,96(%rsp)
movdqa %xmm0,112(%rsp)
movdqa %xmm0,128(%rsp)
movdqa %xmm0,144(%rsp)
movdqa %xmm1,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
pxor %xmm8,%xmm8
pxor %xmm9,%xmm9
movq 0(%rsi),%rdx
movq 8(%rsi),%rcx
movq 16(%rsi),%r8
movq 24(%rsi),%r9
shrd $1,%rcx,%rdx
shrd $1,%r8,%rcx
shrd $1,%r9,%r8
shr $1,%r9
xorq 0(%rsi),%rdx
xorq 8(%rsi),%rcx
xorq 16(%rsi),%r8
xorq 24(%rsi),%r9
leaq 512(%rsp),%rsi
mov $64,%rax
._ladder_base_small_loop:
mov %rdx,%r10
mov %rcx,%r11
mov %r8,%r12
mov %r9,%r13
shr $1,%rdx
shr $1,%rcx
shr $1,%r8
shr $1,%r9
and $1,%r10d
and $1,%r11d
and $1,%r12d
and $1,%r13d
neg %r10
neg %r11
neg %r12
neg %r13
movl %r10d,0(%rsi)
movl %r11d,256(%rsi)
movl %r12d,512(%rsi)
movl %r13d,768(%rsi)
add $4,%rsi
sub $1,%rax
jne ._ladder_base_small_loop
mov $255,%rdx
add $760,%rsi
._ladder_base_loop:
sub $1,%rdx
vbroadcastss 0(%rsi),%xmm10
sub $4,%rsi
movdqa 0(%rsp),%xmm11
movdqa 80(%rsp),%xmm12
vpxor %xmm11,%xmm0,%xmm13
pand %xmm10,%xmm13
pxor %xmm13,%xmm0
pxor %xmm13,%xmm11
vpxor %xmm12,%xmm1,%xmm13
pand %xmm10,%xmm13
pxor %xmm13,%xmm1
pxor %xmm13,%xmm12
movdqa 16(%rsp),%xmm13
movdqa 96(%rsp),%xmm14
vpxor %xmm13,%xmm2,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm2
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm3,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm3
pxor %xmm15,%xmm14
movdqa %xmm13,0(%rsp)
movdqa %xmm14,16(%rsp)
movdqa 32(%rsp),%xmm13
movdqa 112(%rsp),%xmm14
vpxor %xmm13,%xmm4,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm4
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm5,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm5
pxor %xmm15,%xmm14
movdqa %xmm13,32(%rsp)
movdqa %xmm14,80(%rsp)
movdqa 48(%rsp),%xmm13
movdqa 128(%rsp),%xmm14
vpxor %xmm13,%xmm6,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm6
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm7,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm7
pxor %xmm15,%xmm14
movdqa %xmm13,48(%rsp)
movdqa %xmm14,96(%rsp)
movdqa 64(%rsp),%xmm13
movdqa 144(%rsp),%xmm14
vpxor %xmm13,%xmm8,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm8
pxor %xmm15,%xmm13
vpxor %xmm14,%xmm9,%xmm15
pand %xmm10,%xmm15
pxor %xmm15,%xmm9
pxor %xmm15,%xmm14
movdqa %xmm13,64(%rsp)
movdqa %xmm14,112(%rsp)
vpaddq subc0(%rip),%xmm11,%xmm10
psubq %xmm12,%xmm10
paddq %xmm12,%xmm11
vpunpckhqdq %xmm10,%xmm11,%xmm12
vpunpcklqdq %xmm10,%xmm11,%xmm10
vpaddq %xmm1,%xmm0,%xmm11
paddq subc0(%rip),%xmm0
psubq %xmm1,%xmm0
vpunpckhqdq %xmm11,%xmm0,%xmm1
vpunpcklqdq %xmm11,%xmm0,%xmm0
vpmuludq %xmm0,%xmm10,%xmm11
vpmuludq %xmm1,%xmm10,%xmm13
movdqa %xmm1,128(%rsp)
paddq %xmm1,%xmm1
vpmuludq %xmm0,%xmm12,%xmm14
movdqa %xmm0,144(%rsp)
paddq %xmm14,%xmm13
vpmuludq %xmm1,%xmm12,%xmm0
movdqa %xmm1,160(%rsp)
vpaddq %xmm3,%xmm2,%xmm1
paddq subc2(%rip),%xmm2
psubq %xmm3,%xmm2
vpunpckhqdq %xmm1,%xmm2,%xmm3
vpunpcklqdq %xmm1,%xmm2,%xmm1
vpmuludq %xmm1,%xmm10,%xmm2
paddq %xmm2,%xmm0
vpmuludq %xmm3,%xmm10,%xmm2
movdqa %xmm3,176(%rsp)
paddq %xmm3,%xmm3
vpmuludq %xmm1,%xmm12,%xmm14
movdqa %xmm1,192(%rsp)
paddq %xmm14,%xmm2
vpmuludq %xmm3,%xmm12,%xmm1
movdqa %xmm3,208(%rsp)
vpaddq %xmm5,%xmm4,%xmm3
paddq subc2(%rip),%xmm4
psubq %xmm5,%xmm4
vpunpckhqdq %xmm3,%xmm4,%xmm5
vpunpcklqdq %xmm3,%xmm4,%xmm3
vpmuludq %xmm3,%xmm10,%xmm4
paddq %xmm4,%xmm1
vpmuludq %xmm5,%xmm10,%xmm4
movdqa %xmm5,224(%rsp)
paddq %xmm5,%xmm5
vpmuludq %xmm3,%xmm12,%xmm14
movdqa %xmm3,240(%rsp)
paddq %xmm14,%xmm4
vpaddq %xmm7,%xmm6,%xmm3
paddq subc2(%rip),%xmm6
psubq %xmm7,%xmm6
vpunpckhqdq %xmm3,%xmm6,%xmm7
vpunpcklqdq %xmm3,%xmm6,%xmm3
vpmuludq %xmm3,%xmm10,%xmm6
vpmuludq %xmm5,%xmm12,%xmm14
movdqa %xmm5,256(%rsp)
pmuludq v19_19(%rip),%xmm5
movdqa %xmm5,272(%rsp)
paddq %xmm14,%xmm6
vpmuludq %xmm7,%xmm10,%xmm5
movdqa %xmm7,288(%rsp)
paddq %xmm7,%xmm7
vpmuludq %xmm3,%xmm12,%xmm14
movdqa %xmm3,304(%rsp)
paddq %xmm14,%xmm5
pmuludq v19_19(%rip),%xmm3
movdqa %xmm3,320(%rsp)
vpaddq %xmm9,%xmm8,%xmm3
paddq subc2(%rip),%xmm8
psubq %xmm9,%xmm8
vpunpckhqdq %xmm3,%xmm8,%xmm9
vpunpcklqdq %xmm3,%xmm8,%xmm3
movdqa %xmm3,336(%rsp)
vpmuludq %xmm7,%xmm12,%xmm8
movdqa %xmm7,352(%rsp)
pmuludq v19_19(%rip),%xmm7
movdqa %xmm7,368(%rsp)
vpmuludq %xmm3,%xmm10,%xmm7
paddq %xmm7,%xmm8
vpmuludq %xmm9,%xmm10,%xmm7
movdqa %xmm9,384(%rsp)
paddq %xmm9,%xmm9
vpmuludq %xmm3,%xmm12,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
movdqa %xmm3,400(%rsp)
pmuludq v19_19(%rip),%xmm12
vpmuludq %xmm9,%xmm12,%xmm3
movdqa %xmm9,416(%rsp)
paddq %xmm3,%xmm11
movdqa 0(%rsp),%xmm3
movdqa 16(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm2
vpmuludq 192(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
vpmuludq 176(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm4
vpmuludq 240(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
vpmuludq 224(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm5
vpmuludq 304(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 288(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 336(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
pmuludq 384(%rsp),%xmm3
paddq %xmm3,%xmm13
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpmuludq 160(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm1
vpmuludq 192(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
vpmuludq 208(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm6
vpmuludq 240(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
vpmuludq 256(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm8
vpmuludq 304(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 352(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 336(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
pmuludq 416(%rsp),%xmm9
paddq %xmm9,%xmm0
movdqa 32(%rsp),%xmm3
movdqa 80(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm4
vpmuludq 192(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
vpmuludq 176(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm5
vpmuludq 240(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 224(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 304(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
vpmuludq 288(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm13
vpmuludq 336(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
pmuludq 384(%rsp),%xmm3
paddq %xmm3,%xmm2
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
vpmuludq 160(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm6
vpmuludq 192(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
vpmuludq 208(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm8
vpmuludq 240(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 256(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 304(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
vpmuludq 352(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm0
vpmuludq 336(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
pmuludq 416(%rsp),%xmm9
paddq %xmm9,%xmm1
movdqa 48(%rsp),%xmm3
movdqa 96(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm5
vpmuludq 192(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 176(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 240(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
vpmuludq 224(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm13
vpmuludq 304(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
vpmuludq 288(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm2
vpmuludq 336(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
pmuludq 384(%rsp),%xmm3
paddq %xmm3,%xmm4
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
vpmuludq 160(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm8
vpmuludq 192(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 208(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 240(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
vpmuludq 256(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm0
vpmuludq 304(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpmuludq 352(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm1
vpmuludq 336(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
pmuludq 416(%rsp),%xmm9
paddq %xmm9,%xmm6
movdqa 64(%rsp),%xmm3
movdqa 112(%rsp),%xmm9
vpaddq subc2(%rip),%xmm3,%xmm10
psubq %xmm9,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm10,%xmm3,%xmm9
vpunpcklqdq %xmm10,%xmm3,%xmm3
vpmuludq 144(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm8
vpmuludq 128(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm7
pmuludq v19_19(%rip),%xmm3
vpmuludq 192(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm11
vpmuludq 176(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm13
vpmuludq 240(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm0
vpmuludq 224(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm2
vpmuludq 304(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm1
vpmuludq 288(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm4
vpmuludq 336(%rsp),%xmm3,%xmm10
paddq %xmm10,%xmm6
pmuludq 384(%rsp),%xmm3
paddq %xmm3,%xmm5
vpmuludq 144(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm7
pmuludq v19_19(%rip),%xmm9
vpmuludq 160(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm11
vpmuludq 192(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm13
vpmuludq 208(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm0
vpmuludq 240(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm2
vpmuludq 256(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm1
vpmuludq 304(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm4
vpmuludq 352(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm6
vpmuludq 336(%rsp),%xmm9,%xmm3
paddq %xmm3,%xmm5
pmuludq 416(%rsp),%xmm9
paddq %xmm9,%xmm8
vpsrlq $25,%xmm4,%xmm3
paddq %xmm3,%xmm6
pand m25(%rip),%xmm4
vpsrlq $26,%xmm11,%xmm3
paddq %xmm3,%xmm13
pand m26(%rip),%xmm11
vpsrlq $26,%xmm6,%xmm3
paddq %xmm3,%xmm5
pand m26(%rip),%xmm6
vpsrlq $25,%xmm13,%xmm3
paddq %xmm3,%xmm0
pand m25(%rip),%xmm13
vpsrlq $25,%xmm5,%xmm3
paddq %xmm3,%xmm8
pand m25(%rip),%xmm5
vpsrlq $26,%xmm0,%xmm3
paddq %xmm3,%xmm2
pand m26(%rip),%xmm0
vpsrlq $26,%xmm8,%xmm3
paddq %xmm3,%xmm7
pand m26(%rip),%xmm8
vpsrlq $25,%xmm2,%xmm3
paddq %xmm3,%xmm1
pand m25(%rip),%xmm2
vpsrlq $25,%xmm7,%xmm3
vpsllq $4,%xmm3,%xmm9
paddq %xmm3,%xmm11
psllq $1,%xmm3
paddq %xmm3,%xmm9
paddq %xmm9,%xmm11
pand m25(%rip),%xmm7
vpsrlq $26,%xmm1,%xmm3
paddq %xmm3,%xmm4
pand m26(%rip),%xmm1
vpsrlq $26,%xmm11,%xmm3
paddq %xmm3,%xmm13
pand m26(%rip),%xmm11
vpsrlq $25,%xmm4,%xmm3
paddq %xmm3,%xmm6
pand m25(%rip),%xmm4
vpunpcklqdq %xmm13,%xmm11,%xmm3
vpunpckhqdq %xmm13,%xmm11,%xmm9
vpaddq subc0(%rip),%xmm9,%xmm10
psubq %xmm3,%xmm10
paddq %xmm9,%xmm3
vpunpckhqdq %xmm3,%xmm10,%xmm9
punpcklqdq %xmm3,%xmm10
vpmuludq %xmm10,%xmm10,%xmm3
paddq %xmm10,%xmm10
vpmuludq %xmm9,%xmm10,%xmm11
vpunpcklqdq %xmm2,%xmm0,%xmm12
vpunpckhqdq %xmm2,%xmm0,%xmm0
vpaddq subc2(%rip),%xmm0,%xmm2
psubq %xmm12,%xmm2
paddq %xmm0,%xmm12
vpunpckhqdq %xmm12,%xmm2,%xmm0
punpcklqdq %xmm12,%xmm2
vpmuludq %xmm2,%xmm10,%xmm12
vpaddq %xmm9,%xmm9,%xmm13
vpmuludq %xmm13,%xmm9,%xmm9
paddq %xmm9,%xmm12
vpmuludq %xmm0,%xmm10,%xmm9
vpmuludq %xmm2,%xmm13,%xmm14
paddq %xmm14,%xmm9
vpunpcklqdq %xmm4,%xmm1,%xmm14
vpunpckhqdq %xmm4,%xmm1,%xmm1
vpaddq subc2(%rip),%xmm1,%xmm4
psubq %xmm14,%xmm4
paddq %xmm1,%xmm14
vpunpckhqdq %xmm14,%xmm4,%xmm1
punpcklqdq %xmm14,%xmm4
movdqa %xmm1,0(%rsp)
paddq %xmm1,%xmm1
movdqa %xmm1,16(%rsp)
pmuludq v19_19(%rip),%xmm1
movdqa %xmm1,32(%rsp)
vpmuludq %xmm4,%xmm10,%xmm1
vpmuludq %xmm2,%xmm2,%xmm14
paddq %xmm14,%xmm1
vpmuludq 0(%rsp),%xmm10,%xmm14
vpmuludq %xmm4,%xmm13,%xmm15
paddq %xmm15,%xmm14
vpunpcklqdq %xmm5,%xmm6,%xmm15
vpunpckhqdq %xmm5,%xmm6,%xmm5
vpaddq subc2(%rip),%xmm5,%xmm6
psubq %xmm15,%xmm6
paddq %xmm5,%xmm15
vpunpckhqdq %xmm15,%xmm6,%xmm5
punpcklqdq %xmm15,%xmm6
movdqa %xmm6,48(%rsp)
pmuludq v19_19(%rip),%xmm6
movdqa %xmm6,64(%rsp)
movdqa %xmm5,80(%rsp)
pmuludq v38_38(%rip),%xmm5
movdqa %xmm5,96(%rsp)
vpmuludq 48(%rsp),%xmm10,%xmm5
vpaddq %xmm0,%xmm0,%xmm6
vpmuludq %xmm6,%xmm0,%xmm0
paddq %xmm0,%xmm5
vpmuludq 80(%rsp),%xmm10,%xmm0
vpmuludq %xmm4,%xmm6,%xmm15
paddq %xmm15,%xmm0
vpmuludq %xmm6,%xmm13,%xmm15
paddq %xmm15,%xmm1
vpmuludq %xmm6,%xmm2,%xmm15
paddq %xmm15,%xmm14
vpunpcklqdq %xmm7,%xmm8,%xmm15
vpunpckhqdq %xmm7,%xmm8,%xmm7
vpaddq subc2(%rip),%xmm7,%xmm8
psubq %xmm15,%xmm8
paddq %xmm7,%xmm15
vpunpckhqdq %xmm15,%xmm8,%xmm7
punpcklqdq %xmm15,%xmm8
movdqa %xmm8,112(%rsp)
pmuludq v19_19(%rip),%xmm8
movdqa %xmm8,160(%rsp)
vpmuludq 112(%rsp),%xmm10,%xmm8
vpmuludq %xmm7,%xmm10,%xmm10
vpmuludq v38_38(%rip),%xmm7,%xmm15
vpmuludq %xmm15,%xmm7,%xmm7
paddq %xmm7,%xmm8
vpmuludq %xmm15,%xmm13,%xmm7
paddq %xmm7,%xmm3
vpmuludq %xmm15,%xmm2,%xmm7
paddq %xmm7,%xmm11
vpmuludq 80(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm7
paddq %xmm7,%xmm8
vpmuludq 16(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm5
vpmuludq 48(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm0
vpmuludq 112(%rsp),%xmm13,%xmm7
paddq %xmm7,%xmm10
vpmuludq %xmm15,%xmm6,%xmm7
paddq %xmm7,%xmm12
vpmuludq %xmm15,%xmm4,%xmm7
paddq %xmm7,%xmm9
vpaddq %xmm2,%xmm2,%xmm2
vpmuludq %xmm4,%xmm2,%xmm7
paddq %xmm7,%xmm5
vpmuludq 160(%rsp),%xmm2,%xmm7
paddq %xmm7,%xmm3
vpmuludq 160(%rsp),%xmm6,%xmm7
paddq %xmm7,%xmm11
vpmuludq 0(%rsp),%xmm2,%xmm7
paddq %xmm7,%xmm0
vpmuludq 48(%rsp),%xmm2,%xmm7
paddq %xmm7,%xmm8
vpmuludq 80(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm10
vpmuludq 96(%rsp),%xmm4,%xmm2
paddq %xmm2,%xmm11
vpmuludq %xmm4,%xmm4,%xmm2
paddq %xmm2,%xmm8
vpaddq %xmm4,%xmm4,%xmm2
vpmuludq 160(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm12
vpmuludq 16(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm1
vpmuludq 48(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm14
vpmuludq 96(%rsp),%xmm6,%xmm4
paddq %xmm4,%xmm3
movdqa 16(%rsp),%xmm4
pmuludq 160(%rsp),%xmm4
paddq %xmm4,%xmm9
vpmuludq 16(%rsp),%xmm6,%xmm4
paddq %xmm4,%xmm8
vpmuludq 48(%rsp),%xmm6,%xmm4
paddq %xmm4,%xmm10
vpmuludq 80(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm4
paddq %xmm4,%xmm5
vpmuludq 112(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm0
movdqa 48(%rsp),%xmm4
paddq %xmm4,%xmm4
pmuludq 160(%rsp),%xmm4
paddq %xmm4,%xmm1
movdqa 80(%rsp),%xmm4
paddq %xmm4,%xmm4
pmuludq 160(%rsp),%xmm4
paddq %xmm4,%xmm14
vpmuludq 64(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm3
movdqa 16(%rsp),%xmm4
pmuludq 64(%rsp),%xmm4
paddq %xmm4,%xmm11
movdqa 16(%rsp),%xmm4
pmuludq 96(%rsp),%xmm4
paddq %xmm4,%xmm12
movdqa 48(%rsp),%xmm4
pmuludq 96(%rsp),%xmm4
paddq %xmm4,%xmm9
vpmuludq 0(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm10
movdqa 32(%rsp),%xmm2
pmuludq 0(%rsp),%xmm2
paddq %xmm2,%xmm3
movdqa 64(%rsp),%xmm2
pmuludq 48(%rsp),%xmm2
paddq %xmm2,%xmm12
movdqa 96(%rsp),%xmm2
pmuludq 80(%rsp),%xmm2
paddq %xmm2,%xmm1
movdqa 160(%rsp),%xmm2
pmuludq 112(%rsp),%xmm2
paddq %xmm2,%xmm5
vpsrlq $26,%xmm3,%xmm2
paddq %xmm2,%xmm11
pand m26(%rip),%xmm3
vpsrlq $25,%xmm14,%xmm2
paddq %xmm2,%xmm5
pand m25(%rip),%xmm14
vpsrlq $25,%xmm11,%xmm2
paddq %xmm2,%xmm12
pand m25(%rip),%xmm11
vpsrlq $26,%xmm5,%xmm2
paddq %xmm2,%xmm0
pand m26(%rip),%xmm5
vpsrlq $26,%xmm12,%xmm2
paddq %xmm2,%xmm9
pand m26(%rip),%xmm12
vpsrlq $25,%xmm0,%xmm2
paddq %xmm2,%xmm8
pand m25(%rip),%xmm0
vpsrlq $25,%xmm9,%xmm2
paddq %xmm2,%xmm1
pand m25(%rip),%xmm9
vpsrlq $26,%xmm8,%xmm2
paddq %xmm2,%xmm10
pand m26(%rip),%xmm8
vpsrlq $26,%xmm1,%xmm2
paddq %xmm2,%xmm14
pand m26(%rip),%xmm1
vpsrlq $25,%xmm10,%xmm2
vpsllq $4,%xmm2,%xmm4
paddq %xmm2,%xmm3
psllq $1,%xmm2
paddq %xmm2,%xmm4
paddq %xmm4,%xmm3
pand m25(%rip),%xmm10
vpsrlq $25,%xmm14,%xmm2
paddq %xmm2,%xmm5
pand m25(%rip),%xmm14
vpsrlq $26,%xmm3,%xmm2
paddq %xmm2,%xmm11
pand m26(%rip),%xmm3
vpunpckhqdq %xmm11,%xmm3,%xmm2
movdqa %xmm2,0(%rsp)
vpunpcklqdq %xmm11,%xmm3,%xmm2
pmuludq v9_9(%rip),%xmm2
movdqa %xmm2,80(%rsp)
vpunpckhqdq %xmm9,%xmm12,%xmm2
movdqa %xmm2,16(%rsp)
vpunpcklqdq %xmm9,%xmm12,%xmm2
pmuludq v9_9(%rip),%xmm2
movdqa %xmm2,96(%rsp)
vpunpckhqdq %xmm14,%xmm1,%xmm2
movdqa %xmm2,32(%rsp)
vpunpcklqdq %xmm14,%xmm1,%xmm1
pmuludq v9_9(%rip),%xmm1
movdqa %xmm1,112(%rsp)
vpunpckhqdq %xmm0,%xmm5,%xmm1
movdqa %xmm1,48(%rsp)
vpunpcklqdq %xmm0,%xmm5,%xmm0
pmuludq v9_9(%rip),%xmm0
movdqa %xmm0,160(%rsp)
vpunpckhqdq %xmm10,%xmm8,%xmm0
movdqa %xmm0,64(%rsp)
vpunpcklqdq %xmm10,%xmm8,%xmm0
pmuludq v9_9(%rip),%xmm0
movdqa %xmm0,208(%rsp)
movdqa 144(%rsp),%xmm0
vpmuludq %xmm0,%xmm0,%xmm1
paddq %xmm0,%xmm0
movdqa 128(%rsp),%xmm2
vpmuludq %xmm2,%xmm0,%xmm3
movdqa 192(%rsp),%xmm4
vpmuludq %xmm4,%xmm0,%xmm5
movdqa 176(%rsp),%xmm6
vpmuludq %xmm6,%xmm0,%xmm7
movdqa 240(%rsp),%xmm8
vpmuludq %xmm8,%xmm0,%xmm9
vpmuludq 224(%rsp),%xmm0,%xmm10
vpmuludq 304(%rsp),%xmm0,%xmm11
vpmuludq 288(%rsp),%xmm0,%xmm12
vpmuludq 336(%rsp),%xmm0,%xmm13
movdqa 384(%rsp),%xmm14
vpmuludq %xmm14,%xmm0,%xmm0
vpmuludq v38_38(%rip),%xmm14,%xmm15
vpmuludq %xmm15,%xmm14,%xmm14
paddq %xmm14,%xmm13
vpaddq %xmm6,%xmm6,%xmm14
vpmuludq %xmm14,%xmm6,%xmm6
paddq %xmm6,%xmm11
vpaddq %xmm2,%xmm2,%xmm6
vpmuludq %xmm6,%xmm2,%xmm2
paddq %xmm2,%xmm5
vpmuludq %xmm15,%xmm6,%xmm2
paddq %xmm2,%xmm1
vpmuludq %xmm15,%xmm4,%xmm2
paddq %xmm2,%xmm3
vpmuludq 256(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm11
vpmuludq 304(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm12
vpmuludq 352(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm13
vpmuludq 336(%rsp),%xmm6,%xmm2
paddq %xmm2,%xmm0
vpmuludq %xmm4,%xmm6,%xmm2
paddq %xmm2,%xmm7
vpmuludq %xmm14,%xmm6,%xmm2
paddq %xmm2,%xmm9
vpmuludq %xmm8,%xmm6,%xmm2
paddq %xmm2,%xmm10
vpmuludq %xmm15,%xmm14,%xmm2
paddq %xmm2,%xmm5
vpmuludq %xmm15,%xmm8,%xmm2
paddq %xmm2,%xmm7
vpmuludq %xmm4,%xmm4,%xmm2
paddq %xmm2,%xmm9
vpmuludq %xmm14,%xmm4,%xmm2
paddq %xmm2,%xmm10
vpaddq %xmm4,%xmm4,%xmm2
vpmuludq %xmm8,%xmm2,%xmm4
paddq %xmm4,%xmm11
vpmuludq 400(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm1
vpmuludq 400(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm3
vpmuludq 224(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm12
vpmuludq 304(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm13
vpmuludq 288(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm0
vpmuludq 368(%rsp),%xmm8,%xmm2
paddq %xmm2,%xmm3
vpmuludq %xmm8,%xmm14,%xmm2
paddq %xmm2,%xmm12
vpmuludq %xmm8,%xmm8,%xmm2
paddq %xmm2,%xmm13
vpaddq %xmm8,%xmm8,%xmm2
vpmuludq 400(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm5
vpmuludq 256(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm9
vpmuludq 304(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm10
vpmuludq 368(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm1
movdqa 256(%rsp),%xmm4
pmuludq 400(%rsp),%xmm4
paddq %xmm4,%xmm7
vpmuludq 256(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm13
vpmuludq 304(%rsp),%xmm14,%xmm4
paddq %xmm4,%xmm0
vpmuludq 352(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm11
vpmuludq 336(%rsp),%xmm15,%xmm4
paddq %xmm4,%xmm12
movdqa 304(%rsp),%xmm4
paddq %xmm4,%xmm4
pmuludq 400(%rsp),%xmm4
paddq %xmm4,%xmm9
vpmuludq 320(%rsp),%xmm2,%xmm4
paddq %xmm4,%xmm1
movdqa 256(%rsp),%xmm4
pmuludq 320(%rsp),%xmm4
paddq %xmm4,%xmm3
movdqa 256(%rsp),%xmm4
pmuludq 368(%rsp),%xmm4
paddq %xmm4,%xmm5
movdqa 304(%rsp),%xmm4
pmuludq 368(%rsp),%xmm4
paddq %xmm4,%xmm7
movdqa 352(%rsp),%xmm4
pmuludq 400(%rsp),%xmm4
paddq %xmm4,%xmm10
vpmuludq 224(%rsp),%xmm2,%xmm2
paddq %xmm2,%xmm0
movdqa 272(%rsp),%xmm2
pmuludq 224(%rsp),%xmm2
paddq %xmm2,%xmm1
movdqa 320(%rsp),%xmm2
pmuludq 304(%rsp),%xmm2
paddq %xmm2,%xmm5
movdqa 368(%rsp),%xmm2
pmuludq 288(%rsp),%xmm2
paddq %xmm2,%xmm9
movdqa 400(%rsp),%xmm2
pmuludq 336(%rsp),%xmm2
paddq %xmm2,%xmm11
vpsrlq $26,%xmm1,%xmm2
paddq %xmm2,%xmm3
pand m26(%rip),%xmm1
vpsrlq $25,%xmm10,%xmm2
paddq %xmm2,%xmm11
pand m25(%rip),%xmm10
vpsrlq $25,%xmm3,%xmm2
paddq %xmm2,%xmm5
pand m25(%rip),%xmm3
vpsrlq $26,%xmm11,%xmm2
paddq %xmm2,%xmm12
pand m26(%rip),%xmm11
vpsrlq $26,%xmm5,%xmm2
paddq %xmm2,%xmm7
pand m26(%rip),%xmm5
vpsrlq $25,%xmm12,%xmm2
paddq %xmm2,%xmm13
pand m25(%rip),%xmm12
vpsrlq $25,%xmm7,%xmm2
paddq %xmm2,%xmm9
pand m25(%rip),%xmm7
vpsrlq $26,%xmm13,%xmm2
paddq %xmm2,%xmm0
pand m26(%rip),%xmm13
vpsrlq $26,%xmm9,%xmm2
paddq %xmm2,%xmm10
pand m26(%rip),%xmm9
vpsrlq $25,%xmm0,%xmm2
vpsllq $4,%xmm2,%xmm4
paddq %xmm2,%xmm1
psllq $1,%xmm2
paddq %xmm2,%xmm4
paddq %xmm4,%xmm1
pand m25(%rip),%xmm0
vpsrlq $25,%xmm10,%xmm2
paddq %xmm2,%xmm11
pand m25(%rip),%xmm10
vpsrlq $26,%xmm1,%xmm2
paddq %xmm2,%xmm3
pand m26(%rip),%xmm1
vpunpckhqdq %xmm3,%xmm1,%xmm2
vpunpcklqdq %xmm3,%xmm1,%xmm1
movdqa %xmm1,176(%rsp)
vpaddq subc0(%rip),%xmm2,%xmm3
psubq %xmm1,%xmm3
vpunpckhqdq %xmm3,%xmm2,%xmm1
vpunpcklqdq %xmm3,%xmm2,%xmm2
movdqa %xmm2,192(%rsp)
movdqa %xmm1,224(%rsp)
psllq $1,%xmm1
movdqa %xmm1,240(%rsp)
pmuludq v121666_121666(%rip),%xmm3
movdqa 80(%rsp),%xmm1
vpunpcklqdq %xmm1,%xmm3,%xmm2
vpunpckhqdq %xmm1,%xmm3,%xmm1
vpunpckhqdq %xmm7,%xmm5,%xmm3
vpunpcklqdq %xmm7,%xmm5,%xmm4
movdqa %xmm4,256(%rsp)
vpaddq subc2(%rip),%xmm3,%xmm5
psubq %xmm4,%xmm5
vpunpckhqdq %xmm5,%xmm3,%xmm4
vpunpcklqdq %xmm5,%xmm3,%xmm3
movdqa %xmm3,272(%rsp)
movdqa %xmm4,288(%rsp)
psllq $1,%xmm4
movdqa %xmm4,304(%rsp)
pmuludq v121666_121666(%rip),%xmm5
movdqa 96(%rsp),%xmm3
vpunpcklqdq %xmm3,%xmm5,%xmm4
vpunpckhqdq %xmm3,%xmm5,%xmm3
vpunpckhqdq %xmm10,%xmm9,%xmm5
vpunpcklqdq %xmm10,%xmm9,%xmm6
movdqa %xmm6,320(%rsp)
vpaddq subc2(%rip),%xmm5,%xmm7
psubq %xmm6,%xmm7
vpunpckhqdq %xmm7,%xmm5,%xmm6
vpunpcklqdq %xmm7,%xmm5,%xmm5
movdqa %xmm5,336(%rsp)
movdqa %xmm6,352(%rsp)
psllq $1,%xmm6
movdqa %xmm6,368(%rsp)
pmuludq v121666_121666(%rip),%xmm7
movdqa 112(%rsp),%xmm5
vpunpcklqdq %xmm5,%xmm7,%xmm6
vpunpckhqdq %xmm5,%xmm7,%xmm5
vpunpckhqdq %xmm12,%xmm11,%xmm7
vpunpcklqdq %xmm12,%xmm11,%xmm8
movdqa %xmm8,384(%rsp)
vpaddq subc2(%rip),%xmm7,%xmm9
psubq %xmm8,%xmm9
vpunpckhqdq %xmm9,%xmm7,%xmm8
vpunpcklqdq %xmm9,%xmm7,%xmm7
movdqa %xmm7,400(%rsp)
movdqa %xmm8,416(%rsp)
psllq $1,%xmm8
movdqa %xmm8,432(%rsp)
pmuludq v121666_121666(%rip),%xmm9
movdqa 160(%rsp),%xmm7
vpunpcklqdq %xmm7,%xmm9,%xmm8
vpunpckhqdq %xmm7,%xmm9,%xmm7
vpunpckhqdq %xmm0,%xmm13,%xmm9
vpunpcklqdq %xmm0,%xmm13,%xmm0
movdqa %xmm0,160(%rsp)
vpaddq subc2(%rip),%xmm9,%xmm10
psubq %xmm0,%xmm10
vpunpckhqdq %xmm10,%xmm9,%xmm0
vpunpcklqdq %xmm10,%xmm9,%xmm9
movdqa %xmm9,448(%rsp)
movdqa %xmm0,464(%rsp)
psllq $1,%xmm0
movdqa %xmm0,480(%rsp)
pmuludq v121666_121666(%rip),%xmm10
movdqa 208(%rsp),%xmm0
vpunpcklqdq %xmm0,%xmm10,%xmm9
vpunpckhqdq %xmm0,%xmm10,%xmm0
vpsrlq $26,%xmm2,%xmm10
paddq %xmm10,%xmm1
pand m26(%rip),%xmm2
vpsrlq $25,%xmm5,%xmm10
paddq %xmm10,%xmm8
pand m25(%rip),%xmm5
vpsrlq $25,%xmm1,%xmm10
paddq %xmm10,%xmm4
pand m25(%rip),%xmm1
vpsrlq $26,%xmm8,%xmm10
paddq %xmm10,%xmm7
pand m26(%rip),%xmm8
vpsrlq $26,%xmm4,%xmm10
paddq %xmm10,%xmm3
pand m26(%rip),%xmm4
vpsrlq $25,%xmm7,%xmm10
paddq %xmm10,%xmm9
pand m25(%rip),%xmm7
vpsrlq $25,%xmm3,%xmm10
paddq %xmm10,%xmm6
pand m25(%rip),%xmm3
vpsrlq $26,%xmm9,%xmm10
paddq %xmm10,%xmm0
pand m26(%rip),%xmm9
vpsrlq $26,%xmm6,%xmm10
paddq %xmm10,%xmm5
pand m26(%rip),%xmm6
vpsrlq $25,%xmm0,%xmm10
vpsllq $4,%xmm10,%xmm11
paddq %xmm10,%xmm2
psllq $1,%xmm10
paddq %xmm10,%xmm11
paddq %xmm11,%xmm2
pand m25(%rip),%xmm0
vpsrlq $25,%xmm5,%xmm10
paddq %xmm10,%xmm8
pand m25(%rip),%xmm5
vpsrlq $26,%xmm2,%xmm10
paddq %xmm10,%xmm1
pand m26(%rip),%xmm2
vpunpckhqdq %xmm1,%xmm2,%xmm10
movdqa %xmm10,80(%rsp)
vpunpcklqdq %xmm1,%xmm2,%xmm1
vpunpckhqdq %xmm3,%xmm4,%xmm2
movdqa %xmm2,96(%rsp)
vpunpcklqdq %xmm3,%xmm4,%xmm2
vpunpckhqdq %xmm5,%xmm6,%xmm3
movdqa %xmm3,112(%rsp)
vpunpcklqdq %xmm5,%xmm6,%xmm3
vpunpckhqdq %xmm7,%xmm8,%xmm4
movdqa %xmm4,128(%rsp)
vpunpcklqdq %xmm7,%xmm8,%xmm4
vpunpckhqdq %xmm0,%xmm9,%xmm5
movdqa %xmm5,144(%rsp)
vpunpcklqdq %xmm0,%xmm9,%xmm0
movdqa 176(%rsp),%xmm5
paddq %xmm5,%xmm1
vpunpcklqdq %xmm1,%xmm5,%xmm6
vpunpckhqdq %xmm1,%xmm5,%xmm1
vpmuludq 224(%rsp),%xmm6,%xmm5
vpmuludq 192(%rsp),%xmm1,%xmm7
paddq %xmm7,%xmm5
vpmuludq 272(%rsp),%xmm6,%xmm7
vpmuludq 240(%rsp),%xmm1,%xmm8
paddq %xmm8,%xmm7
vpmuludq 288(%rsp),%xmm6,%xmm8
vpmuludq 272(%rsp),%xmm1,%xmm9
paddq %xmm9,%xmm8
vpmuludq 336(%rsp),%xmm6,%xmm9
vpmuludq 304(%rsp),%xmm1,%xmm10
paddq %xmm10,%xmm9
vpmuludq 352(%rsp),%xmm6,%xmm10
vpmuludq 336(%rsp),%xmm1,%xmm11
paddq %xmm11,%xmm10
vpmuludq 400(%rsp),%xmm6,%xmm11
vpmuludq 368(%rsp),%xmm1,%xmm12
paddq %xmm12,%xmm11
vpmuludq 416(%rsp),%xmm6,%xmm12
vpmuludq 400(%rsp),%xmm1,%xmm13
paddq %xmm13,%xmm12
vpmuludq 448(%rsp),%xmm6,%xmm13
vpmuludq 432(%rsp),%xmm1,%xmm14
paddq %xmm14,%xmm13
vpmuludq 464(%rsp),%xmm6,%xmm14
vpmuludq 448(%rsp),%xmm1,%xmm15
paddq %xmm15,%xmm14
vpmuludq 192(%rsp),%xmm6,%xmm6
pmuludq v19_19(%rip),%xmm1
vpmuludq 480(%rsp),%xmm1,%xmm1
paddq %xmm1,%xmm6
movdqa 256(%rsp),%xmm1
paddq %xmm1,%xmm2
vpunpcklqdq %xmm2,%xmm1,%xmm15
vpunpckhqdq %xmm2,%xmm1,%xmm1
vpmuludq 192(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm7
vpmuludq 224(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm8
vpmuludq 272(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm9
vpmuludq 288(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm10
vpmuludq 336(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm11
vpmuludq 352(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm12
vpmuludq 400(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm13
vpmuludq 416(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm15
vpmuludq 448(%rsp),%xmm15,%xmm2
paddq %xmm2,%xmm6
pmuludq 464(%rsp),%xmm15
paddq %xmm15,%xmm5
vpmuludq 192(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm8
vpmuludq 240(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm9
vpmuludq 272(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm10
vpmuludq 304(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm11
vpmuludq 336(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm12
vpmuludq 368(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm13
vpmuludq 400(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm1
vpmuludq 432(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm6
vpmuludq 448(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm5
pmuludq 480(%rsp),%xmm1
paddq %xmm1,%xmm7
movdqa 320(%rsp),%xmm1
paddq %xmm1,%xmm3
vpunpcklqdq %xmm3,%xmm1,%xmm2
vpunpckhqdq %xmm3,%xmm1,%xmm1
vpmuludq 192(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm9
vpmuludq 224(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm10
vpmuludq 272(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm11
vpmuludq 288(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm12
vpmuludq 336(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm13
vpmuludq 352(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm14
pmuludq v19_19(%rip),%xmm2
vpmuludq 400(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm6
vpmuludq 416(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm5
vpmuludq 448(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm7
pmuludq 464(%rsp),%xmm2
paddq %xmm2,%xmm8
vpmuludq 192(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm10
vpmuludq 240(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm11
vpmuludq 272(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm12
vpmuludq 304(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm13
vpmuludq 336(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm1
vpmuludq 368(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm6
vpmuludq 400(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm5
vpmuludq 432(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm7
vpmuludq 448(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm8
pmuludq 480(%rsp),%xmm1
paddq %xmm1,%xmm9
movdqa 384(%rsp),%xmm1
paddq %xmm1,%xmm4
vpunpcklqdq %xmm4,%xmm1,%xmm2
vpunpckhqdq %xmm4,%xmm1,%xmm1
vpmuludq 192(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm11
vpmuludq 224(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm12
vpmuludq 272(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm13
vpmuludq 288(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm14
pmuludq v19_19(%rip),%xmm2
vpmuludq 336(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm6
vpmuludq 352(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm5
vpmuludq 400(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm7
vpmuludq 416(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm8
vpmuludq 448(%rsp),%xmm2,%xmm3
paddq %xmm3,%xmm9
pmuludq 464(%rsp),%xmm2
paddq %xmm2,%xmm10
vpmuludq 192(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm12
vpmuludq 240(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm13
vpmuludq 272(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm14
pmuludq v19_19(%rip),%xmm1
vpmuludq 304(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm6
vpmuludq 336(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm5
vpmuludq 368(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm7
vpmuludq 400(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm8
vpmuludq 432(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm9
vpmuludq 448(%rsp),%xmm1,%xmm2
paddq %xmm2,%xmm10
pmuludq 480(%rsp),%xmm1
paddq %xmm1,%xmm11
movdqa 160(%rsp),%xmm1
paddq %xmm1,%xmm0
vpunpcklqdq %xmm0,%xmm1,%xmm2
vpunpckhqdq %xmm0,%xmm1,%xmm0
vpmuludq 192(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm13
vpmuludq 224(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm14
pmuludq v19_19(%rip),%xmm2
vpmuludq 272(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm6
vpmuludq 288(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm5
vpmuludq 336(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm7
vpmuludq 352(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm8
vpmuludq 400(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm9
vpmuludq 416(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm10
vpmuludq 448(%rsp),%xmm2,%xmm1
paddq %xmm1,%xmm11
pmuludq 464(%rsp),%xmm2
paddq %xmm2,%xmm12
vpmuludq 192(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm14
pmuludq v19_19(%rip),%xmm0
vpmuludq 240(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm6
vpmuludq 272(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm5
vpmuludq 304(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm7
vpmuludq 336(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm8
vpmuludq 368(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm9
vpmuludq 400(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm10
vpmuludq 432(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm11
vpmuludq 448(%rsp),%xmm0,%xmm1
paddq %xmm1,%xmm12
pmuludq 480(%rsp),%xmm0
paddq %xmm0,%xmm13
vpsrlq $26,%xmm6,%xmm0
paddq %xmm0,%xmm5
pand m26(%rip),%xmm6
vpsrlq $25,%xmm10,%xmm0
paddq %xmm0,%xmm11
pand m25(%rip),%xmm10
vpsrlq $25,%xmm5,%xmm0
paddq %xmm0,%xmm7
pand m25(%rip),%xmm5
vpsrlq $26,%xmm11,%xmm0
paddq %xmm0,%xmm12
pand m26(%rip),%xmm11
vpsrlq $26,%xmm7,%xmm0
paddq %xmm0,%xmm8
pand m26(%rip),%xmm7
vpsrlq $25,%xmm12,%xmm0
paddq %xmm0,%xmm13
pand m25(%rip),%xmm12
vpsrlq $25,%xmm8,%xmm0
paddq %xmm0,%xmm9
pand m25(%rip),%xmm8
vpsrlq $26,%xmm13,%xmm0
paddq %xmm0,%xmm14
pand m26(%rip),%xmm13
vpsrlq $26,%xmm9,%xmm0
paddq %xmm0,%xmm10
pand m26(%rip),%xmm9
vpsrlq $25,%xmm14,%xmm0
vpsllq $4,%xmm0,%xmm1
paddq %xmm0,%xmm6
psllq $1,%xmm0
paddq %xmm0,%xmm1
paddq %xmm1,%xmm6
pand m25(%rip),%xmm14
vpsrlq $25,%xmm10,%xmm0
paddq %xmm0,%xmm11
pand m25(%rip),%xmm10
vpsrlq $26,%xmm6,%xmm0
paddq %xmm0,%xmm5
pand m26(%rip),%xmm6
vpunpckhqdq %xmm5,%xmm6,%xmm1
vpunpcklqdq %xmm5,%xmm6,%xmm0
vpunpckhqdq %xmm8,%xmm7,%xmm3
vpunpcklqdq %xmm8,%xmm7,%xmm2
vpunpckhqdq %xmm10,%xmm9,%xmm5
vpunpcklqdq %xmm10,%xmm9,%xmm4
vpunpckhqdq %xmm12,%xmm11,%xmm7
vpunpcklqdq %xmm12,%xmm11,%xmm6
vpunpckhqdq %xmm14,%xmm13,%xmm9
vpunpcklqdq %xmm14,%xmm13,%xmm8
cmp $0,%rdx
jne ._ladder_base_loop
movdqu %xmm1,80(%rdi)
movdqu %xmm0,0(%rdi)
movdqu %xmm3,96(%rdi)
movdqu %xmm2,16(%rdi)
movdqu %xmm5,112(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm7,128(%rdi)
movdqu %xmm6,48(%rdi)
movdqu %xmm9,144(%rdi)
movdqu %xmm8,64(%rdi)
movq 1536(%rsp),%r11
movq 1544(%rsp),%r12
movq 1552(%rsp),%r13
add %r11,%rsp
ret
#endif
|
liruqi/Mume-iOS | 3,455 | Library/ShadowPath/ShadowPath/shadowsocks-libev/libsodium/src/libsodium/crypto_scalarmult/curve25519/sandy2x/fe51_pack.S | #ifdef IN_SANDY2X
/*
This file is the result of merging
amd64-51/fe25519_pack.c and amd64-51/fe25519_freeze.s.
*/
#include "fe51_namespace.h"
#include "consts_namespace.h"
.p2align 5
.globl fe51_pack
.globl _fe51_pack
#ifdef __ELF__
.type fe51_pack, @function
.type _fe51_pack, @function
#endif
fe51_pack:
_fe51_pack:
mov %rsp,%r11
and $31,%r11
add $32,%r11
sub %r11,%rsp
movq %r11,0(%rsp)
movq %r12,8(%rsp)
movq 0(%rsi),%rdx
movq 8(%rsi),%rcx
movq 16(%rsi),%r8
movq 24(%rsi),%r9
movq 32(%rsi),%rsi
movq REDMASK51(%rip),%rax
lea -18(%rax),%r10
mov $3,%r11
._reduceloop:
mov %rdx,%r12
shr $51,%r12
and %rax,%rdx
add %r12,%rcx
mov %rcx,%r12
shr $51,%r12
and %rax,%rcx
add %r12,%r8
mov %r8,%r12
shr $51,%r12
and %rax,%r8
add %r12,%r9
mov %r9,%r12
shr $51,%r12
and %rax,%r9
add %r12,%rsi
mov %rsi,%r12
shr $51,%r12
and %rax,%rsi
imulq $19, %r12,%r12
add %r12,%rdx
sub $1,%r11
ja ._reduceloop
mov $1,%r12
cmp %r10,%rdx
cmovl %r11,%r12
cmp %rax,%rcx
cmovne %r11,%r12
cmp %rax,%r8
cmovne %r11,%r12
cmp %rax,%r9
cmovne %r11,%r12
cmp %rax,%rsi
cmovne %r11,%r12
neg %r12
and %r12,%rax
and %r12,%r10
sub %r10,%rdx
sub %rax,%rcx
sub %rax,%r8
sub %rax,%r9
sub %rax,%rsi
mov %rdx,%rax
and $0xFF,%eax
movb %al,0(%rdi)
mov %rdx,%rax
shr $8,%rax
and $0xFF,%eax
movb %al,1(%rdi)
mov %rdx,%rax
shr $16,%rax
and $0xFF,%eax
movb %al,2(%rdi)
mov %rdx,%rax
shr $24,%rax
and $0xFF,%eax
movb %al,3(%rdi)
mov %rdx,%rax
shr $32,%rax
and $0xFF,%eax
movb %al,4(%rdi)
mov %rdx,%rax
shr $40,%rax
and $0xFF,%eax
movb %al,5(%rdi)
mov %rdx,%rdx
shr $48,%rdx
mov %rcx,%rax
shl $3,%rax
and $0xF8,%eax
xor %rdx,%rax
movb %al,6(%rdi)
mov %rcx,%rdx
shr $5,%rdx
and $0xFF,%edx
movb %dl,7(%rdi)
mov %rcx,%rdx
shr $13,%rdx
and $0xFF,%edx
movb %dl,8(%rdi)
mov %rcx,%rdx
shr $21,%rdx
and $0xFF,%edx
movb %dl,9(%rdi)
mov %rcx,%rdx
shr $29,%rdx
and $0xFF,%edx
movb %dl,10(%rdi)
mov %rcx,%rdx
shr $37,%rdx
and $0xFF,%edx
movb %dl,11(%rdi)
mov %rcx,%rdx
shr $45,%rdx
mov %r8,%rcx
shl $6,%rcx
and $0xC0,%ecx
xor %rdx,%rcx
movb %cl,12(%rdi)
mov %r8,%rdx
shr $2,%rdx
and $0xFF,%edx
movb %dl,13(%rdi)
mov %r8,%rdx
shr $10,%rdx
and $0xFF,%edx
movb %dl,14(%rdi)
mov %r8,%rdx
shr $18,%rdx
and $0xFF,%edx
movb %dl,15(%rdi)
mov %r8,%rdx
shr $26,%rdx
and $0xFF,%edx
movb %dl,16(%rdi)
mov %r8,%rdx
shr $34,%rdx
and $0xFF,%edx
movb %dl,17(%rdi)
mov %r8,%rdx
shr $42,%rdx
movb %dl,18(%rdi)
mov %r8,%rdx
shr $50,%rdx
mov %r9,%rcx
shl $1,%rcx
and $0xFE,%ecx
xor %rdx,%rcx
movb %cl,19(%rdi)
mov %r9,%rdx
shr $7,%rdx
and $0xFF,%edx
movb %dl,20(%rdi)
mov %r9,%rdx
shr $15,%rdx
and $0xFF,%edx
movb %dl,21(%rdi)
mov %r9,%rdx
shr $23,%rdx
and $0xFF,%edx
movb %dl,22(%rdi)
mov %r9,%rdx
shr $31,%rdx
and $0xFF,%edx
movb %dl,23(%rdi)
mov %r9,%rdx
shr $39,%rdx
and $0xFF,%edx
movb %dl,24(%rdi)
mov %r9,%rdx
shr $47,%rdx
mov %rsi,%rcx
shl $4,%rcx
and $0xF0,%ecx
xor %rdx,%rcx
movb %cl,25(%rdi)
mov %rsi,%rdx
shr $4,%rdx
and $0xFF,%edx
movb %dl,26(%rdi)
mov %rsi,%rdx
shr $12,%rdx
and $0xFF,%edx
movb %dl,27(%rdi)
mov %rsi,%rdx
shr $20,%rdx
and $0xFF,%edx
movb %dl,28(%rdi)
mov %rsi,%rdx
shr $28,%rdx
and $0xFF,%edx
movb %dl,29(%rdi)
mov %rsi,%rdx
shr $36,%rdx
and $0xFF,%edx
movb %dl,30(%rdi)
mov %rsi,%rsi
shr $44,%rsi
movb %sil,31(%rdi)
movq 0(%rsp),%r11
movq 8(%rsp),%r12
add %r11,%rsp
ret
#endif
|
litz-lab/scarab | 2,466 | src/test/scalar_mov_port_binding_test.s | .section .text
.globl _start
_start:
mov $0, %rax
LOOP: add $1, %rax
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
mov $1, %rbx
cmp $10000, %rax
jle LOOP
xor %edi, %edi
mov $231, %eax
syscall
.data
.align 32
var: .long 0, 0, 0, 0, 0, 0
|
litz-lab/scarab | 1,053 | src/test/gather_scatter_wrongpath.s | .section .text
.globl _start
_start:
kmovw full_mask, %k1
vmovdqa64 neg_indices,%zmm0
vpgatherdd output_ints(,%zmm0,4),%zmm1{%k1}
kmovw full_mask, %k1
lea output_ints, %rcx
vmovdqa64 pos_indices,%zmm0
vpscatterdd %zmm1,(%rcx,%zmm0,4){%k1}
vmovdqa64 input_ints_2,%zmm1
kmovw missing_one_mask, %k1
cmp $0, %rcx
jne skip
vpscatterdd %zmm1,(%rcx,%zmm0,4){%k1}
movl $3, output_ints
skip:
xor %edi, %edi
xor %rax, %rax
LOOP: add (%rcx, %rax, 4), %edi
add $1, %rax
cmp $15, %rax
jle LOOP
mov %edi, %ebx
LOOP2:
cmp $0, %ebx
je EXIT
add $-1, %ebx
jmp LOOP2
EXIT: mov $231, %eax
syscall
.data
full_mask: .word 0xFFFF
missing_one_mask: .word 0xFFFE
.align 64
pos_indices: .int 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
neg_indices: .int -1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16
input_ints_2: .fill 16, 4, 2
input_ints_1: .fill 16, 4, 1
output_ints: .fill 16, 4, 0
|
litz-lab/scarab | 2,519 | src/test/vector_mov_port_binding_test.s | .section .text
.globl _start
_start:
mov $0, %rax
LOOP: add $1, %rax
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
movmskps %xmm0, %rbx
cmp $10000, %rax
jle LOOP
xor %edi, %edi
mov $231, %eax
syscall
.data
.align 32
var: .long 0, 0, 0, 0, 0, 0
|
litz-lab/scarab | 3,573 | src/test/different_scatter_types.s | .section .text
.globl _start
_start:
vmovdqa64 input_ints,%zmm1
lea output_ints, %rcx
#vmovdqa64 indices, %zmm0
#kmovw mask_0, %k1
#vscatterdps %zmm1,(%rcx,%zmm0,4){%k1}
#vmovdqa64 indices, %zmm0
#kmovw mask_1, %k1
#vscatterdps %zmm1,output_ints(,%zmm0,4){%k1}
#kmovw mask_0, %k7
#vmovdqa32 indices_for_scale1_for_quads_vals, %ymm0
#vpscatterdq %zmm1,output_ints(,%ymm0,1){%k7}
#kmovw mask_1, %k7
#vmovdqa32 neg_indices_for_scale1_for_quads_vals, %ymm0
#vpscatterdq %ymm1,end(,%xmm0,1){%k7}
#kmovw mask_all, %k7
#vmovdqa32 indices, %ymm0
#lea end, %r15
#vpscatterdq %ymm1,-0x40(%r15,%xmm0,8){%k7}
#kmovw mask_all, %k7
#vmovdqa32 indices, %ymm0
#lea end, %r15
#vpscatterdq %ymm1,-0x40(%r15,%xmm0,8){%k7}
#kmovw mask_all, %k7
#vmovdqa32 indices, %ymm0
#mov -0x40, %eax
#vpscatterdq %ymm1,end(%eax,%xmm0,8){%k7}
kmovw mask_3, %k1
vmovdqa32 indices, %ymm0
lea input_ints, %eax
vpgatherdq (%eax,%xmm0,8),%ymm1{%k1}
kmovw mask_0, %k1
vmovdqa32 indices, %ymm0
mov $-0x40, %eax
vpgatherdq output_ints(%eax,%xmm0,8),%ymm1{%k1}
kmovw mask_3, %k7
vmovdqa32 indices, %ymm0
mov $-0x40, %r8d
vpscatterdq %ymm1,end(%r8d,%xmm0,8){%k7}
kmovw mask_0, %k7
vpscatterdq %ymm1,end(%r8d,%xmm0,8){%k7}
kmovw mask_1, %k1
vmovdqa32 indices, %ymm0
mov $-0x40, %rax
vpgatherdq output_ints(%rax,%xmm0,8),%xmm1{%k1}
lea output_ints,%rax
kmovw mask_1, %k1
vpscatterdq %xmm1,(%rax,%xmm0,8){%k1}
#
#kmovw mask_0, %k7
#vmovdqa32 neg_indices_for_scale1_for_quads_vals, %ymm0
#vpscatterdq %xmm1,end(,%xmm0,1){%k7}
#vmovdqa64 quad_indices, %zmm0
#kmovw mask_all, %k1
#vscatterqps %xmm1,output_ints(,%ymm0,4){%k1}
#vscatterdps %ymm1,(%rcx,%ymm0,4){%k1}
#vpscatterdd %zmm1,(%rcx,%zmm0,4){%k1}
#vmovdqa64 indices, %zmm0
#kmovw mask_all, %k1
#lea output_ints, %r8d
#vpscatterdd %zmm1,(%r8d,%zmm0,4){%k1}
vmovdqa64 input_ints,%zmm1
vmovdqa64 quad_indices, %zmm0
kmovw mask_8, %k2
xor %r8, %r8
vpscatterqq %zmm1,output_ints(%r8,%zmm0,8){%k2}
lea output_ints, %rcx
xor %edi, %edi
xor %rax, %rax
LOOP: add (%rcx, %rax, 4), %edi
add $1, %rax
cmp $15, %rax
jle LOOP
mov $231, %eax
syscall
.data
mask_all: .word 0xFFFF
mask_0: .word 0x0001
mask_1: .word 0x0002
mask_2: .word 0x0004
mask_3: .word 0x0008
mask_4: .word 0x0010
mask_5: .word 0x0020
mask_6: .word 0x0040
mask_7: .word 0x0080
mask_8: .word 0x0100
mask_9: .word 0x0200
mask_10: .word 0x0400
mask_11: .word 0x0800
mask_12: .word 0x1000
mask_13: .word 0x2000
mask_14: .word 0x4000
mask_15: .word 0x8000
.align 64
indices: .int 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.align 64
indices_for_scale1: .int 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
.align 64
indices_for_scale1_for_quads_vals: .int 0, 8, 16, 24, 32, 40, 48, 56
.align 64
neg_indices_for_scale1_for_quads_vals: .int -64, -56, -48, -40, -32, -24, -16, -8
.align 64
quad_indices: .quad 0,1,2,3,4,5,6,7
.align 64
r_indices: .int 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.align 64
same_indices: .fill 16, 4, 0
.align 64
input_ints: .int 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
.align 64
output_ints: .fill 16, 4, 0
end:
#input_quads: .fill 8, 8, 2
#output_quads: .fill 8, 8, 0
|
litz-lab/scarab | 1,329 | src/test/gather_not_kmask.s | .section .text
.globl _start
_start:
// mov $-32, %ebx
// vmovdqa int_indices,%ymm0
// vmovdqa mask1_2,%ymm2
// vpgatherdd %ymm2,output_ints(%ebx,%ymm0,4),%ymm3
// vmovdqa %ymm3, output_ints
// vmovdqa int_indices,%ymm0
// vmovdqa mask3_4,%ymm7
// vpgatherdq %ymm7,input_ints(,%xmm0,8),%ymm4
// vmovdqa %ymm4, output_ints
// vmovdqa quad_indices_for_ints,%ymm0
// vmovdqa mask5_6,%ymm5
// vpgatherqd %xmm5,input_ints(,%ymm0,4),%xmm6
// vmovdqa %xmm6, output_ints
vmovdqa quad_indices_for_quads,%ymm0
vmovdqa mask7_8,%ymm5
vpgatherqq %ymm5,input_ints(,%ymm0,8),%ymm6
vmovdqa %ymm6, output_ints
xor %edi, %edi
xor %rax, %rax
LOOP: add output_ints(, %rax, 4), %edi
add $1, %rax
cmp $7, %rax
jle LOOP
mov $231, %eax
syscall
.data
.align 32
mask1_2: .int 0x80000000,0x80000000,0,0,0,0,0,0
.align 32
mask3_4: .quad 0,0x8000000000000000,0,0
.align 32
mask5_6: .int 0x80000000,0x80000000,0,0
.align 32
mask7_8: .quad 0,0,0,0x8000000000000000
.align 32
int_indices: .int 0,1,2,3,4,5,6,7
.align 32
quad_indices_for_ints: .quad 4,5,6,7
.align 32
quad_indices_for_quads: .quad 0,1,2,3
.align 32
input_ints: .int 1,2,3,4,5,6,7,8
.align 32
output_ints: .fill 8, 4, 0
|
litz-lab/scarab | 1,226 | src/test/isa_test.s | .section .text
.globl _start
_start:
mov $0, %rax
mov $0, %rbx
add %rax, %rbx
mov var(,1), %rcx
add $1, var(,1)
add %eax, var(,1)
add var(,1), %rdx
lea var(,1), %rbx
mov (%rbx, %rax), %rbp
mov (%rbx, %rax, 1), %rsp
mov var(, %rax, 4), %rdi
mov %ebp, (%rbx, %rax)
mov %esp, (%rbx, %rax, 4)
mov %di, var(, %rax, 4)
addpd %xmm0, %xmm1
vaddpd %ymm2, %ymm3, %ymm4
vaddps %ymm5, %ymm6, %ymm7
addss %xmm8, %xmm9 # <======
addsd var(,1), %xmm11 # <======
addsd var(, %rax, 1), %xmm12 # <======
addsd (%rbx, %rax, 1), %xmm12 # <======
bsf %rsi, %r8 # <======
bsf var(, %rax, 1), %r9 # <======
bswap %r10
btc %rax, var(, %rax, 1)
lea stack2, %rsp
lea stack2, %rbp
push %r13
push %r14
call func1
xor %edi, %edi
mov $231, %eax
syscall
func1: ret
.data
.align 32
var: .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
stack: .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
stack2: .long 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
|
LIV2/RIDE | 7,607 | Software/bootrom/bootrom.s | include exec/types.i
include exec/execbase.i
include exec/alerts.i
include exec/nodes.i
include exec/resident.i
include exec/memory.i
include exec/lists.i
include lvo/exec_lib.i
include lvo/expansion_lib.i
include libraries/configvars.i
include hardware/cia.i
include ../include/board.i
BonusEna = $3000
BonusBase = $A00000
BonusPri = 0
SysBase = 4
CIAA = $BFE001
; Frame Pointer offsets
ExpansionBase = -4
FastRamBase = -8
ControlBase = -12
;
Start: bra Init
rts
RomTag:
dc.w RTC_MATCHWORD
dc.l RomTag
dc.l EndCode
dc.b (RTF_COLDSTART)
dc.b 1
dc.b NT_LIBRARY
dc.b 0
dc.l Name
dc.l ID
dc.l Init
; A2 = ExpansionBase, A6 = SysBase
Init:
movem.l D2-D3/A2-A3/A6,-(SP)
link.w A5,#-16
move.l #0,FastRamBase(A5)
move.l #0,ExpansionBase(A5)
movea.l SysBase,A6
btst.b #CIAB_GAMEPORT0,CIAA ; Exit if LMB pressed
beq exit
moveq.l #0,D0
lea ExpansionName(PC),A1
jsr _LVOOpenLibrary(A6)
move.l D0,ExpansionBase(A5)
tst.l D0
beq exit
FindRam:
move.l D0,A6
;Check if board present
move.l #0,A0
.loop: move.l #MANUF_ID,D0
move.l #PROD_ID_RAM,D1
jsr _LVOFindConfigDev(A6)
tst.l D0
beq exit
move.l D0,A0
btst #CDB_SHUTUP,cd_Flags(A0)
bne .loop
move.l cd_BoardAddr(A0),A0
move.l A0,FastRamBase(A5)
FindCtrl:
;Check if board present
move.l #0,A0
move.l #MANUF_ID,D0
move.l #PROD_ID_IDE,D1
jsr _LVOFindConfigDev(A6)
tst.l D0
beq exit
move.l D0,A0
move.l cd_BoardAddr(A0),A0
add.l #$8000,A0
move.l A0,ControlBase(A5)
;; Sanity check: make sure that the memory block does not already exist
Sanity: move.l SysBase,A6
lea MemList(A6),A2
move.l LH_HEAD(A2),A2
.loop: move.l MH_UPPER(A2),D0
cmp.l #BonusBase,D0 ; Is this block's upper limit lower than BonusBase?
ble .next ; If yes, ignore it
cmp.l #(BonusBase+$1F0000),D0 ; Is its upper bound higher than BonusBase's upper bound?
bgt .next ; Yes, ignore it
.found: bra exit ; No; if we got here the BonusRAM block fits inside the current block, so we must not add it again
.next: move.l LN_SUCC(A2),D0
tst.l D0 ; End of the list?
beq .notfound ; No matching block found, go ahead and add the Bonus RAM block
move.l D0,A2
bra.s .loop
.notfound
;; Turn on BonusRAM region
Enable: move.l ControlBase(A5),A0
or.b #$20,(A0)
;; Get the Bonus RAM size with a simple address test
Size: moveq.l #31,D3 ; Loop 31 times - A00000-BEFFFF
move.l #BonusBase+$B00,A2 ; We poke at Addr+$B00 because this address will not trash CIA registers if our overlay is not active/working
moveq.l #0,D2 ; BonusRAM Size
bra.s .start
.loop: move.w D1,(A2) ; Restore saved value
add.l #$10000,A2 ; Increment test address by 64K
add.l #$10000,D2 ; Increase BonusRAM size by 64K
.start: move.w (A2),D1 ; Save value at address
move.l A2,D0
lsr.l #8,D0
move.w D0,(A2) ; Store upper address bits
nop
nop
cmp.w (A2),D0 ; Compare memory value
bne.s .done ; Value didn't match
dbra D3,.loop
.done: move.w D1,(A2) ; Restore last saved value
tst.l D2 ; Did we find any RAM?
beq exit
;; Attempt to merge Z2 Fast with BonusRAM
;; A2 = Pointer to board/mem header
;; A1 = Pointer to Chunk
;; D2 = BonusSize
Merge: move.l #0,D3
move.l FastRamBase(A5),A2
move.l MH_UPPER(A2),D0 ; Check if board neighbors BonusRAM
cmp.l #BonusBase,D0
bne.s .NewBrd ; Nope
movea.l SysBase,A6
jsr _LVOForbid(A6)
lea MH_FIRST(A2),A3
.walk: move.l MC_NEXT(A3),D0 ; Skip through mem chunks until we get to the end
beq.s .last
move.l MC_NEXT(A3),A3
bra.s .walk
.last: move.l MC_BYTES(A3),D1 ; Add Chunk size to chunk address, see if chunk is at the end
move.l A3,D0
add.l D0,D1
cmp.l #BonusBase,D1
bne.s .MakeNew
move.l MC_BYTES(A3),D1 ; Yes it is, increase the chunk size by BonusSize
add.l D2,D1
move.l D1,MC_BYTES(A3) ; And store it back
bra.s .FixMH
.MakeNew: move.l #BonusBase,D0 ; No free chunk at the end of the Z2 board, so add a new chunk
move.l D0,MC_NEXT(A3)
move.l #BonusBase,A3
move.l #0,MC_NEXT(A3)
move.l D2,MC_BYTES(A3)
;; Now fix up the MemHeader to reflect the new memory size
.FixMH: move.l #BonusBase,D0 ; Add BonusSize to BonusBase
add.l D2,D0
move.l D0,MH_UPPER(A2) ; Store it as the MemHeader Upper limit
move.l MH_FREE(A2),D0 ; Add BonusSize to MemFree
add.l D2,D0
move.l D0,MH_FREE(A2) ; Save back to MemFree
move.l FastRamBase(A5),A2
lea GottaGoFast(PC),A3
move.l A3,LN_NAME(A2)
jsr _LVOPermit(A6)
bra.s .done
;; Couldn't expand an existing block so we just create a whole new one
.NewBrd: move.l SysBase,A6
move.l D2,D0
move.l #MEMF_FAST|MEMF_PUBLIC|MEMF_24BITDMA,D1
move.l #BonusPri,D2
move.l #BonusBase,A0
lea GottaGoFast(PC),A1
jsr _LVOAddMemList(A6)
.done:
;; Kickstart versions below 2.0 give Slow RAM the same priority as Fast
;; So take the opportunity to change the priority to -5
FixPrio:
move.l SysBase,A6 ; Skip if running Kick 2 and up
cmp.l #36,SoftVer(A6)
bge.s .end
lea MemList(A6),A2
move.l LH_HEAD(A2),A3
.loop: cmp.l #$C00000,MH_LOWER(A3) ; Memory Node is Ranger?
blt .next
cmp.l #$DEFFFF,MH_UPPER(A3)
bgt .next
bra.s .found
.next move.l LN_SUCC(A3),D0 ; Keep looking
tst.l D0
beq.s .end
move.l D0,A3
bra.s .loop
.found move.b LN_PRI(A3),D0 ; If the priority is non-zero then don't change it
tst.b D0
bne.s .end
jsr _LVOForbid(A6)
move.l A3,A1 ; Remove and re-enqueue it to move it to the right position in the list
jsr _LVORemove(A6)
move.l A3,A1
move.l A2,A0
move.b #-5,LN_PRI(A3)
jsr _LVOEnqueue(A6)
jsr _LVOPermit(A6)
.end:
exit: move.l ExpansionBase(A5),D0
tst.l D0
beq .noexp
move.l SysBase,A6
move.l D0,A1
jsr _LVOCloseLibrary(A6)
.noexp: unlk A5
movem.l (SP)+,D2-D3/A2-A3/A6
moveq.l #0,D0
rts
GottaGoFast: dc.b "GottaGoFast!",0,0
cnop 0,4
ExpansionName: dc.b "expansion.library",0
Name: dc.b "RIDE.library",0
ID: dc.b "RIDE Support",0
EndCode:
|
liva/minimal-linux | 1,346 | usr/initramfs_data.S | /* SPDX-License-Identifier: GPL-2.0 */
/*
initramfs_data includes the compressed binary that is the
filesystem used for early user space.
Note: Older versions of "as" (prior to binutils 2.11.90.0.23
released on 2001-07-14) did not support .incbin.
If you are forced to use older binutils than that then the
following trick can be applied to create the resulting binary:
ld -m elf_i386 --format binary --oformat elf32-i386 -r \
-T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
ld -m elf_i386 -r -o built-in.o initramfs_data.o
For including the .init.ramfs sections, see include/asm-generic/vmlinux.lds.
The above example is for i386 - the parameters vary between architectures.
If needed, look up LDFLAGS_BLOB in an older version of the
arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
Using .incbin has the advantage over ld that the correct flags are set
in the ELF header, as required by certain architectures.
*/
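/*
  Illustrative sketch of the same .incbin mechanism in isolation; the
  section name, the symbol names and the file name "blob.bin" below are
  assumptions for this example only, not part of the kernel build:

  .section .rodata.blob,"a"
  .globl blob_start
  .globl blob_end
  blob_start:
  .incbin "blob.bin"
  blob_end:

  The blob size can then be computed as blob_end - blob_start, which is
  exactly how __initramfs_size is derived from __irf_start/__irf_end below.
*/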
#include <linux/stringify.h>
#include <asm-generic/vmlinux.lds.h>
.section .init.ramfs,"a"
__irf_start:
.incbin __stringify(INITRAMFS_IMAGE)
__irf_end:
.section .init.ramfs.info,"a"
.globl VMLINUX_SYMBOL(__initramfs_size)
VMLINUX_SYMBOL(__initramfs_size):
#ifdef CONFIG_64BIT
.quad __irf_end - __irf_start
#else
.long __irf_end - __irf_start
#endif
|
Live-CTF/LiveCTF-DEFCON33 | 4,529 | qualifiers/challenges/ropably/attempt2/checker.S | .file "checker.c"
.text
.section .text.checker,"ax",@progbits
.globl checker
.type checker, @function
checker:
endbr64
pushq %rbp
movq %rsp, %rbp
movq %rdi, -24(%rbp)
movl $1, -4(%rbp)
movq -24(%rbp), %rax
addq $5, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $13, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $100, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $13, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $2, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $116, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $2, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $8, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $177, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $8, %rax
movzbl (%rax), %edx
movq -24(%rbp), %rax
addq $3, %rax
movzbl (%rax), %eax
xorl %edx, %eax
movsbl %al, %eax
movzwl %ax, %eax
cmpl $32, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $3, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $4, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $166, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $4, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $12, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $166, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $12, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $15, %rax
movzbl (%rax), %eax
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
cmpl $8769, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $15, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $1, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $223, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $1, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $9, %rax
movzbl (%rax), %eax
movsbl %al, %eax
subl %eax, %edx
movzwl %dx, %eax
cmpl $5, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $9, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $14, %rax
movzbl (%rax), %eax
movsbl %al, %eax
subl %eax, %edx
movzwl %dx, %eax
cmpl $65527, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $14, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $7, %rax
movzbl (%rax), %eax
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
cmpl $13572, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $7, %rax
movzbl (%rax), %edx
movq -24(%rbp), %rax
movzbl (%rax), %eax
xorl %edx, %eax
movsbl %al, %eax
movzwl %ax, %eax
cmpl $37, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $11, %rax
movzbl (%rax), %eax
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
cmpl $8400, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $11, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $6, %rax
movzbl (%rax), %eax
movsbl %al, %eax
subl %eax, %edx
movzwl %dx, %eax
cmpl $65528, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $6, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $10, %rax
movzbl (%rax), %eax
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
cmpl $7797, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $10, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $5, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $119, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movl -4(%rbp), %eax
popq %rbp
ret
.size checker, .-checker
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
Live-CTF/LiveCTF-DEFCON33 | 9,167 | qualifiers/challenges/ropably/attempt2/checker-obf.S | .file "checker.c"
.text
.section .text.checker,"ax",@progbits
.globl checker
.type checker, @function
checker:
.size checker, .-checker
leaq chain(%rip), %rbx
xchg %rsp, %rbx
ret
rop_chunk_4:
xchg %rsp, %rbx
movzbl (%rax), %eax
movsbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_54:
xchg %rsp, %rbx
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $119, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_43:
xchg %rsp, %rbx
addq $11, %rax
movzbl (%rax), %eax
xchg %rsp, %rbx
ret
rop_chunk_39:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $7, %rax
xchg %rsp, %rbx
ret
rop_chunk_18:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $4, %rax
movzbl (%rax), %eax
movsbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_3:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $13, %rax
xchg %rsp, %rbx
ret
rop_chunk_37:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $7, %rax
movzbl (%rax), %eax
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
xchg %rsp, %rbx
ret
rop_chunk_2:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $5, %rax
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_17:
xchg %rsp, %rbx
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_26:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $15, %rax
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_28:
xchg %rsp, %rbx
addl %edx, %eax
movzwl %ax, %eax
cmpl $223, %eax
xchg %rsp, %rbx
ret
rop_chunk_24:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $12, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $15, %rax
xchg %rsp, %rbx
ret
rop_chunk_8:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $2, %rax
movzbl (%rax), %eax
xchg %rsp, %rbx
ret
rop_chunk_22:
xchg %rsp, %rbx
movzwl %ax, %eax
cmpl $166, %eax
xchg %rsp, %rbx
ret
rop_chunk_53:
xchg %rsp, %rbx
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
addq $5, %rax
movzbl (%rax), %eax
xchg %rsp, %rbx
ret
rop_chunk_11:
xchg %rsp, %rbx
addq $2, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
xchg %rsp, %rbx
ret
rop_chunk_32:
xchg %rsp, %rbx
cmpl $5, %eax
sete %al
movzbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_9:
xchg %rsp, %rbx
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $116, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_7:
xchg %rsp, %rbx
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_52:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $10, %rax
xchg %rsp, %rbx
ret
rop_chunk_16:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $3, %rax
xchg %rsp, %rbx
ret
rop_chunk_51:
xchg %rsp, %rbx
imull %edx, %eax
movzwl %ax, %eax
cmpl $7797, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_46:
xchg %rsp, %rbx
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $11, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
xchg %rsp, %rbx
ret
rop_chunk_0:
xchg %rsp, %rbx
endbr64
pushq %rbp
movq %rsp, %rbp
xchg %rsp, %rbx
ret
rop_chunk_47:
xchg %rsp, %rbx
addq $6, %rax
movzbl (%rax), %eax
movsbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_12:
xchg %rsp, %rbx
addq $8, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
movzwl %ax, %eax
cmpl $177, %eax
xchg %rsp, %rbx
ret
rop_chunk_44:
xchg %rsp, %rbx
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
xchg %rsp, %rbx
ret
rop_chunk_55:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
xchg %rsp, %rbx
ret
rop_chunk_49:
xchg %rsp, %rbx
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $6, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
xchg %rsp, %rbx
ret
rop_chunk_6:
xchg %rsp, %rbx
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $13, %rax
xchg %rsp, %rbx
ret
rop_chunk_41:
xchg %rsp, %rbx
movzwl %ax, %eax
cmpl $37, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_31:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $9, %rax
movzbl (%rax), %eax
movsbl %al, %eax
subl %eax, %edx
movzwl %dx, %eax
xchg %rsp, %rbx
ret
rop_chunk_34:
xchg %rsp, %rbx
addq $14, %rax
movzbl (%rax), %eax
movsbl %al, %eax
subl %eax, %edx
movzwl %dx, %eax
xchg %rsp, %rbx
ret
rop_chunk_50:
xchg %rsp, %rbx
addq $10, %rax
movzbl (%rax), %eax
movsbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_14:
xchg %rsp, %rbx
movzbl (%rax), %edx
movq -24(%rbp), %rax
addq $3, %rax
movzbl (%rax), %eax
xorl %edx, %eax
xchg %rsp, %rbx
ret
rop_chunk_10:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
xchg %rsp, %rbx
ret
rop_chunk_40:
xchg %rsp, %rbx
movzbl (%rax), %edx
movq -24(%rbp), %rax
movzbl (%rax), %eax
xorl %edx, %eax
movsbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_19:
xchg %rsp, %rbx
addl %edx, %eax
movzwl %ax, %eax
cmpl $166, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_23:
xchg %rsp, %rbx
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
xchg %rsp, %rbx
ret
rop_chunk_33:
xchg %rsp, %rbx
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $9, %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
xchg %rsp, %rbx
ret
rop_chunk_36:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $14, %rax
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_1:
xchg %rsp, %rbx
movq %rdi, -24(%rbp)
movl $1, -4(%rbp)
xchg %rsp, %rbx
ret
rop_chunk_5:
xchg %rsp, %rbx
addl %edx, %eax
movzwl %ax, %eax
cmpl $100, %eax
sete %al
movzbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_21:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $12, %rax
movzbl (%rax), %eax
movsbl %al, %eax
addl %edx, %eax
xchg %rsp, %rbx
ret
rop_chunk_42:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
movzbl (%rax), %eax
movsbl %al, %edx
movq -24(%rbp), %rax
xchg %rsp, %rbx
ret
rop_chunk_25:
xchg %rsp, %rbx
movzbl (%rax), %eax
movsbl %al, %eax
imull %edx, %eax
movzwl %ax, %eax
cmpl $8769, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_27:
xchg %rsp, %rbx
movq -24(%rbp), %rax
addq $1, %rax
movzbl (%rax), %eax
movsbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_13:
xchg %rsp, %rbx
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $8, %rax
xchg %rsp, %rbx
ret
rop_chunk_15:
xchg %rsp, %rbx
movsbl %al, %eax
movzwl %ax, %eax
cmpl $32, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_30:
xchg %rsp, %rbx
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_35:
xchg %rsp, %rbx
cmpl $65527, %eax
sete %al
xchg %rsp, %rbx
ret
rop_chunk_48:
xchg %rsp, %rbx
subl %eax, %edx
movzwl %dx, %eax
cmpl $65528, %eax
sete %al
movzbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_29:
xchg %rsp, %rbx
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $1, %rax
xchg %rsp, %rbx
ret
rop_chunk_56:
xchg %rsp, %rbx
movl -4(%rbp), %eax
popq %rbp
ret
xchg %rsp, %rbx
ret
rop_chunk_45:
xchg %rsp, %rbx
cmpl $8400, %eax
sete %al
movzbl %al, %eax
xchg %rsp, %rbx
ret
rop_chunk_20:
xchg %rsp, %rbx
movzbl %al, %eax
andl %eax, -4(%rbp)
movq -24(%rbp), %rax
addq $4, %rax
movzbl (%rax), %eax
movsbl %al, %edx
xchg %rsp, %rbx
ret
rop_chunk_38:
xchg %rsp, %rbx
cmpl $13572, %eax
sete %al
movzbl %al, %eax
andl %eax, -4(%rbp)
xchg %rsp, %rbx
ret
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
.data
.align 8
chain:
.quad rop_chunk_0, rop_chunk_1, rop_chunk_2, rop_chunk_3, rop_chunk_4, rop_chunk_5, rop_chunk_6, rop_chunk_7, rop_chunk_8, rop_chunk_9, rop_chunk_10, rop_chunk_11, rop_chunk_12, rop_chunk_13, rop_chunk_14, rop_chunk_15, rop_chunk_16, rop_chunk_17, rop_chunk_18, rop_chunk_19, rop_chunk_20, rop_chunk_21, rop_chunk_22, rop_chunk_23, rop_chunk_24, rop_chunk_25, rop_chunk_26, rop_chunk_27, rop_chunk_28, rop_chunk_29, rop_chunk_30, rop_chunk_31, rop_chunk_32, rop_chunk_33, rop_chunk_34, rop_chunk_35, rop_chunk_36, rop_chunk_37, rop_chunk_38, rop_chunk_39, rop_chunk_40, rop_chunk_41, rop_chunk_42, rop_chunk_43, rop_chunk_44, rop_chunk_45, rop_chunk_46, rop_chunk_47, rop_chunk_48, rop_chunk_49, rop_chunk_50, rop_chunk_51, rop_chunk_52, rop_chunk_53, rop_chunk_54, rop_chunk_55, rop_chunk_56
|
Live-CTF/LiveCTF-DEFCON33 | 1,263 | qualifiers/challenges/ropably/attempt1/part2.S | .section .text.checker,"ax",@progbits
.globl checker
.type checker, @function
checker:
leaq chain(%rip), %rbx
xchg %rsp, %rbx
ret
rop_chunk0:
xchg %rsp, %rbx
endbr64
pushq %rbp
movq %rsp, %rbp
subq $32, %rsp
movq %fs:40, %rax
xchg %rsp, %rbx
ret
rop_chunk1:
xchg %rsp, %rbx
movq %rax, -8(%rbp)
xorl %eax, %eax
leaq -20(%rbp), %rax
movq %rax, %rsi
leaq .LC0(%rip), %rax
movq %rax, %rdi
xchg %rsp, %rbx
ret
rop_chunk2:
xchg %rsp, %rbx
movl $0, %eax
call __isoc99_scanf@PLT
leaq -16(%rbp), %rax
movq %rax, %rsi
leaq .LC0(%rip), %rax
xchg %rsp, %rbx
ret
rop_chunk3:
xchg %rsp, %rbx
movq %rax, %rdi
movl $0, %eax
call __isoc99_scanf@PLT
movl -20(%rbp), %edx
movl -16(%rbp), %eax
xchg %rsp, %rbx
ret
rop_chunk4:
xchg %rsp, %rbx
addl %edx, %eax
movl %eax, -12(%rbp)
movl -12(%rbp), %eax
movl %eax, %esi
xchg %rsp, %rbx
ret
rop_chunk5:
xchg %rsp, %rbx
leaq .LC1(%rip), %rax
movq %rax, %rdi
movl $0, %eax
call printf@PLT
xchg %rsp, %rbx
ret
rop_chunk6:
xchg %rsp, %rbx
nop
movq -8(%rbp), %rax
subq %fs:40, %rax
je .L2
call __stack_chk_fail@PLT
.L2:
leave
ret
.size checker, .-checker
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
|
Live-CTF/LiveCTF-DEFCON33 | 1,185 | qualifiers/challenges/ropably/attempt1/test-function.S | .file "test-function.c"
.text
.globl x
.data
.align 4
.type x, @object
.size x, 4
x:
.long 1
.section .rodata
.LC0:
.string "%d"
.LC1:
.string "%d\n"
.section .text.checker,"ax",@progbits
.globl checker
.type checker, @function
checker:
endbr64
pushq %rbp
movq %rsp, %rbp
subq $32, %rsp
movq %fs:40, %rax
movq %rax, -8(%rbp)
xorl %eax, %eax
leaq -20(%rbp), %rax
movq %rax, %rsi
leaq .LC0(%rip), %rax
movq %rax, %rdi
movl $0, %eax
call __isoc99_scanf@PLT
leaq -16(%rbp), %rax
movq %rax, %rsi
leaq .LC0(%rip), %rax
movq %rax, %rdi
movl $0, %eax
call __isoc99_scanf@PLT
movl -20(%rbp), %edx
movl -16(%rbp), %eax
addl %edx, %eax
movl %eax, -12(%rbp)
movl -12(%rbp), %eax
movl %eax, %esi
leaq .LC1(%rip), %rax
movq %rax, %rdi
movl $0, %eax
call printf@PLT
nop
movq -8(%rbp), %rax
subq %fs:40, %rax
je .L2
call __stack_chk_fail@PLT
.L2:
leave
ret
.size checker, .-checker
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
Live-CTF/LiveCTF-DEFCON33 | 6,678 | finals/challenges/alt-uni/challenge/src/alt_uni.s | @ upward_stack_ctf.s
@ ARM Assembly CTF with ONLY upward-growing stack
@ No push/pop - only manual stack operations in reverse
@ Compile: as -o upward_stack_ctf.o upward_stack_ctf.s
@ Link: ld -o upward_stack_ctf upward_stack_ctf.o
.global _start
.section .data
@ Messages
banner: .ascii "=== Alternate Universe===\n"
banner_len = . - banner
username_prompt: .ascii "Enter username: "
username_prompt_len = . - username_prompt
password_prompt: .ascii "Enter password: "
password_prompt_len = . - password_prompt
main_msg: .ascii "[main] Starting...\n"
main_len = . - main_msg
foo_msg: .ascii "[foo] Handling username\n"
foo_len = . - foo_msg
bar_msg: .ascii "[bar] Handling password\n"
bar_len = . - bar_msg
safe_msg: .ascii "[main] Exited safely\n"
safe_len = . - safe_msg
canary_warn: .ascii "Careful! Something bad almost happened...\n"
canary_warn_len = . - canary_warn
.section .bss
@ Stack space that grows upward (64KB)
.align 4
stack_bottom:
.space 0x10000
stack_top:
.section .text
@ Syscall numbers for ARM
.equ SYS_exit, 1
.equ SYS_read, 3
.equ SYS_write, 4
.equ STDOUT, 1
.equ STDIN, 0
@ Macro to print a message
.macro print_msg msg, len
mov r0, #STDOUT
ldr r1, =\msg
mov r2, #\len
mov r7, #SYS_write
svc #0
.endm
@ Upward stack operations (opposite of normal)
@ Normal push: str rX, [sp, #-4]! (pre-decrement)
@ Upward push: str rX, [sp], #4 (post-increment)
@ Normal pop: ldr rX, [sp], #4 (post-increment)
@ Upward pop: ldr rX, [sp, #-4]! (pre-decrement)
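@ Illustrative example (not part of the program flow): assuming sp = 0x1000,
@     str lr, [sp], #4      @ lr stored at 0x1000, sp becomes 0x1004
@     ...
@     ldr lr, [sp, #-4]!    @ sp becomes 0x1000 again, lr reloaded from 0x1000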
_start:
@ Initialize stack pointer to bottom (grows UP)
ldr sp, =stack_bottom
@ Print banner
print_msg banner, banner_len
@ Jump to main
b main
@ Main function
main:
@ Save lr on upward stack (post-increment sp)
str lr, [sp], #4 @ sp moves UP after store
@ Print main message
print_msg main_msg, main_len
@ Allocate space for main's local buffer (32 bytes)
@ In upward stack, we ADD to allocate
add sp, sp, #32
@ Call foo - lr will contain return address
bl foo
@ Deallocate main's buffer
sub sp, sp, #32
@ Print safe exit message
print_msg safe_msg, safe_len
@ Restore lr from upward stack (pre-decrement sp)
ldr lr, [sp, #-4]! @ sp moves DOWN before load
@ Exit program
mov r0, #0
mov r7, #SYS_exit
svc #0
@ Foo function
foo:
@ Save lr on upward stack
str lr, [sp], #4
@ Save frame pointer
str r11, [sp], #4
mov r11, sp @ Frame pointer points after saved regs
@ Print foo message
print_msg foo_msg, foo_len
@ Allocate space for:
@ 1. Username buffer (64 bytes)
mov r4, sp @ Save username buffer address
add sp, sp, #64
@ 2. Password buffer (32 bytes)
mov r5, sp @ Save password buffer address
add sp, sp, #32
@ 3. Saved "return address" location (4 bytes)
@ This is what we want to be overwritten
add sp, sp, #4
@ Store safe return point at sp-4 (our "return address")
adr r0, foo_safe_return
str r0, [sp, #-4] @ Store at top of our allocations
@ Get username input
print_msg username_prompt, username_prompt_len
@ Read username (up to 64 bytes)
mov r0, #STDIN
mov r1, r4 @ Username buffer address
mov r2, #64
mov r7, #SYS_read
svc #0
@ Call bar with password buffer address
mov r0, r5 @ Pass password buffer as argument
bl bar
@ Load potentially corrupted return address
ldr r0, [sp, #-4] @ This could be overwritten to win!
@ Restore stack pointer
mov sp, r11
@ Restore saved registers from upward stack
ldr r11, [sp, #-4]!
ldr lr, [sp, #-4]!
@ Jump to loaded address (potentially hijacked)
bx r0
foo_safe_return:
@ Normal return path
bx lr
@ Bar function - contains vulnerability
bar:
@ Save registers on upward stack
str lr, [sp], #4
str r4, [sp], #4
str r5, [sp], #4
str r6, [sp], #4
@ Write stack canary (4-byte ASCII value "Live")
mov r3, #0x65 @ 'e'
orr r3, r3, #0x7600 @ 'v' << 8
orr r3, r3, #0x690000 @ 'i' << 16
orr r3, r3, #0x4C000000 @ 'L' << 24
str r3, [sp], #4 @ Store canary and increment sp
mov r4, r0 @ Save password buffer address (32 bytes)
@ Print bar message
print_msg bar_msg, bar_len
@ Get password input
print_msg password_prompt, password_prompt_len
@ VULNERABILITY: Read up to 128 bytes into 32-byte buffer
mov r0, #STDIN
mov r1, r4 @ Destination buffer (only 32 bytes!)
mov r2, #128 @ Read up to 128 bytes
mov r7, #SYS_read
svc #0
@ If we read > 32 bytes, we overwrite:
@ +32: Start of foo's saved return address!
@ Check canary before returning
ldr r3, [sp, #-4]! @ Load canary value and decrement sp
mov r2, #0x65 @ 'e'
orr r2, r2, #0x7600 @ 'v' << 8
orr r2, r2, #0x690000 @ 'i' << 16
orr r2, r2, #0x4C000000 @ 'L' << 24
cmp r3, r2
bne canary_fail @ If canary doesn't match, exit
@ Restore registers from upward stack
ldr r6, [sp, #-4]!
ldr r5, [sp, #-4]!
ldr r4, [sp, #-4]!
ldr lr, [sp, #-4]!
bx lr
canary_fail:
@ Canary was corrupted - print warning and exit
print_msg canary_warn, canary_warn_len
mov r0, #1 @ Exit with error code
mov r7, #SYS_exit
svc #0
@ Win function - target for exploitation
win:
@ Execute /bin/sh
ldr r0, =shell_path @ First argument: path to /bin/sh
mov r1, #0 @ Second argument: argv (NULL)
mov r2, #0 @ Third argument: envp (NULL)
mov r7, #11 @ SYS_execve
svc #0
@ If execve fails, exit
mov r0, #1
mov r7, #SYS_exit
svc #0
.section .data
shell_path: .asciz "/bin/sh"
@ Stack layout when bar is called (all growing UPWARD):
@
@ stack_bottom:
@ [main's lr - 4 bytes]
@ [main's buffer - 32 bytes]
@ [foo's lr - 4 bytes]
@ [foo's r11 - 4 bytes]
@ [foo's username buffer - 64 bytes]
@ [foo's password buffer - 32 bytes] <- bar reads HERE
@ [foo's return addr - 4 bytes] <- TARGET at offset +32
@ [bar's lr - 4 bytes]
@ [bar's r4-r6 - 12 bytes]
@ [bar's canary - 4 bytes] <- "Live" canary value
@ sp points here ->
@
@ Overflow distance: 32 bytes to overwrite return address
@ Note: Overflowing further will corrupt the canary and cause warning + exit
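@ Illustrative payload sketch (assumes the load address of win is known):
@ 32 bytes of filler for the password buffer followed by the 4-byte
@ little-endian address of win (36 bytes in total) rewrites foo's saved return
@ address while stopping well short of bar's "Live" canary at offset +52 from
@ the start of the password buffer, so foo's final `bx r0` lands in win.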
|
Live-CTF/LiveCTF-DEFCON33 | 8,174 | finals/challenges/alt-uni/challenge/src/alt_uni_single_char.s | @ upward_stack_ctf.s
@ ARM Assembly CTF with ONLY upward-growing stack
@ No push/pop - only manual stack operations in reverse
@ Compile: as -o upward_stack_ctf.o upward_stack_ctf.s
@ Link: ld -o upward_stack_ctf upward_stack_ctf.o
.global _start
.section .data
@ Messages
banner: .ascii "=== Alt Uni ===\n"
banner_len = . - banner
username_prompt: .ascii "Enter username: "
username_prompt_len = . - username_prompt
password_prompt: .ascii "Enter password: "
password_prompt_len = . - password_prompt
main_msg: .ascii "[main] Starting...\n"
main_len = . - main_msg
foo_msg: .ascii "[foo] Called\n"
foo_len = . - foo_msg
bar_msg: .ascii "[bar] Processing password\n"
bar_len = . - bar_msg
safe_msg: .ascii "[main] Exited safely\n"
safe_len = . - safe_msg
canary_warn: .ascii "Careful! Something bad almost happened...\n"
canary_warn_len = . - canary_warn
.section .bss
@ Stack space that grows upward (64KB)
.align 4
stack_bottom:
.space 0x10000
stack_top:
.section .text
@ Syscall numbers for ARM
.equ SYS_exit, 1
.equ SYS_read, 3
.equ SYS_write, 4
.equ SYS_fsync, 118
.equ STDOUT, 1
.equ STDIN, 0
@ Macro to print a message
.macro print_msg msg, len
mov r0, #STDOUT
ldr r1, =\msg
mov r2, #\len
mov r7, #SYS_write
svc #0
@ Flush stdout
mov r0, #STDOUT
mov r7, #SYS_fsync
svc #0
.endm
@ Upward stack operations (opposite of normal)
@ Normal push: str rX, [sp, #-4]! (pre-decrement)
@ Upward push: str rX, [sp], #4 (post-increment)
@ Normal pop: ldr rX, [sp], #4 (post-increment)
@ Upward pop: ldr rX, [sp, #-4]! (pre-decrement)
@ Helper function to read a line of input
@ r0 = buffer address
@ r1 = max size
@ Returns: number of bytes read in r0
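@ Illustrative call sequence (sketch; `buf` is a hypothetical label):
@     ldr r0, =buf          @ destination buffer
@     mov r1, #32           @ capacity
@     bl  read_line         @ on return r0 = bytes read, buf is NUL-terminated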
read_line:
str lr, [sp], #4 @ Save lr
str r4, [sp], #4
str r5, [sp], #4
str r6, [sp], #4
mov r4, r0 @ Buffer address
mov r5, r1 @ Max size
mov r6, #0 @ Counter
read_char_loop:
cmp r6, r5 @ Check if at max size
bge read_done @ If so, done
@ Read one character
mov r0, #STDIN
add r1, r4, r6 @ Current position in buffer
mov r2, #1 @ Read 1 byte
mov r7, #SYS_read
svc #0
cmp r0, #0 @ Check if read failed
ble read_done
@ Check for newline
ldrb r3, [r1] @ Load the character we just read
cmp r3, #0x0A @ Is it newline?
beq read_done @ If so, done
add r6, r6, #1 @ Increment counter
b read_char_loop
read_done:
@ Null terminate
mov r3, #0
strb r3, [r4, r6] @ Store null at current position
mov r0, r6 @ Return count
ldr r6, [sp, #-4]!
ldr r5, [sp, #-4]!
ldr r4, [sp, #-4]!
ldr lr, [sp, #-4]!
bx lr
_start:
@ Initialize stack pointer to bottom (grows UP)
ldr sp, =stack_bottom
@ Print banner
print_msg banner, banner_len
@ Jump to main
b main
@ Main function
main:
@ Save lr on upward stack (post-increment sp)
str lr, [sp], #4 @ sp moves UP after store
@ Print main message
print_msg main_msg, main_len
@ Allocate space for main's local buffer (32 bytes)
@ In upward stack, we ADD to allocate
add sp, sp, #32
@ Call foo - lr will contain return address
bl foo
@ Deallocate main's buffer
sub sp, sp, #32
@ Print safe exit message
print_msg safe_msg, safe_len
@ Restore lr from upward stack (pre-decrement sp)
ldr lr, [sp, #-4]! @ sp moves DOWN before load
@ Exit program
mov r0, #0
mov r7, #SYS_exit
svc #0
@ Foo function
foo:
@ Save lr on upward stack
str lr, [sp], #4
@ Save frame pointer
str r11, [sp], #4
mov r11, sp @ Frame pointer points after saved regs
@ Print foo message
print_msg foo_msg, foo_len
@ Allocate space for:
@ 1. Username buffer (64 bytes)
mov r4, sp @ Save username buffer address
add sp, sp, #64
@ 2. Password buffer (32 bytes)
mov r5, sp @ Save password buffer address
add sp, sp, #32
@ 3. Saved "return address" location (4 bytes)
@ This is what we want to be overwritten
add sp, sp, #4
@ Store safe return point at sp-4 (our "return address")
adr r0, foo_safe_return
str r0, [sp, #-4] @ Store at top of our allocations
@ Get username input
print_msg username_prompt, username_prompt_len
@ Read username using our helper (up to 64 bytes)
mov r0, r4 @ Username buffer address
mov r1, #64 @ Max size
bl read_line
@ Call bar with password buffer address
mov r0, r5 @ Pass password buffer as argument
bl bar
@ Load potentially corrupted return address
ldr r0, [sp, #-4] @ This could be overwritten to win!
@ Restore stack pointer
mov sp, r11
@ Restore saved registers from upward stack
ldr r11, [sp, #-4]!
ldr lr, [sp, #-4]!
@ Jump to loaded address (potentially hijacked)
bx r0
foo_safe_return:
@ Normal return path
bx lr
@ Bar function - contains vulnerability
bar:
@ Save registers on upward stack
str lr, [sp], #4
str r4, [sp], #4
str r5, [sp], #4
str r6, [sp], #4
@ Write stack canary (4-byte ASCII value "Live")
mov r3, #0x65 @ 'e'
orr r3, r3, #0x7600 @ 'v' << 8
orr r3, r3, #0x690000 @ 'i' << 16
orr r3, r3, #0x4C000000 @ 'L' << 24
str r3, [sp], #4 @ Store canary and increment sp
mov r4, r0 @ Save password buffer address (32 bytes)
@ Print bar message
print_msg bar_msg, bar_len
@ Get password input
print_msg password_prompt, password_prompt_len
@ VULNERABILITY: Read up to 128 bytes into 32-byte buffer
@ mov r0, #STDIN
@ mov r1, r4 @ Destination buffer (only 32 bytes!)
@ mov r2, #128 @ Read up to 128 bytes
@ mov r7, #SYS_read
@ svc #0
@ Read password using our helper (allowing overflow)
    mov r0, r4          @ Password buffer address (saved in r4 above)
mov r1, #128 @ Max size
bl read_line
@ Don't clear newline here - we want the raw overflow!
@ If we read > 32 bytes, we overwrite:
@ +32: Start of foo's saved return address!
@ Check canary before returning
ldr r3, [sp, #-4]! @ Load canary value and decrement sp
mov r2, #0x65 @ 'e'
orr r2, r2, #0x7600 @ 'v' << 8
orr r2, r2, #0x690000 @ 'i' << 16
orr r2, r2, #0x4C000000 @ 'L' << 24
cmp r3, r2
bne canary_fail @ If canary doesn't match, exit
@ Restore registers from upward stack
ldr r6, [sp, #-4]!
ldr r5, [sp, #-4]!
ldr r4, [sp, #-4]!
ldr lr, [sp, #-4]!
bx lr
canary_fail:
@ Canary was corrupted - print warning and exit
print_msg canary_warn, canary_warn_len
mov r0, #1 @ Exit with error code
mov r7, #SYS_exit
svc #0
@ Win function - target for exploitation
win:
@ Execute /bin/sh
ldr r0, =shell_path @ First argument: path to /bin/sh
mov r1, #0 @ Second argument: argv (NULL)
mov r2, #0 @ Third argument: envp (NULL)
mov r7, #11 @ SYS_execve
svc #0
@ If execve fails, exit
mov r0, #1
mov r7, #SYS_exit
svc #0
.section .data
shell_path: .asciz "/bin/sh"
@ Stack layout when bar is called (all growing UPWARD):
@
@ stack_bottom:
@ [main's lr - 4 bytes]
@ [main's buffer - 32 bytes]
@ [foo's lr - 4 bytes]
@ [foo's r11 - 4 bytes]
@ [foo's username buffer - 64 bytes]
@ [foo's password buffer - 32 bytes] <- bar reads HERE
@ [foo's return addr - 4 bytes] <- TARGET at offset +32
@ [bar's lr - 4 bytes]
@ [bar's r4-r6 - 12 bytes]
@ [bar's canary - 4 bytes] <- "Live" canary value
@ sp points here ->
@
@ Overflow distance: 32 bytes to overwrite return address
@ Note: Overflowing further will corrupt the canary and cause warning + exit
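@ In this variant the overflow goes through read_line, which stops at the
@ first 0x0A byte and then writes a NUL terminator one byte past what was
@ read, so a payload must avoid embedded newlines (unlike the raw SYS_read
@ version of the challenge).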
|
liva/minimal-linux | 1,743 | arch/x86/boot/pmjump.S | /* ----------------------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */
/*
* The actual transition into protected mode
*/
#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <linux/linkage.h>
.text
.code16
/*
* void protected_mode_jump(u32 entrypoint, u32 bootparams);
*/
GLOBAL(protected_mode_jump)
movl %edx, %esi # Pointer to boot_params table
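	# The next four instructions patch the ljmpl operand at label 2 below:
	# the real-mode segment base (%cs << 4) is added to the stored offset of
	# in_pm32 so that the far jump targets its linear address.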
xorl %ebx, %ebx
movw %cs, %bx
shll $4, %ebx
addl %ebx, 2f
jmp 1f # Short jump to serialize on 386/486
1:
movw $__BOOT_DS, %cx
movw $__BOOT_TSS, %di
movl %cr0, %edx
orb $X86_CR0_PE, %dl # Protected mode
movl %edx, %cr0
# Transition to 32-bit mode
.byte 0x66, 0xea # ljmpl opcode
2: .long in_pm32 # offset
.word __BOOT_CS # segment
ENDPROC(protected_mode_jump)
.code32
.section ".text32","ax"
GLOBAL(in_pm32)
# Set up data segments for flat 32-bit mode
movl %ecx, %ds
movl %ecx, %es
movl %ecx, %fs
movl %ecx, %gs
movl %ecx, %ss
# The 32-bit code sets up its own stack, but this way we do have
# a valid stack if some debugging hack wants to use it.
addl %ebx, %esp
# Set up TR to make Intel VT happy
ltr %di
# Clear registers to allow for future extensions to the
# 32-bit boot protocol
xorl %ecx, %ecx
xorl %edx, %edx
xorl %ebx, %ebx
xorl %ebp, %ebp
xorl %edi, %edi
# Set up LDTR to make Intel VT happy
lldt %cx
jmpl *%eax # Jump to the 32-bit entrypoint
ENDPROC(in_pm32)
|
liva/minimal-linux | 1,052 | arch/x86/boot/copy.S | /* ----------------------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */
#include <linux/linkage.h>
/*
* Memory copy routines
*/
.code16
.text
GLOBAL(memcpy)
pushw %si
pushw %di
movw %ax, %di
movw %dx, %si
pushw %cx
shrw $2, %cx
rep; movsl
popw %cx
andw $3, %cx
rep; movsb
popw %di
popw %si
retl
ENDPROC(memcpy)
GLOBAL(memset)
pushw %di
movw %ax, %di
movzbl %dl, %eax
imull $0x01010101,%eax
pushw %cx
shrw $2, %cx
rep; stosl
popw %cx
andw $3, %cx
rep; stosb
popw %di
retl
ENDPROC(memset)
GLOBAL(copy_from_fs)
pushw %ds
pushw %fs
popw %ds
calll memcpy
popw %ds
retl
ENDPROC(copy_from_fs)
GLOBAL(copy_to_fs)
pushw %es
pushw %fs
popw %es
calll memcpy
popw %es
retl
ENDPROC(copy_to_fs)
|
liva/minimal-linux | 1,588 | arch/x86/boot/bioscall.S | /* -----------------------------------------------------------------------
*
* Copyright 2009-2014 Intel Corporation; author H. Peter Anvin
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
* option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* "Glove box" for BIOS calls. Avoids the constant problems with BIOSes
 * touching registers they shouldn't.
*/
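/*
 * Typical use from the C side of the setup code looks roughly like this
 * (sketch; struct biosregs, initregs() and intcall() are declared in boot.h).
 * Here int 0x15/ah=0x88 queries the extended memory size, returned in oreg.ax:
 *
 *	struct biosregs ireg, oreg;
 *	initregs(&ireg);
 *	ireg.ah = 0x88;
 *	intcall(0x15, &ireg, &oreg);
 */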
.code16
.section ".inittext","ax"
.globl intcall
.type intcall, @function
intcall:
/* Self-modify the INT instruction. Ugly, but works. */
cmpb %al, 3f
je 1f
movb %al, 3f
jmp 1f /* Synchronize pipeline */
1:
/* Save state */
pushfl
pushw %fs
pushw %gs
pushal
/* Copy input state to stack frame */
subw $44, %sp
movw %dx, %si
movw %sp, %di
movw $11, %cx
rep; movsd
/* Pop full state from the stack */
popal
popw %gs
popw %fs
popw %es
popw %ds
popfl
/* Actual INT */
.byte 0xcd /* INT opcode */
3: .byte 0
/* Push full state to the stack */
pushfl
pushw %ds
pushw %es
pushw %fs
pushw %gs
pushal
/* Re-establish C environment invariants */
cld
movzwl %sp, %esp
movw %cs, %ax
movw %ax, %ds
movw %ax, %es
/* Copy output state from stack frame */
movw 68(%esp), %di /* Original %cx == 3rd argument */
andw %di, %di
jz 4f
movw %sp, %si
movw $11, %cx
rep; movsd
4: addw $44, %sp
/* Restore state and return */
popal
popw %gs
popw %fs
popfl
retl
.size intcall, .-intcall
|
liva/minimal-linux | 17,018 | arch/x86/boot/header.S | /* SPDX-License-Identifier: GPL-2.0 */
/*
* header.S
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Based on bootsect.S and setup.S
* modified by more people than can be counted
*
* Rewritten as a common file by H. Peter Anvin (Apr 2007)
*
* BIG FAT NOTE: We're in real mode using 64k segments. Therefore segment
* addresses must be multiplied by 16 to obtain their respective linear
* addresses. To avoid confusion, linear addresses are written using leading
* hex while segment addresses are written as segment:offset.
*
*/
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/page_types.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include "boot.h"
#include "voffset.h"
#include "zoffset.h"
BOOTSEG = 0x07C0 /* original address of boot-sector */
SYSSEG = 0x1000 /* historical load address >> 4 */
#ifndef SVGA_MODE
#define SVGA_MODE ASK_VGA
#endif
#ifndef ROOT_RDONLY
#define ROOT_RDONLY 1
#endif
.code16
.section ".bstext", "ax"
.global bootsect_start
bootsect_start:
#ifdef CONFIG_EFI_STUB
# "MZ", MS-DOS header
.byte 0x4d
.byte 0x5a
#endif
# Normalize the start address
ljmp $BOOTSEG, $start2
start2:
movw %cs, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
xorw %sp, %sp
sti
cld
movw $bugger_off_msg, %si
msg_loop:
lodsb
andb %al, %al
jz bs_die
movb $0xe, %ah
movw $7, %bx
int $0x10
jmp msg_loop
bs_die:
# Allow the user to press a key, then reboot
xorw %ax, %ax
int $0x16
int $0x19
# int 0x19 should never return. In case it does anyway,
# invoke the BIOS reset code...
ljmp $0xf000,$0xfff0
#ifdef CONFIG_EFI_STUB
.org 0x3c
#
# Offset to the PE header.
#
.long pe_header
#endif /* CONFIG_EFI_STUB */
.section ".bsdata", "a"
bugger_off_msg:
.ascii "Use a boot loader.\r\n"
.ascii "\n"
.ascii "Remove disk and press any key to reboot...\r\n"
.byte 0
#ifdef CONFIG_EFI_STUB
pe_header:
.ascii "PE"
.word 0
coff_header:
#ifdef CONFIG_X86_32
.word 0x14c # i386
#else
.word 0x8664 # x86-64
#endif
.word 4 # nr_sections
.long 0 # TimeDateStamp
.long 0 # PointerToSymbolTable
.long 1 # NumberOfSymbols
.word section_table - optional_header # SizeOfOptionalHeader
#ifdef CONFIG_X86_32
.word 0x306 # Characteristics.
# IMAGE_FILE_32BIT_MACHINE |
# IMAGE_FILE_DEBUG_STRIPPED |
# IMAGE_FILE_EXECUTABLE_IMAGE |
# IMAGE_FILE_LINE_NUMS_STRIPPED
#else
.word 0x206 # Characteristics
# IMAGE_FILE_DEBUG_STRIPPED |
# IMAGE_FILE_EXECUTABLE_IMAGE |
# IMAGE_FILE_LINE_NUMS_STRIPPED
#endif
optional_header:
#ifdef CONFIG_X86_32
.word 0x10b # PE32 format
#else
.word 0x20b # PE32+ format
#endif
.byte 0x02 # MajorLinkerVersion
.byte 0x14 # MinorLinkerVersion
# Filled in by build.c
.long 0 # SizeOfCode
.long 0 # SizeOfInitializedData
.long 0 # SizeOfUninitializedData
# Filled in by build.c
.long 0x0000 # AddressOfEntryPoint
.long 0x0200 # BaseOfCode
#ifdef CONFIG_X86_32
.long 0 # data
#endif
extra_header_fields:
#ifdef CONFIG_X86_32
.long 0 # ImageBase
#else
.quad 0 # ImageBase
#endif
.long 0x20 # SectionAlignment
.long 0x20 # FileAlignment
.word 0 # MajorOperatingSystemVersion
.word 0 # MinorOperatingSystemVersion
.word 0 # MajorImageVersion
.word 0 # MinorImageVersion
.word 0 # MajorSubsystemVersion
.word 0 # MinorSubsystemVersion
.long 0 # Win32VersionValue
#
# The size of the bzImage is written in tools/build.c
#
.long 0 # SizeOfImage
.long 0x200 # SizeOfHeaders
.long 0 # CheckSum
.word 0xa # Subsystem (EFI application)
.word 0 # DllCharacteristics
#ifdef CONFIG_X86_32
.long 0 # SizeOfStackReserve
.long 0 # SizeOfStackCommit
.long 0 # SizeOfHeapReserve
.long 0 # SizeOfHeapCommit
#else
.quad 0 # SizeOfStackReserve
.quad 0 # SizeOfStackCommit
.quad 0 # SizeOfHeapReserve
.quad 0 # SizeOfHeapCommit
#endif
.long 0 # LoaderFlags
.long 0x6 # NumberOfRvaAndSizes
.quad 0 # ExportTable
.quad 0 # ImportTable
.quad 0 # ResourceTable
.quad 0 # ExceptionTable
.quad 0 # CertificationTable
.quad 0 # BaseRelocationTable
# Section table
section_table:
#
# The offset & size fields are filled in by build.c.
#
.ascii ".setup"
.byte 0
.byte 0
.long 0
.long 0x0 # startup_{32,64}
.long 0 # Size of initialized data
# on disk
.long 0x0 # startup_{32,64}
.long 0 # PointerToRelocations
.long 0 # PointerToLineNumbers
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long 0x60500020 # Characteristics (section flags)
#
# The EFI application loader requires a relocation section
# because EFI applications must be relocatable. The .reloc
# offset & size fields are filled in by build.c.
#
.ascii ".reloc"
.byte 0
.byte 0
.long 0
.long 0
.long 0 # SizeOfRawData
.long 0 # PointerToRawData
.long 0 # PointerToRelocations
.long 0 # PointerToLineNumbers
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long 0x42100040 # Characteristics (section flags)
#
# The offset & size fields are filled in by build.c.
#
.ascii ".text"
.byte 0
.byte 0
.byte 0
.long 0
.long 0x0 # startup_{32,64}
.long 0 # Size of initialized data
# on disk
.long 0x0 # startup_{32,64}
.long 0 # PointerToRelocations
.long 0 # PointerToLineNumbers
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long 0x60500020 # Characteristics (section flags)
#
# The offset & size fields are filled in by build.c.
#
.ascii ".bss"
.byte 0
.byte 0
.byte 0
.byte 0
.long 0
.long 0x0
.long 0 # Size of initialized data
# on disk
.long 0x0
.long 0 # PointerToRelocations
.long 0 # PointerToLineNumbers
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long 0xc8000080 # Characteristics (section flags)
#endif /* CONFIG_EFI_STUB */
# Kernel attributes; used by setup. This is part 1 of the
# header, from the old boot sector.
.section ".header", "a"
.globl sentinel
sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */
.globl hdr
hdr:
setup_sects: .byte 0 /* Filled in by build.c */
root_flags: .word ROOT_RDONLY
syssize: .long 0 /* Filled in by build.c */
ram_size: .word 0 /* Obsolete */
vid_mode: .word SVGA_MODE
root_dev: .word 0 /* Filled in by build.c */
boot_flag: .word 0xAA55
# offset 512, entry point
.globl _start
_start:
# Explicitly enter this as bytes, or the assembler
# tries to generate a 3-byte jump here, which causes
# everything else to push off to the wrong offset.
.byte 0xeb # short (2-byte) jump
.byte start_of_setup-1f
1:
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
.word 0x020d # header version number (>= 0x0105)
					# or else old loadlin-1.5 will fail
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
start_sys_seg: .word SYSSEG # obsolete and meaningless, but just
# in case something decided to "use" it
.word kernel_version-512 # pointing to kernel version string
# above section of header is compatible
# with loadlin-1.5 (header v1.5). Don't
# change it.
type_of_loader: .byte 0 # 0 means ancient bootloader, newer
# bootloaders know to change this.
# See Documentation/x86/boot.txt for
# assigned ids
# flags, unused bits must be zero (RFU) bit within loadflags
loadflags:
.byte LOADED_HIGH # The kernel is to be loaded high
setup_move_size: .word 0x8000 # size to move, when setup is not
# loaded at 0x90000. We will move setup
# to 0x90000 then just before jumping
# into the kernel. However, only the
# loader knows how much data behind
# us also needs to be loaded.
code32_start: # here loaders can put a different
# start address for 32-bit code.
.long 0x100000 # 0x100000 = default for big kernel
ramdisk_image: .long 0 # address of loaded ramdisk image
# Here the loader puts the 32-bit
# address where it loaded the image.
# This only will be read by the kernel.
ramdisk_size: .long 0 # its size in bytes
bootsect_kludge:
.long 0 # obsolete
heap_end_ptr: .word _end+STACK_SIZE-512
# (Header version 0x0201 or later)
# space from here (exclusive) down to
# end of setup code can be used by setup
# for local heap purposes.
ext_loader_ver:
.byte 0 # Extended boot loader version
ext_loader_type:
.byte 0 # Extended boot loader type
cmd_line_ptr: .long 0 # (Header version 0x0202 or later)
# If nonzero, a 32-bit pointer
# to the kernel command line.
# The command line should be
# located between the start of
# setup and the end of low
# memory (0xa0000), or it may
# get overwritten before it
# gets read. If this field is
# used, there is no longer
# anything magical about the
# 0x90000 segment; the setup
# can be located anywhere in
# low memory 0x10000 or higher.
initrd_addr_max: .long 0x7fffffff
# (Header version 0x0203 or later)
# The highest safe address for
# the contents of an initrd
# The current kernel allows up to 4 GB,
# but leave it at 2 GB to avoid
# possible bootloader bugs.
kernel_alignment: .long CONFIG_PHYSICAL_ALIGN #physical addr alignment
#required for protected mode
#kernel
#ifdef CONFIG_RELOCATABLE
relocatable_kernel: .byte 1
#else
relocatable_kernel: .byte 0
#endif
min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment
xloadflags:
#ifdef CONFIG_X86_64
# define XLF0 XLF_KERNEL_64 /* 64-bit kernel */
#else
# define XLF0 0
#endif
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
/* kernel/boot_param/ramdisk could be loaded above 4g */
# define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
#else
# define XLF1 0
#endif
#ifdef CONFIG_EFI_STUB
# ifdef CONFIG_EFI_MIXED
# define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64)
# else
# ifdef CONFIG_X86_64
# define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */
# else
# define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */
# endif
# endif
#else
# define XLF23 0
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC_CORE)
# define XLF4 XLF_EFI_KEXEC
#else
# define XLF4 0
#endif
.word XLF0 | XLF1 | XLF23 | XLF4
cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
#added with boot protocol
#version 2.06
hardware_subarch: .long 0 # subarchitecture, added with 2.07
# default to 0 for normal x86 PC
hardware_subarch_data: .quad 0
payload_offset: .long ZO_input_data
payload_length: .long ZO_z_input_len
setup_data: .quad 0 # 64-bit physical pointer to
# single linked list of
# struct setup_data
pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
#
# Getting to provably safe in-place decompression is hard. Worst case
# behaviours need to be analyzed. Here let's take the decompression of
# a gzip-compressed kernel as an example, to illustrate it:
#
# The file layout of gzip compressed kernel is:
#
# magic[2]
# method[1]
# flags[1]
# timestamp[4]
# extraflags[1]
# os[1]
# compressed data blocks[N]
# crc[4] orig_len[4]
#
# ... resulting in +18 bytes overhead of uncompressed data.
#
# (For more information, please refer to RFC 1951 and RFC 1952.)
#
# Files divided into blocks
# 1 bit (last block flag)
# 2 bits (block type)
#
# 1 block occurs every 32K -1 bytes or when 50% compression
# has been achieved. The smallest block type encoding is always used.
#
# stored:
# 32 bits length in bytes.
#
# fixed:
# magic fixed tree.
# symbols.
#
# dynamic:
# dynamic tree encoding.
# symbols.
#
#
# The buffer for decompression in place is the length of the uncompressed
# data, plus a small amount extra to keep the algorithm safe. The
# compressed data is placed at the end of the buffer. The output pointer
# is placed at the start of the buffer and the input pointer is placed
# where the compressed data starts. Problems will occur when the output
# pointer overruns the input pointer.
#
# The output pointer can only overrun the input pointer if the input
# pointer is moving faster than the output pointer. A condition only
# triggered by data whose compressed form is larger than the uncompressed
# form.
#
# The worst case at the block level is a growth of the compressed data
# of 5 bytes per 32767 bytes.
#
# The worst case internal to a compressed block is very hard to figure.
# The worst case can at least be bounded by having one bit that represents
# 32764 bytes and then all of the rest of the bytes representing the very
# very last byte.
#
# All of which is enough to compute an amount of extra data that is required
# to be safe. To avoid problems at the block level allocating 5 extra bytes
# per 32767 bytes of data is sufficient. To avoid problems internal to a
# block adding an extra 32767 bytes (the worst case uncompressed block size)
# is sufficient, to ensure that in the worst case the decompressed data for
# block will stop the byte before the compressed data for a block begins.
# To avoid problems with the compressed data's meta information an extra 18
# bytes are needed. Leading to the formula:
#
# extra_bytes = (uncompressed_size >> 12) + 32768 + 18
#
# Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
# Adding 32768 instead of 32767 just makes for round numbers.
#
# The above analysis is for decompressing a gzip-compressed kernel only. Up to
# now, 6 different decompressors are supported altogether. Among them,
# xz stores data in chunks and has a maximum chunk size of 64K. Hence the safety
# margin should be updated to cover all decompressors so that we don't
# need to deal with each of them separately. Please check
# the description in lib/decompressor_xxx.c for specific information.
#
# extra_bytes = (uncompressed_size >> 12) + 65536 + 128
#
# LZ4 is even worse: data that cannot be further compressed grows by 0.4%,
# or one byte per 256 bytes. OTOH, we can safely get rid of the +128 as
# the size-dependent part now grows so fast.
#
# extra_bytes = (uncompressed_size >> 8) + 65536
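#
# Worked example (illustrative): for a 20 MB uncompressed kernel,
# extra_bytes = (20971520 >> 8) + 65536 = 81920 + 65536 = 147456, i.e. roughly
# 144 KB of slack between the output and input pointers.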
#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536)
#if ZO_z_output_len > ZO_z_input_len
# define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \
ZO_z_input_len)
#else
# define ZO_z_extract_offset ZO_z_extra_bytes
#endif
/*
* The extract_offset has to be bigger than ZO head section. Otherwise when
* the head code is running to move ZO to the end of the buffer, it will
* overwrite the head code itself.
*/
#if (ZO__ehead - ZO_startup_32) > ZO_z_extract_offset
# define ZO_z_min_extract_offset ((ZO__ehead - ZO_startup_32 + 4095) & ~4095)
#else
# define ZO_z_min_extract_offset ((ZO_z_extract_offset + 4095) & ~4095)
#endif
#define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_min_extract_offset)
#define VO_INIT_SIZE (VO__end - VO__text)
#if ZO_INIT_SIZE > VO_INIT_SIZE
# define INIT_SIZE ZO_INIT_SIZE
#else
# define INIT_SIZE VO_INIT_SIZE
#endif
init_size: .long INIT_SIZE # kernel initialization size
handover_offset: .long 0 # Filled in by build.c
# End of setup header #####################################################
.section ".entrytext", "ax"
start_of_setup:
# Force %es = %ds
movw %ds, %ax
movw %ax, %es
cld
# Apparently some ancient versions of LILO invoked the kernel with %ss != %ds,
# which happened to work by accident for the old code. Recalculate the stack
# pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the
# stack behind its own code, so we can't blindly put it directly past the heap.
movw %ss, %dx
cmpw %ax, %dx # %ds == %ss?
movw %sp, %dx
je 2f # -> assume %sp is reasonably set
# Invalid %ss, make up a new stack
movw $_end, %dx
testb $CAN_USE_HEAP, loadflags
jz 1f
movw heap_end_ptr, %dx
1: addw $STACK_SIZE, %dx
jnc 2f
xorw %dx, %dx # Prevent wraparound
2: # Now %dx should point to the end of our stack space
andw $~3, %dx # dword align (might as well...)
jnz 3f
movw $0xfffc, %dx # Make sure we're not zero
3: movw %ax, %ss
movzwl %dx, %esp # Clear upper half of %esp
sti # Now we should have a working stack
# We will have entered with %cs = %ds+0x20, normalize %cs so
# it is on par with the other segments.
pushw %ds
pushw $6f
lretw
6:
# Check signature at end of setup
cmpl $0x5a5aaa55, setup_sig
jne setup_bad
# Zero the bss
movw $__bss_start, %di
movw $_end+3, %cx
xorl %eax, %eax
subw %di, %cx
shrw $2, %cx
rep; stosl
# Jump to C code (should not return)
calll main
# Setup corrupt somehow...
setup_bad:
movl $setup_corrupt, %eax
calll puts
# Fall through...
.globl die
.type die, @function
die:
hlt
jmp die
.size die, .-die
.section ".initdata", "a"
setup_corrupt:
.byte 7
.string "No setup signature found...\n"
|
liva/minimal-linux | 3,586 | arch/x86/lib/memmove_64.S | /* SPDX-License-Identifier: GPL-2.0 */
/*
* Normally compiler builtins are used, but sometimes the compiler calls out
* of line code. Based on asm-i386/string.h.
*
* This assembly file is re-written from memmove_64.c file.
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
#undef memmove
/*
* Implement memmove(). This can handle overlap between src and dst.
*
* Input:
* rdi: dest
* rsi: src
* rdx: count
*
* Output:
* rax: dest
*/
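/*
 * Overlap handling (illustrative): with dest = 0x1010, src = 0x1000 and
 * count = 0x40 the source overlaps the start of the destination, so the copy
 * runs backward from the tails; when src >= dest, or when the regions do not
 * overlap at all, the forward path is taken.
 */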
.weak memmove
ENTRY(memmove)
ENTRY(__memmove)
	/* Copies of 32 bytes or more are handled by the loops below */
mov %rdi, %rax
cmp $0x20, %rdx
jb 1f
/* Decide forward/backward copy mode */
cmp %rdi, %rsi
jge .Lmemmove_begin_forward
mov %rsi, %r8
add %rdx, %r8
cmp %rdi, %r8
jg 2f
.Lmemmove_begin_forward:
ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
/*
	 * The movsq instruction has a high startup latency,
	 * so we handle small sizes with general-purpose registers.
*/
cmp $680, %rdx
jb 3f
/*
	 * The movsq instruction is only good for the aligned case.
*/
cmpb %dil, %sil
je 4f
3:
sub $0x20, %rdx
/*
* We gobble 32 bytes forward in each loop.
*/
5:
sub $0x20, %rdx
movq 0*8(%rsi), %r11
movq 1*8(%rsi), %r10
movq 2*8(%rsi), %r9
movq 3*8(%rsi), %r8
leaq 4*8(%rsi), %rsi
movq %r11, 0*8(%rdi)
movq %r10, 1*8(%rdi)
movq %r9, 2*8(%rdi)
movq %r8, 3*8(%rdi)
leaq 4*8(%rdi), %rdi
jae 5b
addq $0x20, %rdx
jmp 1f
/*
* Handle data forward by movsq.
*/
.p2align 4
4:
movq %rdx, %rcx
movq -8(%rsi, %rdx), %r11
lea -8(%rdi, %rdx), %r10
shrq $3, %rcx
rep movsq
movq %r11, (%r10)
jmp 13f
.Lmemmove_end_forward:
/*
* Handle data backward by movsq.
*/
.p2align 4
7:
movq %rdx, %rcx
movq (%rsi), %r11
movq %rdi, %r10
leaq -8(%rsi, %rdx), %rsi
leaq -8(%rdi, %rdx), %rdi
shrq $3, %rcx
std
rep movsq
cld
movq %r11, (%r10)
jmp 13f
/*
* Start to prepare for backward copy.
*/
.p2align 4
2:
cmp $680, %rdx
jb 6f
cmp %dil, %sil
je 7b
6:
/*
* Calculate copy position to tail.
*/
addq %rdx, %rsi
addq %rdx, %rdi
subq $0x20, %rdx
/*
* We gobble 32 bytes backward in each loop.
*/
8:
subq $0x20, %rdx
movq -1*8(%rsi), %r11
movq -2*8(%rsi), %r10
movq -3*8(%rsi), %r9
movq -4*8(%rsi), %r8
leaq -4*8(%rsi), %rsi
movq %r11, -1*8(%rdi)
movq %r10, -2*8(%rdi)
movq %r9, -3*8(%rdi)
movq %r8, -4*8(%rdi)
leaq -4*8(%rdi), %rdi
jae 8b
/*
* Calculate copy position to head.
*/
addq $0x20, %rdx
subq %rdx, %rsi
subq %rdx, %rdi
1:
cmpq $16, %rdx
jb 9f
/*
* Move data from 16 bytes to 31 bytes.
*/
movq 0*8(%rsi), %r11
movq 1*8(%rsi), %r10
movq -2*8(%rsi, %rdx), %r9
movq -1*8(%rsi, %rdx), %r8
movq %r11, 0*8(%rdi)
movq %r10, 1*8(%rdi)
movq %r9, -2*8(%rdi, %rdx)
movq %r8, -1*8(%rdi, %rdx)
jmp 13f
.p2align 4
9:
cmpq $8, %rdx
jb 10f
/*
* Move data from 8 bytes to 15 bytes.
*/
movq 0*8(%rsi), %r11
movq -1*8(%rsi, %rdx), %r10
movq %r11, 0*8(%rdi)
movq %r10, -1*8(%rdi, %rdx)
jmp 13f
10:
cmpq $4, %rdx
jb 11f
/*
* Move data from 4 bytes to 7 bytes.
*/
movl (%rsi), %r11d
movl -4(%rsi, %rdx), %r10d
movl %r11d, (%rdi)
movl %r10d, -4(%rdi, %rdx)
jmp 13f
11:
cmp $2, %rdx
jb 12f
/*
* Move data from 2 bytes to 3 bytes.
*/
movw (%rsi), %r11w
movw -2(%rsi, %rdx), %r10w
movw %r11w, (%rdi)
movw %r10w, -2(%rdi, %rdx)
jmp 13f
12:
cmp $1, %rdx
jb 13f
/*
* Move data for 1 byte.
*/
movb (%rsi), %r11b
movb %r11b, (%rdi)
13:
retq
ENDPROC(__memmove)
ENDPROC(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)
|
liva/minimal-linux | 3,746 | arch/x86/lib/csum-copy_64.S | /*
* Copyright 2002, 2003 Andi Kleen, SuSE Labs.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details. No warranty for anything given at all.
*/
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
/*
* Checksum copy with exception handling.
* On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
* destination is zeroed.
*
* Input
* rdi source
* rsi destination
* edx len (32bit)
* ecx sum (32bit)
* r8 src_err_ptr (int)
* r9 dst_err_ptr (int)
*
* Output
* eax 64bit sum. undefined in case of exception.
*
* Wrappers need to take care of valid exception sum and zeroing.
* They also should align source or destination to 8 bytes.
*/
.macro source
10:
_ASM_EXTABLE(10b, .Lbad_source)
.endm
.macro dest
20:
_ASM_EXTABLE(20b, .Lbad_dest)
.endm
.macro ignore L=.Lignore
30:
_ASM_EXTABLE(30b, \L)
.endm
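/*
 * Each `source`/`dest` use below plants a numbered local label in front of the
 * following memory access and records an exception-table entry that redirects
 * a faulting access to .Lbad_source/.Lbad_dest, where -EFAULT is written
 * through the saved src_err_ptr/dst_err_ptr.
 */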
ENTRY(csum_partial_copy_generic)
cmpl $3*64, %edx
jle .Lignore
.Lignore:
subq $7*8, %rsp
movq %rbx, 2*8(%rsp)
movq %r12, 3*8(%rsp)
movq %r14, 4*8(%rsp)
movq %r13, 5*8(%rsp)
movq %r15, 6*8(%rsp)
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
movl %ecx, %eax
movl %edx, %ecx
xorl %r9d, %r9d
movq %rcx, %r12
shrq $6, %r12
jz .Lhandle_tail /* < 64 */
clc
/* main loop. clear in 64 byte blocks */
/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
/* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
.p2align 4
.Lloop:
source
movq (%rdi), %rbx
source
movq 8(%rdi), %r8
source
movq 16(%rdi), %r11
source
movq 24(%rdi), %rdx
source
movq 32(%rdi), %r10
source
movq 40(%rdi), %r15
source
movq 48(%rdi), %r14
source
movq 56(%rdi), %r13
ignore 2f
prefetcht0 5*64(%rdi)
2:
adcq %rbx, %rax
adcq %r8, %rax
adcq %r11, %rax
adcq %rdx, %rax
adcq %r10, %rax
adcq %r15, %rax
adcq %r14, %rax
adcq %r13, %rax
decl %r12d
dest
movq %rbx, (%rsi)
dest
movq %r8, 8(%rsi)
dest
movq %r11, 16(%rsi)
dest
movq %rdx, 24(%rsi)
dest
movq %r10, 32(%rsi)
dest
movq %r15, 40(%rsi)
dest
movq %r14, 48(%rsi)
dest
movq %r13, 56(%rsi)
3:
leaq 64(%rdi), %rdi
leaq 64(%rsi), %rsi
jnz .Lloop
adcq %r9, %rax
/* do last up to 56 bytes */
.Lhandle_tail:
/* ecx: count */
movl %ecx, %r10d
andl $63, %ecx
shrl $3, %ecx
jz .Lfold
clc
.p2align 4
.Lloop_8:
source
movq (%rdi), %rbx
adcq %rbx, %rax
decl %ecx
dest
movq %rbx, (%rsi)
leaq 8(%rsi), %rsi /* preserve carry */
leaq 8(%rdi), %rdi
jnz .Lloop_8
adcq %r9, %rax /* add in carry */
.Lfold:
/* reduce checksum to 32bits */
movl %eax, %ebx
shrq $32, %rax
addl %ebx, %eax
adcl %r9d, %eax
/* do last up to 6 bytes */
.Lhandle_7:
movl %r10d, %ecx
andl $7, %ecx
shrl $1, %ecx
jz .Lhandle_1
movl $2, %edx
xorl %ebx, %ebx
clc
.p2align 4
.Lloop_1:
source
movw (%rdi), %bx
adcl %ebx, %eax
decl %ecx
dest
movw %bx, (%rsi)
leaq 2(%rdi), %rdi
leaq 2(%rsi), %rsi
jnz .Lloop_1
adcl %r9d, %eax /* add in carry */
/* handle last odd byte */
.Lhandle_1:
testb $1, %r10b
jz .Lende
xorl %ebx, %ebx
source
movb (%rdi), %bl
dest
movb %bl, (%rsi)
addl %ebx, %eax
adcl %r9d, %eax /* carry */
.Lende:
movq 2*8(%rsp), %rbx
movq 3*8(%rsp), %r12
movq 4*8(%rsp), %r14
movq 5*8(%rsp), %r13
movq 6*8(%rsp), %r15
addq $7*8, %rsp
ret
/* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source:
movq (%rsp), %rax
testq %rax, %rax
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
.Lbad_dest:
movq 8(%rsp), %rax
testq %rax, %rax
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
ENDPROC(csum_partial_copy_generic)
|
liva/minimal-linux | 1,670 | arch/x86/lib/msr-reg.S | /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/msr.h>
#ifdef CONFIG_X86_64
/*
* int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
*
* reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
*
*/
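/*
 * Illustrative caller (sketch, using only the register layout above):
 *
 *	u32 regs[8] = { 0 };
 *	regs[1] = msr;
 *	if (!rdmsr_safe_regs(regs))
 *		val = ((u64)regs[2] << 32) | regs[0];
 *
 * regs[1] is %ecx (the MSR index); on success the value is returned in
 * %edx:%eax, i.e. regs[2]:regs[0], and the function itself returns 0.
 */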
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
pushq %rbx
pushq %r12
movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax
movl 4(%rdi), %ecx
movl 8(%rdi), %edx
movl 12(%rdi), %ebx
movl 20(%rdi), %r12d
movl 24(%rdi), %esi
movl 28(%rdi), %edi
1: \op
2: movl %eax, (%r10)
movl %r11d, %eax /* Return value */
movl %ecx, 4(%r10)
movl %edx, 8(%r10)
movl %ebx, 12(%r10)
movl %r12d, 20(%r10)
movl %esi, 24(%r10)
movl %edi, 28(%r10)
popq %r12
popq %rbx
ret
3:
movl $-EIO, %r11d
jmp 2b
_ASM_EXTABLE(1b, 3b)
ENDPROC(\op\()_safe_regs)
.endm
#else /* X86_32 */
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
pushl $0 /* Return value */
pushl %eax
movl 4(%eax), %ecx
movl 8(%eax), %edx
movl 12(%eax), %ebx
movl 20(%eax), %ebp
movl 24(%eax), %esi
movl 28(%eax), %edi
movl (%eax), %eax
1: \op
2: pushl %eax
movl 4(%esp), %eax
popl (%eax)
addl $4, %esp
movl %ecx, 4(%eax)
movl %edx, 8(%eax)
movl %ebx, 12(%eax)
movl %ebp, 20(%eax)
movl %esi, 24(%eax)
movl %edi, 28(%eax)
popl %eax
popl %edi
popl %esi
popl %ebp
popl %ebx
ret
3:
movl $-EIO, 4(%esp)
jmp 2b
_ASM_EXTABLE(1b, 3b)
ENDPROC(\op\()_safe_regs)
.endm
#endif
op_safe_regs rdmsr
op_safe_regs wrmsr
|
liva/minimal-linux | 7,850 | arch/x86/lib/copy_user_64.S | /*
* Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU Public License v2.
*
* Functions to copy from and to user space.
*/
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
/*
* copy_user_generic_unrolled - memory copy with exception handling.
* This version is for CPUs like P4 that don't have efficient micro
* code for rep movsq
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* eax uncopied bytes or 0 if successful.
*/
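/*
 * Structure: 64-byte unrolled blocks first, then 8-byte words, then single
 * bytes. The fixup stubs at labels 30/40/50 below convert the loop counters
 * back into "bytes not yet copied" before handing off to
 * copy_user_handle_tail.
 */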
ENTRY(copy_user_generic_unrolled)
ASM_STAC
cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
jz .L_copy_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
4: movq 3*8(%rsi),%r11
5: movq %r8,(%rdi)
6: movq %r9,1*8(%rdi)
7: movq %r10,2*8(%rdi)
8: movq %r11,3*8(%rdi)
9: movq 4*8(%rsi),%r8
10: movq 5*8(%rsi),%r9
11: movq 6*8(%rsi),%r10
12: movq 7*8(%rsi),%r11
13: movq %r8,4*8(%rdi)
14: movq %r9,5*8(%rdi)
15: movq %r10,6*8(%rdi)
16: movq %r11,7*8(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
.L_copy_short_string:
movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
18: movq (%rsi),%r8
19: movq %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
jnz 18b
20: andl %edx,%edx
jz 23f
movl %edx,%ecx
21: movb (%rsi),%al
22: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz 21b
23: xor %eax,%eax
ASM_CLAC
ret
.section .fixup,"ax"
30: shll $6,%ecx
addl %ecx,%edx
jmp 60f
40: leal (%rdx,%rcx,8),%edx
jmp 60f
50: movl %ecx,%edx
60: jmp copy_user_handle_tail /* ecx is zerorest also */
.previous
_ASM_EXTABLE(1b,30b)
_ASM_EXTABLE(2b,30b)
_ASM_EXTABLE(3b,30b)
_ASM_EXTABLE(4b,30b)
_ASM_EXTABLE(5b,30b)
_ASM_EXTABLE(6b,30b)
_ASM_EXTABLE(7b,30b)
_ASM_EXTABLE(8b,30b)
_ASM_EXTABLE(9b,30b)
_ASM_EXTABLE(10b,30b)
_ASM_EXTABLE(11b,30b)
_ASM_EXTABLE(12b,30b)
_ASM_EXTABLE(13b,30b)
_ASM_EXTABLE(14b,30b)
_ASM_EXTABLE(15b,30b)
_ASM_EXTABLE(16b,30b)
_ASM_EXTABLE(18b,40b)
_ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
*
* Only 4GB of copy is supported. This shouldn't be a problem
* because the kernel normally only writes from/to page sized chunks
* even if user space passed a longer buffer.
* And more would be dangerous because both Intel and AMD have
* errata with rep movsq > 4GB. If someone feels the need to fix
* this please consider this.
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_string)
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
1: rep
movsq
2: movl %edx,%ecx
3: rep
movsb
xorl %eax,%eax
ASM_CLAC
ret
.section .fixup,"ax"
11: leal (%rdx,%rcx,8),%ecx
12: movl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
/*
* Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
* It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
cmpl $64,%edx
	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
movl %edx,%ecx
1: rep
movsb
xorl %eax,%eax
ASM_CLAC
ret
.section .fixup,"ax"
12: movl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
* This will force destination out of cache for more performance.
*
* Note: Cached memory copy is used when destination or size is not
* naturally aligned. That is:
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*/
ENTRY(__copy_user_nocache)
ASM_STAC
/* If size is less than 8 bytes, go to 4-byte copy */
cmpl $8,%edx
jb .L_4b_nocache_copy_entry
/* If destination is not 8-byte aligned, "cache" copy to align it */
ALIGN_DESTINATION
/* Set 4x8-byte copy count and remainder */
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
jz .L_8b_nocache_copy_entry /* jump if count is 0 */
/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
4: movq 3*8(%rsi),%r11
5: movnti %r8,(%rdi)
6: movnti %r9,1*8(%rdi)
7: movnti %r10,2*8(%rdi)
8: movnti %r11,3*8(%rdi)
9: movq 4*8(%rsi),%r8
10: movq 5*8(%rsi),%r9
11: movq 6*8(%rsi),%r10
12: movq 7*8(%rsi),%r11
13: movnti %r8,4*8(%rdi)
14: movnti %r9,5*8(%rdi)
15: movnti %r10,6*8(%rdi)
16: movnti %r11,7*8(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
jnz .L_4x8b_nocache_copy_loop
/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz .L_4b_nocache_copy_entry /* jump if count is 0 */
/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20: movq (%rsi),%r8
21: movnti %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
jnz .L_8b_nocache_copy_loop
/* If no byte left, we're done */
.L_4b_nocache_copy_entry:
andl %edx,%edx
jz .L_finish_copy
/* If destination is not 4-byte aligned, go to byte copy: */
movl %edi,%ecx
andl $3,%ecx
jnz .L_1b_cache_copy_entry
/* Set 4-byte copy count (1 or 0) and remainder */
movl %edx,%ecx
andl $3,%edx
shrl $2,%ecx
jz .L_1b_cache_copy_entry /* jump if count is 0 */
/* Perform 4-byte nocache copy: */
30: movl (%rsi),%r8d
31: movnti %r8d,(%rdi)
leaq 4(%rsi),%rsi
leaq 4(%rdi),%rdi
/* If no bytes left, we're done: */
andl %edx,%edx
jz .L_finish_copy
/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
movl %edx,%ecx
.L_1b_cache_copy_loop:
40: movb (%rsi),%al
41: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .L_1b_cache_copy_loop
/* Finished copying; fence the prior stores */
.L_finish_copy:
xorl %eax,%eax
ASM_CLAC
sfence
ret
.section .fixup,"ax"
.L_fixup_4x8b_copy:
shll $6,%ecx
addl %ecx,%edx
jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
lea (%rdx,%rcx,8),%rdx
jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
lea (%rdx,%rcx,4),%rdx
jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
movl %ecx,%edx
.L_fixup_handle_tail:
sfence
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(20b,.L_fixup_8b_copy)
_ASM_EXTABLE(21b,.L_fixup_8b_copy)
_ASM_EXTABLE(30b,.L_fixup_4b_copy)
_ASM_EXTABLE(31b,.L_fixup_4b_copy)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
|
liva/minimal-linux | 1,253 | arch/x86/lib/retpoline.S | /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
.macro THUNK reg
.section .text.__x86.indirect_thunk
ENTRY(__x86_indirect_thunk_\reg)
CFI_STARTPROC
JMP_NOSPEC %\reg
CFI_ENDPROC
ENDPROC(__x86_indirect_thunk_\reg)
.endm
/*
* Despite being an assembler file we can't just use .irp here
* because __KSYM_DEPS__ only uses the C preprocessor and would
* only see one instance of "__x86_indirect_thunk_\reg" rather
* than one per register with the correct names. So we do it
* the simple and nasty way...
*/
#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
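/*
 * For example, on a 64-bit build GENERATE_THUNK(_ASM_AX) expands (ignoring
 * the CFI, kprobe and export annotations) to roughly:
 *
 *	.section .text.__x86.indirect_thunk
 *	ENTRY(__x86_indirect_thunk_rax)
 *		JMP_NOSPEC %rax
 *	ENDPROC(__x86_indirect_thunk_rax)
 *
 * On 32-bit builds _ASM_AX resolves to eax instead.
 */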
GENERATE_THUNK(_ASM_AX)
GENERATE_THUNK(_ASM_BX)
GENERATE_THUNK(_ASM_CX)
GENERATE_THUNK(_ASM_DX)
GENERATE_THUNK(_ASM_SI)
GENERATE_THUNK(_ASM_DI)
GENERATE_THUNK(_ASM_BP)
#ifdef CONFIG_64BIT
GENERATE_THUNK(r8)
GENERATE_THUNK(r9)
GENERATE_THUNK(r10)
GENERATE_THUNK(r11)
GENERATE_THUNK(r12)
GENERATE_THUNK(r13)
GENERATE_THUNK(r14)
GENERATE_THUNK(r15)
#endif
|
liva/minimal-linux | 2,772 | arch/x86/lib/atomic64_cx8_32.S | /*
* atomic64_t for 586+
*
* Copyright © 2010 Luca Barbieri
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
.macro read64 reg
movl %ebx, %eax
movl %ecx, %edx
/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
LOCK_PREFIX
cmpxchg8b (\reg)
.endm
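/*
 * Why this reads atomically: cmpxchg8b compares edx:eax with the 64-bit
 * memory operand and, on a match, stores ecx:ebx. Seeding eax:edx from
 * ebx:ecx means a successful compare writes back the value that is already
 * there, and a failed compare loads the current value into edx:eax, so
 * either way edx:eax ends up with an atomic snapshot and the stored value
 * is left unchanged.
 */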
ENTRY(atomic64_read_cx8)
read64 %ecx
ret
ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8)
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */
cmpxchg8b (%esi)
jne 1b
ret
ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8)
1:
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
ret
ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl %eax, %esi
movl %edx, %edi
movl %ecx, %ebp
read64 %ecx
1:
movl %eax, %ebx
movl %edx, %ecx
\ins\()l %esi, %ebx
\insc\()l %edi, %ecx
LOCK_PREFIX
cmpxchg8b (%ebp)
jne 1b
10:
movl %ebx, %eax
movl %ecx, %edx
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
ENDPROC(atomic64_\func\()_return_cx8)
.endm
addsub_return add add adc
addsub_return sub sub sbb
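/*
 * The two instantiations above generate atomic64_add_return_cx8 (add/adc
 * on the low/high halves) and atomic64_sub_return_cx8 (sub/sbb), each a
 * LOCK cmpxchg8b retry loop that returns the new 64-bit value in edx:eax.
 */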
.macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
pushl %ebx
read64 %esi
1:
movl %eax, %ebx
movl %edx, %ecx
\ins\()l $1, %ebx
\insc\()l $0, %ecx
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
10:
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
ret
ENDPROC(atomic64_\func\()_return_cx8)
.endm
incdec_return inc add adc
incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8)
pushl %ebx
read64 %esi
1:
movl %eax, %ebx
movl %edx, %ecx
subl $1, %ebx
sbb $0, %ecx
js 2f
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
2:
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
ret
ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8)
pushl %ebp
pushl %ebx
/* these just push these two parameters on the stack */
pushl %edi
pushl %ecx
movl %eax, %ebp
movl %edx, %edi
read64 %esi
1:
cmpl %eax, 0(%esp)
je 4f
2:
movl %eax, %ebx
movl %edx, %ecx
addl %ebp, %ebx
adcl %edi, %ecx
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
movl $1, %eax
3:
addl $8, %esp
popl %ebx
popl %ebp
ret
4:
cmpl %edx, 4(%esp)
jne 2b
xorl %eax, %eax
jmp 3b
ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8)
pushl %ebx
read64 %esi
1:
movl %eax, %ecx
orl %edx, %ecx
jz 3f
movl %eax, %ebx
xorl %ecx, %ecx
addl $1, %ebx
adcl %edx, %ecx
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
movl $1, %eax
3:
popl %ebx
ret
ENDPROC(atomic64_inc_not_zero_cx8)
|
liva/minimal-linux | 3,748 | arch/x86/lib/rwsem.S | /*
* x86 semaphore implementation.
*
* (C) Copyright 1999 Linus Torvalds
*
* Portions Copyright 1999 Red Hat, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
#ifdef CONFIG_X86_32
/*
* The semaphore operations have a special calling sequence that
* allows us to do a simpler in-line version of them. These routines
* need to convert that sequence back into the C sequence when
* there is contention on the semaphore.
*
* %eax contains the semaphore pointer on entry. Save the C-clobbered
* registers (%eax, %edx and %ecx) except %eax which is either a return
* value or just gets clobbered. Same is true for %edx so make sure GCC
* reloads it after the slow path, by making it hold a temporary, for
* example see ____down_write().
*/
#define save_common_regs \
pushl %ecx
#define restore_common_regs \
popl %ecx
/* Avoid uglifying the argument copying x86-64 needs to do. */
.macro movq src, dst
.endm
#else
/*
* x86-64 rwsem wrappers
*
* This interfaces the inline asm code to the slow-path
* C routines. We need to save the call-clobbered regs
* that the asm does not mark as clobbered, and move the
* argument from %rax to %rdi.
*
* NOTE! We don't need to save %rax, because the functions
* will always return the semaphore pointer in %rax (which
* is also the input argument to these helpers)
*
* The following can clobber %rdx because the asm clobbers it:
* call_rwsem_down_write_failed
* call_rwsem_wake
* but %rdi, %rsi, %rcx, %r8-r11 always need saving.
*/
#define save_common_regs \
pushq %rdi; \
pushq %rsi; \
pushq %rcx; \
pushq %r8; \
pushq %r9; \
pushq %r10; \
pushq %r11
#define restore_common_regs \
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rcx; \
popq %rsi; \
popq %rdi
#endif
/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
FRAME_BEGIN
save_common_regs
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_down_read_failed
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
FRAME_END
ret
ENDPROC(call_rwsem_down_read_failed)
ENTRY(call_rwsem_down_read_failed_killable)
FRAME_BEGIN
save_common_regs
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_down_read_failed_killable
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
FRAME_END
ret
ENDPROC(call_rwsem_down_read_failed_killable)
ENTRY(call_rwsem_down_write_failed)
FRAME_BEGIN
save_common_regs
movq %rax,%rdi
call rwsem_down_write_failed
restore_common_regs
FRAME_END
ret
ENDPROC(call_rwsem_down_write_failed)
ENTRY(call_rwsem_down_write_failed_killable)
FRAME_BEGIN
save_common_regs
movq %rax,%rdi
call rwsem_down_write_failed_killable
restore_common_regs
FRAME_END
ret
ENDPROC(call_rwsem_down_write_failed_killable)
ENTRY(call_rwsem_wake)
FRAME_BEGIN
/* do nothing if still outstanding active readers */
__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
jnz 1f
save_common_regs
movq %rax,%rdi
call rwsem_wake
restore_common_regs
1: FRAME_END
ret
ENDPROC(call_rwsem_wake)
ENTRY(call_rwsem_downgrade_wake)
FRAME_BEGIN
save_common_regs
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_downgrade_wake
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
FRAME_END
ret
ENDPROC(call_rwsem_downgrade_wake)
|
liva/minimal-linux | 2,772 | arch/x86/lib/memset_64.S | /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
.weak memset
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
* simpler and shorter than the original function as well.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
ENTRY(memset)
ENTRY(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
* to use it when possible. If not available, use fast string instructions.
*
* Otherwise, use original memset function.
*/
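/*
 * Patching sketch (how the ALTERNATIVE_2 below behaves at boot, as I read
 * it): CPUs without X86_FEATURE_REP_GOOD take the default "jmp memset_orig";
 * CPUs with REP_GOOD get the jump patched out and fall through to the
 * rep-stosq body; CPUs that also have X86_FEATURE_ERMS get "jmp memset_erms"
 * instead.
 */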
ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memset_erms", X86_FEATURE_ERMS
movq %rdi,%r9
movq %rdx,%rcx
andl $7,%edx
shrq $3,%rcx
/* expand byte value */
movzbl %sil,%esi
movabs $0x0101010101010101,%rax
imulq %rsi,%rax
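/*
 * Worked example of the expansion above: with %sil = 0xAB,
 * 0xAB * 0x0101010101010101 = 0xABABABABABABABAB, i.e. the fill byte
 * replicated into all eight bytes of %rax for the qword stores below.
 */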
rep stosq
movl %edx,%ecx
rep stosb
movq %r9,%rax
ret
ENDPROC(memset)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
* enhanced rep stosb to override the fast string function.
* The code is simpler and shorter than the fast string function as well.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
ENTRY(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
ret
ENDPROC(memset_erms)
ENTRY(memset_orig)
movq %rdi,%r10
/* expand byte value */
movzbl %sil,%ecx
movabs $0x0101010101010101,%rax
imulq %rcx,%rax
/* align dst */
movl %edi,%r9d
andl $7,%r9d
jnz .Lbad_alignment
.Lafter_bad_alignment:
movq %rdx,%rcx
shrq $6,%rcx
jz .Lhandle_tail
.p2align 4
.Lloop_64:
decq %rcx
movq %rax,(%rdi)
movq %rax,8(%rdi)
movq %rax,16(%rdi)
movq %rax,24(%rdi)
movq %rax,32(%rdi)
movq %rax,40(%rdi)
movq %rax,48(%rdi)
movq %rax,56(%rdi)
leaq 64(%rdi),%rdi
jnz .Lloop_64
/* Handle the tail in loops. The loops should be faster than
hard-to-predict jump tables. */
.p2align 4
.Lhandle_tail:
movl %edx,%ecx
andl $63&(~7),%ecx
jz .Lhandle_7
shrl $3,%ecx
.p2align 4
.Lloop_8:
decl %ecx
movq %rax,(%rdi)
leaq 8(%rdi),%rdi
jnz .Lloop_8
.Lhandle_7:
andl $7,%edx
jz .Lende
.p2align 4
.Lloop_1:
decl %edx
movb %al,(%rdi)
leaq 1(%rdi),%rdi
jnz .Lloop_1
.Lende:
movq %r10,%rax
ret
.Lbad_alignment:
cmpq $7,%rdx
jbe .Lhandle_7
movq %rax,(%rdi) /* unaligned store */
movq $8,%r8
subq %r9,%r8
addq %r8,%rdi
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)
|
liva/minimal-linux | 2,479 | arch/x86/lib/hweight.S | /* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/asm.h>
/*
* unsigned int __sw_hweight32(unsigned int w)
* %rdi: w
*/
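/*
 * C equivalent of the instruction sequence below (the standard SWAR
 * population count; each line mirrors one block of instructions):
 *
 *	w = w - ((w >> 1) & 0x55555555);			// count bit pairs
 *	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);		// nibbles
 *	w = (w + (w >> 4)) & 0x0f0f0f0f;			// bytes
 *	return (w * 0x01010101) >> 24;				// sum in top byte
 */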
ENTRY(__sw_hweight32)
#ifdef CONFIG_X86_64
movl %edi, %eax # w
#endif
__ASM_SIZE(push,) %__ASM_REG(dx)
movl %eax, %edx # w -> t
shrl %edx # t >>= 1
andl $0x55555555, %edx # t &= 0x55555555
subl %edx, %eax # w -= t
movl %eax, %edx # w -> t
shrl $2, %eax # w_tmp >>= 2
andl $0x33333333, %edx # t &= 0x33333333
andl $0x33333333, %eax # w_tmp &= 0x33333333
addl %edx, %eax # w = w_tmp + t
movl %eax, %edx # w -> t
shrl $4, %edx # t >>= 4
addl %edx, %eax # w_tmp += t
andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f
imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101
shrl $24, %eax # w = w_tmp >> 24
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
ENDPROC(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
pushq %rdi
pushq %rdx
movq %rdi, %rdx # w -> t
movabsq $0x5555555555555555, %rax
shrq %rdx # t >>= 1
andq %rdx, %rax # t &= 0x5555555555555555
movabsq $0x3333333333333333, %rdx
subq %rax, %rdi # w -= t
movq %rdi, %rax # w -> t
shrq $2, %rdi # w_tmp >>= 2
andq %rdx, %rax # t &= 0x3333333333333333
andq %rdi, %rdx # w_tmp &= 0x3333333333333333
addq %rdx, %rax # w = w_tmp + t
movq %rax, %rdx # w -> t
shrq $4, %rdx # t >>= 4
addq %rdx, %rax # w_tmp += t
movabsq $0x0f0f0f0f0f0f0f0f, %rdx
andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f
movabsq $0x0101010101010101, %rdx
imulq %rdx, %rax # w_tmp *= 0x0101010101010101
shrq $56, %rax # w = w_tmp >> 56
popq %rdx
popq %rdi
ret
#else /* CONFIG_X86_32 */
/* We're getting a u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
pushl %ecx
call __sw_hweight32
movl %eax, %ecx # stash away result
movl %edx, %eax # second part of input
call __sw_hweight32
addl %ecx, %eax # result
popl %ecx
ret
#endif
ENDPROC(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)
|