repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
Lind-Project/wasmtime
4,052
crates/wasmtime/src/runtime/vm/arch/s390x.S
// Currently `global_asm!` isn't stable on s390x, so this is an external // assembler file built with the `build.rs`. .machine z13 .text .hidden host_to_wasm_trampoline .globl host_to_wasm_trampoline .type host_to_wasm_trampoline,@function .p2align 2 #define CONCAT2(a, b) a ## b #define CONCAT(a, b) CONCAT2(a , b) #define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX) #define LIBCALL_TRAMPOLINE(libcall, libcall_impl) \ .hidden VERSIONED_SYMBOL(libcall) ; \ .globl VERSIONED_SYMBOL(libcall) ; \ .type VERSIONED_SYMBOL(libcall),@function ; \ .p2align 2 ; \ VERSIONED_SYMBOL(libcall): ; \ .cfi_startproc ; \ \ /* Load the pointer to `VMRuntimeLimits` in `%r1`. */ \ lg %r1, 8(%r2) ; \ \ /* Store the last Wasm FP into the `last_wasm_exit_fp` in the limits. */ \ lg %r0, 0(%r15) ; \ stg %r0, 24(%r1) ; \ \ /* Store the last Wasm PC into the `last_wasm_exit_pc` in the limits. */ \ stg %r14, 32(%r1) ; \ \ /* Tail call to the actual implementation of this libcall. */ \ jg VERSIONED_SYMBOL(libcall_impl) ; \ \ .cfi_endproc ; \ .size VERSIONED_SYMBOL(libcall),.-VERSIONED_SYMBOL(libcall) LIBCALL_TRAMPOLINE(memory32_grow, impl_memory32_grow) LIBCALL_TRAMPOLINE(table_grow_func_ref, impl_table_grow_func_ref) LIBCALL_TRAMPOLINE(table_grow_externref, impl_table_grow_externref) LIBCALL_TRAMPOLINE(table_fill_func_ref, impl_table_fill_func_ref) LIBCALL_TRAMPOLINE(table_fill_externref, impl_table_fill_externref) LIBCALL_TRAMPOLINE(table_copy, impl_table_copy) LIBCALL_TRAMPOLINE(table_init, impl_table_init) LIBCALL_TRAMPOLINE(elem_drop, impl_elem_drop) LIBCALL_TRAMPOLINE(memory_copy, impl_memory_copy) LIBCALL_TRAMPOLINE(memory_fill, impl_memory_fill) LIBCALL_TRAMPOLINE(memory_init, impl_memory_init) LIBCALL_TRAMPOLINE(ref_func, impl_ref_func) LIBCALL_TRAMPOLINE(data_drop, impl_data_drop) LIBCALL_TRAMPOLINE(table_get_lazy_init_func_ref, impl_table_get_lazy_init_func_ref) LIBCALL_TRAMPOLINE(drop_gc_ref, impl_drop_gc_ref) LIBCALL_TRAMPOLINE(gc, gc) LIBCALL_TRAMPOLINE(gc_ref_global_get, 
impl_gc_ref_global_get) LIBCALL_TRAMPOLINE(gc_ref_global_set, impl_gc_ref_global_set) LIBCALL_TRAMPOLINE(memory_atomic_notify, impl_memory_atomic_notify) LIBCALL_TRAMPOLINE(memory_atomic_wait32, impl_memory_atomic_wait32) LIBCALL_TRAMPOLINE(memory_atomic_wait64, impl_memory_atomic_wait64) LIBCALL_TRAMPOLINE(out_of_gas, impl_out_of_gas) LIBCALL_TRAMPOLINE(new_epoch, impl_new_epoch) LIBCALL_TRAMPOLINE(check_malloc, impl_check_malloc) LIBCALL_TRAMPOLINE(check_free, impl_check_free) LIBCALL_TRAMPOLINE(check_load, impl_check_load) LIBCALL_TRAMPOLINE(check_store, impl_check_store) LIBCALL_TRAMPOLINE(malloc_start, impl_malloc_start) LIBCALL_TRAMPOLINE(free_start, impl_free_start) LIBCALL_TRAMPOLINE(update_stack_pointer, impl_update_stack_pointer) LIBCALL_TRAMPOLINE(update_mem_size, impl_update_mem_size)
listentodella/rCore-Study
2,599
os/src/trap/trap.S
// 首先通过 __alltraps 将 Trap 上下文保存在内核栈上 // 然后跳转到使用 Rust 编写的 trap_handler 函数完成 Trap 分发及处理 // 当 trap_handler 返回之后,使用 __restore 从保存在内核栈上的 Trap 上下文恢复寄存器 // 最后通过一条 sret 指令回到应用程序执 //加上 .altmacro 才能正常使用 .rept 命令 .altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .section .text .globl __alltraps .globl __restore // riscv 特权级规范, 4字节对齐 .align 2 __alltraps: csrr tp, sstatus andi tp, tp, 0x100 beqz tp, __user_trap_start j __real_trap_entry __user_trap_start: # csrrw rd, csr, rs1 # 控制状态寄存器读后写, 先记录csr的值t, 然后rs1存到csr, t存入rd # Xscratch 在异常中,提供一个字的临时存储, # 甚至可以当成一个普通的寄存器,如何使用完全取决于软件,硬件并不主动对它做什么 csrrw sp, sscratch, sp #保存 sp;设置 sp 为临时内存空间的地址 __real_trap_entry: # now sp->kernel stack, sscratch->user stack # allocate a TrapContext on kernel stack # 准备在内核栈上保存trap上下文,预分配34*8字节 # 但实际上,并没有把所有寄存器都备份了一遍 addi sp, sp, -34*8 # save general-purpose registers sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they were saved on kernel stack csrr t0, sstatus csrr t1, sepc sd t0, 32*8(sp) sd t1, 33*8(sp) # read user stack from sscratch and save it on the kernel stack csrr t2, sscratch sd t2, 2*8(sp) # set input argument of trap_handler(cx: &mut TrapContext) mv a0, sp call trap_handler __restore: # case1: start running app by __restore # case2: back to U after handling trap # mv sp, a0 # no need because __switch handle it # now sp->kernel stack(after allocated), sscratch->user stack # restore sstatus/sepc ld t0, 32*8(sp) ld t1, 33*8(sp) ld t2, 2*8(sp) csrw sstatus, t0 csrw sepc, t1 csrw sscratch, t2 # get SPP andi t0, t0, 0x100 bnez t0, __kernel_trap_end __user_trap_end: # restore general-purpuse registers except sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr # release TrapContext on kernel stack addi sp, sp, 34*8 # now sp->kernel stack, sscratch->user stack csrrw sp, 
sscratch, sp # 恢复 sp;设置 mscratch 为临时内存空间的地址 sret __kernel_trap_end: #restore general-purpose registers except sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr # release TrapContext on kernel stack addi sp, sp, 34*8 sret
listentodella/rCore-Study
1,273
myos/mysbi/src/sbi_entry.S
// 加上 .altmacro 才能正常使用 .rept 命令 .altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm /* sbi_exception_vector M模式的异常向量入口 8字节对齐 */ .align 3 .global sbi_exception_vector sbi_exception_vector: // 从mscratch获取M模式之前备份的sp // 并将S模式的sp保存到mscratch csrrw sp, mscratch, sp // mepc + x1~x31 + mstatus = 33 addi sp, sp, -33*8 sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it, but reserve it's mem # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they were saved on kernel stack // 保存mepc csrr t0, mepc sd t0, 0(sp) // 保存mstatus csrr t0, mstatus sd t0, 32*8(sp) /* * 这里有两个目的: * 1. 保存S模式的SP保存到 sbi_trap_regs->sp * 2. 把M模式的SP保存到mscratch, * 以便下次陷入到M模式时候可以得到SP */ // 此时的SP为M模式的SP // mscratch保存的是S模式的SP addi t0, sp, 33*8 /* 把M模式的SP保存到mscratch * 把S模式的SP保存到 栈框sbi_trap_regs->sp里 */ csrrw t0, mscratch, t0 sd t0, 2*8(sp) // 调用rust的sbi_trap_handler // sbi_trap_regs mv a0, sp call sbi_trap_handler // restore context ld t0, 32*8(sp) csrw mstatus, t0 ld t0, 0(sp) csrw mepc, t0 ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr ld sp, 2*8(sp) mret
listentodella/rCore-Study
1,172
myos/os/src/entry.S
// 加上 .altmacro 才能正常使用 .rept 命令 .altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .macro kernel_entry // sepc + x1~x31 + sstatus + sbadaddr + scause + orig_a0 = 36 // orig_a0 is the value before syscall, just a sw backup, can remove it addi sp, sp, -36*8 sd x1, 1*8(sp) sd x3, 3*8(sp) # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr csrr s1, sstatus sd s1, 32*8(sp) csrr s2, sepc sd s1, 0*8(sp) csrr s3, sbadaddr sd s3, 33*8(sp) csrr s4, scause sd s4, 34*8(sp) csrr s5, sscratch sd s5, 4*8(sp) addi s0, sp, 36*8 sd s0, 2*8(sp) .endm .macro kernel_exit ld a0, 32*8(sp) csrw sstatus, a0 ld a2, 0*8(sp) csrw sepc, a2 ld x1, 1*8(sp) # load x4~x31 .set n, 4 .rept 28 SAVE_GP %n .set n, n+1 .endr ld x2, 2*8(sp) .endm /* S模式的异常向量入口 do_exception_vector必须4字节对齐 否则写入stvec寄存器会不成功 */ .align 2 .global do_exception_vector do_exception_vector: kernel_entry csrw sscratch, x0 la ra, ret_from_exception mv a0, sp // registers context mv a1, s4 // scause tail do_exception ret_from_exception: restore_all: kernel_exit sret
listentodella/rCore-Study
1,231
myos/os/src/base/asm_test.S
.align 3 .globl my_test_data my_test_data: .dword 0x12345678abcdabcd .global load_store_test .global compare_and_return load_store_test: li t0, 0x80200000 lb t1, (t0) lb t1, 4(t0) lb t1, -4(t0) ld t1, (t0) lb t1, 4(t0) lui t0, 0x80200 lui t1, 0x40200 la t0, my_test_data lla t1, my_test_data ret .global my_memcpy_test my_memcpy_test: // mv在RV里是伪指令 // 等价于 addi rd, rs, 0 mv t0, a0 mv t1, a1 add t2, t0, a2 .loop: // RV 并没有ARM的多字节加载指令 ld t3, (t0) sd t3, (t1) addi t0, t0, 8 addi t1, t1, 8 blt t0, t2, .loop ret compare_and_return: bltu a0, a1, .L2 li a5, 0 j .L3 .L2: li a5, -1 .L3: mv a0, a5 ret .global beqz_test beqz_test: beqz a0, .L4 li a5, 0 j .L5 .L4: li a5, 1 .L5: mv a0, a5 ret .globl add_test add_test: add a0, a0, a1 nop // 加载它的父函数的返回地址 ret .globl branch_test branch_test: /*把返回地址ra寄存器保存到栈里*/ addi sp,sp,-8 // 此时的ra指向父函数 sd ra,(sp) // 执行完成后,将记录返回地址(父函数)保存到sp li a0, 1 li a1, 2 /* 调用add_test子函数 */ // call是伪指令, 本质是auipc与jalr的组合 // 将下一条指令的地址(pc+8)写入 x[rd],然后将 pc 设为 symbol // 它并不会主动为我们备份当前的ra, // 不过会将当前的运行函数备份到ra(具体要看编译器选了哪个寄存器备份) // 所以我们需要在上面自己备份父函数的返回地址到ra call add_test nop /* 从栈中恢复ra返回地址*/ ld ra,(sp) addi sp,sp,8 ret
listline/arceos-driver
2,001
modules/axhal/linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) *(.text .text.*) . = ALIGN(4K); _etext = .; } _srodata = .; .rodata : ALIGN(4K) { *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) } .init_array : ALIGN(0x10) { __init_array_start = .; *(.init_array .init_array.*) __init_array_end = .; } . = ALIGN(4K); _erodata = .; .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; _percpu_end = _percpu_start + SIZEOF(.percpu); .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = _percpu_load_start + ALIGN(64) * %SMP%; } . = _percpu_end; . = ALIGN(4K); _edata = .; .bss : AT(.) ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } } SECTIONS { linkme_IRQ : { *(linkme_IRQ) } linkm2_IRQ : { *(linkm2_IRQ) } linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) } linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) } linkme_SYSCALL : { *(linkme_SYSCALL) } linkm2_SYSCALL : { *(linkm2_SYSCALL) } axns_resource : { *(axns_resource) } } INSERT AFTER .tbss;
listline/arceos-driver
4,325
modules/axhal/src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp 
.Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508
listline/arceos-driver
1,965
modules/axhal/src/platform/x86_pc/ap_start.S
# Boot application processors into the protected mode. # Each non-boot CPU ("AP") is started up in response to a STARTUP # IPI from the boot CPU. Section B.4.2 of the Multi-Processor # Specification says that the AP will start in real mode with CS:IP # set to XY00:0000, where XY is an 8-bit value sent with the # STARTUP. Thus this code must start at a 4096-byte boundary. # # Because this code sets DS to zero, it must sit # at an address in the low 2^16 bytes. .equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} .equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} .equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} .equ stack_ptr, {start_page_paddr} + 0xff0 .equ entry_ptr, {start_page_paddr} + 0xff8 # 0x6000 .section .text .code16 .p2align 12 .global ap_start ap_start: cli wbinvd xor ax, ax mov ds, ax mov es, ax mov ss, ax mov fs, ax mov gs, ax # load the 64-bit GDT lgdt [pa_ap_gdt_desc] # switch to protected-mode mov eax, cr0 or eax, (1 << 0) mov cr0, eax # far jump to 32-bit code. 0x8 is code32 segment selector ljmp 0x8, offset pa_ap_start32 .code32 ap_start32: mov esp, [stack_ptr] mov eax, [entry_ptr] jmp eax .balign 8 # .type multiboot_header, STT_OBJECT .Lap_tmp_gdt_desc: .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit .long pa_ap_gdt # base .balign 16 .Lap_tmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Lap_tmp_gdt_end: # 0x7000 .p2align 12 .global ap_end ap_end:
listline/Arceos-visor
2,001
modules/axhal/linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) *(.text .text.*) . = ALIGN(4K); _etext = .; } _srodata = .; .rodata : ALIGN(4K) { *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) } .init_array : ALIGN(0x10) { __init_array_start = .; *(.init_array .init_array.*) __init_array_end = .; } . = ALIGN(4K); _erodata = .; .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; _percpu_end = _percpu_start + SIZEOF(.percpu); .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = _percpu_load_start + ALIGN(64) * %SMP%; } . = _percpu_end; . = ALIGN(4K); _edata = .; .bss : AT(.) ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } } SECTIONS { linkme_IRQ : { *(linkme_IRQ) } linkm2_IRQ : { *(linkm2_IRQ) } linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) } linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) } linkme_SYSCALL : { *(linkme_SYSCALL) } linkm2_SYSCALL : { *(linkm2_SYSCALL) } axns_resource : { *(axns_resource) } } INSERT AFTER .tbss;
listline/arceos-driver
2,544
tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- // Load the address of a symbol into a register, PC-relative. // // The symbol must lie within +/- 4 GiB of the Program Counter. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_REL register, symbol adrp \register, \symbol add \register, \register, #:lo12:\symbol .endm // Load the address of a symbol into a register, absolute. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_ABS register, symbol movz \register, #:abs_g2:\symbol movk \register, #:abs_g1_nc:\symbol movk \register, #:abs_g0_nc:\symbol .endm //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- .section .text._start //------------------------------------------------------------------------------ // fn _start() //------------------------------------------------------------------------------ _start: // Only proceed on the boot core. Park it otherwise. mrs x0, MPIDR_EL1 and x0, x0, {CONST_CORE_ID_MASK} ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs cmp x0, x1 b.ne .L_parking_loop // If execution reaches here, it is the boot core. // Initialize DRAM. ADR_ABS x0, __bss_start ADR_ABS x1, __bss_end_exclusive .L_bss_init_loop: cmp x0, x1 b.eq .L_relocate_binary stp xzr, xzr, [x0], #16 b .L_bss_init_loop // Next, relocate the binary. .L_relocate_binary: ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to. 
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to. ADR_ABS x2, __binary_nonzero_end_exclusive .L_copy_loop: ldr x3, [x0], #8 str x3, [x1], #8 cmp x1, x2 b.lo .L_copy_loop // Prepare the jump to Rust code. // Set the stack pointer. ADR_ABS x0, __boot_core_stack_end_exclusive mov sp, x0 // Jump to the relocated Rust code. ADR_ABS x1, _start_rust br x1 // Infinitely wait for events (aka "park the core"). .L_parking_loop: wfe b .L_parking_loop .size _start, . - _start .type _start, function .global _start
listline/Arceos-visor
4,325
modules/axhal/src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp 
.Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508
listline/Arceos-visor
1,965
modules/axhal/src/platform/x86_pc/ap_start.S
# Boot application processors into the protected mode. # Each non-boot CPU ("AP") is started up in response to a STARTUP # IPI from the boot CPU. Section B.4.2 of the Multi-Processor # Specification says that the AP will start in real mode with CS:IP # set to XY00:0000, where XY is an 8-bit value sent with the # STARTUP. Thus this code must start at a 4096-byte boundary. # # Because this code sets DS to zero, it must sit # at an address in the low 2^16 bytes. .equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} .equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} .equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} .equ stack_ptr, {start_page_paddr} + 0xff0 .equ entry_ptr, {start_page_paddr} + 0xff8 # 0x6000 .section .text .code16 .p2align 12 .global ap_start ap_start: cli wbinvd xor ax, ax mov ds, ax mov es, ax mov ss, ax mov fs, ax mov gs, ax # load the 64-bit GDT lgdt [pa_ap_gdt_desc] # switch to protected-mode mov eax, cr0 or eax, (1 << 0) mov cr0, eax # far jump to 32-bit code. 0x8 is code32 segment selector ljmp 0x8, offset pa_ap_start32 .code32 ap_start32: mov esp, [stack_ptr] mov eax, [entry_ptr] jmp eax .balign 8 # .type multiboot_header, STT_OBJECT .Lap_tmp_gdt_desc: .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit .long pa_ap_gdt # base .balign 16 .Lap_tmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Lap_tmp_gdt_end: # 0x7000 .p2align 12 .global ap_end ap_end:
listline/Arceos-visor
1,705
modules/axhal/src/arch/loongarch64/trap.S
.macro SAVE_REGS, from_user move $t0, $sp .if \from_user == 1 csrrd $sp, KSAVE_KSP // restore kernel sp addi.d $sp, $sp, -{trapframe_size} STD $tp, $sp, 2 STD $r21, $sp, 21 csrrd $tp, KSAVE_TP csrrd $r21, KSAVE_R21 .else addi.d $sp, $sp, -{trapframe_size} .endif STD $t0, $sp, 3 csrrd $t0, KSAVE_TEMP PUSH_GENERAL_REGS csrrd $t1, LA_CSR_PRMD csrrd $t2, LA_CSR_ERA STD $t1, $sp, 32 // prmd STD $t2, $sp, 33 // era .endm .macro RESTORE_REGS, from_user .if \from_user == 1 csrwr $tp, KSAVE_TP csrwr $r21, KSAVE_R21 LDD $tp, $sp, 2 LDD $r21, $sp, 21 .endif LDD $t1, $sp, 33 // era LDD $t2, $sp, 32 // prmd csrwr $t1, LA_CSR_ERA csrwr $t2, LA_CSR_PRMD POP_GENERAL_REGS LDD $sp, $sp, 3 .endm .section .text .balign 4096 .global exception_entry_base exception_entry_base: csrwr $t0, KSAVE_TEMP csrrd $t0, LA_CSR_PRMD andi $t0, $t0, 0x3 bnez $t0, .Lfrom_userspace .Lfrom_kernel: SAVE_REGS 0 move $a0, $sp addi.d $a1, $zero, 0 bl loongarch64_trap_handler RESTORE_REGS 0 ertn .Lfrom_userspace: SAVE_REGS 1 move $a0, $sp addi.d $a1, $zero, 1 bl loongarch64_trap_handler RESTORE_REGS 1 ertn .section .text .balign 4096 .global handle_tlb_refill handle_tlb_refill: csrwr $t0, LA_CSR_TLBRSAVE csrrd $t0, LA_CSR_PGD lddir $t0, $t0, 3 lddir $t0, $t0, 2 lddir $t0, $t0, 1 ldpte $t0, 0 ldpte $t0, 1 tlbfill csrrd $t0, LA_CSR_TLBRSAVE ertn
listline/Arceos-visor
1,839
modules/axhal/src/arch/riscv/trap.S
.macro SAVE_REGS, from_user addi sp, sp, -{trapframe_size} PUSH_GENERAL_REGS csrr t0, sepc csrr t1, sstatus csrrw t2, sscratch, zero // save sscratch (sp) and zero it STR t0, sp, 31 // tf.sepc STR t1, sp, 32 // tf.sstatus STR t2, sp, 1 // tf.regs.sp .if \from_user == 1 LDR t0, sp, 2 // load supervisor gp LDR t1, sp, 3 // load supervisor tp STR gp, sp, 2 // save user gp and tp STR tp, sp, 3 mv gp, t0 mv tp, t1 .endif .endm .macro RESTORE_REGS, from_user .if \from_user == 1 LDR t1, sp, 2 // load user gp and tp LDR t0, sp, 3 STR gp, sp, 2 // save supervisor gp STR tp, sp, 3 // save supervisor gp and tp mv gp, t1 mv tp, t0 addi t0, sp, {trapframe_size} // put supervisor sp to scratch csrw sscratch, t0 .endif LDR t0, sp, 31 LDR t1, sp, 32 csrw sepc, t0 csrw sstatus, t1 POP_GENERAL_REGS LDR sp, sp, 1 // load sp from tf.regs.sp .endm .section .text .balign 4 .global trap_vector_base trap_vector_base: // sscratch == 0: trap from S mode // sscratch != 0: trap from U mode csrrw sp, sscratch, sp // swap sscratch and sp bnez sp, .Ltrap_entry_u csrr sp, sscratch // put supervisor sp back j .Ltrap_entry_s .Ltrap_entry_s: SAVE_REGS 0 mv a0, sp li a1, 0 call riscv_trap_handler RESTORE_REGS 0 sret .Ltrap_entry_u: SAVE_REGS 1 mv a0, sp li a1, 1 call riscv_trap_handler RESTORE_REGS 1 sret
listline/Arceos-visor
1,339
modules/axhal/src/arch/x86_64/syscall.S
.section .text .code64 syscall_entry: swapgs // switch to kernel gs mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp // save user rsp mov rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] // switch to kernel stack sub rsp, 8 // skip user ss push gs:[offset __PERCPU_USER_RSP_OFFSET] // user rsp push r11 // rflags mov [rsp - 2 * 8], rcx // rip sub rsp, 4 * 8 // skip until general registers push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_syscall_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 add rsp, 7 * 8 mov rcx, [rsp - 5 * 8] // rip mov r11, [rsp - 3 * 8] // rflags mov rsp, [rsp - 2 * 8] // user rsp swapgs sysretq
listline/Arceos-visor
1,505
modules/axhal/src/arch/x86_64/trap.S
.equ NUM_INT, 256 .altmacro .macro DEF_HANDLER, i .Ltrap_handler_\i: .if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17 # error code pushed by CPU push \i # interrupt vector jmp .Ltrap_common .else push 0 # fill in error code in TrapFrame push \i # interrupt vector jmp .Ltrap_common .endif .endm .macro DEF_TABLE_ENTRY, i .quad .Ltrap_handler_\i .endm .section .text .code64 _trap_handlers: .set i, 0 .rept NUM_INT DEF_HANDLER %i .set i, i + 1 .endr .Ltrap_common: test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space jz 1f swapgs 1: push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_trap_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space jz 2f swapgs 2: add rsp, 16 # pop vector, error_code iretq .section .rodata .global trap_handler_table trap_handler_table: .set i, 0 .rept NUM_INT DEF_TABLE_ENTRY %i .set i, i + 1 .endr
listline/Arceos-visor
2,616
modules/axhal/src/arch/aarch64/trap.S
.macro SAVE_REGS sub sp, sp, 34 * 8 stp x0, x1, [sp] stp x2, x3, [sp, 2 * 8] stp x4, x5, [sp, 4 * 8] stp x6, x7, [sp, 6 * 8] stp x8, x9, [sp, 8 * 8] stp x10, x11, [sp, 10 * 8] stp x12, x13, [sp, 12 * 8] stp x14, x15, [sp, 14 * 8] stp x16, x17, [sp, 16 * 8] stp x18, x19, [sp, 18 * 8] stp x20, x21, [sp, 20 * 8] stp x22, x23, [sp, 22 * 8] stp x24, x25, [sp, 24 * 8] stp x26, x27, [sp, 26 * 8] stp x28, x29, [sp, 28 * 8] mrs x9, sp_el0 mrs x10, elr_el1 mrs x11, spsr_el1 stp x30, x9, [sp, 30 * 8] stp x10, x11, [sp, 32 * 8] # We may have interrupted userspace, or a guest, or exit-from or # return-to either of those. So we can't trust sp_el0, and need to # restore it. bl {cache_current_task_ptr} .endm .macro RESTORE_REGS ldp x10, x11, [sp, 32 * 8] ldp x30, x9, [sp, 30 * 8] msr sp_el0, x9 msr elr_el1, x10 msr spsr_el1, x11 ldp x28, x29, [sp, 28 * 8] ldp x26, x27, [sp, 26 * 8] ldp x24, x25, [sp, 24 * 8] ldp x22, x23, [sp, 22 * 8] ldp x20, x21, [sp, 20 * 8] ldp x18, x19, [sp, 18 * 8] ldp x16, x17, [sp, 16 * 8] ldp x14, x15, [sp, 14 * 8] ldp x12, x13, [sp, 12 * 8] ldp x10, x11, [sp, 10 * 8] ldp x8, x9, [sp, 8 * 8] ldp x6, x7, [sp, 6 * 8] ldp x4, x5, [sp, 4 * 8] ldp x2, x3, [sp, 2 * 8] ldp x0, x1, [sp] add sp, sp, 34 * 8 .endm .macro INVALID_EXCP, kind, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \kind mov x2, \source bl invalid_exception b .Lexception_return .endm .macro HANDLE_SYNC .p2align 7 SAVE_REGS mov x0, sp bl handle_sync_exception b .Lexception_return .endm .macro HANDLE_IRQ .p2align 7 SAVE_REGS mov x0, sp bl handle_irq_exception b .Lexception_return .endm .section .text .p2align 11 .global exception_vector_base exception_vector_base: // current EL, with SP_EL0 INVALID_EXCP 0 0 INVALID_EXCP 1 0 INVALID_EXCP 2 0 INVALID_EXCP 3 0 // current EL, with SP_ELx HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 1 INVALID_EXCP 3 1 // lower EL, aarch64 HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 2 INVALID_EXCP 3 2 // lower EL, aarch32 INVALID_EXCP 0 3 INVALID_EXCP 1 3 INVALID_EXCP 2 3 
INVALID_EXCP 3 3 .Lexception_return: RESTORE_REGS eret
listline/Arceos-visor
2,544
tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>

//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------

// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
    adrp  \register, \symbol
    add   \register, \register, #:lo12:\symbol
.endm

// Load the address of a symbol into a register, absolute.
//
// Used for link-time addresses, which differ from the load address until the
// binary has relocated itself.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
    movz  \register, #:abs_g2:\symbol
    movk  \register, #:abs_g1_nc:\symbol
    movk  \register, #:abs_g0_nc:\symbol
.endm

//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start

//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
// Chainloader entry: runs on the boot core only, zeroes .bss, copies the
// binary from its load address to its link address, then jumps to Rust.
_start:
    // Only proceed on the boot core. Park it otherwise.
    mrs   x0, MPIDR_EL1
    and   x0, x0, {CONST_CORE_ID_MASK}
    ldr   x1, BOOT_CORE_ID          // provided by bsp/__board_name__/cpu.rs
    cmp   x0, x1
    b.ne  .L_parking_loop

    // If execution reaches here, it is the boot core.

    // Initialize DRAM.
    // Zero .bss in 16-byte strides; assumes __bss_start/__bss_end_exclusive
    // are 16-byte aligned — TODO confirm against the linker script.
    ADR_ABS x0, __bss_start
    ADR_ABS x1, __bss_end_exclusive

.L_bss_init_loop:
    cmp   x0, x1
    b.eq  .L_relocate_binary
    stp   xzr, xzr, [x0], #16
    b     .L_bss_init_loop

    // Next, relocate the binary.
.L_relocate_binary:
    ADR_REL x0, __binary_nonzero_start  // The address the binary got loaded to.
    ADR_ABS x1, __binary_nonzero_start  // The address the binary was linked to.
    ADR_ABS x2, __binary_nonzero_end_exclusive

    // do-while copy, 8 bytes per iteration; always copies at least once.
    // Forward copy — presumably load and link regions do not overlap; verify
    // against the memory map.
.L_copy_loop:
    ldr   x3, [x0], #8
    str   x3, [x1], #8
    cmp   x1, x2
    b.lo  .L_copy_loop

    // Prepare the jump to Rust code.
    // Set the stack pointer.
    ADR_ABS x0, __boot_core_stack_end_exclusive
    mov   sp, x0

    // Jump to the relocated Rust code.
    ADR_ABS x1, _start_rust
    br    x1

    // Infinitely wait for events (aka "park the core").
.L_parking_loop:
    wfe
    b     .L_parking_loop

.size _start, . - _start
.type _start, function
.global _start
LittleLucifer1/duckos
1,151
os/src/entry.S
# Kernel entry point: set up a per-hart boot stack, enable a temporary Sv39
# boot page table with two 1 GiB megapages, then jump to rust_main through
# its high-half virtual address.
    .section .text.entry
    .globl _start
_start:
    # hart_id is in a0, a0 --> tp
    # pc = 0x8020_0000
    # 1. According to hart_id, set sp to the top of this hart's stack slice.
    la sp, boot_stack_top
    mv tp, a0
    slli t0, tp, 16             # per-hart kernel stack size: 4096 * 16 = 64 KiB
    sub sp, sp, t0
    # 2. Set page table & satp.
    # satp = (8 << 60) | PPN(boot_pagetable); mode 8 = Sv39.
    la t0, boot_pagetable
    li t1, 8 << 60
    srli t0, t0, 12             # physical address -> PPN
    or t0, t0, t1
    csrw satp, t0
    sfence.vma
    # 3. Rebase the stack pointer into the high half.
    # The virtual-to-physical offset is 0xffff_ffff_0000_0000.
    li t1, 0xffffffff00000000
    add sp, sp, t1
    # 4. Jump to rust_main via its high-half virtual address.
    la t0, rust_main
    add t0, t0, t1
    jr t0

    .section .bss.stack
    .globl boot_stack_lower
boot_stack_lower:
    # Total kernel stack: up to 8 harts; each hart gets 15 pages plus one
    # guard page, i.e. 16 pages (64 KiB) per hart.
    .space 4096 * 16 * 8
    .globl boot_stack_top
boot_stack_top:

    .section .data
    .align 12                   # page-aligned: satp takes this page's PPN
boot_pagetable:
    # Identity map:  0x0000_0000_8000_0000 ---> 0x0000_0000_8000_0000
    # High-half map: 0xffff_ffff_8000_0000 ---> 0x0000_0000_8000_0000
    # Each valid entry is a 1 GiB megapage; flags 0xcf = V|R|W|X|A|D.
    .quad 0
    .quad 0
    .quad (0x80000 << 10) | 0xcf    # VRWXAD, vpn L2 = 0b10 (index 2)
    .zero 8 * 507                   # indexes 3..509 invalid
    .quad (0x80000 << 10) | 0xcf    # vpn L2 = 0b1_1111_1110 (index 510)
    .quad 0
LittleLucifer1/duckos
1,894
os/src/process/trap/trap.S
# Shared-address-space trap entry/exit for RISC-V S-mode.
#
# Trap frame layout on the kernel stack (35 * 8 bytes):
#   [1] x1 (ra)   [2] previous sp   [3..31] x3..x31
#   [32] sstatus  [33] sepc         [34] kernel tp
#
# Convention: while running in U-mode, sscratch holds the kernel sp and the
# kernel tp is parked in frame slot 34 by __restore; kernel virtual addresses
# are negative when viewed as signed, user addresses positive, so `bgtz sp`
# distinguishes "trapped from user" from "trapped from kernel" without
# needing a scratch register.
.altmacro
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text
    .globl __alltraps
    .globl __restore
    .align 2        # stvec BASE must be 4-byte aligned (low two bits are mode)
# TODO: nested interrupts (U -> S -> S) are not yet considered here.
__alltraps:
    # Cannot test sstatus.SPP here: no register is free yet. Instead treat sp
    # as signed: positive => trapped from user space, negative => from kernel.
    # User: swap sp with sscratch to get the kernel stack. Kernel: keep sp.
    bgtz sp, __user_to_kernel
    sd tp, -1*8(sp)             # kernel trap: park tp in what becomes slot 34
__trap_entry:
    # sp -> kernel stack; build the 35-slot trap frame.
    addi sp, sp, -35*8
    sd x1, 1*8(sp)
    # x2 (old sp) is saved below from sscratch; x3..x31 via the loop.
    .set n, 3
    .rept 29
        SAVE_GP %n
        .set n, n+1
    .endr
    # t0/t1/t2 are free now: their values are already in the frame.
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # sscratch holds the pre-trap sp (user sp, or kernel sp on kernel traps).
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # Load the kernel tp parked in slot 34 (by __restore for user traps, or
    # by the `sd tp` above for kernel traps).
    ld tp, 34*8(sp)
    mv a0, sp                   # a0 = &trap frame
    call trap_handler
__restore:
    # Return path: kernel -> user, or kernel -> kernel.
    # Restore sstatus/sepc and stage the pre-trap sp in sscratch.
    ld t0, 32*8(sp)             # t0 <- saved sstatus
    ld t1, 33*8(sp)             # t1 <- saved sepc
    ld t2, 2*8(sp)              # t2 <- pre-trap sp
    sd tp, 34*8(sp)             # park kernel tp for the next trap from user
    csrw sstatus, t0
    csrw sepc, t1
    csrw sscratch, t2
    # SPP is bit 8 of sstatus; test it before t0 is overwritten by the
    # register-restore loop below.
    andi t0, t0, 0x100
    bnez t0, __kernel_to_kernel
    # Returning to user space: restore registers and pop the frame.
    ld x1, 1*8(sp)
    # skip x2 (sp), handled via sscratch
    .set n, 3
    .rept 29
        LOAD_GP %n
        .set n, n+1
    .endr
    addi sp, sp, 35*8
    csrrw sp, sscratch, sp      # sp <- user sp, sscratch <- kernel sp
    # Special case: a freshly initialized user trap context has SPP=User but
    # may take a kernel interrupt before its first switch; its user sp is 0.
    # TODO: could this be detected earlier, giving three distinct branches?
    # Following maturin's approach for now, for safety.
    beqz sp, __idle
    sret
__kernel_to_kernel:
    # Kernel interrupt return: restore registers, pop the frame, no swap.
    ld x1, 1*8(sp)
    # skip x2 (sp), handled below
    .set n, 3
    .rept 29
        LOAD_GP %n
        .set n, n+1
    .endr
    # BUGFIX: was `addi sp, sp, 35`, popping 35 bytes instead of the full
    # 35*8-byte frame and leaving sp inside the trap frame (kernel stack
    # corruption). Must mirror the user path's frame pop.
    addi sp, sp, 35*8
    sret
__user_to_kernel:
    # Trap from user: fetch the kernel sp from sscratch (user sp goes there).
    csrrw sp, sscratch, sp
    j __trap_entry
__idle:
    # Undo the swap (sp was 0) and return; see the note above `beqz`.
    csrrw sp, sscratch, sp
    sret
LittleLucifer1/duckos
17,376
dependency/riscv/asm.S
// Assembly shims for RISC-V instructions not always available to the
// toolchain, plus the CSR accessor table. RW/RO/RW32/RO32 are macros from
// asm.h (not visible here) — presumably they expand to read/write wrapper
// functions for each CSR; confirm in asm.h.
#include "asm.h"

// Environment breakpoint.
.section .text.__ebreak
.global __ebreak
__ebreak:
    ebreak
    ret

// Wait for interrupt.
.section .text.__wfi
.global __wfi
__wfi:
    wfi
    ret

// Full TLB flush.
.section .text.__sfence_vma_all
.global __sfence_vma_all
__sfence_vma_all:
    sfence.vma
    ret

// TLB flush for one (vaddr, asid) pair: a0 = vaddr, a1 = asid.
.section .text.__sfence_vma
.global __sfence_vma
__sfence_vma:
    sfence.vma a0, a1
    ret

// RISC-V hypervisor instructions.
// When LLVM lacks H-extension support, each instruction is emitted as a
// hand-encoded .word with the operands fixed to a0/a1 as shown.
// The switch for enabling LLVM support for asm generation.
// #define LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT

.section .text.__hfence_gvma
.global __hfence_gvma
__hfence_gvma:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hfence.gvma a0, a1
#else
    .word 1656029299            // hfence.gvma a0, a1
#endif
    ret

.section .text.__hfence_vvma
.global __hfence_vvma
__hfence_vvma:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hfence.vvma a0, a1
#else
    .word 582287475             // hfence.vvma a0, a1
#endif
    ret

.section .text.__hlv_b
.global __hlv_b
__hlv_b:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.b a0, a0
#else
    .word 1610958195            // hlv.b a0, a0
#endif
    ret

.section .text.__hlv_bu
.global __hlv_bu
__hlv_bu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.bu a0, a0
#else
    .word 1612006771            // hlv.bu a0, a0
#endif
    ret

.section .text.__hlv_h
.global __hlv_h
__hlv_h:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.h a0, a0
#else
    .word 1678067059            // hlv.h a0, a0
#endif
    ret

.section .text.__hlv_hu
.global __hlv_hu
__hlv_hu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.hu a0, a0
#else
    .word 1679115635            // hlv.hu a0, a0
#endif
    ret

.section .text.__hlvx_hu
.global __hlvx_hu
__hlvx_hu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlvx.hu a0, a0
#else
    .word 1681212787            // hlvx.hu a0, a0
#endif
    ret

.section .text.__hlv_w
.global __hlv_w
__hlv_w:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.w a0, a0
#else
    .word 1745175923            // hlv.w a0, a0
#endif
    ret

.section .text.__hlvx_wu
.global __hlvx_wu
__hlvx_wu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlvx.wu a0, a0
#else
    .word 1748321651            // hlvx.wu a0, a0
#endif
    ret

.section .text.__hsv_b
.global __hsv_b
__hsv_b:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hsv.b a0, a1
#else
    .word 1656045683            // hsv.b a0, a1
#endif
    ret

.section .text.__hsv_h
.global __hsv_h
__hsv_h:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hsv.h a0, a1
#else
    .word 1723154547            // hsv.h a0, a1
#endif
    ret

.section .text.__hsv_w
.global __hsv_w
__hsv_w:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hsv.w a0, a1
#else
    .word 1790263411            // hsv.w a0, a1
#endif
    ret

.section .text.__hlv_wu
.global __hlv_wu
__hlv_wu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.wu a0, a0
#else
    .word 1746224499            // hlv.wu a0, a0
#endif
    ret

.section .text.__hlv_d
.global __hlv_d
__hlv_d:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hlv.d a0, a0
#else
    .word 1812284787            // hlv.d a0, a0
#endif
    ret

.section .text.__hsv_d
.global __hsv_d
__hsv_d:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
    hsv.d a0, a1
#else
    .word 1857372275            // hsv.d a0, a1
#endif
    ret

// ---------------------------------------------------------------------------
// CSR accessor table. One RW/RO(32) invocation per CSR: address, name.
// ---------------------------------------------------------------------------

// User Trap Setup
RW(0x000, ustatus)          // User status register
RW(0x004, uie)              // User interrupt-enable register
RW(0x005, utvec)            // User trap handler base address

// User Trap Handling
RW(0x040, uscratch)         // Scratch register for user trap handlers
RW(0x041, uepc)             // User exception program counter
RW(0x042, ucause)           // User trap cause
RW(0x043, utval)            // User bad address or instruction
RW(0x044, uip)              // User interrupt pending

// User Floating-Point CSRs
RW(0x001, fflags)           // Floating-Point Accrued Exceptions
RW(0x002, frm)              // Floating-Point Dynamic Rounding Mode
RW(0x003, fcsr)             // Floating-Point Control and Status Register (frm + fflags)

// User Counter/Timers
RO( 0xC00, cycle)           // Cycle counter for RDCYCLE instruction
RO( 0xC01, time)            // Timer for RDTIME instruction
RO( 0xC02, instret)         // Instructions-retired counter for RDINSTRET instruction
RO( 0xC03, hpmcounter3)     // Performance-monitoring counter
RO( 0xC04, hpmcounter4)     // Performance-monitoring counter
RO( 0xC05, hpmcounter5)     // Performance-monitoring counter
RO( 0xC06, hpmcounter6)     // Performance-monitoring counter
RO( 0xC07, hpmcounter7)     // Performance-monitoring counter
RO( 0xC08, hpmcounter8)     // Performance-monitoring counter
RO( 0xC09, hpmcounter9)     // Performance-monitoring counter
RO( 0xC0A, hpmcounter10)    // Performance-monitoring counter
RO( 0xC0B, hpmcounter11)    // Performance-monitoring counter
RO( 0xC0C, hpmcounter12)    // Performance-monitoring counter
RO( 0xC0D, hpmcounter13)    // Performance-monitoring counter
RO( 0xC0E, hpmcounter14)    // Performance-monitoring counter
RO( 0xC0F, hpmcounter15)    // Performance-monitoring counter
RO( 0xC10, hpmcounter16)    // Performance-monitoring counter
RO( 0xC11, hpmcounter17)    // Performance-monitoring counter
RO( 0xC12, hpmcounter18)    // Performance-monitoring counter
RO( 0xC13, hpmcounter19)    // Performance-monitoring counter
RO( 0xC14, hpmcounter20)    // Performance-monitoring counter
RO( 0xC15, hpmcounter21)    // Performance-monitoring counter
RO( 0xC16, hpmcounter22)    // Performance-monitoring counter
RO( 0xC17, hpmcounter23)    // Performance-monitoring counter
RO( 0xC18, hpmcounter24)    // Performance-monitoring counter
RO( 0xC19, hpmcounter25)    // Performance-monitoring counter
RO( 0xC1A, hpmcounter26)    // Performance-monitoring counter
RO( 0xC1B, hpmcounter27)    // Performance-monitoring counter
RO( 0xC1C, hpmcounter28)    // Performance-monitoring counter
RO( 0xC1D, hpmcounter29)    // Performance-monitoring counter
RO( 0xC1E, hpmcounter30)    // Performance-monitoring counter
RO( 0xC1F, hpmcounter31)    // Performance-monitoring counter
RO32(0xC80, cycleh)         // Upper 32 bits of cycle, RV32I only
RO32(0xC81, timeh)          // Upper 32 bits of time, RV32I only
RO32(0xC82, instreth)       // Upper 32 bits of instret, RV32I only
RO32(0xC83, hpmcounter3h)   // Upper 32 bits of hpmcounter3, RV32I only
RO32(0xC84, hpmcounter4h)
RO32(0xC85, hpmcounter5h)
RO32(0xC86, hpmcounter6h)
RO32(0xC87, hpmcounter7h)
RO32(0xC88, hpmcounter8h)
RO32(0xC89, hpmcounter9h)
RO32(0xC8A, hpmcounter10h)
RO32(0xC8B, hpmcounter11h)
RO32(0xC8C, hpmcounter12h)
RO32(0xC8D, hpmcounter13h)
RO32(0xC8E, hpmcounter14h)
RO32(0xC8F, hpmcounter15h)
RO32(0xC90, hpmcounter16h)
RO32(0xC91, hpmcounter17h)
RO32(0xC92, hpmcounter18h)
RO32(0xC93, hpmcounter19h)
RO32(0xC94, hpmcounter20h)
RO32(0xC95, hpmcounter21h)
RO32(0xC96, hpmcounter22h)
RO32(0xC97, hpmcounter23h)
RO32(0xC98, hpmcounter24h)
RO32(0xC99, hpmcounter25h)
RO32(0xC9A, hpmcounter26h)
RO32(0xC9B, hpmcounter27h)
RO32(0xC9C, hpmcounter28h)
RO32(0xC9D, hpmcounter29h)
RO32(0xC9E, hpmcounter30h)
RO32(0xC9F, hpmcounter31h)

// Supervisor Trap Setup
RW(0x100, sstatus)          // Supervisor status register
RW(0x102, sedeleg)          // Supervisor exception delegation register
RW(0x103, sideleg)          // Supervisor interrupt delegation register
RW(0x104, sie)              // Supervisor interrupt-enable register
RW(0x105, stvec)            // Supervisor trap handler base address
RW(0x106, scounteren)       // Supervisor counter enable

// Supervisor Trap Handling
RW(0x140, sscratch)         // Scratch register for supervisor trap handlers
RW(0x141, sepc)             // Supervisor exception program counter
RW(0x142, scause)           // Supervisor trap cause
RW(0x143, stval)            // Supervisor bad address or instruction
RW(0x144, sip)              // Supervisor interrupt pending

// Supervisor Protection and Translation
RW(0x180, satp)             // Supervisor address translation and protection

// Machine Information Registers
RO(0xF11, mvendorid)        // Vendor ID
RO(0xF12, marchid)          // Architecture ID
RO(0xF13, mimpid)           // Implementation ID
RO(0xF14, mhartid)          // Hardware thread ID

// Machine Trap Setup
RW(0x300, mstatus)          // Machine status register
RW(0x301, misa)             // ISA and extensions
RW(0x302, medeleg)          // Machine exception delegation register
RW(0x303, mideleg)          // Machine interrupt delegation register
RW(0x304, mie)              // Machine interrupt-enable register
RW(0x305, mtvec)            // Machine trap handler base address
RW(0x306, mcounteren)       // Machine counter enable

// Machine Trap Handling
RW(0x340, mscratch)         // Scratch register for machine trap handlers
RW(0x341, mepc)             // Machine exception program counter
RW(0x342, mcause)           // Machine trap cause
RW(0x343, mtval)            // Machine bad address or instruction
RW(0x344, mip)              // Machine interrupt pending

// Machine Protection and Translation
RW( 0x3A0, pmpcfg0)         // Physical memory protection configuration
RW32(0x3A1, pmpcfg1)        // Physical memory protection configuration, RV32 only
RW( 0x3A2, pmpcfg2)         // Physical memory protection configuration
RW32(0x3A3, pmpcfg3)        // Physical memory protection configuration, RV32 only
RW( 0x3B0, pmpaddr0)        // Physical memory protection address register
RW( 0x3B1, pmpaddr1)        // Physical memory protection address register
RW( 0x3B2, pmpaddr2)        // Physical memory protection address register
RW( 0x3B3, pmpaddr3)        // Physical memory protection address register
RW( 0x3B4, pmpaddr4)        // Physical memory protection address register
RW( 0x3B5, pmpaddr5)        // Physical memory protection address register
RW( 0x3B6, pmpaddr6)        // Physical memory protection address register
RW( 0x3B7, pmpaddr7)        // Physical memory protection address register
RW( 0x3B8, pmpaddr8)        // Physical memory protection address register
RW( 0x3B9, pmpaddr9)        // Physical memory protection address register
RW( 0x3BA, pmpaddr10)       // Physical memory protection address register
RW( 0x3BB, pmpaddr11)       // Physical memory protection address register
RW( 0x3BC, pmpaddr12)       // Physical memory protection address register
RW( 0x3BD, pmpaddr13)       // Physical memory protection address register
RW( 0x3BE, pmpaddr14)       // Physical memory protection address register
RW( 0x3BF, pmpaddr15)       // Physical memory protection address register

// Machine Counter/Timers
RO( 0xB00, mcycle)          // Machine cycle counter
RO( 0xB02, minstret)        // Machine instructions-retired counter
RO( 0xB03, mhpmcounter3)    // Machine performance-monitoring counter
RO( 0xB04, mhpmcounter4)    // Machine performance-monitoring counter
RO( 0xB05, mhpmcounter5)    // Machine performance-monitoring counter
RO( 0xB06, mhpmcounter6)    // Machine performance-monitoring counter
RO( 0xB07, mhpmcounter7)    // Machine performance-monitoring counter
RO( 0xB08, mhpmcounter8)    // Machine performance-monitoring counter
RO( 0xB09, mhpmcounter9)    // Machine performance-monitoring counter
RO( 0xB0A, mhpmcounter10)   // Machine performance-monitoring counter
RO( 0xB0B, mhpmcounter11)   // Machine performance-monitoring counter
RO( 0xB0C, mhpmcounter12)   // Machine performance-monitoring counter
RO( 0xB0D, mhpmcounter13)   // Machine performance-monitoring counter
RO( 0xB0E, mhpmcounter14)   // Machine performance-monitoring counter
RO( 0xB0F, mhpmcounter15)   // Machine performance-monitoring counter
RO( 0xB10, mhpmcounter16)   // Machine performance-monitoring counter
RO( 0xB11, mhpmcounter17)   // Machine performance-monitoring counter
RO( 0xB12, mhpmcounter18)   // Machine performance-monitoring counter
RO( 0xB13, mhpmcounter19)   // Machine performance-monitoring counter
RO( 0xB14, mhpmcounter20)   // Machine performance-monitoring counter
RO( 0xB15, mhpmcounter21)   // Machine performance-monitoring counter
RO( 0xB16, mhpmcounter22)   // Machine performance-monitoring counter
RO( 0xB17, mhpmcounter23)   // Machine performance-monitoring counter
RO( 0xB18, mhpmcounter24)   // Machine performance-monitoring counter
RO( 0xB19, mhpmcounter25)   // Machine performance-monitoring counter
RO( 0xB1A, mhpmcounter26)   // Machine performance-monitoring counter
RO( 0xB1B, mhpmcounter27)   // Machine performance-monitoring counter
RO( 0xB1C, mhpmcounter28)   // Machine performance-monitoring counter
RO( 0xB1D, mhpmcounter29)   // Machine performance-monitoring counter
RO( 0xB1E, mhpmcounter30)   // Machine performance-monitoring counter
RO( 0xB1F, mhpmcounter31)   // Machine performance-monitoring counter
RO32(0xB80, mcycleh)        // Upper 32 bits of mcycle, RV32I only
RO32(0xB82, minstreth)      // Upper 32 bits of minstret, RV32I only
RO32(0xB83, mhpmcounter3h)  // Upper 32 bits of mhpmcounter3, RV32I only
RO32(0xB84, mhpmcounter4h)
RO32(0xB85, mhpmcounter5h)
RO32(0xB86, mhpmcounter6h)
RO32(0xB87, mhpmcounter7h)
RO32(0xB88, mhpmcounter8h)
RO32(0xB89, mhpmcounter9h)
RO32(0xB8A, mhpmcounter10h)
RO32(0xB8B, mhpmcounter11h)
RO32(0xB8C, mhpmcounter12h)
RO32(0xB8D, mhpmcounter13h)
RO32(0xB8E, mhpmcounter14h)
RO32(0xB8F, mhpmcounter15h)
RO32(0xB90, mhpmcounter16h)
RO32(0xB91, mhpmcounter17h)
RO32(0xB92, mhpmcounter18h)
RO32(0xB93, mhpmcounter19h)
RO32(0xB94, mhpmcounter20h)
RO32(0xB95, mhpmcounter21h)
RO32(0xB96, mhpmcounter22h)
RO32(0xB97, mhpmcounter23h)
RO32(0xB98, mhpmcounter24h)
RO32(0xB99, mhpmcounter25h)
RO32(0xB9A, mhpmcounter26h)
RO32(0xB9B, mhpmcounter27h)
RO32(0xB9C, mhpmcounter28h)
RO32(0xB9D, mhpmcounter29h)
RO32(0xB9E, mhpmcounter30h)
RO32(0xB9F, mhpmcounter31h)
RW(0x323, mhpmevent3)       // Machine performance-monitoring event selector
RW(0x324, mhpmevent4)       // Machine performance-monitoring event selector
RW(0x325, mhpmevent5)       // Machine performance-monitoring event selector
RW(0x326, mhpmevent6)       // Machine performance-monitoring event selector
RW(0x327, mhpmevent7)       // Machine performance-monitoring event selector
RW(0x328, mhpmevent8)       // Machine performance-monitoring event selector
RW(0x329, mhpmevent9)       // Machine performance-monitoring event selector
RW(0x32A, mhpmevent10)      // Machine performance-monitoring event selector
RW(0x32B, mhpmevent11)      // Machine performance-monitoring event selector
RW(0x32C, mhpmevent12)      // Machine performance-monitoring event selector
RW(0x32D, mhpmevent13)      // Machine performance-monitoring event selector
RW(0x32E, mhpmevent14)      // Machine performance-monitoring event selector
RW(0x32F, mhpmevent15)      // Machine performance-monitoring event selector
RW(0x330, mhpmevent16)      // Machine performance-monitoring event selector
RW(0x331, mhpmevent17)      // Machine performance-monitoring event selector
RW(0x332, mhpmevent18)      // Machine performance-monitoring event selector
RW(0x333, mhpmevent19)      // Machine performance-monitoring event selector
RW(0x334, mhpmevent20)      // Machine performance-monitoring event selector
RW(0x335, mhpmevent21)      // Machine performance-monitoring event selector
RW(0x336, mhpmevent22)      // Machine performance-monitoring event selector
RW(0x337, mhpmevent23)      // Machine performance-monitoring event selector
RW(0x338, mhpmevent24)      // Machine performance-monitoring event selector
RW(0x339, mhpmevent25)      // Machine performance-monitoring event selector
RW(0x33A, mhpmevent26)      // Machine performance-monitoring event selector
RW(0x33B, mhpmevent27)      // Machine performance-monitoring event selector
RW(0x33C, mhpmevent28)      // Machine performance-monitoring event selector
RW(0x33D, mhpmevent29)      // Machine performance-monitoring event selector
RW(0x33E, mhpmevent30)      // Machine performance-monitoring event selector
RW(0x33F, mhpmevent31)      // Machine performance-monitoring event selector

// Debug/Trace Registers (shared with Debug Mode)
RW(0x7A0, tselect)          // Debug/Trace trigger register select
RW(0x7A1, tdata1)           // First Debug/Trace trigger data register
RW(0x7A2, tdata2)           // Second Debug/Trace trigger data register
RW(0x7A3, tdata3)           // Third Debug/Trace trigger data register

// Debug Mode Registers
RW(0x7B0, dcsr)             // Debug control and status register
RW(0x7B1, dpc)              // Debug PC
RW(0x7B2, dscratch)         // Debug scratch register

// Hypervisor Trap Setup
RW(0x600, hstatus)          // Hypervisor status register
RW(0x602, hedeleg)          // Hypervisor exception delegation register
RW(0x603, hideleg)          // Hypervisor interrupt delegation register
RW(0x604, hie)              // Hypervisor interrupt-enable register
RW(0x606, hcounteren)       // Hypervisor counter enable
RW(0x607, hgeie)            // Hypervisor guest external interrupt-enable register

// Hypervisor Trap Handling
RW(0x643, htval)            // Hypervisor bad guest physical address
RW(0x644, hip)              // Hypervisor interrupt pending
RW(0x645, hvip)             // Hypervisor virtual interrupt pending
RW(0x64a, htinst)           // Hypervisor trap instruction (transformed)
// NOTE(review): hgeip is read-only in the privileged spec; the RW accessor's
// write half would trap if used — verify against asm.h and callers.
RW(0xe12, hgeip)            // Hypervisor guest external interrupt pending

// Hypervisor Protection and Translation
// NOTE(review): hgatp is read-write in the privileged spec; RO here means no
// write accessor is generated — confirm this is intentional.
RO(0x680, hgatp)            // Hypervisor guest address translation and protection

// Debug/Trace Registers
RW(0x6a8, hcontext)         // Hypervisor-mode context register

// Hypervisor Counter/Timer Virtualization Registers
RW(0x605, htimedelta)       // Delta for VS/VU-mode timer
RW32(0x615, htimedeltah)    // Upper 32 bits of {\tt htimedelta}, RV32 only

// Virtual Supervisor Registers
RW(0x200, vsstatus)         // Virtual supervisor status register
RW(0x204, vsie)             // Virtual supervisor interrupt-enable register
RW(0x205, vstvec)           // Virtual supervisor trap handler base address
RW(0x240, vsscratch)        // Virtual supervisor scratch register
RW(0x241, vsepc)            // Virtual supervisor exception program counter
RW(0x242, vscause)          // Virtual supervisor trap cause
RW(0x243, vstval)           // Virtual supervisor bad address or instruction
RW(0x244, vsip)             // Virtual supervisor interrupt pending
RW(0x280, vsatp)            // Virtual supervisor address translation and protection
Littlew0od/OSKernel2023-Main.os-2-1-1-
1,791
kernel/src/trap/trap.S
# Trampoline trap entry/exit (rCore style): the TrapContext lives in user
# address space and satp is switched on every trap.
#
# TrapContext layout (offsets in 8-byte slots):
#   [0..31] x0..x31   [32] sstatus   [33] sepc
#   [34] kernel_satp  [35] kernel_sp [36] trap_handler
.altmacro
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .align 2
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    # sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    # (the loop below actually starts at x3, so x3/x4 are stored too — harmless)
    .set n, 3
    .rept 29
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler (jr, not call: the trampoline is mapped at a
    # different virtual address, so a pc-relative call could not return here)
    jr t1
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    # ld x3, 3*8(sp)
    .set n, 3
    .rept 29
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret

    .section .text.signaltrampoline
    .globl __call_sigreturn
    .align 2
# User-space stub placed on the signal stack: returns from a signal handler
# by issuing the sigreturn syscall (a7 = 139 is this kernel's sigreturn number).
__call_sigreturn:
    # ecall sys_sigreturn
    li a7, 139
    ecall
Littlew0od/OSKernel2023-Main.os-2-1-1-
17,376
dependency/riscv/asm.S
#include "asm.h" .section .text.__ebreak .global __ebreak __ebreak: ebreak ret .section .text.__wfi .global __wfi __wfi: wfi ret .section .text.__sfence_vma_all .global __sfence_vma_all __sfence_vma_all: sfence.vma ret .section .text.__sfence_vma .global __sfence_vma __sfence_vma: sfence.vma a0, a1 ret // RISC-V hypervisor instructions. // The switch for enabling LLVM support for asm generation. // #define LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT .section .text.__hfence_gvma .global __hfence_gvma __hfence_gvma: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hfence.gvma a0, a1 #else .word 1656029299 #endif ret .section .text.__hfence_vvma .global __hfence_vvma __hfence_vvma: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hfence.vvma a0, a1 #else .word 582287475 #endif ret .section .text.__hlv_b .global __hlv_b __hlv_b: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.b a0, a0 #else .word 1610958195 #endif ret .section .text.__hlv_bu .global __hlv_bu __hlv_bu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.bu a0, a0 #else .word 1612006771 #endif ret .section .text.__hlv_h .global __hlv_h __hlv_h: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.h a0, a0 #else .word 1678067059 #endif ret .section .text.__hlv_hu .global __hlv_hu __hlv_hu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.hu a0, a0 #else .word 1679115635 #endif ret .section .text.__hlvx_hu .global __hlvx_hu __hlvx_hu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlvx.hu a0, a0 #else .word 1681212787 #endif ret .section .text.__hlv_w .global __hlv_w __hlv_w: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.w a0, a0 #else .word 1745175923 #endif ret .section .text.__hlvx_wu .global __hlvx_wu __hlvx_wu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlvx.wu a0, a0 #else .word 1748321651 #endif ret .section .text.__hsv_b .global __hsv_b __hsv_b: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.b a0, a1 #else .word 1656045683 #endif ret .section .text.__hsv_h .global __hsv_h __hsv_h: #ifdef 
LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.h a0, a1 #else .word 1723154547 #endif ret .section .text.__hsv_w .global __hsv_w __hsv_w: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.w a0, a1 #else .word 1790263411 #endif ret .section .text.__hlv_wu .global __hlv_wu __hlv_wu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.wu a0, a0 #else .word 1746224499 #endif ret .section .text.__hlv_d .global __hlv_d __hlv_d: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.d a0, a0 #else .word 1812284787 #endif ret .section .text.__hsv_d .global __hsv_d __hsv_d: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.d a0, a1 #else .word 1857372275 #endif ret // User Trap Setup RW(0x000, ustatus) // User status register RW(0x004, uie) // User interrupt-enable register RW(0x005, utvec) // User trap handler base address // User Trap Handling RW(0x040, uscratch) // Scratch register for user trap handlers RW(0x041, uepc) // User exception program counter RW(0x042, ucause) // User trap cause RW(0x043, utval) // User bad address or instruction RW(0x044, uip) // User interrupt pending // User Floating-Point CSRs RW(0x001, fflags) // Floating-Point Accrued Exceptions RW(0x002, frm) // Floating-Point Dynamic Rounding Mode RW(0x003, fcsr) // Floating-Point Control and Status Register (frm + fflags) // User Counter/Timers RO( 0xC00, cycle) // Cycle counter for RDCYCLE instruction RO( 0xC01, time) // Timer for RDTIME instruction RO( 0xC02, instret) // Instructions-retired counter for RDINSTRET instruction RO( 0xC03, hpmcounter3) // Performance-monitoring counter RO( 0xC04, hpmcounter4) // Performance-monitoring counter RO( 0xC05, hpmcounter5) // Performance-monitoring counter RO( 0xC06, hpmcounter6) // Performance-monitoring counter RO( 0xC07, hpmcounter7) // Performance-monitoring counter RO( 0xC08, hpmcounter8) // Performance-monitoring counter RO( 0xC09, hpmcounter9) // Performance-monitoring counter RO( 0xC0A, hpmcounter10) // Performance-monitoring counter RO( 0xC0B, hpmcounter11) 
// Performance-monitoring counter RO( 0xC0C, hpmcounter12) // Performance-monitoring counter RO( 0xC0D, hpmcounter13) // Performance-monitoring counter RO( 0xC0E, hpmcounter14) // Performance-monitoring counter RO( 0xC0F, hpmcounter15) // Performance-monitoring counter RO( 0xC10, hpmcounter16) // Performance-monitoring counter RO( 0xC11, hpmcounter17) // Performance-monitoring counter RO( 0xC12, hpmcounter18) // Performance-monitoring counter RO( 0xC13, hpmcounter19) // Performance-monitoring counter RO( 0xC14, hpmcounter20) // Performance-monitoring counter RO( 0xC15, hpmcounter21) // Performance-monitoring counter RO( 0xC16, hpmcounter22) // Performance-monitoring counter RO( 0xC17, hpmcounter23) // Performance-monitoring counter RO( 0xC18, hpmcounter24) // Performance-monitoring counter RO( 0xC19, hpmcounter25) // Performance-monitoring counter RO( 0xC1A, hpmcounter26) // Performance-monitoring counter RO( 0xC1B, hpmcounter27) // Performance-monitoring counter RO( 0xC1C, hpmcounter28) // Performance-monitoring counter RO( 0xC1D, hpmcounter29) // Performance-monitoring counter RO( 0xC1E, hpmcounter30) // Performance-monitoring counter RO( 0xC1F, hpmcounter31) // Performance-monitoring counter RO32(0xC80, cycleh) // Upper 32 bits of cycle, RV32I only RO32(0xC81, timeh) // Upper 32 bits of time, RV32I only RO32(0xC82, instreth) // Upper 32 bits of instret, RV32I only RO32(0xC83, hpmcounter3h) // Upper 32 bits of hpmcounter3, RV32I only RO32(0xC84, hpmcounter4h) RO32(0xC85, hpmcounter5h) RO32(0xC86, hpmcounter6h) RO32(0xC87, hpmcounter7h) RO32(0xC88, hpmcounter8h) RO32(0xC89, hpmcounter9h) RO32(0xC8A, hpmcounter10h) RO32(0xC8B, hpmcounter11h) RO32(0xC8C, hpmcounter12h) RO32(0xC8D, hpmcounter13h) RO32(0xC8E, hpmcounter14h) RO32(0xC8F, hpmcounter15h) RO32(0xC90, hpmcounter16h) RO32(0xC91, hpmcounter17h) RO32(0xC92, hpmcounter18h) RO32(0xC93, hpmcounter19h) RO32(0xC94, hpmcounter20h) RO32(0xC95, hpmcounter21h) RO32(0xC96, hpmcounter22h) RO32(0xC97, hpmcounter23h) 
RO32(0xC98, hpmcounter24h) RO32(0xC99, hpmcounter25h) RO32(0xC9A, hpmcounter26h) RO32(0xC9B, hpmcounter27h) RO32(0xC9C, hpmcounter28h) RO32(0xC9D, hpmcounter29h) RO32(0xC9E, hpmcounter30h) RO32(0xC9F, hpmcounter31h) // Supervisor Trap Setup RW(0x100, sstatus) // Supervisor status register RW(0x102, sedeleg) // Supervisor exception delegation register RW(0x103, sideleg) // Supervisor interrupt delegation register RW(0x104, sie) // Supervisor interrupt-enable register RW(0x105, stvec) // Supervisor trap handler base address RW(0x106, scounteren) // Supervisor counter enable // Supervisor Trap Handling RW(0x140, sscratch) // Scratch register for supervisor trap handlers RW(0x141, sepc) // Supervisor exception program counter RW(0x142, scause) // Supervisor trap cause RW(0x143, stval) // Supervisor bad address or instruction RW(0x144, sip) // Supervisor interrupt pending // Supervisor Protection and Translation RW(0x180, satp) // Supervisor address translation and protection // Machine Information Registers RO(0xF11, mvendorid) // Vendor ID RO(0xF12, marchid) // Architecture ID RO(0xF13, mimpid) // Implementation ID RO(0xF14, mhartid) // Hardware thread ID // Machine Trap Setup RW(0x300, mstatus) // Machine status register RW(0x301, misa) // ISA and extensions RW(0x302, medeleg) // Machine exception delegation register RW(0x303, mideleg) // Machine interrupt delegation register RW(0x304, mie) // Machine interrupt-enable register RW(0x305, mtvec) // Machine trap handler base address RW(0x306, mcounteren) // Machine counter enable // Machine Trap Handling RW(0x340, mscratch) // Scratch register for machine trap handlers RW(0x341, mepc) // Machine exception program counter RW(0x342, mcause) // Machine trap cause RW(0x343, mtval) // Machine bad address or instruction RW(0x344, mip) // Machine interrupt pending // Machine Protection and Translation RW( 0x3A0, pmpcfg0) // Physical memory protection configuration RW32(0x3A1, pmpcfg1) // Physical memory protection 
configuration, RV32 only RW( 0x3A2, pmpcfg2) // Physical memory protection configuration RW32(0x3A3, pmpcfg3) // Physical memory protection configuration, RV32 only RW( 0x3B0, pmpaddr0) // Physical memory protection address register RW( 0x3B1, pmpaddr1) // Physical memory protection address register RW( 0x3B2, pmpaddr2) // Physical memory protection address register RW( 0x3B3, pmpaddr3) // Physical memory protection address register RW( 0x3B4, pmpaddr4) // Physical memory protection address register RW( 0x3B5, pmpaddr5) // Physical memory protection address register RW( 0x3B6, pmpaddr6) // Physical memory protection address register RW( 0x3B7, pmpaddr7) // Physical memory protection address register RW( 0x3B8, pmpaddr8) // Physical memory protection address register RW( 0x3B9, pmpaddr9) // Physical memory protection address register RW( 0x3BA, pmpaddr10) // Physical memory protection address register RW( 0x3BB, pmpaddr11) // Physical memory protection address register RW( 0x3BC, pmpaddr12) // Physical memory protection address register RW( 0x3BD, pmpaddr13) // Physical memory protection address register RW( 0x3BE, pmpaddr14) // Physical memory protection address register RW( 0x3BF, pmpaddr15) // Physical memory protection address register // Machine Counter/Timers RO( 0xB00, mcycle) // Machine cycle counter RO( 0xB02, minstret) // Machine instructions-retired counter RO( 0xB03, mhpmcounter3) // Machine performance-monitoring counter RO( 0xB04, mhpmcounter4) // Machine performance-monitoring counter RO( 0xB05, mhpmcounter5) // Machine performance-monitoring counter RO( 0xB06, mhpmcounter6) // Machine performance-monitoring counter RO( 0xB07, mhpmcounter7) // Machine performance-monitoring counter RO( 0xB08, mhpmcounter8) // Machine performance-monitoring counter RO( 0xB09, mhpmcounter9) // Machine performance-monitoring counter RO( 0xB0A, mhpmcounter10) // Machine performance-monitoring counter RO( 0xB0B, mhpmcounter11) // Machine performance-monitoring counter RO( 
0xB0C, mhpmcounter12) // Machine performance-monitoring counter RO( 0xB0D, mhpmcounter13) // Machine performance-monitoring counter RO( 0xB0E, mhpmcounter14) // Machine performance-monitoring counter RO( 0xB0F, mhpmcounter15) // Machine performance-monitoring counter RO( 0xB10, mhpmcounter16) // Machine performance-monitoring counter RO( 0xB11, mhpmcounter17) // Machine performance-monitoring counter RO( 0xB12, mhpmcounter18) // Machine performance-monitoring counter RO( 0xB13, mhpmcounter19) // Machine performance-monitoring counter RO( 0xB14, mhpmcounter20) // Machine performance-monitoring counter RO( 0xB15, mhpmcounter21) // Machine performance-monitoring counter RO( 0xB16, mhpmcounter22) // Machine performance-monitoring counter RO( 0xB17, mhpmcounter23) // Machine performance-monitoring counter RO( 0xB18, mhpmcounter24) // Machine performance-monitoring counter RO( 0xB19, mhpmcounter25) // Machine performance-monitoring counter RO( 0xB1A, mhpmcounter26) // Machine performance-monitoring counter RO( 0xB1B, mhpmcounter27) // Machine performance-monitoring counter RO( 0xB1C, mhpmcounter28) // Machine performance-monitoring counter RO( 0xB1D, mhpmcounter29) // Machine performance-monitoring counter RO( 0xB1E, mhpmcounter30) // Machine performance-monitoring counter RO( 0xB1F, mhpmcounter31) // Machine performance-monitoring counter RO32(0xB80, mcycleh) // Upper 32 bits of mcycle, RV32I only RO32(0xB82, minstreth) // Upper 32 bits of minstret, RV32I only RO32(0xB83, mhpmcounter3h) // Upper 32 bits of mhpmcounter3, RV32I only RO32(0xB84, mhpmcounter4h) RO32(0xB85, mhpmcounter5h) RO32(0xB86, mhpmcounter6h) RO32(0xB87, mhpmcounter7h) RO32(0xB88, mhpmcounter8h) RO32(0xB89, mhpmcounter9h) RO32(0xB8A, mhpmcounter10h) RO32(0xB8B, mhpmcounter11h) RO32(0xB8C, mhpmcounter12h) RO32(0xB8D, mhpmcounter13h) RO32(0xB8E, mhpmcounter14h) RO32(0xB8F, mhpmcounter15h) RO32(0xB90, mhpmcounter16h) RO32(0xB91, mhpmcounter17h) RO32(0xB92, mhpmcounter18h) RO32(0xB93, mhpmcounter19h) 
RO32(0xB94, mhpmcounter20h) RO32(0xB95, mhpmcounter21h) RO32(0xB96, mhpmcounter22h) RO32(0xB97, mhpmcounter23h) RO32(0xB98, mhpmcounter24h) RO32(0xB99, mhpmcounter25h) RO32(0xB9A, mhpmcounter26h) RO32(0xB9B, mhpmcounter27h) RO32(0xB9C, mhpmcounter28h) RO32(0xB9D, mhpmcounter29h) RO32(0xB9E, mhpmcounter30h) RO32(0xB9F, mhpmcounter31h) RW(0x323, mhpmevent3) // Machine performance-monitoring event selector RW(0x324, mhpmevent4) // Machine performance-monitoring event selector RW(0x325, mhpmevent5) // Machine performance-monitoring event selector RW(0x326, mhpmevent6) // Machine performance-monitoring event selector RW(0x327, mhpmevent7) // Machine performance-monitoring event selector RW(0x328, mhpmevent8) // Machine performance-monitoring event selector RW(0x329, mhpmevent9) // Machine performance-monitoring event selector RW(0x32A, mhpmevent10) // Machine performance-monitoring event selector RW(0x32B, mhpmevent11) // Machine performance-monitoring event selector RW(0x32C, mhpmevent12) // Machine performance-monitoring event selector RW(0x32D, mhpmevent13) // Machine performance-monitoring event selector RW(0x32E, mhpmevent14) // Machine performance-monitoring event selector RW(0x32F, mhpmevent15) // Machine performance-monitoring event selector RW(0x330, mhpmevent16) // Machine performance-monitoring event selector RW(0x331, mhpmevent17) // Machine performance-monitoring event selector RW(0x332, mhpmevent18) // Machine performance-monitoring event selector RW(0x333, mhpmevent19) // Machine performance-monitoring event selector RW(0x334, mhpmevent20) // Machine performance-monitoring event selector RW(0x335, mhpmevent21) // Machine performance-monitoring event selector RW(0x336, mhpmevent22) // Machine performance-monitoring event selector RW(0x337, mhpmevent23) // Machine performance-monitoring event selector RW(0x338, mhpmevent24) // Machine performance-monitoring event selector RW(0x339, mhpmevent25) // Machine performance-monitoring event selector RW(0x33A, 
mhpmevent26) // Machine performance-monitoring event selector RW(0x33B, mhpmevent27) // Machine performance-monitoring event selector RW(0x33C, mhpmevent28) // Machine performance-monitoring event selector RW(0x33D, mhpmevent29) // Machine performance-monitoring event selector RW(0x33E, mhpmevent30) // Machine performance-monitoring event selector RW(0x33F, mhpmevent31) // Machine performance-monitoring event selector // Debug/Trace Registers (shared with Debug Mode) RW(0x7A0, tselect) // Debug/Trace trigger register select RW(0x7A1, tdata1) // First Debug/Trace trigger data register RW(0x7A2, tdata2) // Second Debug/Trace trigger data register RW(0x7A3, tdata3) // Third Debug/Trace trigger data register // Debug Mode Registers RW(0x7B0, dcsr) // Debug control and status register RW(0x7B1, dpc) // Debug PC RW(0x7B2, dscratch) // Debug scratch register // Hypervisor Trap Setup RW(0x600, hstatus) // Hypervisor status register RW(0x602, hedeleg) // Hypervisor exception delegation register RW(0x603, hideleg) // Hypervisor interrupt delegation register RW(0x604, hie) // Hypervisor interrupt-enable register RW(0x606, hcounteren) // Hypervisor counter enable RW(0x607, hgeie) // Hypervisor guest external interrupt-enable register // Hypervisor Trap Handling RW(0x643, htval) // Hypervisor bad guest physical address RW(0x644, hip) // Hypervisor interrupt pending RW(0x645, hvip) // Hypervisor virtual interrupt pending RW(0x64a, htinst) // Hypervisor trap instruction (transformed) RW(0xe12, hgeip) // Hypervisor guest external interrupt pending // Hypervisor Protection and Translation RO(0x680, hgatp) // Hypervisor guest address translation and protection // Debug/Trace Registers RW(0x6a8, hcontext) // Hypervisor-mode context register // Hypervisor Counter/Timer Virtualization Registers RW(0x605, htimedelta) // Delta for VS/VU-mode timer RW32(0x615, htimedeltah) // Upper 32 bits of {\tt htimedelta}, RV32 only // Virtual Supervisor Registers RW(0x200, vsstatus) // Virtual 
supervisor status register RW(0x204, vsie) // Virtual supervisor interrupt-enable register RW(0x205, vstvec) // Virtual supervisor trap handler base address RW(0x240, vsscratch) // Virtual supervisor scratch register RW(0x241, vsepc) // Virtual supervisor exception program counter RW(0x242, vscause) // Virtual supervisor trap cause RW(0x243, vstval) // Virtual supervisor bad address or instruction RW(0x244, vsip) // Virtual supervisor interrupt pending RW(0x280, vsatp) // Virtual supervisor address translation and protection
LiuJun5817/rbpf-main
8,125
execution.s
# rustc-emitted x86-64 assembly (AT&T syntax, System V AMD64 ABI) for the
# `execution` test binary's startup glue: C `main` -> std::rt::lang_start ->
# lang_start closure -> user main (here: libtest's test_main_static), plus the
# unwind plumbing for the FnOnce::call_once shim.  Generated code — do not
# hand-edit instruction order; CFI directives must stay in sync.
        .text
        .file   "execution.7e26790a3db53ff8-cgu.0"

# std::rt::lang_start(main, argc, argv, sigpipe)
# In:  rdi = main fn ptr, rsi = argc, rdx = argv, ecx = sigpipe
# Shifts each argument one slot right, spills `main` to the stack so its
# address can serve as the closure environment, and tail-args into
# lang_start_internal(&main, vtable, argc, argv, sigpipe).
        .section        .text._ZN3std2rt10lang_start17hb21359643fb4ec85E,"ax",@progbits
        .hidden _ZN3std2rt10lang_start17hb21359643fb4ec85E
        .globl  _ZN3std2rt10lang_start17hb21359643fb4ec85E
        .p2align        4, 0x90
        .type   _ZN3std2rt10lang_start17hb21359643fb4ec85E,@function
_ZN3std2rt10lang_start17hb21359643fb4ec85E:
        .cfi_startproc
        subq    $24, %rsp                       # 24 + 8 (ret addr) keeps rsp 16-aligned at calls
        .cfi_def_cfa_offset 32
        movl    %ecx, %eax                      # eax = sigpipe (4th arg)
        movq    %rdx, %rcx                      # rcx = argv -> arg 4 slot
        movq    %rsi, %rdx                      # rdx = argc -> arg 3 slot
        movq    %rdi, 16(%rsp)                  # spill main fn ptr; its address = closure env
        leaq    16(%rsp), %rdi                  # rdi = &main (closure data pointer)
        leaq    .L__unnamed_1(%rip), %rsi       # rsi = closure vtable (RIP-relative, PIC-safe)
        movzbl  %al, %r8d                       # r8 = sigpipe zero-extended
        callq   *_ZN3std2rt19lang_start_internal17h4a459eea85397345E@GOTPCREL(%rip)
        movq    %rax, 8(%rsp)                   # round-trip the isize result through the stack
        movq    8(%rsp), %rax
        addq    $24, %rsp
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end0:
        .size   _ZN3std2rt10lang_start17hb21359643fb4ec85E, .Lfunc_end0-_ZN3std2rt10lang_start17hb21359643fb4ec85E
        .cfi_endproc

# lang_start::{{closure}} — loads the spilled `main` fn ptr from the closure
# env (*rdi), runs it under __rust_begin_short_backtrace, then converts the
# `()` result via Termination::report into an exit-code byte in eax.
        .section        ".text._ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE","ax",@progbits
        .p2align        4, 0x90
        .type   _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE,@function
_ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE:
        .cfi_startproc
        pushq   %rax                            # realign rsp to 16 for the calls below
        .cfi_def_cfa_offset 16
        movq    (%rdi), %rdi                    # rdi = *env = user main fn ptr
        callq   _ZN3std3sys9backtrace28__rust_begin_short_backtrace17hf200dddcd1f10969E
        callq   _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h0561b17b27da706eE
        movb    %al, 7(%rsp)                    # stash report() result byte
        movzbl  7(%rsp), %eax                   # zero-extend it back into eax
        popq    %rcx
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end1:
        .size   _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE, .Lfunc_end1-_ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE
        .cfi_endproc

# std::sys::backtrace::__rust_begin_short_backtrace — marker frame for
# backtrace trimming; just invokes the FnOnce it was given (in rdi).
# The empty #APP/#NO_APP pair is an inline-asm black_box keeping this
# frame from being optimized away.
        .section        .text._ZN3std3sys9backtrace28__rust_begin_short_backtrace17hf200dddcd1f10969E,"ax",@progbits
        .p2align        4, 0x90
        .type   _ZN3std3sys9backtrace28__rust_begin_short_backtrace17hf200dddcd1f10969E,@function
_ZN3std3sys9backtrace28__rust_begin_short_backtrace17hf200dddcd1f10969E:
        .cfi_startproc
        pushq   %rax
        .cfi_def_cfa_offset 16
        callq   _ZN4core3ops8function6FnOnce9call_once17hf62bd4d407bd7728E
        #APP
        #NO_APP
        popq    %rax
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end2:
        .size   _ZN3std3sys9backtrace28__rust_begin_short_backtrace17hf200dddcd1f10969E, .Lfunc_end2-_ZN3std3sys9backtrace28__rust_begin_short_backtrace17hf200dddcd1f10969E
        .cfi_endproc

# FnOnce::call_once vtable shim — dereferences the env pointer and forwards
# to the non-shim call_once below.  This is the pointer stored in the
# .L__unnamed_1 vtable.
        .section        ".text._ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h95b8a31c278e1155E","ax",@progbits
        .p2align        4, 0x90
        .type   _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h95b8a31c278e1155E,@function
_ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h95b8a31c278e1155E:
        .cfi_startproc
        pushq   %rax
        .cfi_def_cfa_offset 16
        movq    (%rdi), %rdi
        callq   _ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E
        popq    %rcx
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end3:
        .size   _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h95b8a31c278e1155E, .Lfunc_end3-_ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h95b8a31c278e1155E
        .cfi_endproc

# FnOnce::call_once (landing-pad variant) — calls the lang_start closure with
# unwind handling: .Ltmp0..Ltmp1 is the guarded range, .LBB4_2 is the landing
# pad (see .Lexception0 below), which resumes the unwind via _Unwind_Resume.
        .section        .text._ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E,"ax",@progbits
        .p2align        4, 0x90
        .type   _ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E,@function
_ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E:
.Lfunc_begin0:
        .cfi_startproc
        .cfi_personality 155, DW.ref.rust_eh_personality
        .cfi_lsda 27, .Lexception0
        subq    $40, %rsp
        .cfi_def_cfa_offset 48
        movq    %rdi, 8(%rsp)                   # copy env ptr to a local slot
.Ltmp0:
        leaq    8(%rsp), %rdi
        callq   _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE
.Ltmp1:
        movl    %eax, 4(%rsp)
        jmp     .LBB4_3
.LBB4_1:                                        # resume propagating the panic
        movq    24(%rsp), %rdi
        callq   _Unwind_Resume@PLT
.LBB4_2:                                        # landing pad: save exception ptr + selector
.Ltmp2:
        movq    %rax, %rcx
        movl    %edx, %eax
        movq    %rcx, 24(%rsp)
        movl    %eax, 32(%rsp)
        jmp     .LBB4_1
.LBB4_3:                                        # normal return path
        movl    4(%rsp), %eax
        addq    $40, %rsp
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end4:
        .size   _ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E, .Lfunc_end4-_ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E
        .cfi_endproc

# LSDA (exception table) for the call_once above: one call-site entry mapping
# the .Ltmp0-.Ltmp1 range to the .Ltmp2 landing pad, no action (cleanup only).
        .section        .gcc_except_table._ZN4core3ops8function6FnOnce9call_once17h3b419c02f3cdb588E,"a",@progbits
        .p2align        2, 0x0
GCC_except_table4:
.Lexception0:
        .byte   255                             # @LPStart omitted
        .byte   255                             # @TType omitted
        .byte   1                               # call-site encoding: uleb128
        .uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
        .uleb128 .Ltmp0-.Lfunc_begin0           # guarded region start
        .uleb128 .Ltmp1-.Ltmp0                  # guarded region length
        .uleb128 .Ltmp2-.Lfunc_begin0           # landing pad
        .byte   0                               # no action
        .uleb128 .Ltmp1-.Lfunc_begin0           # rest of function: no landing pad
        .uleb128 .Lfunc_end4-.Ltmp1
        .byte   0
        .byte   0
.Lcst_end0:
        .p2align        2, 0x0

# FnOnce::call_once for a bare fn ptr — the callee IS the argument (rdi).
        .section        .text._ZN4core3ops8function6FnOnce9call_once17hf62bd4d407bd7728E,"ax",@progbits
        .p2align        4, 0x90
        .type   _ZN4core3ops8function6FnOnce9call_once17hf62bd4d407bd7728E,@function
_ZN4core3ops8function6FnOnce9call_once17hf62bd4d407bd7728E:
        .cfi_startproc
        pushq   %rax
        .cfi_def_cfa_offset 16
        callq   *%rdi
        popq    %rax
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end5:
        .size   _ZN4core3ops8function6FnOnce9call_once17hf62bd4d407bd7728E, .Lfunc_end5-_ZN4core3ops8function6FnOnce9call_once17hf62bd4d407bd7728E
        .cfi_endproc

# drop_in_place for the lang_start closure — closure owns nothing; no-op.
        .section        ".text._ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17he9283b2115a8b38dE","ax",@progbits
        .p2align        4, 0x90
        .type   _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17he9283b2115a8b38dE,@function
_ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17he9283b2115a8b38dE:
        .cfi_startproc
        retq
.Lfunc_end6:
        .size   _ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17he9283b2115a8b38dE, .Lfunc_end6-_ZN4core3ptr85drop_in_place$LT$std..rt..lang_start$LT$$LP$$RP$$GT$..$u7b$$u7b$closure$u7d$$u7d$$GT$17he9283b2115a8b38dE
        .cfi_endproc

# <() as Termination>::report — always exit code 0.
        .section        ".text._ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h0561b17b27da706eE","ax",@progbits
        .p2align        4, 0x90
        .type   _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h0561b17b27da706eE,@function
_ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h0561b17b27da706eE:
        .cfi_startproc
        xorl    %eax, %eax                      # idiomatic zero: exit code 0
        retq
.Lfunc_end7:
        .size   _ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h0561b17b27da706eE, .Lfunc_end7-_ZN54_$LT$$LP$$RP$$u20$as$u20$std..process..Termination$GT$6report17h0561b17b27da706eE
        .cfi_endproc

# execution::main — the test harness entry: calls
# test::test_main_static(tests_ptr=8, tests_len=0).
# NOTE(review): $8/len 0 looks like an empty static test slice — confirm
# against the crate's libtest metadata.
        .section        .text._ZN9execution4main17hb6a5818852063068E,"ax",@progbits
        .p2align        4, 0x90
        .type   _ZN9execution4main17hb6a5818852063068E,@function
_ZN9execution4main17hb6a5818852063068E:
        .cfi_startproc
        pushq   %rax
        .cfi_def_cfa_offset 16
        movl    $8, %edi
        xorl    %eax, %eax
        movl    %eax, %esi
        callq   *_ZN4test16test_main_static17hba1978a6861d4f9bE@GOTPCREL(%rip)
        popq    %rax
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end8:
        .size   _ZN9execution4main17hb6a5818852063068E, .Lfunc_end8-_ZN9execution4main17hb6a5818852063068E
        .cfi_endproc

# C entry point: main(argc, argv) -> lang_start(execution::main, argc, argv, 0)
        .section        .text.main,"ax",@progbits
        .globl  main
        .p2align        4, 0x90
        .type   main,@function
main:
        .cfi_startproc
        pushq   %rax
        .cfi_def_cfa_offset 16
        movq    %rsi, %rdx                      # rdx = argv
        movslq  %edi, %rsi                      # rsi = (i64)argc, sign-extended
        leaq    _ZN9execution4main17hb6a5818852063068E(%rip), %rdi
        xorl    %ecx, %ecx                      # sigpipe = 0
        callq   _ZN3std2rt10lang_start17hb21359643fb4ec85E
        popq    %rcx
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end9:
        .size   main, .Lfunc_end9-main
        .cfi_endproc

# Closure vtable used by lang_start: header bytes (first qword 0, then
# size=8 / align=8 encoded in the .asciz) followed by the call_once shim and
# two call entries.  48 bytes total.
        .type   .L__unnamed_1,@object
        .section        .data.rel.ro..L__unnamed_1,"aw",@progbits
        .p2align        3, 0x0
.L__unnamed_1:
        .asciz  "\000\000\000\000\000\000\000\000\b\000\000\000\000\000\000\000\b\000\000\000\000\000\000"
        .quad   _ZN4core3ops8function6FnOnce40call_once$u7b$$u7b$vtable.shim$u7d$$u7d$17h95b8a31c278e1155E
        .quad   _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE
        .quad   _ZN3std2rt10lang_start28_$u7b$$u7b$closure$u7d$$u7d$17hb1c0fffd0cb38b2bE
        .size   .L__unnamed_1, 48

# Indirect (COMDAT) reference cell to the unwind personality routine.
        .hidden DW.ref.rust_eh_personality
        .weak   DW.ref.rust_eh_personality
        .section        .data.DW.ref.rust_eh_personality,"awG",@progbits,DW.ref.rust_eh_personality,comdat
        .p2align        3, 0x0
        .type   DW.ref.rust_eh_personality,@object
        .size   DW.ref.rust_eh_personality, 8
DW.ref.rust_eh_personality:
        .quad   rust_eh_personality
        .ident  "rustc version 1.83.0-nightly (52fd99839 2024-10-10)"
        .section        ".note.GNU-stack","",@progbits
llb1238/NPUCORE_2025_npu123team
4,264
os/src/hal/arch/loongarch64/trap/trap.S
# LoongArch64 trap trampolines (user trap entry/exit, signal trampoline,
# kernel-mode trap entry).  TrapContext layout implied by the offsets used
# here: slot 0 = era, slots 1..31 = r1..r31 (r3/sp at slot 3), slots 32..63
# = f0..f31, 64*8 = fcsr (word) with the packed fcc byte at 64*8+4,
# 65*8 = saved a0, 66*8 = prmd, 68*8 = trap_handler, 69*8 = kernel_sp.
#
# FIX: the FCC save in __alltraps previously did `movcf2gr $t0, $fccN` for
# every flag with shifts in between; movcf2gr overwrites its *entire*
# destination register with 0/1, so the shifts were discarded and only fcc0
# was ever saved.  Each flag is now read into scratch $t1 and OR-ed into the
# accumulator so that fccN lands in bit N, matching the restore sequence.

FP_START = 32                   # TrapContext slot index where f0..f31 begin

.altmacro
# SAVE_GP n: store GPR r<n> into TrapContext slot n (8 bytes per slot).
.macro SAVE_GP n
    st.d $r\n, $sp, \n*8
.endm
# LOAD_GP n: load GPR r<n> from TrapContext slot n.
.macro LOAD_GP n
    ld.d $r\n, $sp, \n*8
.endm
# SAVE_FP n, m: store FPR f<n> into TrapContext slot m.
.macro SAVE_FP n, m
    fst.d $f\n, $sp, \m*8
.endm
# LOAD_FP n, m: load FPR f<n> from TrapContext slot m.
.macro LOAD_FP n, m
    fld.d $f\n, $sp, \m*8
.endm

    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .align 2

# Named CSR numbers used below.
.equ CSR_SAVE, 0x30             # scratch CSR (holds *TrapContext / saved sp)
.equ CSR_ERA,  0x6              # exception return address
.equ CSR_PRMD, 0x1              # pre-exception mode info
.equ CSR_PGDL, 0x19             # low-half page table base
.equ CSR_PGD,  0x1b

# __alltraps: user-mode trap entry.  On entry CSR_SAVE holds the user-space
# TrapContext pointer; it is swapped with sp, the full register state is
# saved, then control transfers to the kernel-space trap handler.
__alltraps:
    #==================REMEMBER TO TURN OFF THE INTERRUPT !=======================
    csrwr $sp, CSR_SAVE
    # now sp->*TrapContext in user space, CSR_SAVE->user stack
    SAVE_GP 1
    SAVE_GP 2
    # skip r3(sp) — the user sp is still in CSR_SAVE; stored below
    .set n, 4
    .rept 28
        SAVE_GP %n
        .set n, n+1
    .endr
    .set n, 0
    .set m, FP_START
    .rept 32
        SAVE_FP %n, %m
        .set n, n+1
        .set m, m+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    # save FCSR
    movfcsr2gr $t0, $fcsr0
    st.w $t0, $sp, 64*8
    # save FCC: pack fcc7..fcc0 into bits 7..0 of t0.  movcf2gr replaces the
    # whole destination register, so each flag goes through scratch t1 and is
    # OR-ed into the accumulator (the old code overwrote t0 every time and
    # effectively saved only fcc0).
    movcf2gr $t0, $fcc7
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc6
    or $t0, $t0, $t1
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc5
    or $t0, $t0, $t1
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc4
    or $t0, $t0, $t1
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc3
    or $t0, $t0, $t1
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc2
    or $t0, $t0, $t1
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc1
    or $t0, $t0, $t1
    slli.w $t0, $t0, 1
    movcf2gr $t1, $fcc0
    or $t0, $t0, $t1
    st.b $t0, $sp, 64*8+4
    # save other general purpose registers
    st.d $a0, $sp, 65*8
    csrrd $t0, CSR_PRMD
    csrrd $t1, CSR_ERA
    st.d $t0, $sp, 66*8
    st.d $t1, $sp, 0
    # read user stack from SAVE and save it in TrapContext
    csrrd $t2, CSR_SAVE
    st.d $t2, $sp, 3*8
    # load kernel_satp into t0 (currently unused — kernel mapping is shared)
    #ld.d $t0, $sp, 67*8
    # load trap_handler into t1
    ld.d $t1, $sp, 68*8
    # move to kernel_sp
    ld.d $sp, $sp, 69*8
    # switch to kernel space
    #csrwr $t0, CSR_PGDL
    invtlb 0x3, $zero, $zero
    # jump to trap_handler
    jr $t1

# __restore(a0 = *TrapContext in user space, a1 = user space token):
# switches back to the user address space and restores all state saved by
# __alltraps, finishing with ertn back to user mode.
__restore:
    # a0: *TrapContext in user space(Constant),
    # a1: user space token
    # switch to user space: token -> page table base, then flush the TLB
    slli.d $a1, $a1, 12
    csrwr $a1, CSR_PGDL
    invtlb 0x3, $zero, $zero
    move $sp, $a0
    csrwr $a0, CSR_SAVE
    # now sp points to TrapContext in user space, start restoring based on it
    # restore FCSR
    ld.w $t0, $sp, 64*8
    movgr2fcsr $fcsr0, $t0
    # restore FCC: movgr2cf takes bit 0 of the source, so shift the packed
    # byte right one bit per flag (fcc0 from bit 0 ... fcc7 from bit 7).
    ld.b $t0, $sp, 64*8+4
    movgr2cf $fcc0, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc1, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc2, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc3, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc4, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc5, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc6, $t0
    srli.w $t0, $t0, 1
    movgr2cf $fcc7, $t0
    # restore CSR_PRMD/CSR_ERA
    ld.d $t0, $sp, 66*8
    ld.d $t1, $sp, 0
    csrwr $t0, CSR_PRMD
    csrwr $t1, CSR_ERA
    # restore general purpose registers except r0/$sp
    LOAD_GP 1
    LOAD_GP 2
    .set n, 4
    .rept 28
        LOAD_GP %n
        .set n, n+1
    .endr
    .set n, 0
    .set m, FP_START
    .rept 32
        LOAD_FP %n, %m
        .set n, n+1
        .set m, m+1
    .endr
    # back to user stack (r3 = sp, restored last)
    LOAD_GP 3
    ertn

    .section .text.signaltrampoline
    .globl __call_sigreturn
    .align 2
# __call_sigreturn: user-space return trampoline for signal handlers;
# issues the sys_sigreturn (139) syscall.
__call_sigreturn:
    # ecall sys_sigreturn
    addi.d $a7, $zero, 139
    syscall 0

# Kernel Trap Section
    .section .text.kern_trap
    .globl __kern_trap
    .align 2
# __kern_trap: trap taken while already in kernel mode.  Pushes a
# GeneralRegisters frame on the (8-byte-aligned-down) kernel stack, calls
# trap_from_kernel(&frame), then restores and ertn's back.
__kern_trap:
    # Keep the original $sp in SAVE
    csrwr $sp, CSR_SAVE
    csrrd $sp, CSR_SAVE
    # Now move the $sp lower to push the registers
    addi.d $sp, $sp, -256
    # Align the $sp down to 8 bytes
    srli.d $sp, $sp, 3
    slli.d $sp, $sp, 3
    # now sp->*GeneralRegisters in kern space, CSR_SAVE->(the previous $sp)
    SAVE_GP 1                   # Save $ra
    SAVE_GP 2                   # Save $tp
    # skip r3(sp)
    .set n, 4
    .rept 28
        SAVE_GP %n
        .set n, n+1
    .endr
    .set n, 0
    csrrd $t0, CSR_ERA
    st.d $t0, $sp, 0            # slot 0 = era (resume pc)
    move $a0, $sp               # a0 = &GeneralRegisters for the handler
    csrrd $sp, CSR_SAVE
    st.d $sp, $a0, 3*8          # record the pre-trap sp in slot 3
    move $sp, $a0
    bl trap_from_kernel
    ld.d $ra, $sp, 0
    csrwr $ra, CSR_ERA          # restore resume pc (ra reloaded just below)
    LOAD_GP 1
    LOAD_GP 2
    # skip r3(sp)
    .set n, 4
    .rept 28
        LOAD_GP %n
        .set n, n+1
    .endr
    .set n, 0
    csrrd $sp, CSR_SAVE         # back to the pre-trap kernel sp
    ertn
llb1238/NPUCORE_2025_npu123team
2,157
os/src/hal/arch/riscv/trap/trap.S
.attribute arch, "rv64gc" FP_START = 32 .altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .macro SAVE_FP n, m fsd f\n, \m*8(sp) .endm .macro LOAD_FP n, m fld f\n, \m*8(sp) .endm .section .text.trampoline .globl __alltraps .globl __restore .align 2 __alltraps: csrrw sp, sscratch, sp # now sp->*TrapContext in user space, sscratch->user stack sd x1, 1*8(sp) # skip sp(x2), we will save it later .set n, 3 .rept 29 SAVE_GP %n .set n, n+1 .endr .set n, 0 .set m, FP_START .rept 32 SAVE_FP %n, %m .set n, n+1 .set m, m+1 .endr # we can use t0/t1/t2 freely, because they have been saved in TrapContext csrr t0, fcsr sd t0, 64*8(sp) # save other general purpose registers sd a0, 65*8(sp) csrr t0, sstatus csrr t1, sepc sd t0, 66*8(sp) sd t1, 0(sp) # read user stack from sscratch and save it in TrapContext csrr t2, sscratch sd t2, 2*8(sp) # load kernel_satp into t0 ld t0, 67*8(sp) # load trap_handler into t1 ld t1, 68*8(sp) # move to kernel_sp ld sp, 69*8(sp) # switch to kernel space csrw satp, t0 sfence.vma # jump to trap_handler jr t1 __restore: # a0: *TrapContext in user space(Constant); a1: user space token # switch to user space csrw satp, a1 sfence.vma csrw sscratch, a0 mv sp, a0 # now sp points to TrapContext in user space, start restoring based on it # restore sstatus/sepc ld t0, 64*8(sp) csrw fcsr, t0 ld t0, 66*8(sp) ld t1, 0(sp) csrw sstatus, t0 csrw sepc, t1 # restore general purpose registers except x0/sp ld x1, 1*8(sp) .set n, 3 .rept 29 LOAD_GP %n .set n, n+1 .endr .set n, 0 .set m, FP_START .rept 32 LOAD_FP %n, %m .set n, n+1 .set m, m+1 .endr # back to user stack ld sp, 2*8(sp) sret .section .text.signaltrampoline .globl __call_sigreturn .align 2 __call_sigreturn: # ecall sys_sigreturn addi a7, zero, 139 ecall
llb1238/NPUCORE_2025_npu123team
17,376
dependency/riscv/asm.S
#include "asm.h" .section .text.__ebreak .global __ebreak __ebreak: ebreak ret .section .text.__wfi .global __wfi __wfi: wfi ret .section .text.__sfence_vma_all .global __sfence_vma_all __sfence_vma_all: sfence.vma ret .section .text.__sfence_vma .global __sfence_vma __sfence_vma: sfence.vma a0, a1 ret // RISC-V hypervisor instructions. // The switch for enabling LLVM support for asm generation. // #define LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT .section .text.__hfence_gvma .global __hfence_gvma __hfence_gvma: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hfence.gvma a0, a1 #else .word 1656029299 #endif ret .section .text.__hfence_vvma .global __hfence_vvma __hfence_vvma: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hfence.vvma a0, a1 #else .word 582287475 #endif ret .section .text.__hlv_b .global __hlv_b __hlv_b: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.b a0, a0 #else .word 1610958195 #endif ret .section .text.__hlv_bu .global __hlv_bu __hlv_bu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.bu a0, a0 #else .word 1612006771 #endif ret .section .text.__hlv_h .global __hlv_h __hlv_h: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.h a0, a0 #else .word 1678067059 #endif ret .section .text.__hlv_hu .global __hlv_hu __hlv_hu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.hu a0, a0 #else .word 1679115635 #endif ret .section .text.__hlvx_hu .global __hlvx_hu __hlvx_hu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlvx.hu a0, a0 #else .word 1681212787 #endif ret .section .text.__hlv_w .global __hlv_w __hlv_w: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.w a0, a0 #else .word 1745175923 #endif ret .section .text.__hlvx_wu .global __hlvx_wu __hlvx_wu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlvx.wu a0, a0 #else .word 1748321651 #endif ret .section .text.__hsv_b .global __hsv_b __hsv_b: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.b a0, a1 #else .word 1656045683 #endif ret .section .text.__hsv_h .global __hsv_h __hsv_h: #ifdef 
LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.h a0, a1 #else .word 1723154547 #endif ret .section .text.__hsv_w .global __hsv_w __hsv_w: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.w a0, a1 #else .word 1790263411 #endif ret .section .text.__hlv_wu .global __hlv_wu __hlv_wu: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.wu a0, a0 #else .word 1746224499 #endif ret .section .text.__hlv_d .global __hlv_d __hlv_d: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hlv.d a0, a0 #else .word 1812284787 #endif ret .section .text.__hsv_d .global __hsv_d __hsv_d: #ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT hsv.d a0, a1 #else .word 1857372275 #endif ret // User Trap Setup RW(0x000, ustatus) // User status register RW(0x004, uie) // User interrupt-enable register RW(0x005, utvec) // User trap handler base address // User Trap Handling RW(0x040, uscratch) // Scratch register for user trap handlers RW(0x041, uepc) // User exception program counter RW(0x042, ucause) // User trap cause RW(0x043, utval) // User bad address or instruction RW(0x044, uip) // User interrupt pending // User Floating-Point CSRs RW(0x001, fflags) // Floating-Point Accrued Exceptions RW(0x002, frm) // Floating-Point Dynamic Rounding Mode RW(0x003, fcsr) // Floating-Point Control and Status Register (frm + fflags) // User Counter/Timers RO( 0xC00, cycle) // Cycle counter for RDCYCLE instruction RO( 0xC01, time) // Timer for RDTIME instruction RO( 0xC02, instret) // Instructions-retired counter for RDINSTRET instruction RO( 0xC03, hpmcounter3) // Performance-monitoring counter RO( 0xC04, hpmcounter4) // Performance-monitoring counter RO( 0xC05, hpmcounter5) // Performance-monitoring counter RO( 0xC06, hpmcounter6) // Performance-monitoring counter RO( 0xC07, hpmcounter7) // Performance-monitoring counter RO( 0xC08, hpmcounter8) // Performance-monitoring counter RO( 0xC09, hpmcounter9) // Performance-monitoring counter RO( 0xC0A, hpmcounter10) // Performance-monitoring counter RO( 0xC0B, hpmcounter11) 
// Performance-monitoring counter RO( 0xC0C, hpmcounter12) // Performance-monitoring counter RO( 0xC0D, hpmcounter13) // Performance-monitoring counter RO( 0xC0E, hpmcounter14) // Performance-monitoring counter RO( 0xC0F, hpmcounter15) // Performance-monitoring counter RO( 0xC10, hpmcounter16) // Performance-monitoring counter RO( 0xC11, hpmcounter17) // Performance-monitoring counter RO( 0xC12, hpmcounter18) // Performance-monitoring counter RO( 0xC13, hpmcounter19) // Performance-monitoring counter RO( 0xC14, hpmcounter20) // Performance-monitoring counter RO( 0xC15, hpmcounter21) // Performance-monitoring counter RO( 0xC16, hpmcounter22) // Performance-monitoring counter RO( 0xC17, hpmcounter23) // Performance-monitoring counter RO( 0xC18, hpmcounter24) // Performance-monitoring counter RO( 0xC19, hpmcounter25) // Performance-monitoring counter RO( 0xC1A, hpmcounter26) // Performance-monitoring counter RO( 0xC1B, hpmcounter27) // Performance-monitoring counter RO( 0xC1C, hpmcounter28) // Performance-monitoring counter RO( 0xC1D, hpmcounter29) // Performance-monitoring counter RO( 0xC1E, hpmcounter30) // Performance-monitoring counter RO( 0xC1F, hpmcounter31) // Performance-monitoring counter RO32(0xC80, cycleh) // Upper 32 bits of cycle, RV32I only RO32(0xC81, timeh) // Upper 32 bits of time, RV32I only RO32(0xC82, instreth) // Upper 32 bits of instret, RV32I only RO32(0xC83, hpmcounter3h) // Upper 32 bits of hpmcounter3, RV32I only RO32(0xC84, hpmcounter4h) RO32(0xC85, hpmcounter5h) RO32(0xC86, hpmcounter6h) RO32(0xC87, hpmcounter7h) RO32(0xC88, hpmcounter8h) RO32(0xC89, hpmcounter9h) RO32(0xC8A, hpmcounter10h) RO32(0xC8B, hpmcounter11h) RO32(0xC8C, hpmcounter12h) RO32(0xC8D, hpmcounter13h) RO32(0xC8E, hpmcounter14h) RO32(0xC8F, hpmcounter15h) RO32(0xC90, hpmcounter16h) RO32(0xC91, hpmcounter17h) RO32(0xC92, hpmcounter18h) RO32(0xC93, hpmcounter19h) RO32(0xC94, hpmcounter20h) RO32(0xC95, hpmcounter21h) RO32(0xC96, hpmcounter22h) RO32(0xC97, hpmcounter23h) 
RO32(0xC98, hpmcounter24h) RO32(0xC99, hpmcounter25h) RO32(0xC9A, hpmcounter26h) RO32(0xC9B, hpmcounter27h) RO32(0xC9C, hpmcounter28h) RO32(0xC9D, hpmcounter29h) RO32(0xC9E, hpmcounter30h) RO32(0xC9F, hpmcounter31h) // Supervisor Trap Setup RW(0x100, sstatus) // Supervisor status register RW(0x102, sedeleg) // Supervisor exception delegation register RW(0x103, sideleg) // Supervisor interrupt delegation register RW(0x104, sie) // Supervisor interrupt-enable register RW(0x105, stvec) // Supervisor trap handler base address RW(0x106, scounteren) // Supervisor counter enable // Supervisor Trap Handling RW(0x140, sscratch) // Scratch register for supervisor trap handlers RW(0x141, sepc) // Supervisor exception program counter RW(0x142, scause) // Supervisor trap cause RW(0x143, stval) // Supervisor bad address or instruction RW(0x144, sip) // Supervisor interrupt pending // Supervisor Protection and Translation RW(0x180, satp) // Supervisor address translation and protection // Machine Information Registers RO(0xF11, mvendorid) // Vendor ID RO(0xF12, marchid) // Architecture ID RO(0xF13, mimpid) // Implementation ID RO(0xF14, mhartid) // Hardware thread ID // Machine Trap Setup RW(0x300, mstatus) // Machine status register RW(0x301, misa) // ISA and extensions RW(0x302, medeleg) // Machine exception delegation register RW(0x303, mideleg) // Machine interrupt delegation register RW(0x304, mie) // Machine interrupt-enable register RW(0x305, mtvec) // Machine trap handler base address RW(0x306, mcounteren) // Machine counter enable // Machine Trap Handling RW(0x340, mscratch) // Scratch register for machine trap handlers RW(0x341, mepc) // Machine exception program counter RW(0x342, mcause) // Machine trap cause RW(0x343, mtval) // Machine bad address or instruction RW(0x344, mip) // Machine interrupt pending // Machine Protection and Translation RW( 0x3A0, pmpcfg0) // Physical memory protection configuration RW32(0x3A1, pmpcfg1) // Physical memory protection 
configuration, RV32 only RW( 0x3A2, pmpcfg2) // Physical memory protection configuration RW32(0x3A3, pmpcfg3) // Physical memory protection configuration, RV32 only RW( 0x3B0, pmpaddr0) // Physical memory protection address register RW( 0x3B1, pmpaddr1) // Physical memory protection address register RW( 0x3B2, pmpaddr2) // Physical memory protection address register RW( 0x3B3, pmpaddr3) // Physical memory protection address register RW( 0x3B4, pmpaddr4) // Physical memory protection address register RW( 0x3B5, pmpaddr5) // Physical memory protection address register RW( 0x3B6, pmpaddr6) // Physical memory protection address register RW( 0x3B7, pmpaddr7) // Physical memory protection address register RW( 0x3B8, pmpaddr8) // Physical memory protection address register RW( 0x3B9, pmpaddr9) // Physical memory protection address register RW( 0x3BA, pmpaddr10) // Physical memory protection address register RW( 0x3BB, pmpaddr11) // Physical memory protection address register RW( 0x3BC, pmpaddr12) // Physical memory protection address register RW( 0x3BD, pmpaddr13) // Physical memory protection address register RW( 0x3BE, pmpaddr14) // Physical memory protection address register RW( 0x3BF, pmpaddr15) // Physical memory protection address register // Machine Counter/Timers RO( 0xB00, mcycle) // Machine cycle counter RO( 0xB02, minstret) // Machine instructions-retired counter RO( 0xB03, mhpmcounter3) // Machine performance-monitoring counter RO( 0xB04, mhpmcounter4) // Machine performance-monitoring counter RO( 0xB05, mhpmcounter5) // Machine performance-monitoring counter RO( 0xB06, mhpmcounter6) // Machine performance-monitoring counter RO( 0xB07, mhpmcounter7) // Machine performance-monitoring counter RO( 0xB08, mhpmcounter8) // Machine performance-monitoring counter RO( 0xB09, mhpmcounter9) // Machine performance-monitoring counter RO( 0xB0A, mhpmcounter10) // Machine performance-monitoring counter RO( 0xB0B, mhpmcounter11) // Machine performance-monitoring counter RO( 
0xB0C, mhpmcounter12) // Machine performance-monitoring counter RO( 0xB0D, mhpmcounter13) // Machine performance-monitoring counter RO( 0xB0E, mhpmcounter14) // Machine performance-monitoring counter RO( 0xB0F, mhpmcounter15) // Machine performance-monitoring counter RO( 0xB10, mhpmcounter16) // Machine performance-monitoring counter RO( 0xB11, mhpmcounter17) // Machine performance-monitoring counter RO( 0xB12, mhpmcounter18) // Machine performance-monitoring counter RO( 0xB13, mhpmcounter19) // Machine performance-monitoring counter RO( 0xB14, mhpmcounter20) // Machine performance-monitoring counter RO( 0xB15, mhpmcounter21) // Machine performance-monitoring counter RO( 0xB16, mhpmcounter22) // Machine performance-monitoring counter RO( 0xB17, mhpmcounter23) // Machine performance-monitoring counter RO( 0xB18, mhpmcounter24) // Machine performance-monitoring counter RO( 0xB19, mhpmcounter25) // Machine performance-monitoring counter RO( 0xB1A, mhpmcounter26) // Machine performance-monitoring counter RO( 0xB1B, mhpmcounter27) // Machine performance-monitoring counter RO( 0xB1C, mhpmcounter28) // Machine performance-monitoring counter RO( 0xB1D, mhpmcounter29) // Machine performance-monitoring counter RO( 0xB1E, mhpmcounter30) // Machine performance-monitoring counter RO( 0xB1F, mhpmcounter31) // Machine performance-monitoring counter RO32(0xB80, mcycleh) // Upper 32 bits of mcycle, RV32I only RO32(0xB82, minstreth) // Upper 32 bits of minstret, RV32I only RO32(0xB83, mhpmcounter3h) // Upper 32 bits of mhpmcounter3, RV32I only RO32(0xB84, mhpmcounter4h) RO32(0xB85, mhpmcounter5h) RO32(0xB86, mhpmcounter6h) RO32(0xB87, mhpmcounter7h) RO32(0xB88, mhpmcounter8h) RO32(0xB89, mhpmcounter9h) RO32(0xB8A, mhpmcounter10h) RO32(0xB8B, mhpmcounter11h) RO32(0xB8C, mhpmcounter12h) RO32(0xB8D, mhpmcounter13h) RO32(0xB8E, mhpmcounter14h) RO32(0xB8F, mhpmcounter15h) RO32(0xB90, mhpmcounter16h) RO32(0xB91, mhpmcounter17h) RO32(0xB92, mhpmcounter18h) RO32(0xB93, mhpmcounter19h) 
RO32(0xB94, mhpmcounter20h) RO32(0xB95, mhpmcounter21h) RO32(0xB96, mhpmcounter22h) RO32(0xB97, mhpmcounter23h) RO32(0xB98, mhpmcounter24h) RO32(0xB99, mhpmcounter25h) RO32(0xB9A, mhpmcounter26h) RO32(0xB9B, mhpmcounter27h) RO32(0xB9C, mhpmcounter28h) RO32(0xB9D, mhpmcounter29h) RO32(0xB9E, mhpmcounter30h) RO32(0xB9F, mhpmcounter31h) RW(0x323, mhpmevent3) // Machine performance-monitoring event selector RW(0x324, mhpmevent4) // Machine performance-monitoring event selector RW(0x325, mhpmevent5) // Machine performance-monitoring event selector RW(0x326, mhpmevent6) // Machine performance-monitoring event selector RW(0x327, mhpmevent7) // Machine performance-monitoring event selector RW(0x328, mhpmevent8) // Machine performance-monitoring event selector RW(0x329, mhpmevent9) // Machine performance-monitoring event selector RW(0x32A, mhpmevent10) // Machine performance-monitoring event selector RW(0x32B, mhpmevent11) // Machine performance-monitoring event selector RW(0x32C, mhpmevent12) // Machine performance-monitoring event selector RW(0x32D, mhpmevent13) // Machine performance-monitoring event selector RW(0x32E, mhpmevent14) // Machine performance-monitoring event selector RW(0x32F, mhpmevent15) // Machine performance-monitoring event selector RW(0x330, mhpmevent16) // Machine performance-monitoring event selector RW(0x331, mhpmevent17) // Machine performance-monitoring event selector RW(0x332, mhpmevent18) // Machine performance-monitoring event selector RW(0x333, mhpmevent19) // Machine performance-monitoring event selector RW(0x334, mhpmevent20) // Machine performance-monitoring event selector RW(0x335, mhpmevent21) // Machine performance-monitoring event selector RW(0x336, mhpmevent22) // Machine performance-monitoring event selector RW(0x337, mhpmevent23) // Machine performance-monitoring event selector RW(0x338, mhpmevent24) // Machine performance-monitoring event selector RW(0x339, mhpmevent25) // Machine performance-monitoring event selector RW(0x33A, 
mhpmevent26) // Machine performance-monitoring event selector RW(0x33B, mhpmevent27) // Machine performance-monitoring event selector RW(0x33C, mhpmevent28) // Machine performance-monitoring event selector RW(0x33D, mhpmevent29) // Machine performance-monitoring event selector RW(0x33E, mhpmevent30) // Machine performance-monitoring event selector RW(0x33F, mhpmevent31) // Machine performance-monitoring event selector // Debug/Trace Registers (shared with Debug Mode) RW(0x7A0, tselect) // Debug/Trace trigger register select RW(0x7A1, tdata1) // First Debug/Trace trigger data register RW(0x7A2, tdata2) // Second Debug/Trace trigger data register RW(0x7A3, tdata3) // Third Debug/Trace trigger data register // Debug Mode Registers RW(0x7B0, dcsr) // Debug control and status register RW(0x7B1, dpc) // Debug PC RW(0x7B2, dscratch) // Debug scratch register // Hypervisor Trap Setup RW(0x600, hstatus) // Hypervisor status register RW(0x602, hedeleg) // Hypervisor exception delegation register RW(0x603, hideleg) // Hypervisor interrupt delegation register RW(0x604, hie) // Hypervisor interrupt-enable register RW(0x606, hcounteren) // Hypervisor counter enable RW(0x607, hgeie) // Hypervisor guest external interrupt-enable register // Hypervisor Trap Handling RW(0x643, htval) // Hypervisor bad guest physical address RW(0x644, hip) // Hypervisor interrupt pending RW(0x645, hvip) // Hypervisor virtual interrupt pending RW(0x64a, htinst) // Hypervisor trap instruction (transformed) RW(0xe12, hgeip) // Hypervisor guest external interrupt pending // Hypervisor Protection and Translation RO(0x680, hgatp) // Hypervisor guest address translation and protection // Debug/Trace Registers RW(0x6a8, hcontext) // Hypervisor-mode context register // Hypervisor Counter/Timer Virtualization Registers RW(0x605, htimedelta) // Delta for VS/VU-mode timer RW32(0x615, htimedeltah) // Upper 32 bits of {\tt htimedelta}, RV32 only // Virtual Supervisor Registers RW(0x200, vsstatus) // Virtual 
supervisor status register RW(0x204, vsie) // Virtual supervisor interrupt-enable register RW(0x205, vstvec) // Virtual supervisor trap handler base address RW(0x240, vsscratch) // Virtual supervisor scratch register RW(0x241, vsepc) // Virtual supervisor exception program counter RW(0x242, vscause) // Virtual supervisor trap cause RW(0x243, vstval) // Virtual supervisor bad address or instruction RW(0x244, vsip) // Virtual supervisor interrupt pending RW(0x280, vsatp) // Virtual supervisor address translation and protection
llb1238/NPUCORE_2025_npu123team
3,187
dependency/virtio-drivers/examples/x86_64/src/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.entry .code32 .global _start _start: mov ecx, {mb_magic} cmp ecx, eax jnz 1f mov edi, ebx # arg1: multiboot info jmp entry32 1: hlt jmp 1b .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int start - {offset} # load_addr .int edata - {offset} # load_end .int ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr .code32 entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax ljmp 0x10, offset entry64 - {offset} # 0x10 is code64 segment .code64 entry64: # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set RSP to boot stack top movabs rsp, offset boot_stack_top # call main(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .Lhlt: hlt jmp .Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 
4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 511 .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .section .bss.stack .balign 4096 .boot_stack: .space {boot_stack_size} boot_stack_top:
LogicLanedeng/LogicLanedeng
1,854
examples/a.s
.file "a.c" .text .globl amazing_func .def amazing_func; .scl 2; .type 32; .endef .seh_proc amazing_func amazing_func: pushq %rbp .seh_pushreg %rbp movq %rsp, %rbp .seh_setframe %rbp, 0 subq $16, %rsp .seh_stackalloc 16 .seh_endprologue movl %ecx, 16(%rbp) movl 16(%rbp), %eax movl %eax, -4(%rbp) movl -4(%rbp), %edx movl %edx, %eax sall $2, %eax addl %edx, %eax sall $2, %eax movl %eax, -4(%rbp) movl -4(%rbp), %ecx movl $1717986919, %edx movl %ecx, %eax imull %edx sarl $3, %edx movl %ecx, %eax sarl $31, %eax subl %eax, %edx movl %edx, %eax movl %eax, -4(%rbp) movl $1, %eax subl -4(%rbp), %eax movl %eax, -4(%rbp) movl $1, %eax subl -4(%rbp), %eax movl %eax, -4(%rbp) movl -4(%rbp), %eax addq $16, %rsp popq %rbp ret .seh_endproc .globl fib .def fib; .scl 2; .type 32; .endef .seh_proc fib fib: pushq %rbp .seh_pushreg %rbp pushq %rbx .seh_pushreg %rbx subq $40, %rsp .seh_stackalloc 40 leaq 128(%rsp), %rbp .seh_setframe %rbp, 128 .seh_endprologue movl %ecx, -64(%rbp) movl -64(%rbp), %ecx call amazing_func movl %eax, -64(%rbp) cmpl $1, -64(%rbp) jg .L4 movl -64(%rbp), %eax jmp .L5 .L4: movl -64(%rbp), %eax subl $1, %eax movl %eax, %ecx call fib movl %eax, %ebx movl -64(%rbp), %eax subl $2, %eax movl %eax, %ecx call fib addl %ebx, %eax .L5: addq $40, %rsp popq %rbx popq %rbp ret .seh_endproc .def __main; .scl 2; .type 32; .endef .globl main .def main; .scl 2; .type 32; .endef .seh_proc main main: pushq %rbp .seh_pushreg %rbp movq %rsp, %rbp .seh_setframe %rbp, 0 subq $64, %rsp .seh_stackalloc 64 .seh_endprologue call __main movl $10, -12(%rbp) movl -12(%rbp), %eax movl %eax, %ecx call fib movl %eax, -4(%rbp) movl $0, %eax addq $64, %rsp popq %rbp ret .seh_endproc .ident "GCC: (x86_64-posix-seh-rev0, Built by MinGW-W64 project) 8.1.0"
lululalulula/roadmap
14,578
Roadmap/01 - OPERADORES Y ESTRUCTURAS DE CONTROL/nasm/evanz2608.s
; https://www.nasm.us/ ; ====================================================================================================================== ; Testeado en Arch linux. No lo he testeado en otras distribuciones. ; Para ensamblar: nasm -f elf64 -o evanz1902.o evanz1902.s ; Para linkear y generar el ejecutable: ld -o evanz1902 evanz1902.o -lc --dynamic-linker=/lib64/ld-linux-x86-64.so.2 ; ====================================================================================================================== extern printf ; Se usa para llamar a la función printf de libc. ; =========================================================================== ; Sección de código, con permisos de lectura y ejecución ; =========================================================================== section .text global _start _start: ; IMPORTANTE: ; Todas las instrucciones en ensamblador pueden recibir 0, 1, o 2 parametros, donde: ; instruccion destino, fuente ; destino puede ser un registro o una direccion de memoria. (variable que se accede con [variable]) ; fuente puede ser un registro, una direccion de memoria o un valor inmediato ; pero NO SE PUEDE usar dos direcciones de memoria como destino y fuente a la vez. Ej: ; mov qword [var_integer], qword [mi_float] ; no está permitido. ; Como se ve, cuando se accede a la memoria, se debe especificar el tamaño que se espera y deben coincidir los dos operandos. Ej ; RAX es un registro de 64 bits (8 bytes), y cuando se escribe su valor en memoria, se usa qword [variable] para indicar que vamos a escribir 8 bytes. ; EAX hace referencia a los primeros 32 bits (4 bytes) del registro rax, y se usaría dword [variable] para indicar que escribimos 4 bytes. ; Operación de asignación. 
Para asignar un valor a una variable definida en la sección .data, usamos la instrucción mov mov qword [var_integer], 10 ; imprimimos el valor en [var_integer] ; para entender como se imprime por pantalla, le pasamos a printf los parametros que espera ; printf(const char* fmt, ...) ; Para entender como se pasan parametros en ensamblador: https://www.ired.team/miscellaneous-reversing-forensics/windows-kernel-internals/linux-x64-calling-convention-stack-frame lea rdi, [mask_string] lea rsi, [int_string] mov rdx, [var_integer] xor rax, rax call printf ; Lo mismo que antes, pero usando un registro. Movemos 10 a rax, y luego escribimos en [var_integer] el valor en rax. (rax = 20 en este caso). mov rax, 20 mov qword [var_integer], rax ; y volvemos a imprimr el valor en [var_integer] lea rdi, [mask_string] lea rsi, [int_string] mov rdx, [var_integer] xor rax, rax call printf ; Operaciones aritmeticas. Usamos add para sumar, sub para restar, mul para multiplicar y div para dividir. mov rax, 10 ; rax = 10 mov qword [var_integer], 10 ; var_integer = 10 add qword [var_integer], rax ; Sumamos. El resultado se guarda en var_integer. ; imprimimos en pantalla el resultado. lea rdi, [mask_string] lea rsi, [suma] mov rdx, [var_integer] xor rax, rax call printf sub qword [var_integer], 5 ; restamos 5 al resultado. ; imprimimos en pantalla el resultado. lea rdi, [mask_string] lea rsi, [resta] mov rdx, [var_integer] xor rax, rax call printf ; Como dije antes, RAX es un registro de 64 bits, que está subdividido en registros más pequeños. Dejo una referencia: https://www.jamieweb.net/info/x86_64-general-purpose-registers-reference/ ; Tanto la instrucción MUL como DIV, trabajan de una manera un tanto particular: ; El primer parametro se debe cargar en el registro RAX. ; luego se llama a la instrucción con el segundo parametro, que puede ser un registro o un valor guardado en memoria. ; El resultado se devuelve en RDX:RAX. 
; Al multiplicar, se puede dar el caso que el resultado sea tan grande que no entre en un solo registro, por lo que se usan dos registros para devolver el valor, de la siguiente forma: ; RDX:RAX = resultado. ; en RAX estan los primeros 64 bits del resultado (bits 0-63), y en RDX los siguientes (bits 64- 127). Si el resultado cabe en 64 bits, entonces RDX será 0 y el resultado estará en RAX. ; Se pueden usar los registros de 32 bits también y siguen la misma regla, el resultado estará en EDX:EAX, aunque se debe tener en cuenta que en tal caso ; se deberá usar un registro de 32 bits para el segundo operando también. mov rax, 10 mov rbx, 3 mul rbx ; rax * rbx. En este caso el resultado es RDX = 0, RAX = 30. ; imprimimos en pantalla el resultado. lea rdi, [mask_string] lea rsi, [multiplicacion] mov rdx, rax xor rax, rax call printf ; En este caso utilizamos los registros de 32 bits. Se usa el registro ebx puesto que todos los parametros deben ser del mismo tamaño. mov eax, 20 mov ebx, 10 mul ebx ; eax * ebx. El resultado es EDX = 0, EAX = 200 ; imprimimos en pantalla el resultado. lea rdi, [mask_string] lea rsi, [multiplicacion] mov rdx, rax xor rax, rax call printf ; La división funciona de manera muy similar a MUL: Ya que se puede dar que el dividendo sea mas grande que el registro que vayamos a usar, se pasa ; en los registros combinados RDX:RAX para 64 bits, EDX:EAX en 32 bits, y el divisor debe ser un registro del mismo tamaño, u un valor en memoria también del ; mismo tamaño. El resultado se devuelve de la siguiente forma: RAX = cociente, RDX = resto. mov rdx, 0 ; limpiamos rdx, de esta manera rdx:rax = 100 mov rax, 100 mov rbx, 12 div rbx ; rax / rbx. El resultado es RAX = 8 (cociente), RDX = 4 (resto). ; imprimimos en pantalla el resultado. lea rdi, [mask_div] mov rsi, rax call printf mov edx, 0 ; limpiamos edx, de esta manera rdx:rax = 100 mov eax, 200 mov ebx, 12 div ebx ; eax / ebx. El resultado es EAX = 16 (cociente), EDX = 8 (resto). 
; imprimimos en pantalla el resultado. lea rdi, [mask_div] mov rsi, rax call printf ; Existen dos instrucciones más, imul e idiv. Ambas funcionan de la misma manera sólo que tienen en cuenta el signo de los operandos. ; voy a poner un ejemplo sólo con idiv. mov rdx, 0 mov rax, -100 mov rbx, 2 idiv rbx ; imprimimos en pantalla el resultado. lea rdi, [mask_div] mov rsi, rax call printf ; Ahora, para trabajar con números decimales (float), se usa la fpu (floating point unit) del procesador. ; La fpu tiene 8 registros: st0 - st7 que se comportan como un stack, con lo cual para cargar datos debemos hacer un "push" ; en el siguiente ejemplo se explica con comentarios todo el codigo. ; Vamos a usar qword para nuestras variables, o lo que es lo mismo, el tipo double de C/C++. Si usaramos dword, serían float de C/C++. ; Existen varias instrucciones que operan en la fpu: fmul, fadd, fdiv, etc... Funcionan todas de manera muy similar ; Ademas de la fpu, existen otros registros (xmm0 - xmm15) que se usan para el manejo de valores decimales y tenemos acceso a ellos de la misma manera ; que accedemos a los registros de uso general. fninit ; Inicializamos la fpu para evitar inconsistencias. fld qword [var_float1] ; Cargamos en st0 el valor de var_float1. Sólo se pueden cargar datos desde la memoria fld qword [var_float2] ; Cargamos en st0 el valor de var_float2. El valor de st0 se pasa a st1 y se carga en st0 el nuevo valor. faddp ; (Add and pop) Suma st0 con st1. El resultado se guarda en st0 y st1 ahora queda vacío. fstp qword [var_result] ; Guarda el valor de st0 en var_result y hace un "pop", con lo cual ahora el stack de la fpu queda vacío. ; imprimimos en pantalla el resultado. movsd xmm0, [var_result] lea rdi, [mask_float] call printf ; Operadores de comparación y condicionales: ; ; Referencia de todas las instrucciones de salto que hay: https://www.felixcloutier.com/x86/jcc ; ; Para comparaciones tenemos dos instrucciones: cmp y test. 
Son prácticamente iguales y se usan en conjunto con los saltos para ; crear estructuras de control. cmp y test comparan dos valores, y modifican la flags del procesador, que luego usaremos para controlar el flujo ; de ejecución con saltos condicionales. Hay que tener en cuenta que las operaciones aritméticas también modifican los flags y por lo tanto, a la hora de saltar, ; hay que tener esto en cuenta, ya que se pueden usar las instrucciones de salto en conjunto con estas operaciones directamente sin usar cmp o test. ; Para poder controlar donde vamos a continuar la ejecución debemos conocer la dirección de memoria donde esté la próxima instrucción que querramos ejecutar, ; aunque sería muy engorroso tener que calcular esas direcciones a mano. Para eso usamos las etiquetas, o labels que podemos definir en cualquier parte de nuestro programa ; y luego saltar a ellas. NASM calculará las direcciones cuando ensamblemos el programa y reemplazará las etiquetas con los offsets correspondientes. ; Para declarar una etiqueta basta con escribir un nombre seguido de ":". Esto definirá una etiqueta a la que podremos saltar desde cualquier lado de nuestro programa. ; ; Vamos a declarar algunas etiquetas y saltar a ellas como prueba. mov rax, 20 mov rbx, 10 comparacion_rax_rbx: ; definimos una etiqueta para volver a ella luego de las comparaciones cmp rax, rbx ; cmp hará una resta entre los operandos y modificará los flags del cpu acorde al resultado. ja mayor ; ja = Jump if Above. En este caso, saltará a "mayor" sólo si rax es mayor que rbx. Existe también jae = Jump if Above or Equal. je iguales ; je = Jump if Equal. Este salto condicional saltará a la etiqueta "iguales" si rax y rbx son iguales. jne distintos ; jne = Jump if Not Equal. Este salto condicional saltará a la etiqueta "distintos" si rax y rbx son distintos. jmp fin_de_saltos ; jmp = Jump. Es una instrucción de salto incondicional, por lo que siempre saltará. 
En este caso, como en ensamblador las instrucciones ; ejecutan de manera secuencial, si no saltamos a "fin_de_saltos", el programa volvería a ejecutar el código que hay debajo, por lo que ; tenemos que saltarlo sí o sí para continuar nuestra ejecución. Es aquí donde se ve la utilidad de jmp. ; En este caso, nunca se ejecutará este salto, ya que lo vamos a hacer en "distintos", de lo contrario estaríamos en un bucle infinito ya que ; no existe forma de escapar de estas tres comparaciones a la vez, lo que muestra que hay que tener cuidado a la hora de hacer saltos sin ; tener en cuenta esta posibilidad. distintos: ; Saltamos aquí cuando rax es distinto de rbx. Imprimimos un mensaje y preparamos rax y rbx para el próximo salto. lea rdi, [mask_jumps] lea rsi, [str_distintos] call printf jmp fin_de_saltos ; Saltamos fuera de las comprobaciones, de lo contrario estaríamos en un bucle infinito. iguales: ; Saltamos aquí cuando rax y rbx son iguales. Imprimimos un mensaje y preparamos rax y rbx para el próximo salto. lea rdi, [mask_jumps] lea rsi, [str_iguales] call printf mov rax, 10 mov rbx, 20 jmp comparacion_rax_rbx mayor: lea rdi, [mask_jumps] lea rsi, [str_mayor] call printf mov rax, 20 mov rbx, 20 jmp comparacion_rax_rbx fin_de_saltos: ; Con estas instrucciones, podemos crear fácilmente ciclos del estilo while, do while, etc... ; También podemos crear estructuras del estilo if (a == b), if (a == 0), if (a == b) else (), etc... ; Las etiquetas se pueden definir en cualquier parte dentro de la sección .text y vamos a poder saltar a ellas ; de manera que no es necesario que estén debajo del punto donde hacemos la comprobación. ; =================================== ; Ejercicio opcional ; =================================== lea rdi, [str_adicional] call printf mov byte [valor], 9 ; pongo [valor] en 9 porque lo primero que haré en bucle será incrementarlo. 
bucle: add qword [valor], 1 cmp qword [valor], 55 je fin_de_bucle jmp es_par imprimir: lea rdi, [mask_int] mov rsi, [valor] call printf jmp bucle es_16: cmp qword [valor], 16 jne es_multiplo jmp bucle es_multiplo: mov rbx, 3 mov rdx, 0 mov rax, qword [valor] div rbx cmp rdx, 0 jne imprimir jmp bucle es_par: mov rbx, 2 mov rdx, 0 mov rax, qword [valor] div rbx cmp rdx, 0 je es_16 jmp bucle fin_de_bucle: lea rdi, [mask_int] mov rsi, [valor] call printf lea rdi, [mask_nl] call printf jmp exit_proc ; Terminamos la ejecucion exit_proc: mov rax, SYS_exit xor rdi, rdi syscall ; =========================================================================== ; Sección de datos inicializados con permisos de sólo lectura. ; =========================================================================== section .rodata SYS_exit: equ 60 mask_string: db "%s %u", 10, 0 mask_div: db "Division: cociente: %i - resto: %i", 10, 0 mask_float: db "Float: %.4f", 10, 0 mask_jumps: db "Comparacion: RAX es %s que RBX", 10, 0 mask_rcx_zero: db "Comparacion: RCX es igual a 0" mask_int: db "%u - ", 0 mask_nl: db 0x0A int_string: db "var_integer:", 0 suma: db "Suma:", 0 resta: db "Resta:", 0 multiplicacion: db "Multiplicacion:", 0 str_distintos: db "distinto", 0 str_iguales: db "igual", 0 str_mayor: db "mayor que", 0 str_mayor_igual: db "mayor o igual", 0 str_menor: db "menor", 0 str_menor_igual: db "menor o igual", 0 str_zero: db "es igual a 0", 0 str_not_zero: db "es distinto de 0", 0 str_adicional: db 0x0A, 0x0A, 0x0A, "Ejercicio adicional", 0x0A, 0 ; =========================================================================== ; Sección de datos inicializados con permisos de lectura y escritura. ; No existe en ensamblador el concepto de tipo de datos, sólo son bytes que ; reservamos para su uso. 
; ; =========================================================================== section .data var_integer: dq 0 var_float1: dq 100.0 var_float2: dq 200.0 valor: dq 0 ; =========================================================================== ; Sección de datos no inicializados con permisos de lectura y escritura. ; Aplica lo mismo que para la sección .data, solo que la memoria que usemos ; en esta sección no va a ser inicializada y va a contener datos basura ; hasta que escribamos en ella. ; =========================================================================== section .bss var_result: resq 1
lululalulula/roadmap
2,693
Roadmap/00 - SINTAXIS, VARIABLES, TIPOS DE DATOS Y HOLA MUNDO/nasm/evanz2608.s
; https://www.nasm.us/ ; IMPORTANTE ; ============================================================================================= ; Aclarar que NASM es uno de los ensambladores que existen y no el lenguaje en si, ; aunque cada ensamblador define una sintaxis, en ensamblador las instrucciones son las mismas, ; y solo cambian cosas que no son especificas del lenguaje ensamblador en si. ; ; La sintaxis que se usa es la de Intel, y se escribe codigo para 64 bits en Linux. ; ============================================================================================= ; Los comentarios inician con punto y coma. %if 0 No existen comentarios multilinea como tal, pero se pueden usar macros como esta que el ensamblador ignorará a la hora de procesar el archivo porque al evaluar la macro 0 es false, y por lo tanto esto no se incluirá, al igual que un comentario %endif ;0 section .data ; las "variables" en ensablador son simples porciones de memoria que reservamos para su uso ; no tienen tipo, y van dentro de la seccion .data mi_variable: db 0 ; mi_variable reserva 1 byte inicializado a 0. mi_segunda_variable: dw 0 ; aquí reservamos 2 bytes, lo que se llama word, también inicializado a 0 ; Se puede usar dq para reservar 4 bytes, y dq para reservar 8 bytes. mi_array: db 0, 1, 2, 3, 4 ; Puesto que las variables son porciones de memoria, podemos definir arrays separando cada valor con una coma. mi_segundo_array: times 20 dw 0 ; Reservamos 20 words, lo que serían 40 bytes y los inicializamos a 0. mi_string: db "Cadena de texto" ; Se pueden inicializar variables con cadenas de texto y el tamaño sería igual a la cantidad de caracteres ; que contenga la cadena, y cada caracter será tratado como un byte. section .rodata mi_constante: equ 10 ; Para las "constantes" usamos la seccion .rodata (read only data) y usamos EQU para definirles un valor. ; Decir que las constantes son meros nombres para el valor que definamos. 
hello: db "Hola, NASM!", 0x0A ; para imprimir por consola el texto. (0x0A es el caracter para salto de línea. En otros lenguajes se puede denotar como '\n') hello_len: equ $-hello ; calculamos el largo del texto a imprimir. section .text ; la seccion donde ira nuestro codigo. global _start ; declaramos el punto de entrada de nuesto programa. Similar a la funcion main de algunos lenguajes. _start: ; y aquí la implementamos ; Imprimimos por pantalla el texto "Hola mundo" mov rax, 1 mov rdi, 1 lea rsi, [hello] mov rdx, hello_len syscall ; y salimos de nuestro programa. mov rax, 60 mov rdi, 0 syscall
lunalunaa/lunaris
4,235
src/kernel/asm/boot.S
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>

//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------

// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
	adrp	\register, \symbol
	add	\register, \register, #:lo12:\symbol
.endm

//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start

//------------------------------------------------------------------------------
// fn _start()
//
// Boot entry point. Parks all cores except core 0, enables FP/SIMD at EL1,
// then drops from whatever exception level the firmware left us in (EL3, EL2,
// or already EL1) down to EL1h, where el1_secure zeroes .bss and calls the
// Rust kernel entry point.
//------------------------------------------------------------------------------
_start:
	// Only proceed on the boot core. Park it otherwise.
	mrs	x1, mpidr_el1
	and	x1, x1, #3		// keep MPIDR_EL1.Aff0 (core id within cluster)
	cmp	x1, #0
	bne	.L_parking_loop

	// If execution reaches here, it is the boot core.

	/*=============================================================*/
	/*      Enable FP/SIMD at EL1                                  */
	/*=============================================================*/
	mov	x0, #3 << 20		// CPACR_EL1.FPEN = 0b11: no FP/SIMD traps at EL0/EL1
	msr	cpacr_el1, x0		/* Enable FP/SIMD at EL1 */

	// CurrentEL encodes the exception level in bits [3:2]:
	// 12 (0b1100) = EL3, 8 = EL2, 4 (0b0100) = EL1.
	mrs	x9, CurrentEL
	cmp	x9, #12
	beq	drop_el1_secure		// in EL3: take the EL3 -> EL1 path
	// are we already in EL1
	cmp	x9, #4
	beq	el1_secure		// already EL1: nothing to drop

	// otherwise, switch from EL2 to EL1
	mov	x10, #2147483648	// HCR_EL2.RW (bit 31): EL1 executes in AArch64
	msr	hcr_el2, x10
	mov	x11, #709		// SPSR_EL2 = 0x2C5: D/I/F masked, return to EL1h
	msr	spsr_el2, x11
	adr	x12, el1_secure		// eret will resume at el1_secure
	msr	elr_el2, x12
	ADR_REL	x0, __boot_core_stack_end_exclusive
	msr	SP_EL1, x0		// set the stack pointer for EL1
	eret

drop_el1_secure: /* Drop from EL3 to EL1 (secure) */
	/*=============================================================*/
	/*      Initialize sctlr_el1                                   */
	/*=============================================================*/
	mov	x0, xzr
	orr	x0, x0, #(1 << 29)	/* Checking http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0500d/CIHDIEBD.html */
	orr	x0, x0, #(1 << 28)	/* Bits 29,28,23,22,20,11 should be 1 (res1 on documentation) */
	orr	x0, x0, #(1 << 23)
	orr	x0, x0, #(1 << 22)
	orr	x0, x0, #(1 << 20)
	orr	x0, x0, #(1 << 11)
	msr	sctlr_el1, x0

	/*=============================================================*/
	/*      Initialize scr_el3                                     */
	/*=============================================================*/
	mrs	x0, scr_el3
	orr	x0, x0, #(1<<10)	/* SCR_EL3.RW: lower EL is AArch64 (64 bits) */
	msr	scr_el3, x0

	/*=============================================================*/
	/*      Initialize spsr_el3                                    */
	/*=============================================================*/
	mov	x0, xzr			// NOTE(review): redundant — overwritten by the next mov
	mov	x0, #0b00101		/* M[3:0] = EL1h: return to EL1 using SP_EL1 */
	orr	x0, x0, #(1 << 8)	/* A: mask SError/External Abort after eret */
	orr	x0, x0, #(1 << 7)	/* I: mask IRQ after eret */
	orr	x0, x0, #(1 << 6)	/* F: mask FIQ after eret */
	msr	spsr_el3, x0

	/*=============================================================*/
	/*      Initialize elr_el3                                     */
	/*=============================================================*/
	adr	x0, el1_secure		// eret will resume at el1_secure
	msr	elr_el3, x0
	ADR_REL	x0, __boot_core_stack_end_exclusive
	msr	SP_EL1, x0		// set the stack pointer for EL1
	eret

el1_secure:
	// Now running at EL1. Select SP_EL1 for all subsequent stack use.
	msr	SPSel, #1
	// Zero the .bss region [__bss_start, __bss_end_exclusive), 16 bytes at
	// a time (assumes the linker script 16-aligns both symbols — confirm).
	ADR_REL	x0, __bss_start
	ADR_REL	x1, __bss_end_exclusive

.L_bss_init_loop:
	cmp	x0, x1
	b.eq	.L_prepare_rust
	stp	xzr, xzr, [x0], #16
	b	.L_bss_init_loop

	// Prepare the jump to Rust code.
.L_prepare_rust:
	// Set the stack pointer.
	ADR_REL	x0, __boot_core_stack_end_exclusive
	mov	sp, x0

	bl	_kmain

	// Infinitely wait for events (aka "park the core").
.L_parking_loop:
	wfe
	b	.L_parking_loop

.size	_start, . - _start
.type	_start, function
.global	_start
lunalunaa/lunaris
1,190
src/kernel/asm/switch.S
// switch(old_context, new_context) // should only need to save callee saved registers? // this switches from kernel to user .section .text __switch_to_task: stp x19, x20, [x0, #0] stp x21, x22, [x0, #16] stp x23, x24, [x0, #32] stp x25, x26, [x0, #48] stp x27, x28, [x0, #64] stp x29, x30, [x0, #80] mov x2, sp str x2, [x0, #96] ldp x19, x20, [x1, #0] ldp x21, x22, [x1, #16] ldp x23, x24, [x1, #32] ldp x25, x26, [x1, #48] ldp x27, x28, [x1, #64] ldp x29, x30, [x1, #80] ldr x2, [x1, #96] mov sp, x2 isb sy dsb sy eret __switch_to_scheduler: stp x19, x20, [x0, #0] stp x21, x22, [x0, #16] stp x23, x24, [x0, #32] stp x25, x26, [x0, #48] stp x27, x28, [x0, #64] stp x29, x30, [x0, #80] mov x2, sp str x2, [x0, #96] ldp x19, x20, [x1, #0] ldp x21, x22, [x1, #16] ldp x23, x24, [x1, #32] ldp x25, x26, [x1, #48] ldp x27, x28, [x1, #64] ldp x29, x30, [x1, #80] ldr x2, [x1, #96] mov sp, x2 isb sy dsb sy ret .global __switch_to_task .global __switch_to_scheduler .type __switch_to_scheduler, function .type __switch_to_task, function
lunalunaa/lunaris
2,250
src/kernel/asm/exception.S
// Typical exception vector table code. .section .text .align 11 vector_table_start: .org 0x000 curr_el_sp0_sync: b __syscall_handler .org 0x080 b __syscall_handler .org 0x100 b __syscall_handler .org 0x180 b __syscall_handler // Current exception level with SP_ELx, x > 0. .org 0x200 b __syscall_handler .org 0x280 b __syscall_handler .org 0x300 b __syscall_handler .org 0x380 b __syscall_handler // Lower exception level, AArch64 .org 0x400 // this is the one b __syscall_handler .org 0x480 b __syscall_handler .org 0x500 b __syscall_handler .org 0x580 b __syscall_handler // Lower exception level, AArch32 .org 0x600 b __syscall_handler .org 0x680 b __syscall_handler .org 0x700 b __syscall_handler .org 0x780 b __syscall_handler .org 0x800 __syscall_handler: // we are using user stack rn msr SPSel, #0 // let the kernel use SP_EL0 sub sp, sp, #288 stp x0, x1, [sp, #0] stp x2, x3, [sp, #16] stp x4, x5, [sp, #32] stp x6, x7, [sp, #48] stp x8, x9, [sp, #64] stp x10, x11, [sp, #80] stp x12, x13, [sp, #96] stp x14, x15, [sp, #112] stp x16, x17, [sp, #128] stp x18, x19, [sp, #144] stp x20, x21, [sp, #160] stp x22, x23, [sp, #176] stp x24, x25, [sp, #192] stp x26, x27, [sp, #208] stp x28, x29, [sp, #224] stp x30, xzr, [sp, #240] mrs x0, ESR_EL1 mrs x1, SPSR_EL1 stp x0, x1, [sp, #256] mrs x0, ELR_EL1 stp x0, x0, [sp, #272] bl get_kernel_sp mov x1, x0 mov x0, sp msr SPSel, #1 mov sp, x1 isb sy dsb sy b syscall __syscall_ret: msr SPSel, #0 ldr x0, [sp, #264] msr SPSR_EL1, x0 ldp x0, x1, [sp, #0] ldp x2, x3, [sp, #16] ldp x4, x5, [sp, #32] ldp x6, x7, [sp, #48] ldp x8, x9, [sp, #64] ldp x10, x11, [sp, #80] ldp x12, x13, [sp, #96] ldp x14, x15, [sp, #112] ldp x16, x17, [sp, #128] ldp x18, x19, [sp, #144] ldp x20, x21, [sp, #160] ldp x22, x23, [sp, #176] ldp x24, x25, [sp, #192] ldp x26, x27, [sp, #208] ldp x28, x29, [sp, #224] ldp x30, xzr, [sp, #240] add sp, sp, #288 isb sy dsb sy eret .global vector_table_start .global __syscall_ret .size vector_table_start, . - vector_table_start
lunarforky-z/octopus-rust1
2,134
src/arch/x86_64/start.s
.set PIC1_DATA_PORT, 0x21 .set PIC2_DATA_PORT, 0xA1 .set CR0_PG, 1 << 31 .set CR4_PAE, 1 << 5 .set CR4_OSFXSR, 1 << 9 .set PTE_PRESENT, 1 << 0 .set PTE_WRITE, 1 << 1 .set PTE_PS, 1 << 7 .set MSR_EFER, 0xC0000080 .set MSR_EFER_LME, 1 << 8 .set GDT_TYPE_DATA, 0x2 << 40 .set GDT_TYPE_CODE, 0xA << 40 .set GDT_NONSYS, 1 << 44 .set GDT_PRESENT, 1 << 47 .set GDT_BITS64, 1 << 53 .set GDT_BITS32, 1 << 54 .set SEGMENT_CODE, 0x8 .set SEGMENT_DATA, 0x10 .set STACK_SIZE, 8 * 1024 .bss .global mb_magic mb_magic: .long 0 .global mb_info mb_info: .long 0 .align 4096 pml4: .fill 512, 8 pdp0: .fill 512, 8 pd0: .fill 512, 8 .align 16 stack: .fill STACK_SIZE .data .align 4 gdt: .quad 0 .quad GDT_TYPE_CODE | GDT_NONSYS | GDT_PRESENT | GDT_BITS64 .quad GDT_TYPE_DATA | GDT_NONSYS | GDT_PRESENT | GDT_BITS32 end_of_gdt: gdti: .word end_of_gdt - gdt - 1 .quad gdt .text .code32 .global _start _start: /* Preserve magic and multiboot_info. */ movl %eax, mb_magic movl %ebx, mb_info /* Disable IRQs. */ movb $0xFF, %al outb %al, $PIC1_DATA_PORT outb %al, $PIC2_DATA_PORT /* Enable PAE and SSE. */ movl %cr4, %edx orl $(CR4_PAE | CR4_OSFXSR), %edx movl %edx, %cr4 /* Link page table entries and set page map. */ orl $(PTE_PRESENT | PTE_WRITE), pml4 orl $pdp0, pml4 orl $(PTE_PRESENT | PTE_WRITE), pdp0 orl $pd0, pdp0 orl $(PTE_PRESENT | PTE_WRITE | PTE_PS), pd0 movl $pml4, %eax movl %eax, %cr3 /* Enable long mode. */ movl $MSR_EFER, %ecx rdmsr orl $MSR_EFER_LME, %eax wrmsr /* Enable paging. */ movl %cr0, %eax orl $CR0_PG, %eax movl %eax, %cr0 /* Load GDT. */ lgdt gdti /* Long jump to 64-bit code. */ ljmp $SEGMENT_CODE, $start64 .code64 start64: /* Set segments and stack. */ movw $SEGMENT_DATA, %ax movw %ax, %ds movw %ax, %ss movq $(stack + STACK_SIZE), %rsp /* Call the Rust entry point. */ call main
luque667788/RaspiBareMetal
1,372
src/boot.S
// --- Boot section for Raspberry Pi 4 (AArch64) --- .section ".text.boot" .global _start // Entry point _start: // Only run on core 0; others wait forever mrs x1, mpidr_el1 and x1, x1, #3 // Mask for core ID cbz x1, 2f // If core 0, continue //q:what cbz 2f means. a: // If core ID is 0, branch to label 2 1: wfe // Other cores: wait for event b 1b 2: // Main core continues // Set up stack pointer below code ldr x1, =_start mov sp, x1 // Zero BSS section //q: what is the BSS section? a: The BSS (Block Started by Symbol) section is used to hold uninitialized global and static variables in a program. It is typically zeroed out at program startup. // ldr is used to load the address of __bss_start in register x1 // and __bss_size in register w2 // This is necessary to clear the BSS section before running the main program ldr x1, =__bss_start ldr w2, =__bss_size 3: cbz w2, 4f str xzr, [x1], #8 // Store 0, advance pointer sub w2, w2, #1 cbnz w2, 3b // 3b means "branch to label 3 if w2 is not zero" //q:why sometimes it is f or b? a: f means "forward" and b means "backward" in branch instructions // Jump to Rust main() 4: bl main b 1b // If main returns, halt
luvroc/rCore-Tutorial-2024S
2,894
ch3/os/src/link_app.S
# Embedded user-application images for the rCore ch3 kernel.
#
# _num_app is the table header: the app count (13) followed by 14 quads —
# each app's start address plus the end of the last app — so app i occupies
# [table[i+1], table[i+2]) and the kernel can slice every image by index.
    .align 3
    .section .data
    .global _num_app
_num_app:
    .quad 13
    .quad app_0_start
    .quad app_1_start
    .quad app_2_start
    .quad app_3_start
    .quad app_4_start
    .quad app_5_start
    .quad app_6_start
    .quad app_7_start
    .quad app_8_start
    .quad app_9_start
    .quad app_10_start
    .quad app_11_start
    .quad app_12_start
    .quad app_12_end

# NUL-terminated app names, in the same order as the address table above.
    .global _app_names
_app_names:
    .string "ch2b_bad_address"
    .string "ch2b_bad_instructions"
    .string "ch2b_bad_register"
    .string "ch2b_hello_world"
    .string "ch2b_power_3"
    .string "ch2b_power_5"
    .string "ch2b_power_7"
    .string "ch3_sleep"
    .string "ch3_sleep1"
    .string "ch3_taskinfo"
    .string "ch3b_yield0"
    .string "ch3b_yield1"
    .string "ch3b_yield2"

# Each app's raw binary, embedded with .incbin and bracketed by
# app_N_start / app_N_end symbols. `.align 3` keeps every image 8-byte aligned.
    .section .data
    .global app_0_start
    .global app_0_end
    .align 3
app_0_start:
    .incbin "../ci-user/user/build/bin/ch2b_bad_address.bin"
app_0_end:

    .section .data
    .global app_1_start
    .global app_1_end
    .align 3
app_1_start:
    .incbin "../ci-user/user/build/bin/ch2b_bad_instructions.bin"
app_1_end:

    .section .data
    .global app_2_start
    .global app_2_end
    .align 3
app_2_start:
    .incbin "../ci-user/user/build/bin/ch2b_bad_register.bin"
app_2_end:

    .section .data
    .global app_3_start
    .global app_3_end
    .align 3
app_3_start:
    .incbin "../ci-user/user/build/bin/ch2b_hello_world.bin"
app_3_end:

    .section .data
    .global app_4_start
    .global app_4_end
    .align 3
app_4_start:
    .incbin "../ci-user/user/build/bin/ch2b_power_3.bin"
app_4_end:

    .section .data
    .global app_5_start
    .global app_5_end
    .align 3
app_5_start:
    .incbin "../ci-user/user/build/bin/ch2b_power_5.bin"
app_5_end:

    .section .data
    .global app_6_start
    .global app_6_end
    .align 3
app_6_start:
    .incbin "../ci-user/user/build/bin/ch2b_power_7.bin"
app_6_end:

    .section .data
    .global app_7_start
    .global app_7_end
    .align 3
app_7_start:
    .incbin "../ci-user/user/build/bin/ch3_sleep.bin"
app_7_end:

    .section .data
    .global app_8_start
    .global app_8_end
    .align 3
app_8_start:
    .incbin "../ci-user/user/build/bin/ch3_sleep1.bin"
app_8_end:

    .section .data
    .global app_9_start
    .global app_9_end
    .align 3
app_9_start:
    .incbin "../ci-user/user/build/bin/ch3_taskinfo.bin"
app_9_end:

    .section .data
    .global app_10_start
    .global app_10_end
    .align 3
app_10_start:
    .incbin "../ci-user/user/build/bin/ch3b_yield0.bin"
app_10_end:

    .section .data
    .global app_11_start
    .global app_11_end
    .align 3
app_11_start:
    .incbin "../ci-user/user/build/bin/ch3b_yield1.bin"
app_11_end:

    .section .data
    .global app_12_start
    .global app_12_end
    .align 3
app_12_start:
    .incbin "../ci-user/user/build/bin/ch3b_yield2.bin"
app_12_end:
luvroc/rCore-Tutorial-2024S
1,488
ch3/os/src/trap/trap.S
.altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .section .text .globl __alltraps .globl __restore .align 2 __alltraps: csrrw sp, sscratch, sp # now sp->kernel stack, sscratch->user stack # allocate a TrapContext on kernel stack addi sp, sp, -34*8 # save general-purpose registers sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they were saved on kernel stack csrr t0, sstatus csrr t1, sepc sd t0, 32*8(sp) sd t1, 33*8(sp) # read user stack from sscratch and save it on the kernel stack csrr t2, sscratch sd t2, 2*8(sp) # set input argument of trap_handler(cx: &mut TrapContext) mv a0, sp call trap_handler __restore: # now sp->kernel stack(after allocated), sscratch->user stack # restore sstatus/sepc ld t0, 32*8(sp) ld t1, 33*8(sp) ld t2, 2*8(sp) csrw sstatus, t0 csrw sepc, t1 csrw sscratch, t2 # restore general-purpuse registers except sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr # release TrapContext on kernel stack addi sp, sp, 34*8 # now sp->kernel stack, sscratch->user stack csrrw sp, sscratch, sp sret
lululalulula/roadmap
14,578
Roadmap/01 - OPERADORES Y ESTRUCTURAS DE CONTROL/nasm/evanz2608.s
; https://www.nasm.us/
; ======================================================================================================================
; Tested on Arch Linux only.
; Assemble: nasm -f elf64 -o evanz2608.o evanz2608.s
; Link:     ld -o evanz2608 evanz2608.o -lc --dynamic-linker=/lib64/ld-linux-x86-64.so.2
; ======================================================================================================================

extern printf ; libc printf, resolved at load time by the dynamic linker.

; ===========================================================================
; Code section (read + execute permissions).
; ===========================================================================
section .text

global _start

_start:
    ; IMPORTANT:
    ; Instructions take 0-2 operands: "op destination, source". Either side may
    ; be a register or a memory reference ([variable]); the source may also be
    ; an immediate — but two memory operands at once are not allowed.
    ; Memory accesses carry an explicit size (byte/word/dword/qword) whenever
    ; it cannot be inferred from a register operand.
    ;
    ; SysV AMD64 calling convention (used for every printf call below):
    ; integer args in rdi, rsi, rdx, rcx, r8, r9; FP args in xmm0..; for a
    ; VARIADIC callee, AL must hold the number of vector registers used.

    ; --- Assignment -------------------------------------------------------
    mov qword [var_integer], 10

    ; Print var_integer: printf("%s %u", "var_integer:", var_integer)
    lea rdi, [mask_string]
    lea rsi, [int_string]
    mov rdx, [var_integer]
    xor rax, rax                ; AL = 0: no vector args
    call printf

    ; Same assignment, through a register this time.
    mov rax, 20
    mov qword [var_integer], rax

    lea rdi, [mask_string]
    lea rsi, [int_string]
    mov rdx, [var_integer]
    xor rax, rax
    call printf

    ; --- Arithmetic: add / sub -------------------------------------------
    mov rax, 10
    mov qword [var_integer], 10
    add qword [var_integer], rax        ; var_integer += rax

    lea rdi, [mask_string]
    lea rsi, [suma]
    mov rdx, [var_integer]
    xor rax, rax
    call printf

    sub qword [var_integer], 5          ; var_integer -= 5

    lea rdi, [mask_string]
    lea rsi, [resta]
    mov rdx, [var_integer]
    xor rax, rax
    call printf

    ; --- mul --------------------------------------------------------------
    ; MUL multiplies RAX by its operand; the 128-bit product lands in RDX:RAX
    ; (RDX holds bits 64-127, so RDX = 0 when the result fits in RAX).
    ; The 32-bit forms use EDX:EAX the same way; operand sizes must match.
    mov rax, 10
    mov rbx, 3
    mul rbx                             ; RDX:RAX = 10 * 3 -> RDX = 0, RAX = 30

    lea rdi, [mask_string]
    lea rsi, [multiplicacion]
    mov rdx, rax
    xor rax, rax
    call printf

    ; 32-bit variant.
    mov eax, 20
    mov ebx, 10
    mul ebx                             ; EDX:EAX = 200

    lea rdi, [mask_string]
    lea rsi, [multiplicacion]
    mov rdx, rax
    xor rax, rax
    call printf

    ; --- div / idiv -------------------------------------------------------
    ; DIV divides RDX:RAX by its operand: RAX = quotient, RDX = remainder.
    ; RDX must be zeroed first for unsigned division.
    mov rdx, 0                          ; RDX:RAX = 100
    mov rax, 100
    mov rbx, 12
    div rbx                             ; RAX = 8 (quotient), RDX = 4 (remainder)

    ; mask_div has TWO %i conversions: rsi = quotient, rdx = remainder
    ; (the remainder is conveniently still in rdx after div).
    lea rdi, [mask_div]
    mov rsi, rax
    xor eax, eax                        ; FIX: AL was left nonzero — variadic ABI requires AL = #vector args
    call printf

    mov edx, 0                          ; EDX:EAX = 200
    mov eax, 200
    mov ebx, 12
    div ebx                             ; EAX = 16 (quotient), EDX = 8 (remainder)

    lea rdi, [mask_div]
    mov rsi, rax
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf

    ; IDIV is the signed counterpart. FIX: the dividend must be SIGN-extended
    ; into RDX with cqo (zeroing rdx made -100 look like a huge positive
    ; 128-bit value and produced a wrong quotient).
    mov rax, -100
    cqo                                 ; RDX:RAX = sign-extended -100
    mov rbx, 2
    idiv rbx                            ; RAX = -50, RDX = 0

    lea rdi, [mask_div]
    mov rsi, rax
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf

    ; --- Floating point (x87 FPU) ----------------------------------------
    ; st0-st7 form a register stack: fld pushes a value from memory, fstp pops
    ; st0 back to memory. qword operands correspond to C's double (dword would
    ; be float). The SSE registers xmm0-xmm15 are the other, modern way to
    ; handle floating point, and are what the ABI uses to pass FP arguments.
    fninit                              ; reset the FPU to a known state
    fld qword [var_float1]              ; push var_float1 -> st0
    fld qword [var_float2]              ; push var_float2 -> st0 (previous st0 becomes st1)
    faddp                               ; st1 = st0 + st1, then pop: sum ends in st0
    fstp qword [var_result]             ; pop st0 into var_result; FPU stack empty again

    ; Print the double: FP args travel in xmm0... and AL counts them.
    movsd xmm0, [var_result]
    lea rdi, [mask_float]
    mov eax, 1                          ; FIX: AL = 1 vector arg (was missing; printf read garbage)
    call printf

    ; --- Comparisons and conditional jumps --------------------------------
    ; cmp (and test) set the CPU flags; conditional jumps branch on them.
    ; Labels name jump targets; NASM turns them into offsets at assembly time.
    ; Full jcc reference: https://www.felixcloutier.com/x86/jcc
    ; Beware: arithmetic instructions also modify the flags.
    mov rax, 20
    mov rbx, 10

comparacion_rax_rbx:                    ; re-entered after each comparison round
    cmp rax, rbx                        ; computes rax - rbx, discards the result, sets flags
    ja mayor                            ; Jump if Above (unsigned >); jae also exists
    je iguales                          ; Jump if Equal
    jne distintos                       ; Jump if Not Equal
    jmp fin_de_saltos                   ; unconditional jump — unreachable here, since one
                                        ; of the three jumps above always fires

distintos:                              ; rax != rbx: print and leave the comparison demo
    lea rdi, [mask_jumps]
    lea rsi, [str_distintos]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf
    jmp fin_de_saltos                   ; escape, otherwise we'd loop forever

iguales:                                ; rax == rbx: print, change operands, compare again
    lea rdi, [mask_jumps]
    lea rsi, [str_iguales]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf
    mov rax, 10
    mov rbx, 20
    jmp comparacion_rax_rbx

mayor:                                  ; rax > rbx: print, make them equal, compare again
    lea rdi, [mask_jumps]
    lea rsi, [str_mayor]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf
    mov rax, 20
    mov rbx, 20
    jmp comparacion_rax_rbx

fin_de_saltos:
    ; With these primitives one can build while / do-while loops, if /
    ; if-else chains, and so on. Labels may be defined anywhere in .text.

    ; ===================================
    ; Optional exercise: walk valor from 10 to 54 and print the even values
    ; that are neither 16 nor multiples of 3 (encoded by the jumps below),
    ; then print the final value (55).
    ; ===================================
    lea rdi, [str_adicional]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf

    mov qword [valor], 9                ; FIX: was "mov byte" while every other access is qword;
                                        ; same behavior (upper bytes were 0) but now size-consistent.
                                        ; Starts at 9 because the loop increments first.

bucle:
    add qword [valor], 1
    cmp qword [valor], 55
    je fin_de_bucle                     ; stop once valor reaches 55
    jmp es_par

imprimir:                               ; print the current value, then next iteration
    lea rdi, [mask_int]
    mov rsi, [valor]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf
    jmp bucle

es_16:                                  ; skip the value 16 entirely
    cmp qword [valor], 16
    jne es_multiplo
    jmp bucle

es_multiplo:                            ; print only if NOT divisible by 3
    mov rbx, 3
    mov rdx, 0
    mov rax, qword [valor]
    div rbx
    cmp rdx, 0
    jne imprimir
    jmp bucle

es_par:                                 ; odd values are skipped outright
    mov rbx, 2
    mov rdx, 0
    mov rax, qword [valor]
    div rbx
    cmp rdx, 0
    je es_16
    jmp bucle

fin_de_bucle:                           ; print the final value and a newline
    lea rdi, [mask_int]
    mov rsi, [valor]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf
    lea rdi, [mask_nl]
    xor eax, eax                        ; FIX: clear AL (variadic ABI)
    call printf
    jmp exit_proc

; Terminate the process via the exit(2) syscall.
exit_proc:
    mov rax, SYS_exit
    xor rdi, rdi
    syscall

; ===========================================================================
; Read-only initialized data.
; ===========================================================================
section .rodata

SYS_exit: equ 60

mask_string: db "%s %u", 10, 0
mask_div: db "Division: cociente: %i - resto: %i", 10, 0
mask_float: db "Float: %.4f", 10, 0
mask_jumps: db "Comparacion: RAX es %s que RBX", 10, 0
mask_rcx_zero: db "Comparacion: RCX es igual a 0", 0 ; FIX: was missing the NUL terminator
mask_int: db "%u - ", 0
mask_nl: db 0x0A, 0 ; FIX: was missing the NUL terminator — printf kept reading into the next string
int_string: db "var_integer:", 0
suma: db "Suma:", 0
resta: db "Resta:", 0
multiplicacion: db "Multiplicacion:", 0
str_distintos: db "distinto", 0
str_iguales: db "igual", 0
str_mayor: db "mayor que", 0
str_mayor_igual: db "mayor o igual", 0
str_menor: db "menor", 0
str_menor_igual: db "menor o igual", 0
str_zero: db "es igual a 0", 0
str_not_zero: db "es distinto de 0", 0
str_adicional: db 0x0A, 0x0A, 0x0A, "Ejercicio adicional", 0x0A, 0

; ===========================================================================
; Initialized read-write data. There are no data types in assembly — these
; are just labeled bytes we reserve for our own use.
; ===========================================================================
section .data

var_integer: dq 0
var_float1: dq 100.0
var_float2: dq 200.0
valor: dq 0

; ===========================================================================
; Uninitialized read-write data (.bss). No bytes are stored in the executable
; for this section; the loader maps it zero-filled at startup.
; ===========================================================================
section .bss

var_result: resq 1
lululalulula/roadmap
2,693
Roadmap/00 - SINTAXIS, VARIABLES, TIPOS DE DATOS Y HOLA MUNDO/nasm/evanz2608.s
; https://www.nasm.us/
; IMPORTANT
; =============================================================================================
; NASM is one of several assemblers, not the assembly language itself: every
; assembler defines its own syntax, but the underlying machine instructions
; are the same; only assembler-specific features differ.
;
; This file uses Intel syntax and targets 64-bit Linux.
; =============================================================================================

; Comments start with a semicolon.

%if 0
There are no true multi-line comments, but a preprocessor conditional like this
one works as a substitute: the assembler evaluates the expression, 0 is false,
and the whole block is skipped, exactly as if it were a comment.
%endif ;0

section .data

; "Variables" in assembly are just chunks of memory we reserve for our own use.
; They have no type and live in the .data section.

mi_variable: db 0 ; reserves 1 byte, initialized to 0.
mi_segunda_variable: dw 0 ; reserves 2 bytes (a word), also initialized to 0.
; Use dd to reserve 4 bytes (a dword) and dq to reserve 8 bytes (a qword).
; NOTE(review): the original comment said "dq" for both sizes — 4 bytes is dd.

mi_array: db 0, 1, 2, 3, 4 ; since variables are raw memory, an array is just comma-separated values.

mi_segundo_array: times 20 dw 0 ; reserves 20 words (40 bytes), all initialized to 0.

mi_string: db "Cadena de texto" ; strings are byte sequences, one byte per character;
                                ; note there is no NUL terminator unless added explicitly.

section .rodata

; "Constants" go in .rodata (read-only data) and are defined with EQU.
; An EQU constant is just a name for a value; it occupies no memory.
mi_constante: equ 10

hello: db "Hola, NASM!", 0x0A ; text to print; 0x0A is the newline character ('\n' in other languages).
hello_len: equ $-hello ; length of the text, computed at assembly time.

section .text ; the section that holds our code.

global _start ; entry point of the program, similar to main() in other languages.

_start:
; Print the greeting via the write(2) syscall:
; rax = 1 (SYS_write), rdi = 1 (stdout), rsi = buffer, rdx = length.
mov rax, 1
mov rdi, 1
lea rsi, [hello]
mov rdx, hello_len
syscall

; Exit the program via exit(2): rax = 60 (SYS_exit), rdi = 0 (status).
mov rax, 60
mov rdi, 0
syscall
lunalunaa/lunaris
4,235
src/kernel/asm/boot.S
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- // Load the address of a symbol into a register, PC-relative. // // The symbol must lie within +/- 4 GiB of the Program Counter. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_REL register, symbol adrp \register, \symbol add \register, \register, #:lo12:\symbol .endm //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- .section .text._start //------------------------------------------------------------------------------ // fn _start() //------------------------------------------------------------------------------ _start: // Only proceed on the boot core. Park it otherwise. mrs x1, mpidr_el1 and x1, x1, #3 cmp x1, #0 bne .L_parking_loop // If execution reaches here, it is the boot core. /*=============================================================*/ /* Enable FP/SIMD at EL1 */ /*=============================================================*/ mov x0, #3 << 20 msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */ // are we already in EL3? 
mrs x9, CurrentEL cmp x9, #12 beq drop_el1_secure // are we already in EL1 cmp x9, #4 beq el1_secure // otherwise, switch from EL2 to EL1 mov x10, #2147483648 msr hcr_el2, x10 mov x11, #709 msr spsr_el2, x11 adr x12, el1_secure msr elr_el2, x12 ADR_REL x0, __boot_core_stack_end_exclusive msr SP_EL1, x0 // set the stack pointer for EL1 eret drop_el1_secure: /* Try drop from el3 to el1 secure */ /*=============================================================*/ /* Initialize sctlr_el1 */ /*=============================================================*/ mov x0, xzr orr x0, x0, #(1 << 29) /* Checking http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0500d/CIHDIEBD.html */ orr x0, x0, #(1 << 28) /* Bits 29,28,23,22,20,11 should be 1 (res1 on documentation) */ orr x0, x0, #(1 << 23) orr x0, x0, #(1 << 22) orr x0, x0, #(1 << 20) orr x0, x0, #(1 << 11) msr sctlr_el1, x0 /*=============================================================*/ /* Initialize scr_el3 */ /*=============================================================*/ mrs x0, scr_el3 orr x0, x0, #(1<<10) /* Lower EL is 64bits */ msr scr_el3, x0 /*=============================================================*/ /* Initialize spsr_el3 */ /*=============================================================*/ mov x0, xzr mov x0, #0b00101 /* EL1 */ orr x0, x0, #(1 << 8) /* Enable SError and External Abort. */ orr x0, x0, #(1 << 7) /* IRQ interrupt Process state mask. */ orr x0, x0, #(1 << 6) /* FIQ interrupt Process state mask. */ msr spsr_el3, x0 /*=============================================================*/ /* Initialize elr_el3 */ /*=============================================================*/ adr x0, el1_secure msr elr_el3, x0 ADR_REL x0, __boot_core_stack_end_exclusive msr SP_EL1, x0 // set the stack pointer for EL1 eret el1_secure: // Initialize DRAM. 
msr SPSel, #1 ADR_REL x0, __bss_start ADR_REL x1, __bss_end_exclusive .L_bss_init_loop: cmp x0, x1 b.eq .L_prepare_rust stp xzr, xzr, [x0], #16 b .L_bss_init_loop // Prepare the jump to Rust code. .L_prepare_rust: // Set the stack pointer. ADR_REL x0, __boot_core_stack_end_exclusive mov sp, x0 bl _kmain // Infinitely wait for events (aka "park the core"). .L_parking_loop: wfe b .L_parking_loop .size _start, . - _start .type _start, function .global _start
lunalunaa/lunaris
1,190
src/kernel/asm/switch.S
// switch(old_context, new_context) // should only need to save callee saved registers? // this switches from kernel to user .section .text __switch_to_task: stp x19, x20, [x0, #0] stp x21, x22, [x0, #16] stp x23, x24, [x0, #32] stp x25, x26, [x0, #48] stp x27, x28, [x0, #64] stp x29, x30, [x0, #80] mov x2, sp str x2, [x0, #96] ldp x19, x20, [x1, #0] ldp x21, x22, [x1, #16] ldp x23, x24, [x1, #32] ldp x25, x26, [x1, #48] ldp x27, x28, [x1, #64] ldp x29, x30, [x1, #80] ldr x2, [x1, #96] mov sp, x2 isb sy dsb sy eret __switch_to_scheduler: stp x19, x20, [x0, #0] stp x21, x22, [x0, #16] stp x23, x24, [x0, #32] stp x25, x26, [x0, #48] stp x27, x28, [x0, #64] stp x29, x30, [x0, #80] mov x2, sp str x2, [x0, #96] ldp x19, x20, [x1, #0] ldp x21, x22, [x1, #16] ldp x23, x24, [x1, #32] ldp x25, x26, [x1, #48] ldp x27, x28, [x1, #64] ldp x29, x30, [x1, #80] ldr x2, [x1, #96] mov sp, x2 isb sy dsb sy ret .global __switch_to_task .global __switch_to_scheduler .type __switch_to_scheduler, function .type __switch_to_task, function
lunalunaa/lunaris
2,250
src/kernel/asm/exception.S
// Typical exception vector table code. .section .text .align 11 vector_table_start: .org 0x000 curr_el_sp0_sync: b __syscall_handler .org 0x080 b __syscall_handler .org 0x100 b __syscall_handler .org 0x180 b __syscall_handler // Current exception level with SP_ELx, x > 0. .org 0x200 b __syscall_handler .org 0x280 b __syscall_handler .org 0x300 b __syscall_handler .org 0x380 b __syscall_handler // Lower exception level, AArch64 .org 0x400 // this is the one b __syscall_handler .org 0x480 b __syscall_handler .org 0x500 b __syscall_handler .org 0x580 b __syscall_handler // Lower exception level, AArch32 .org 0x600 b __syscall_handler .org 0x680 b __syscall_handler .org 0x700 b __syscall_handler .org 0x780 b __syscall_handler .org 0x800 __syscall_handler: // we are using user stack rn msr SPSel, #0 // let the kernel use SP_EL0 sub sp, sp, #288 stp x0, x1, [sp, #0] stp x2, x3, [sp, #16] stp x4, x5, [sp, #32] stp x6, x7, [sp, #48] stp x8, x9, [sp, #64] stp x10, x11, [sp, #80] stp x12, x13, [sp, #96] stp x14, x15, [sp, #112] stp x16, x17, [sp, #128] stp x18, x19, [sp, #144] stp x20, x21, [sp, #160] stp x22, x23, [sp, #176] stp x24, x25, [sp, #192] stp x26, x27, [sp, #208] stp x28, x29, [sp, #224] stp x30, xzr, [sp, #240] mrs x0, ESR_EL1 mrs x1, SPSR_EL1 stp x0, x1, [sp, #256] mrs x0, ELR_EL1 stp x0, x0, [sp, #272] bl get_kernel_sp mov x1, x0 mov x0, sp msr SPSel, #1 mov sp, x1 isb sy dsb sy b syscall __syscall_ret: msr SPSel, #0 ldr x0, [sp, #264] msr SPSR_EL1, x0 ldp x0, x1, [sp, #0] ldp x2, x3, [sp, #16] ldp x4, x5, [sp, #32] ldp x6, x7, [sp, #48] ldp x8, x9, [sp, #64] ldp x10, x11, [sp, #80] ldp x12, x13, [sp, #96] ldp x14, x15, [sp, #112] ldp x16, x17, [sp, #128] ldp x18, x19, [sp, #144] ldp x20, x21, [sp, #160] ldp x22, x23, [sp, #176] ldp x24, x25, [sp, #192] ldp x26, x27, [sp, #208] ldp x28, x29, [sp, #224] ldp x30, xzr, [sp, #240] add sp, sp, #288 isb sy dsb sy eret .global vector_table_start .global __syscall_ret .size vector_table_start, . - vector_table_start
lunarforky-z/octopus-rust1
2,134
src/arch/x86_64/start.s
.set PIC1_DATA_PORT, 0x21 .set PIC2_DATA_PORT, 0xA1 .set CR0_PG, 1 << 31 .set CR4_PAE, 1 << 5 .set CR4_OSFXSR, 1 << 9 .set PTE_PRESENT, 1 << 0 .set PTE_WRITE, 1 << 1 .set PTE_PS, 1 << 7 .set MSR_EFER, 0xC0000080 .set MSR_EFER_LME, 1 << 8 .set GDT_TYPE_DATA, 0x2 << 40 .set GDT_TYPE_CODE, 0xA << 40 .set GDT_NONSYS, 1 << 44 .set GDT_PRESENT, 1 << 47 .set GDT_BITS64, 1 << 53 .set GDT_BITS32, 1 << 54 .set SEGMENT_CODE, 0x8 .set SEGMENT_DATA, 0x10 .set STACK_SIZE, 8 * 1024 .bss .global mb_magic mb_magic: .long 0 .global mb_info mb_info: .long 0 .align 4096 pml4: .fill 512, 8 pdp0: .fill 512, 8 pd0: .fill 512, 8 .align 16 stack: .fill STACK_SIZE .data .align 4 gdt: .quad 0 .quad GDT_TYPE_CODE | GDT_NONSYS | GDT_PRESENT | GDT_BITS64 .quad GDT_TYPE_DATA | GDT_NONSYS | GDT_PRESENT | GDT_BITS32 end_of_gdt: gdti: .word end_of_gdt - gdt - 1 .quad gdt .text .code32 .global _start _start: /* Preserve magic and multiboot_info. */ movl %eax, mb_magic movl %ebx, mb_info /* Disable IRQs. */ movb $0xFF, %al outb %al, $PIC1_DATA_PORT outb %al, $PIC2_DATA_PORT /* Enable PAE and SSE. */ movl %cr4, %edx orl $(CR4_PAE | CR4_OSFXSR), %edx movl %edx, %cr4 /* Link page table entries and set page map. */ orl $(PTE_PRESENT | PTE_WRITE), pml4 orl $pdp0, pml4 orl $(PTE_PRESENT | PTE_WRITE), pdp0 orl $pd0, pdp0 orl $(PTE_PRESENT | PTE_WRITE | PTE_PS), pd0 movl $pml4, %eax movl %eax, %cr3 /* Enable long mode. */ movl $MSR_EFER, %ecx rdmsr orl $MSR_EFER_LME, %eax wrmsr /* Enable paging. */ movl %cr0, %eax orl $CR0_PG, %eax movl %eax, %cr0 /* Load GDT. */ lgdt gdti /* Long jump to 64-bit code. */ ljmp $SEGMENT_CODE, $start64 .code64 start64: /* Set segments and stack. */ movw $SEGMENT_DATA, %ax movw %ax, %ds movw %ax, %ss movq $(stack + STACK_SIZE), %rsp /* Call the Rust entry point. */ call main
luque667788/RaspiBareMetal
1,372
src/boot.S
// --- Boot section for Raspberry Pi 4 (AArch64) --- .section ".text.boot" .global _start // Entry point _start: // Only run on core 0; others wait forever mrs x1, mpidr_el1 and x1, x1, #3 // Mask for core ID cbz x1, 2f // If core 0, continue //q:what cbz 2f means. a: // If core ID is 0, branch to label 2 1: wfe // Other cores: wait for event b 1b 2: // Main core continues // Set up stack pointer below code ldr x1, =_start mov sp, x1 // Zero BSS section //q: what is the BSS section? a: The BSS (Block Started by Symbol) section is used to hold uninitialized global and static variables in a program. It is typically zeroed out at program startup. // ldr is used to load the address of __bss_start in register x1 // and __bss_size in register w2 // This is necessary to clear the BSS section before running the main program ldr x1, =__bss_start ldr w2, =__bss_size 3: cbz w2, 4f str xzr, [x1], #8 // Store 0, advance pointer sub w2, w2, #1 cbnz w2, 3b // 3b means "branch to label 3 if w2 is not zero" //q:why sometimes it is f or b? a: f means "forward" and b means "backward" in branch instructions // Jump to Rust main() 4: bl main b 1b // If main returns, halt
luvroc/rCore-Tutorial-2024S
2,894
ch3/os/src/link_app.S
.align 3 .section .data .global _num_app _num_app: .quad 13 .quad app_0_start .quad app_1_start .quad app_2_start .quad app_3_start .quad app_4_start .quad app_5_start .quad app_6_start .quad app_7_start .quad app_8_start .quad app_9_start .quad app_10_start .quad app_11_start .quad app_12_start .quad app_12_end .global _app_names _app_names: .string "ch2b_bad_address" .string "ch2b_bad_instructions" .string "ch2b_bad_register" .string "ch2b_hello_world" .string "ch2b_power_3" .string "ch2b_power_5" .string "ch2b_power_7" .string "ch3_sleep" .string "ch3_sleep1" .string "ch3_taskinfo" .string "ch3b_yield0" .string "ch3b_yield1" .string "ch3b_yield2" .section .data .global app_0_start .global app_0_end .align 3 app_0_start: .incbin "../ci-user/user/build/bin/ch2b_bad_address.bin" app_0_end: .section .data .global app_1_start .global app_1_end .align 3 app_1_start: .incbin "../ci-user/user/build/bin/ch2b_bad_instructions.bin" app_1_end: .section .data .global app_2_start .global app_2_end .align 3 app_2_start: .incbin "../ci-user/user/build/bin/ch2b_bad_register.bin" app_2_end: .section .data .global app_3_start .global app_3_end .align 3 app_3_start: .incbin "../ci-user/user/build/bin/ch2b_hello_world.bin" app_3_end: .section .data .global app_4_start .global app_4_end .align 3 app_4_start: .incbin "../ci-user/user/build/bin/ch2b_power_3.bin" app_4_end: .section .data .global app_5_start .global app_5_end .align 3 app_5_start: .incbin "../ci-user/user/build/bin/ch2b_power_5.bin" app_5_end: .section .data .global app_6_start .global app_6_end .align 3 app_6_start: .incbin "../ci-user/user/build/bin/ch2b_power_7.bin" app_6_end: .section .data .global app_7_start .global app_7_end .align 3 app_7_start: .incbin "../ci-user/user/build/bin/ch3_sleep.bin" app_7_end: .section .data .global app_8_start .global app_8_end .align 3 app_8_start: .incbin "../ci-user/user/build/bin/ch3_sleep1.bin" app_8_end: .section .data .global app_9_start .global app_9_end .align 3 
app_9_start: .incbin "../ci-user/user/build/bin/ch3_taskinfo.bin" app_9_end: .section .data .global app_10_start .global app_10_end .align 3 app_10_start: .incbin "../ci-user/user/build/bin/ch3b_yield0.bin" app_10_end: .section .data .global app_11_start .global app_11_end .align 3 app_11_start: .incbin "../ci-user/user/build/bin/ch3b_yield1.bin" app_11_end: .section .data .global app_12_start .global app_12_end .align 3 app_12_start: .incbin "../ci-user/user/build/bin/ch3b_yield2.bin" app_12_end:
luvroc/rCore-Tutorial-2024S
1,488
ch3/os/src/trap/trap.S
.altmacro .macro SAVE_GP n sd x\n, \n*8(sp) .endm .macro LOAD_GP n ld x\n, \n*8(sp) .endm .section .text .globl __alltraps .globl __restore .align 2 __alltraps: csrrw sp, sscratch, sp # now sp->kernel stack, sscratch->user stack # allocate a TrapContext on kernel stack addi sp, sp, -34*8 # save general-purpose registers sd x1, 1*8(sp) # skip sp(x2), we will save it later sd x3, 3*8(sp) # skip tp(x4), application does not use it # save x5~x31 .set n, 5 .rept 27 SAVE_GP %n .set n, n+1 .endr # we can use t0/t1/t2 freely, because they were saved on kernel stack csrr t0, sstatus csrr t1, sepc sd t0, 32*8(sp) sd t1, 33*8(sp) # read user stack from sscratch and save it on the kernel stack csrr t2, sscratch sd t2, 2*8(sp) # set input argument of trap_handler(cx: &mut TrapContext) mv a0, sp call trap_handler __restore: # now sp->kernel stack(after allocated), sscratch->user stack # restore sstatus/sepc ld t0, 32*8(sp) ld t1, 33*8(sp) ld t2, 2*8(sp) csrw sstatus, t0 csrw sepc, t1 csrw sscratch, t2 # restore general-purpuse registers except sp/tp ld x1, 1*8(sp) ld x3, 3*8(sp) .set n, 5 .rept 27 LOAD_GP %n .set n, n+1 .endr # release TrapContext on kernel stack addi sp, sp, 34*8 # now sp->kernel stack, sscratch->user stack csrrw sp, sscratch, sp sret
lwx270901/registryintern
1,569
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/riscv64.s
#include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li x10, STACK_DIRECTION_DESCENDING jr x1 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc add x10, x2, x0 jr x1 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */ .cfi_startproc add x2, x12, x0 jr x11 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */ .cfi_startproc sd x1, -8(x13) sd x2, -16(x13) addi x2, x13, -16 .cfi_def_cfa x2, 16 .cfi_offset x1, -8 .cfi_offset x2, -16 jalr x1, x12, 0 ld x1, 8(x2) .cfi_restore x1 ld x2, 0(x2) .cfi_restore x2 jr x1 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,571
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/riscv.s
#include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li x10, STACK_DIRECTION_DESCENDING jr x1 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc add x10, x2, x0 jr x1 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */ .cfi_startproc add x2, x12, x0 jr x11 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */ .cfi_startproc sw x1, -12(x13) sw x2, -16(x13) addi x2, x13, -16 .cfi_def_cfa x2, 16 .cfi_offset x1, -12 .cfi_offset x2, -16 jalr x1, x12, 0 lw x1, 4(x2) .cfi_restore x1 lw x2, 0(x2) .cfi_restore x2 jr x1 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,971
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/aarch_aapcs64.s
#include "psm.h" .text #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define TYPE(fnname) #define FUNCTION(fnname) _##fnname #define END_FUNCTION(fnname) #elif CFG_TARGET_OS_windows #define GLOBL(fnname) .globl fnname #define TYPE(fnname) #define FUNCTION(fnname) fnname #define LABEL_FOR_SIZE(endlabel) #define SIZE(fnname,endlabel) #else #define GLOBL(fnname) .globl fnname #define TYPE(fnname) .type fnname,@function #define FUNCTION(fnname) fnname #define END_FUNCTION(fnname) .size fnname,.-fnname #endif GLOBL(rust_psm_stack_direction) .p2align 2 TYPE(rust_psm_stack_direction) FUNCTION(rust_psm_stack_direction): /* extern "C" fn() -> u8 */ .cfi_startproc orr w0, wzr, #STACK_DIRECTION_DESCENDING ret END_FUNCTION(rust_psm_stack_direction) .cfi_endproc GLOBL(rust_psm_stack_pointer) .p2align 2 TYPE(rust_psm_stack_pointer) FUNCTION(rust_psm_stack_pointer): /* extern "C" fn() -> *mut u8 */ .cfi_startproc mov x0, sp ret END_FUNCTION(rust_psm_stack_pointer) .cfi_endproc GLOBL(rust_psm_replace_stack) .p2align 2 TYPE(rust_psm_replace_stack) FUNCTION(rust_psm_replace_stack): /* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi */ mov sp, x2 br x1 END_FUNCTION(rust_psm_replace_stack) .cfi_endproc GLOBL(rust_psm_on_stack) .p2align 2 TYPE(rust_psm_on_stack) FUNCTION(rust_psm_on_stack): /* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */ .cfi_startproc stp x29, x30, [sp, #-16]! .cfi_def_cfa sp, 16 mov x29, sp .cfi_def_cfa x29, 16 .cfi_offset x29, -16 .cfi_offset x30, -8 mov sp, x3 blr x2 mov sp, x29 .cfi_def_cfa sp, 16 ldp x29, x30, [sp], #16 .cfi_def_cfa sp, 0 .cfi_restore x29 .cfi_restore x30 ret END_FUNCTION(rust_psm_on_stack) .cfi_endproc
lwx270901/registryintern
2,299
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/arm_aapcs.s
#include "psm.h" .text .syntax unified #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define THUMBTYPE(fnname) .thumb_func _##fnname #define FUNCTION(fnname) _##fnname #define THUMBFN .code 16 #define SIZE(fnname,endlabel) #define FNSTART #define CANTUNWIND #define FNEND #else #define GLOBL(fnname) .globl fnname #define THUMBTYPE(fnname) .type fnname,%function #define FUNCTION(fnname) fnname #define THUMBFN .code 16 #define SIZE(fnname,endlabel) .size fnname,endlabel-fnname #define FNSTART .fnstart #define CANTUNWIND .cantunwind #define FNEND .fnend #endif GLOBL(rust_psm_stack_direction) .p2align 2 THUMBTYPE(rust_psm_stack_direction) THUMBFN FUNCTION(rust_psm_stack_direction): /* extern "C" fn() -> u8 */ FNSTART .cfi_startproc /* movs to support Thumb-1 */ movs r0, #STACK_DIRECTION_DESCENDING bx lr .rust_psm_stack_direction_end: SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end) .cfi_endproc CANTUNWIND FNEND GLOBL(rust_psm_stack_pointer) .p2align 2 THUMBTYPE(rust_psm_stack_pointer) THUMBFN FUNCTION(rust_psm_stack_pointer): /* extern "C" fn() -> *mut u8 */ FNSTART .cfi_startproc mov r0, sp bx lr .rust_psm_stack_pointer_end: SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end) .cfi_endproc CANTUNWIND FNEND GLOBL(rust_psm_replace_stack) .p2align 2 THUMBTYPE(rust_psm_replace_stack) THUMBFN FUNCTION(rust_psm_replace_stack): /* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */ FNSTART .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi */ mov sp, r2 bx r1 .rust_psm_replace_stack_end: SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end) .cfi_endproc CANTUNWIND FNEND GLOBL(rust_psm_on_stack) .p2align 2 THUMBTYPE(rust_psm_on_stack) THUMBFN FUNCTION(rust_psm_on_stack): /* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */ FNSTART .cfi_startproc push {r4, lr} .cfi_def_cfa_offset 8 mov r4, sp 
.cfi_def_cfa_register r4 .cfi_offset lr, -4 .cfi_offset r4, -8 mov sp, r3 blx r2 mov sp, r4 .cfi_restore sp pop {r4, pc} .rust_psm_on_stack_end: SIZE(rust_psm_on_stack,.rust_psm_on_stack_end) .cfi_endproc CANTUNWIND FNEND
lwx270901/registryintern
2,557
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/x86.s
#include "psm.h" /* NOTE: fastcall calling convention used on all x86 targets */ .text #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define TYPE(fnname) #define FUNCTION(fnname) _##fnname #define SIZE(fnname,endlabel) #else #define GLOBL(fnname) .globl fnname #define TYPE(fnname) .type fnname,@function #define FUNCTION(fnname) fnname #define SIZE(fnname,endlabel) .size fnname,endlabel-fnname #endif GLOBL(rust_psm_stack_direction) .p2align 4 TYPE(rust_psm_stack_direction) FUNCTION(rust_psm_stack_direction): /* extern "fastcall" fn() -> u8 (%al) */ .cfi_startproc movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64 retl .rust_psm_stack_direction_end: SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end) .cfi_endproc GLOBL(rust_psm_stack_pointer) .p2align 4 TYPE(rust_psm_stack_pointer) FUNCTION(rust_psm_stack_pointer): /* extern "fastcall" fn() -> *mut u8 (%rax) */ .cfi_startproc leal 4(%esp), %eax retl .rust_psm_stack_pointer_end: SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end) .cfi_endproc GLOBL(rust_psm_replace_stack) .p2align 4 TYPE(rust_psm_replace_stack) FUNCTION(rust_psm_replace_stack): /* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx Note, that the callee expects the stack to be offset by 4 bytes (normally, a return address would be store there) off the required stack alignment on entry. To offset the stack in such a way we use the `calll` instruction, however it would also be possible to to use plain `jmpl` but would require to adjust the stack manually, which cannot be easily done, because the stack pointer argument is already stored in memory. 
*/ movl 4(%esp), %esp calll *%edx ud2 .rust_psm_replace_stack_end: SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end) .cfi_endproc GLOBL(rust_psm_on_stack) .p2align 4 TYPE(rust_psm_on_stack) FUNCTION(rust_psm_on_stack): /* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */ .cfi_startproc pushl %ebp .cfi_def_cfa %esp, 8 .cfi_offset %ebp, -8 movl %esp, %ebp .cfi_def_cfa_register %ebp movl 12(%ebp), %esp calll *8(%ebp) movl %ebp, %esp popl %ebp .cfi_def_cfa %esp, 4 retl $8 .rust_psm_on_stack_end: SIZE(rust_psm_on_stack,.rust_psm_on_stack_end) .cfi_endproc
lwx270901/registryintern
3,609
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/powerpc64_aix.s
.csect .text[PR],2 .file "powerpc64_aix.s" .globl rust_psm_stack_direction[DS] .globl .rust_psm_stack_direction .align 4 .csect rust_psm_stack_direction[DS],3 .vbyte 8, .rust_psm_stack_direction .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_stack_direction: # extern "C" fn() -> u8 li 3, 2 blr L..rust_psm_stack_direction_end: # Following bytes form the traceback table on AIX. # For specification, see https://www.ibm.com/docs/en/aix/7.2?topic=processor-traceback-tables. # For implementation, see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp, # `PPCAIXAsmPrinter::emitTracebackTable`. .vbyte 4, 0x00000000 # Traceback table begin, for unwinder to search the table. .byte 0x00 # Version = 0 .byte 0x09 # Language = CPlusPlus, since rust is using C++-like LSDA. .byte 0x20 # -IsGlobaLinkage, -IsOutOfLineEpilogOrPrologue # +HasTraceBackTableOffset, -IsInternalProcedure # -HasControlledStorage, -IsTOCless # -IsFloatingPointPresent # -IsFloatingPointOperationLogOrAbortEnabled .byte 0x40 # -IsInterruptHandler, +IsFunctionNamePresent, -IsAllocaUsed # OnConditionDirective = 0, -IsCRSaved, -IsLRSaved .byte 0x80 # +IsBackChainStored, -IsFixup, NumOfFPRsSaved = 0 .byte 0x00 # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 0 .byte 0x00 # NumberOfFixedParms = 0 .byte 0x01 # NumberOfFPParms = 0, +HasParmsOnStack .vbyte 4, L..rust_psm_stack_direction_end-.rust_psm_stack_direction #Function size .vbyte 2, 0x0018 # Function name len = 24 .byte "rust_psm_stack_direction" # Function Name .globl rust_psm_stack_pointer[DS] .globl .rust_psm_stack_pointer .align 4 .csect rust_psm_stack_pointer[DS],3 .vbyte 8, .rust_psm_stack_pointer .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_stack_pointer: # extern "C" fn() -> *mut u8 mr 3, 1 blr L..rust_psm_stack_pointer_end: .vbyte 4, 0x00000000 .byte 0x00 .byte 0x09 .byte 0x20 .byte 0x40 .byte 0x80 .byte 0x00 .byte 0x00 .byte 0x01 .vbyte 4, 
L..rust_psm_stack_pointer_end-.rust_psm_stack_pointer .vbyte 2, 0x0016 .byte "rust_psm_stack_pointer" .globl rust_psm_replace_stack[DS] .globl .rust_psm_replace_stack .align 4 .csect rust_psm_replace_stack[DS],3 .vbyte 8, .rust_psm_replace_stack .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_replace_stack: # extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) # Load the function pointer and toc pointer from TOC and make the call. ld 2, 8(4) ld 4, 0(4) addi 5, 5, -48 mr 1, 5 mtctr 4 bctr L..rust_psm_replace_stack_end: .vbyte 4, 0x00000000 .byte 0x00 .byte 0x09 .byte 0x20 .byte 0x40 .byte 0x80 .byte 0x00 .byte 0x03 .byte 0x01 .vbyte 4, 0x00000000 # Parameter type = i, i, i .vbyte 4, L..rust_psm_replace_stack_end-.rust_psm_replace_stack .vbyte 2, 0x0016 .byte "rust_psm_replace_stack" .globl rust_psm_on_stack[DS] .globl .rust_psm_on_stack .align 4 .csect rust_psm_on_stack[DS],3 .vbyte 8, .rust_psm_on_stack .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_on_stack: # extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) mflr 0 std 2, -72(6) std 0, -8(6) sub 6, 6, 1 addi 6, 6, -112 stdux 1, 1, 6 ld 2, 8(5) ld 5, 0(5) mtctr 5 bctrl ld 2, 40(1) ld 0, 104(1) mtlr 0 ld 1, 0(1) blr L..rust_psm_on_stack_end: .vbyte 4, 0x00000000 .byte 0x00 .byte 0x09 .byte 0x20 .byte 0x41 .byte 0x80 .byte 0x00 .byte 0x04 .byte 0x01 .vbyte 4, 0x00000000 # Parameter type = i, i, i, i .vbyte 4, L..rust_psm_on_stack_end-.rust_psm_on_stack .vbyte 2, 0x0011 .byte "rust_psm_on_stack" .toc
lwx270901/registryintern
2,045
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/powerpc64_openpower.s
/* Implementation of stack swtiching routines for OpenPOWER 64-bit ELF ABI The specification can be found at http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html This ABI is usually used by the ppc64le targets. */ #include "psm.h" .text .abiversion 2 .globl rust_psm_stack_direction .p2align 4 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li 3, STACK_DIRECTION_DESCENDING blr .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 4 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc mr 3, 1 blr .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 4 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */ .cfi_startproc addi 5, 5, -32 mtctr 4 mr 12, 4 mr 1, 5 bctr .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 4 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */ .cfi_startproc mflr 0 std 0, -8(6) std 2, -24(6) sub 6, 6, 1 addi 6, 6, -48 stdux 1, 1, 6 .cfi_def_cfa r1, 48 .cfi_offset r1, -48 .cfi_offset r2, -24 .cfi_offset lr, -8 mr 12, 5 mtctr 5 bctrl ld 2, 24(1) .cfi_restore r2 ld 0, 40(1) mtlr 0 .cfi_restore lr /* FIXME: after this instructin backtrace breaks until control returns to the caller */ ld 1, 0(1) blr .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,218
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/x86_64_windows_gnu.s
.text .def rust_psm_stack_direction .scl 2 .type 32 .endef .globl rust_psm_stack_direction .p2align 4 rust_psm_stack_direction: /* extern "sysv64" fn() -> u8 (%al) */ .cfi_startproc movb $2, %al # always descending on x86_64 retq .cfi_endproc .def rust_psm_stack_pointer .scl 2 .type 32 .endef .globl rust_psm_stack_pointer .p2align 4 rust_psm_stack_pointer: /* extern "sysv64" fn() -> *mut u8 (%rax) */ .cfi_startproc leaq 8(%rsp), %rax retq .cfi_endproc .def rust_psm_replace_stack .scl 2 .type 32 .endef .globl rust_psm_replace_stack .p2align 4 rust_psm_replace_stack: /* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8, %rcx: *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi. 8-byte offset necessary to account for the "return" pointer that would otherwise be placed onto stack with a regular call */ movq %gs:0x08, %rdx movq %gs:0x10, %rcx leaq -8(%rdx), %rsp jmpq *%rsi .cfi_endproc .def rust_psm_on_stack .scl 2 .type 32 .endef .globl rust_psm_on_stack .p2align 4 rust_psm_on_stack: /* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8, %r8: *mut u8) NB: on Windows for SEH to work at all, the pointers in TIB, thread information block, need to be fixed up. Otherwise, it seems that exception mechanism on Windows will not bother looking for exception handlers at *all* if they happen to fall outside the are specified in TIB. This necessitates an API difference from the usual 4-argument signature used elsewhere. FIXME: this needs a catch-all exception handler that aborts in case somebody unwinds into here. 
*/ .cfi_startproc pushq %rbp .cfi_def_cfa %rsp, 16 .cfi_offset %rbp, -16 pushq %gs:0x08 .cfi_def_cfa %rsp, 24 pushq %gs:0x10 .cfi_def_cfa %rsp, 32 movq %rsp, %rbp .cfi_def_cfa_register %rbp movq %rcx, %gs:0x08 movq %r8, %gs:0x10 movq %rcx, %rsp callq *%rdx movq %rbp, %rsp popq %gs:0x10 .cfi_def_cfa %rsp, 24 popq %gs:0x08 .cfi_def_cfa %rsp, 16 popq %rbp .cfi_def_cfa %rsp, 8 retq .cfi_endproc
lwx270901/registryintern
2,541
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/powerpc64.s
/* Implementation of the AIX-like PowerPC ABI. Seems to be used by the big-endian PowerPC targets. The following references were used during the implementation of this code: https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_rntime_stack.htm https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_reg_use_conv.htm https://www.ibm.com/developerworks/library/l-powasm4/index.html */ #include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li 3, STACK_DIRECTION_DESCENDING blr .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc mr 3, 1 blr .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */ .cfi_startproc ld 2, 8(4) ld 4, 0(4) /* do not allocate the whole 112-byte sized frame, we know wont be used */ addi 5, 5, -48 mr 1, 5 mtctr 4 bctr .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */ .cfi_startproc mflr 0 std 2, -72(6) std 0, -8(6) sub 6, 6, 1 addi 6, 6, -112 stdux 1, 1, 6 .cfi_def_cfa r1, 112 .cfi_offset r1, -112 .cfi_offset r2, -72 .cfi_offset lr, -8 /* load the function pointer from TOC and make the call */ ld 2, 8(5) ld 5, 0(5) mtctr 5 bctrl ld 2, 40(1) .cfi_restore r2 ld 0, 
104(1) mtlr 0 .cfi_restore lr /* FIXME: after this instruction backtrace breaks until control returns to the caller. That being said compiler-generated code has the same issue, so I guess that is fine for now? */ ld 1, 0(1) .cfi_restore r1 blr .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,264
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/x86_windows_gnu.s
/* FIXME: this works locally but not on appveyor??!? */ /* NOTE: fastcall calling convention used on all x86 targets */ .text .def @rust_psm_stack_direction@0 .scl 2 .type 32 .endef .globl @rust_psm_stack_direction@0 .p2align 4 @rust_psm_stack_direction@0: /* extern "fastcall" fn() -> u8 (%al) */ .cfi_startproc movb $2, %al # always descending on x86_64 retl .cfi_endproc .def @rust_psm_stack_pointer@0 .scl 2 .type 32 .endef .globl @rust_psm_stack_pointer@0 .p2align 4 @rust_psm_stack_pointer@0: /* extern "fastcall" fn() -> *mut u8 (%rax) */ .cfi_startproc leal 4(%esp), %eax retl .cfi_endproc .def @rust_psm_replace_stack@16 .scl 2 .type 32 .endef .globl @rust_psm_replace_stack@16 .p2align 4 @rust_psm_replace_stack@16: /* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx Note, that the callee expects the stack to be offset by 4 bytes (normally, a return address would be store there) off the required stack alignment on entry. To offset the stack in such a way we use the `calll` instruction, however it would also be possible to to use plain `jmpl` but would require to adjust the stack manually, which cannot be easily done, because the stack pointer argument is already stored in memory. 
*/ movl 8(%esp), %eax mov %eax, %fs:0x08 movl 4(%esp), %esp mov %esp, %fs:0x04 calll *%edx ud2 .cfi_endproc .def @rust_psm_on_stack@16 .scl 2 .type 32 .endef .globl @rust_psm_on_stack@16 .p2align 4 @rust_psm_on_stack@16: /* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */ .cfi_startproc pushl %ebp .cfi_def_cfa %esp, 8 .cfi_offset %ebp, -8 pushl %fs:0x04 .cfi_def_cfa %esp, 12 pushl %fs:0x08 .cfi_def_cfa %esp, 16 movl %esp, %ebp .cfi_def_cfa_register %ebp movl 24(%ebp), %eax movl %eax, %fs:0x08 movl 20(%ebp), %esp movl %esp, %fs:0x04 calll *16(%ebp) movl %ebp, %esp popl %fs:0x08 .cfi_def_cfa %esp, 12 popl %fs:0x04 .cfi_def_cfa %esp, 8 popl %ebp .cfi_def_cfa %esp, 4 retl $12 .cfi_endproc
lwx270901/registryintern
1,938
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/sparc64.s
#include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov STACK_DIRECTION_DESCENDING, %o0 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov %o6, %o0 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */ .cfi_startproc .cfi_def_cfa 0, 0 .cfi_return_column 0 jmpl %o1, %g0 /* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */ add %o2, -0x7ff, %o6 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */ .cfi_startproc /* The fact that locals and saved register windows are offset by 2kB is very nasty property of SPARC architecture and ABI. In this case it forces us to slice off 2kB of the stack space outright for no good reason other than adapting to a botched design. */ save %o3, -0x87f, %o6 .cfi_def_cfa_register %fp .cfi_window_save .cfi_register %r15, %r31 mov %i1, %o1 jmpl %i2, %o7 mov %i0, %o0 ret restore .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,144
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/mips64_eabi.s
/* Not only MIPS has 20 different ABIs... nobody tells anybody what specific variant of which ABI is used where. This is an "EABI" implementation based on the following page: http://www.cygwin.com/ml/binutils/2003-06/msg00436.html */ #include "psm.h" .set noreorder /* we’ll manage the delay slots on our own, thanks! */ .text .globl rust_psm_stack_direction .p2align 3 .type rust_psm_stack_direction,@function .ent rust_psm_stack_direction /* extern "C" fn() -> u8 */ rust_psm_stack_direction: .cfi_startproc jr $31 addiu $2, $zero, STACK_DIRECTION_DESCENDING .end rust_psm_stack_direction .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 3 .type rust_psm_stack_pointer,@function .ent rust_psm_stack_pointer /* extern "C" fn() -> *mut u8 */ rust_psm_stack_pointer: .cfi_startproc jr $31 move $2, $29 .end rust_psm_stack_pointer .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 3 .type rust_psm_replace_stack,@function .ent rust_psm_replace_stack /* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */ rust_psm_replace_stack: .cfi_startproc move $25, $5 jr $5 move $29, $6 .end rust_psm_replace_stack .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc .globl rust_psm_on_stack .p2align 3 .type rust_psm_on_stack,@function .ent rust_psm_on_stack /* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */ rust_psm_on_stack: .cfi_startproc sd $29, -8($7) sd $31, -16($7) .cfi_def_cfa 7, 0 .cfi_offset 31, -16 .cfi_offset 29, -8 move $25, $6 jalr $31, $6 daddiu $29, $7, -16 .cfi_def_cfa 29, 16 ld $31, 0($29) .cfi_restore 31 ld $29, 8($29) .cfi_restore 29 jr $31 nop .end rust_psm_on_stack .rust_psm_on_stack_end: .size 
rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,094
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/x86_64.s
#include "psm.h" /* NOTE: sysv64 calling convention is used on all x86_64 targets, including Windows! */ .text #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define TYPE(fnname) #define FUNCTION(fnname) _##fnname #define END_FUNCTION(fnname) #else #define GLOBL(fnname) .globl fnname #define TYPE(fnname) .type fnname,@function #define FUNCTION(fnname) fnname #define END_FUNCTION(fnname) .size fnname,.-fnname #endif GLOBL(rust_psm_stack_direction) .p2align 4 TYPE(rust_psm_stack_direction) FUNCTION(rust_psm_stack_direction): /* extern "sysv64" fn() -> u8 (%al) */ .cfi_startproc movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64 retq END_FUNCTION(rust_psm_stack_direction) .cfi_endproc GLOBL(rust_psm_stack_pointer) .p2align 4 TYPE(rust_psm_stack_pointer) FUNCTION(rust_psm_stack_pointer): /* extern "sysv64" fn() -> *mut u8 (%rax) */ .cfi_startproc leaq 8(%rsp), %rax retq .rust_psm_stack_pointer_end: END_FUNCTION(rust_psm_stack_pointer) .cfi_endproc GLOBL(rust_psm_replace_stack) .p2align 4 TYPE(rust_psm_replace_stack) FUNCTION(rust_psm_replace_stack): /* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi. 8-byte offset necessary to account for the "return" pointer that would otherwise be placed onto stack with a regular call */ leaq -8(%rdx), %rsp jmpq *%rsi .rust_psm_replace_stack_end: END_FUNCTION(rust_psm_replace_stack) .cfi_endproc GLOBL(rust_psm_on_stack) .p2align 4 TYPE(rust_psm_on_stack) FUNCTION(rust_psm_on_stack): /* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8) */ .cfi_startproc pushq %rbp .cfi_def_cfa %rsp, 16 .cfi_offset %rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register %rbp movq %rcx, %rsp callq *%rdx movq %rbp, %rsp popq %rbp .cfi_def_cfa %rsp, 8 retq END_FUNCTION(rust_psm_on_stack) .cfi_endproc
lwx270901/registryintern
2,149
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/mips_eabi.s
/* Not only MIPS has 20 different ABIs... nobody tells anybody what specific variant of which ABI is used where. This is an "EABI" implementation based on the following page: http://www.cygwin.com/ml/binutils/2003-06/msg00436.html */ #include "psm.h" .set noreorder /* we’ll manage the delay slots on our own, thanks! */ .text .abicalls .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function .ent rust_psm_stack_direction /* extern "C" fn() -> u8 */ rust_psm_stack_direction: .cfi_startproc jr $31 addiu $2, $zero, STACK_DIRECTION_DESCENDING .end rust_psm_stack_direction .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function .ent rust_psm_stack_pointer /* extern "C" fn() -> *mut u8 */ rust_psm_stack_pointer: .cfi_startproc jr $31 move $2, $29 .end rust_psm_stack_pointer .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function .ent rust_psm_replace_stack /* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */ rust_psm_replace_stack: .cfi_startproc move $25, $5 jr $5 move $29, $6 .end rust_psm_replace_stack .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function .ent rust_psm_on_stack /* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */ rust_psm_on_stack: .cfi_startproc sw $29, -4($7) sw $31, -8($7) .cfi_def_cfa 7, 0 .cfi_offset 31, -8 .cfi_offset 29, -4 move $25, $6 jalr $31, $6 addiu $29, $7, -8 .cfi_def_cfa 29, 8 lw $31, 0($29) .cfi_restore 31 lw $29, 4($29) .cfi_restore 29 jr $31 nop .end rust_psm_on_stack .rust_psm_on_stack_end: .size 
rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,568
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/loongarch64.s
#include "psm.h" .text .globl rust_psm_stack_direction .align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li.w $r4, STACK_DIRECTION_DESCENDING jr $r1 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc move $r4, $r3 jr $r1 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */ .cfi_startproc move $r3, $r6 jr $r5 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize, usize), r7: *mut u8) */ .cfi_startproc st.d $r1, $r7, -8 st.d $r3, $r7, -16 addi.d $r3, $r7, -16 .cfi_def_cfa 3, 16 .cfi_offset 1, -8 .cfi_offset 3, -16 jirl $r1, $r6, 0 ld.d $r1, $r3, 8 .cfi_restore 1 ld.d $r3, $r3, 0 .cfi_restore 3 jr $r1 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,722
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/sparc_sysv.s
#include "psm.h" /* FIXME: this ABI has definitely not been verified at all */ .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov STACK_DIRECTION_DESCENDING, %o0 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov %o6, %o0 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */ .cfi_startproc .cfi_def_cfa 0, 0 .cfi_return_column 0 jmpl %o1, %g0 /* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */ add %o2, -0x3ff, %o6 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */ .cfi_startproc save %o3, -0x43f, %o6 .cfi_def_cfa_register %fp .cfi_window_save .cfi_register %r15, %r31 mov %i1, %o1 jmpl %i2, %o7 mov %i0, %o0 ret restore .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,694
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/wasm32.s
#include "psm.h" # Note that this function is not compiled when this package is uploaded to # crates.io, this source is only here as a reference for how the corresponding # wasm32.o was generated. This file can be compiled with: # # cpp psm/src/arch/wasm32.s | llvm-mc -o psm/src/arch/wasm32.o --arch=wasm32 -filetype=obj # # where you'll want to ensure that `llvm-mc` is from a relatively recent # version of LLVM. .globaltype __stack_pointer, i32 .globl rust_psm_stack_direction .type rust_psm_stack_direction,@function rust_psm_stack_direction: .functype rust_psm_stack_direction () -> (i32) i32.const STACK_DIRECTION_DESCENDING end_function .globl rust_psm_stack_pointer .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: .functype rust_psm_stack_pointer () -> (i32) global.get __stack_pointer end_function .globl rust_psm_on_stack .type rust_psm_on_stack,@function rust_psm_on_stack: .functype rust_psm_on_stack (i32, i32, i32, i32) -> () # get our new stack argument, then save the old stack # pointer into that local local.get 3 global.get __stack_pointer local.set 3 global.set __stack_pointer # Call our indirect function specified local.get 0 local.get 1 local.get 2 call_indirect (i32, i32) -> () # restore the stack pointer before returning local.get 3 global.set __stack_pointer end_function .globl rust_psm_replace_stack .type rust_psm_replace_stack,@function rust_psm_replace_stack: .functype rust_psm_replace_stack (i32, i32, i32) -> () local.get 2 global.set __stack_pointer local.get 0 local.get 1 call_indirect (i32) -> () unreachable end_function
lwx270901/registryintern
2,080
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/zseries_linux.s
/* Implementation of stack swtiching routines for zSeries LINUX ABI. This ABI is used by the s390x-unknown-linux-gnu target. Documents used: * LINUX for zSeries: ELF Application Binary Interface Supplement (1st ed., 2001) (LNUX-1107-01) * z/Architecture: Principles of Operation (4th ed., 2004) (SA22-7832-03) */ #include "psm.h" .text .globl rust_psm_stack_direction .p2align 4 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc lghi %r2, STACK_DIRECTION_DESCENDING br %r14 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 4 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc la %r2, 0(%r15) br %r14 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 4 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(r2: usize, r3: extern "C" fn(usize), r4: *mut u8) */ .cfi_startproc /* FIXME: backtrace does not terminate cleanly for some reason */ lay %r15, -160(%r4) /* FIXME: this is `basr` instead of `br` purely to remove the backtrace link to the caller */ basr %r14, %r3 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 4 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(r2: usize, r3: usize, r4: extern "C" fn(usize, usize), r5: *mut u8) */ .cfi_startproc stmg %r14, %r15, -16(%r5) lay %r15, -176(%r5) .cfi_def_cfa %r15, 176 .cfi_offset %r14, -16 .cfi_offset %r15, -8 basr %r14, %r4 lmg %r14, %r15, 160(%r15) .cfi_restore %r14 .cfi_restore %r15 br %r14 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,963
home/minh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/psm-0.1.23/src/arch/powerpc32.s
#include "psm.h" /* FIXME: this probably does not cover all ABIs? Tested with sysv only, possibly works for AIX as well? */ .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li 3, STACK_DIRECTION_DESCENDING blr .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc mr 3, 1 blr .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */ .cfi_startproc /* NOTE: perhaps add a debug-assertion for stack alignment? */ addi 5, 5, -16 mr 1, 5 mtctr 4 bctr .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */ .cfi_startproc mflr 0 stw 0, -24(6) sub 6, 6, 1 addi 6, 6, -32 stwux 1, 1, 6 .cfi_def_cfa r1, 32 .cfi_offset r1, -32 .cfi_offset lr, -24 mtctr 5 bctrl lwz 0, 8(1) mtlr 0 .cfi_restore lr /* FIXME: after this instruction backtrace breaks until control returns to the caller That being said compiler-generated code has the same issue, so I guess that is fine for now? */ lwz 1, 0(1) .cfi_restore r1 blr .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,118
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.7.5/src/detail/asm/asm_x86_64_sysv_elf_gas.S
.text .globl prefetch .type prefetch,@function .align 16 prefetch: prefetcht2 (%rdi) ret .size prefetch,.-prefetch .text .globl bootstrap_green_task .type bootstrap_green_task,@function .align 16 bootstrap_green_task: mov %r12, %rdi /* setup the function arg */ mov %r13, %rsi /* setup the function arg */ and $-16, %rsp /* align the stack pointer */ mov %r14, (%rsp) /* this is the new return adrress */ ret .size bootstrap_green_task,.-bootstrap_green_task .text .globl swap_registers .type swap_registers,@function .align 16 swap_registers: mov %rbx, (0*8)(%rdi) mov %rsp, (1*8)(%rdi) mov %rbp, (2*8)(%rdi) mov %r12, (4*8)(%rdi) mov %r13, (5*8)(%rdi) mov %r14, (6*8)(%rdi) mov %r15, (7*8)(%rdi) mov (0*8)(%rsi), %rbx mov (1*8)(%rsi), %rsp mov (2*8)(%rsi), %rbp mov (4*8)(%rsi), %r12 mov (5*8)(%rsi), %r13 mov (6*8)(%rsi), %r14 mov (7*8)(%rsi), %r15 pop %rax jmp *%rax .size bootstrap_green_task,.-bootstrap_green_task /* Mark that we don't need executable stack. */ .section .note.GNU-stack,"",%progbits
lwx270901/registryintern
1,242
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/generator-0.7.5/src/detail/asm/asm_aarch64_aapcs_elf_gas.S
.text .globl prefetch .type prefetch,@function .align 16 prefetch: prfm pldl1keep, [x0] ret .size prefetch,.-prefetch .text .globl bootstrap_green_task .type bootstrap_green_task,@function .align 16 bootstrap_green_task: mov x0, x19 // arg0 mov x1, x20 // arg1 mov x30, #0 // clear LR ret x21 .size bootstrap_green_task,.-bootstrap_green_task .text .globl swap_registers .type swap_registers,@function .align 16 swap_registers: stp x19, x20, [x0, #0] stp x21, x22, [x0, #16] stp x23, x24, [x0, #32] stp x25, x26, [x0, #48] stp x27, x28, [x0, #64] stp x29, x30, [x0, #80] mov x2, sp str x2, [x0, #96] stp d8, d9, [x0, #112] stp d10, d11, [x0, #128] stp d12, d13, [x0, #144] stp d14, d15, [x0, #160] ldp x19, x20, [x1, #0] ldp x21, x22, [x1, #16] ldp x23, x24, [x1, #32] ldp x25, x26, [x1, #48] ldp x27, x28, [x1, #64] ldp x29, x30, [x1, #80] ldr x2, [x1, #96] mov sp, x2 ldp d8, d9, [x1, #112] ldp d10, d11, [x1, #128] ldp d12, d13, [x1, #144] ldp d14, d15, [x1, #160] br x30 .size swap_registers,.-swap_registers /* Mark that we don't need executable stack. */ .section .note.GNU-stack,"",%progbits
lwx270901/registryintern
1,569
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/riscv64.s
#include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li x10, STACK_DIRECTION_DESCENDING jr x1 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc add x10, x2, x0 jr x1 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */ .cfi_startproc add x2, x12, x0 jr x11 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */ .cfi_startproc sd x1, -8(x13) sd x2, -16(x13) addi x2, x13, -16 .cfi_def_cfa x2, 16 .cfi_offset x1, -8 .cfi_offset x2, -16 jalr x1, x12, 0 ld x1, 8(x2) .cfi_restore x1 ld x2, 0(x2) .cfi_restore x2 jr x1 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,571
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/riscv.s
#include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li x10, STACK_DIRECTION_DESCENDING jr x1 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc add x10, x2, x0 jr x1 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(x10: usize, x11: extern "C" fn(usize), x12: *mut u8) */ .cfi_startproc add x2, x12, x0 jr x11 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(x10: usize, x11: usize, x12: extern "C" fn(usize, usize), x13: *mut u8) */ .cfi_startproc sw x1, -12(x13) sw x2, -16(x13) addi x2, x13, -16 .cfi_def_cfa x2, 16 .cfi_offset x1, -12 .cfi_offset x2, -16 jalr x1, x12, 0 lw x1, 4(x2) .cfi_restore x1 lw x2, 0(x2) .cfi_restore x2 jr x1 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,971
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/aarch_aapcs64.s
#include "psm.h" .text #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define TYPE(fnname) #define FUNCTION(fnname) _##fnname #define END_FUNCTION(fnname) #elif CFG_TARGET_OS_windows #define GLOBL(fnname) .globl fnname #define TYPE(fnname) #define FUNCTION(fnname) fnname #define LABEL_FOR_SIZE(endlabel) #define SIZE(fnname,endlabel) #else #define GLOBL(fnname) .globl fnname #define TYPE(fnname) .type fnname,@function #define FUNCTION(fnname) fnname #define END_FUNCTION(fnname) .size fnname,.-fnname #endif GLOBL(rust_psm_stack_direction) .p2align 2 TYPE(rust_psm_stack_direction) FUNCTION(rust_psm_stack_direction): /* extern "C" fn() -> u8 */ .cfi_startproc orr w0, wzr, #STACK_DIRECTION_DESCENDING ret END_FUNCTION(rust_psm_stack_direction) .cfi_endproc GLOBL(rust_psm_stack_pointer) .p2align 2 TYPE(rust_psm_stack_pointer) FUNCTION(rust_psm_stack_pointer): /* extern "C" fn() -> *mut u8 */ .cfi_startproc mov x0, sp ret END_FUNCTION(rust_psm_stack_pointer) .cfi_endproc GLOBL(rust_psm_replace_stack) .p2align 2 TYPE(rust_psm_replace_stack) FUNCTION(rust_psm_replace_stack): /* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi */ mov sp, x2 br x1 END_FUNCTION(rust_psm_replace_stack) .cfi_endproc GLOBL(rust_psm_on_stack) .p2align 2 TYPE(rust_psm_on_stack) FUNCTION(rust_psm_on_stack): /* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */ .cfi_startproc stp x29, x30, [sp, #-16]! .cfi_def_cfa sp, 16 mov x29, sp .cfi_def_cfa x29, 16 .cfi_offset x29, -16 .cfi_offset x30, -8 mov sp, x3 blr x2 mov sp, x29 .cfi_def_cfa sp, 16 ldp x29, x30, [sp], #16 .cfi_def_cfa sp, 0 .cfi_restore x29 .cfi_restore x30 ret END_FUNCTION(rust_psm_on_stack) .cfi_endproc
lwx270901/registryintern
2,299
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/arm_aapcs.s
#include "psm.h" .text .syntax unified #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define THUMBTYPE(fnname) .thumb_func _##fnname #define FUNCTION(fnname) _##fnname #define THUMBFN .code 16 #define SIZE(fnname,endlabel) #define FNSTART #define CANTUNWIND #define FNEND #else #define GLOBL(fnname) .globl fnname #define THUMBTYPE(fnname) .type fnname,%function #define FUNCTION(fnname) fnname #define THUMBFN .code 16 #define SIZE(fnname,endlabel) .size fnname,endlabel-fnname #define FNSTART .fnstart #define CANTUNWIND .cantunwind #define FNEND .fnend #endif GLOBL(rust_psm_stack_direction) .p2align 2 THUMBTYPE(rust_psm_stack_direction) THUMBFN FUNCTION(rust_psm_stack_direction): /* extern "C" fn() -> u8 */ FNSTART .cfi_startproc /* movs to support Thumb-1 */ movs r0, #STACK_DIRECTION_DESCENDING bx lr .rust_psm_stack_direction_end: SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end) .cfi_endproc CANTUNWIND FNEND GLOBL(rust_psm_stack_pointer) .p2align 2 THUMBTYPE(rust_psm_stack_pointer) THUMBFN FUNCTION(rust_psm_stack_pointer): /* extern "C" fn() -> *mut u8 */ FNSTART .cfi_startproc mov r0, sp bx lr .rust_psm_stack_pointer_end: SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end) .cfi_endproc CANTUNWIND FNEND GLOBL(rust_psm_replace_stack) .p2align 2 THUMBTYPE(rust_psm_replace_stack) THUMBFN FUNCTION(rust_psm_replace_stack): /* extern "C" fn(r0: usize, r1: extern "C" fn(usize), r2: *mut u8) */ FNSTART .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi */ mov sp, r2 bx r1 .rust_psm_replace_stack_end: SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end) .cfi_endproc CANTUNWIND FNEND GLOBL(rust_psm_on_stack) .p2align 2 THUMBTYPE(rust_psm_on_stack) THUMBFN FUNCTION(rust_psm_on_stack): /* extern "C" fn(r0: usize, r1: usize, r2: extern "C" fn(usize, usize), r3: *mut u8) */ FNSTART .cfi_startproc push {r4, lr} .cfi_def_cfa_offset 8 mov r4, sp 
.cfi_def_cfa_register r4 .cfi_offset lr, -4 .cfi_offset r4, -8 mov sp, r3 blx r2 mov sp, r4 .cfi_restore sp pop {r4, pc} .rust_psm_on_stack_end: SIZE(rust_psm_on_stack,.rust_psm_on_stack_end) .cfi_endproc CANTUNWIND FNEND
lwx270901/registryintern
2,557
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/x86.s
#include "psm.h" /* NOTE: fastcall calling convention used on all x86 targets */ .text #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define TYPE(fnname) #define FUNCTION(fnname) _##fnname #define SIZE(fnname,endlabel) #else #define GLOBL(fnname) .globl fnname #define TYPE(fnname) .type fnname,@function #define FUNCTION(fnname) fnname #define SIZE(fnname,endlabel) .size fnname,endlabel-fnname #endif GLOBL(rust_psm_stack_direction) .p2align 4 TYPE(rust_psm_stack_direction) FUNCTION(rust_psm_stack_direction): /* extern "fastcall" fn() -> u8 (%al) */ .cfi_startproc movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64 retl .rust_psm_stack_direction_end: SIZE(rust_psm_stack_direction,.rust_psm_stack_direction_end) .cfi_endproc GLOBL(rust_psm_stack_pointer) .p2align 4 TYPE(rust_psm_stack_pointer) FUNCTION(rust_psm_stack_pointer): /* extern "fastcall" fn() -> *mut u8 (%rax) */ .cfi_startproc leal 4(%esp), %eax retl .rust_psm_stack_pointer_end: SIZE(rust_psm_stack_pointer,.rust_psm_stack_pointer_end) .cfi_endproc GLOBL(rust_psm_replace_stack) .p2align 4 TYPE(rust_psm_replace_stack) FUNCTION(rust_psm_replace_stack): /* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx Note, that the callee expects the stack to be offset by 4 bytes (normally, a return address would be store there) off the required stack alignment on entry. To offset the stack in such a way we use the `calll` instruction, however it would also be possible to to use plain `jmpl` but would require to adjust the stack manually, which cannot be easily done, because the stack pointer argument is already stored in memory. 
*/ movl 4(%esp), %esp calll *%edx ud2 .rust_psm_replace_stack_end: SIZE(rust_psm_replace_stack,.rust_psm_replace_stack_end) .cfi_endproc GLOBL(rust_psm_on_stack) .p2align 4 TYPE(rust_psm_on_stack) FUNCTION(rust_psm_on_stack): /* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */ .cfi_startproc pushl %ebp .cfi_def_cfa %esp, 8 .cfi_offset %ebp, -8 movl %esp, %ebp .cfi_def_cfa_register %ebp movl 12(%ebp), %esp calll *8(%ebp) movl %ebp, %esp popl %ebp .cfi_def_cfa %esp, 4 retl $8 .rust_psm_on_stack_end: SIZE(rust_psm_on_stack,.rust_psm_on_stack_end) .cfi_endproc
lwx270901/registryintern
3,609
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/powerpc64_aix.s
.csect .text[PR],2 .file "powerpc64_aix.s" .globl rust_psm_stack_direction[DS] .globl .rust_psm_stack_direction .align 4 .csect rust_psm_stack_direction[DS],3 .vbyte 8, .rust_psm_stack_direction .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_stack_direction: # extern "C" fn() -> u8 li 3, 2 blr L..rust_psm_stack_direction_end: # Following bytes form the traceback table on AIX. # For specification, see https://www.ibm.com/docs/en/aix/7.2?topic=processor-traceback-tables. # For implementation, see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp, # `PPCAIXAsmPrinter::emitTracebackTable`. .vbyte 4, 0x00000000 # Traceback table begin, for unwinder to search the table. .byte 0x00 # Version = 0 .byte 0x09 # Language = CPlusPlus, since rust is using C++-like LSDA. .byte 0x20 # -IsGlobaLinkage, -IsOutOfLineEpilogOrPrologue # +HasTraceBackTableOffset, -IsInternalProcedure # -HasControlledStorage, -IsTOCless # -IsFloatingPointPresent # -IsFloatingPointOperationLogOrAbortEnabled .byte 0x40 # -IsInterruptHandler, +IsFunctionNamePresent, -IsAllocaUsed # OnConditionDirective = 0, -IsCRSaved, -IsLRSaved .byte 0x80 # +IsBackChainStored, -IsFixup, NumOfFPRsSaved = 0 .byte 0x00 # -HasExtensionTable, -HasVectorInfo, NumOfGPRsSaved = 0 .byte 0x00 # NumberOfFixedParms = 0 .byte 0x01 # NumberOfFPParms = 0, +HasParmsOnStack .vbyte 4, L..rust_psm_stack_direction_end-.rust_psm_stack_direction #Function size .vbyte 2, 0x0018 # Function name len = 24 .byte "rust_psm_stack_direction" # Function Name .globl rust_psm_stack_pointer[DS] .globl .rust_psm_stack_pointer .align 4 .csect rust_psm_stack_pointer[DS],3 .vbyte 8, .rust_psm_stack_pointer .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_stack_pointer: # extern "C" fn() -> *mut u8 mr 3, 1 blr L..rust_psm_stack_pointer_end: .vbyte 4, 0x00000000 .byte 0x00 .byte 0x09 .byte 0x20 .byte 0x40 .byte 0x80 .byte 0x00 .byte 0x00 .byte 0x01 .vbyte 4, 
L..rust_psm_stack_pointer_end-.rust_psm_stack_pointer .vbyte 2, 0x0016 .byte "rust_psm_stack_pointer" .globl rust_psm_replace_stack[DS] .globl .rust_psm_replace_stack .align 4 .csect rust_psm_replace_stack[DS],3 .vbyte 8, .rust_psm_replace_stack .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_replace_stack: # extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) # Load the function pointer and toc pointer from TOC and make the call. ld 2, 8(4) ld 4, 0(4) addi 5, 5, -48 mr 1, 5 mtctr 4 bctr L..rust_psm_replace_stack_end: .vbyte 4, 0x00000000 .byte 0x00 .byte 0x09 .byte 0x20 .byte 0x40 .byte 0x80 .byte 0x00 .byte 0x03 .byte 0x01 .vbyte 4, 0x00000000 # Parameter type = i, i, i .vbyte 4, L..rust_psm_replace_stack_end-.rust_psm_replace_stack .vbyte 2, 0x0016 .byte "rust_psm_replace_stack" .globl rust_psm_on_stack[DS] .globl .rust_psm_on_stack .align 4 .csect rust_psm_on_stack[DS],3 .vbyte 8, .rust_psm_on_stack .vbyte 8, TOC[TC0] .vbyte 8, 0 .csect .text[PR],2 .rust_psm_on_stack: # extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) mflr 0 std 2, -72(6) std 0, -8(6) sub 6, 6, 1 addi 6, 6, -112 stdux 1, 1, 6 ld 2, 8(5) ld 5, 0(5) mtctr 5 bctrl ld 2, 40(1) ld 0, 104(1) mtlr 0 ld 1, 0(1) blr L..rust_psm_on_stack_end: .vbyte 4, 0x00000000 .byte 0x00 .byte 0x09 .byte 0x20 .byte 0x41 .byte 0x80 .byte 0x00 .byte 0x04 .byte 0x01 .vbyte 4, 0x00000000 # Parameter type = i, i, i, i .vbyte 4, L..rust_psm_on_stack_end-.rust_psm_on_stack .vbyte 2, 0x0011 .byte "rust_psm_on_stack" .toc
lwx270901/registryintern
2,045
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/powerpc64_openpower.s
/* Implementation of stack swtiching routines for OpenPOWER 64-bit ELF ABI The specification can be found at http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html This ABI is usually used by the ppc64le targets. */ #include "psm.h" .text .abiversion 2 .globl rust_psm_stack_direction .p2align 4 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li 3, STACK_DIRECTION_DESCENDING blr .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 4 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc mr 3, 1 blr .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 4 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */ .cfi_startproc addi 5, 5, -32 mtctr 4 mr 12, 4 mr 1, 5 bctr .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 4 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */ .cfi_startproc mflr 0 std 0, -8(6) std 2, -24(6) sub 6, 6, 1 addi 6, 6, -48 stdux 1, 1, 6 .cfi_def_cfa r1, 48 .cfi_offset r1, -48 .cfi_offset r2, -24 .cfi_offset lr, -8 mr 12, 5 mtctr 5 bctrl ld 2, 24(1) .cfi_restore r2 ld 0, 40(1) mtlr 0 .cfi_restore lr /* FIXME: after this instructin backtrace breaks until control returns to the caller */ ld 1, 0(1) blr .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,218
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/x86_64_windows_gnu.s
.text .def rust_psm_stack_direction .scl 2 .type 32 .endef .globl rust_psm_stack_direction .p2align 4 rust_psm_stack_direction: /* extern "sysv64" fn() -> u8 (%al) */ .cfi_startproc movb $2, %al # always descending on x86_64 retq .cfi_endproc .def rust_psm_stack_pointer .scl 2 .type 32 .endef .globl rust_psm_stack_pointer .p2align 4 rust_psm_stack_pointer: /* extern "sysv64" fn() -> *mut u8 (%rax) */ .cfi_startproc leaq 8(%rsp), %rax retq .cfi_endproc .def rust_psm_replace_stack .scl 2 .type 32 .endef .globl rust_psm_replace_stack .p2align 4 rust_psm_replace_stack: /* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8, %rcx: *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi. 8-byte offset necessary to account for the "return" pointer that would otherwise be placed onto stack with a regular call */ movq %gs:0x08, %rdx movq %gs:0x10, %rcx leaq -8(%rdx), %rsp jmpq *%rsi .cfi_endproc .def rust_psm_on_stack .scl 2 .type 32 .endef .globl rust_psm_on_stack .p2align 4 rust_psm_on_stack: /* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8, %r8: *mut u8) NB: on Windows for SEH to work at all, the pointers in TIB, thread information block, need to be fixed up. Otherwise, it seems that exception mechanism on Windows will not bother looking for exception handlers at *all* if they happen to fall outside the are specified in TIB. This necessitates an API difference from the usual 4-argument signature used elsewhere. FIXME: this needs a catch-all exception handler that aborts in case somebody unwinds into here. 
*/ .cfi_startproc pushq %rbp .cfi_def_cfa %rsp, 16 .cfi_offset %rbp, -16 pushq %gs:0x08 .cfi_def_cfa %rsp, 24 pushq %gs:0x10 .cfi_def_cfa %rsp, 32 movq %rsp, %rbp .cfi_def_cfa_register %rbp movq %rcx, %gs:0x08 movq %r8, %gs:0x10 movq %rcx, %rsp callq *%rdx movq %rbp, %rsp popq %gs:0x10 .cfi_def_cfa %rsp, 24 popq %gs:0x08 .cfi_def_cfa %rsp, 16 popq %rbp .cfi_def_cfa %rsp, 8 retq .cfi_endproc
lwx270901/registryintern
2,541
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/powerpc64.s
/* Implementation of the AIX-like PowerPC ABI. Seems to be used by the big-endian PowerPC targets. The following references were used during the implementation of this code: https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_rntime_stack.htm https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.alangref/idalangref_reg_use_conv.htm https://www.ibm.com/developerworks/library/l-powasm4/index.html */ #include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li 3, STACK_DIRECTION_DESCENDING blr .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc mr 3, 1 blr .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */ .cfi_startproc ld 2, 8(4) ld 4, 0(4) /* do not allocate the whole 112-byte sized frame, we know wont be used */ addi 5, 5, -48 mr 1, 5 mtctr 4 bctr .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */ .cfi_startproc mflr 0 std 2, -72(6) std 0, -8(6) sub 6, 6, 1 addi 6, 6, -112 stdux 1, 1, 6 .cfi_def_cfa r1, 112 .cfi_offset r1, -112 .cfi_offset r2, -72 .cfi_offset lr, -8 /* load the function pointer from TOC and make the call */ ld 2, 8(5) ld 5, 0(5) mtctr 5 bctrl ld 2, 40(1) .cfi_restore r2 ld 0, 
104(1) mtlr 0 .cfi_restore lr /* FIXME: after this instruction backtrace breaks until control returns to the caller. That being said compiler-generated code has the same issue, so I guess that is fine for now? */ ld 1, 0(1) .cfi_restore r1 blr .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,264
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/x86_windows_gnu.s
/* FIXME: this works locally but not on appveyor??!? */ /* NOTE: fastcall calling convention used on all x86 targets */ .text .def @rust_psm_stack_direction@0 .scl 2 .type 32 .endef .globl @rust_psm_stack_direction@0 .p2align 4 @rust_psm_stack_direction@0: /* extern "fastcall" fn() -> u8 (%al) */ .cfi_startproc movb $2, %al # always descending on x86_64 retl .cfi_endproc .def @rust_psm_stack_pointer@0 .scl 2 .type 32 .endef .globl @rust_psm_stack_pointer@0 .p2align 4 @rust_psm_stack_pointer@0: /* extern "fastcall" fn() -> *mut u8 (%rax) */ .cfi_startproc leal 4(%esp), %eax retl .cfi_endproc .def @rust_psm_replace_stack@16 .scl 2 .type 32 .endef .globl @rust_psm_replace_stack@16 .p2align 4 @rust_psm_replace_stack@16: /* extern "fastcall" fn(%ecx: usize, %edx: extern "fastcall" fn(usize), 4(%esp): *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to 4(%esp) & tail-call the callback in %edx Note, that the callee expects the stack to be offset by 4 bytes (normally, a return address would be store there) off the required stack alignment on entry. To offset the stack in such a way we use the `calll` instruction, however it would also be possible to to use plain `jmpl` but would require to adjust the stack manually, which cannot be easily done, because the stack pointer argument is already stored in memory. 
*/ movl 8(%esp), %eax mov %eax, %fs:0x08 movl 4(%esp), %esp mov %esp, %fs:0x04 calll *%edx ud2 .cfi_endproc .def @rust_psm_on_stack@16 .scl 2 .type 32 .endef .globl @rust_psm_on_stack@16 .p2align 4 @rust_psm_on_stack@16: /* extern "fastcall" fn(%ecx: usize, %edx: usize, 4(%esp): extern "fastcall" fn(usize, usize), 8(%esp): *mut u8) */ .cfi_startproc pushl %ebp .cfi_def_cfa %esp, 8 .cfi_offset %ebp, -8 pushl %fs:0x04 .cfi_def_cfa %esp, 12 pushl %fs:0x08 .cfi_def_cfa %esp, 16 movl %esp, %ebp .cfi_def_cfa_register %ebp movl 24(%ebp), %eax movl %eax, %fs:0x08 movl 20(%ebp), %esp movl %esp, %fs:0x04 calll *16(%ebp) movl %ebp, %esp popl %fs:0x08 .cfi_def_cfa %esp, 12 popl %fs:0x04 .cfi_def_cfa %esp, 8 popl %ebp .cfi_def_cfa %esp, 4 retl $12 .cfi_endproc
lwx270901/registryintern
1,938
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/sparc64.s
#include "psm.h" .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov STACK_DIRECTION_DESCENDING, %o0 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov %o6, %o0 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */ .cfi_startproc .cfi_def_cfa 0, 0 .cfi_return_column 0 jmpl %o1, %g0 /* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */ add %o2, -0x7ff, %o6 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */ .cfi_startproc /* The fact that locals and saved register windows are offset by 2kB is very nasty property of SPARC architecture and ABI. In this case it forces us to slice off 2kB of the stack space outright for no good reason other than adapting to a botched design. */ save %o3, -0x87f, %o6 .cfi_def_cfa_register %fp .cfi_window_save .cfi_register %r15, %r31 mov %i1, %o1 jmpl %i2, %o7 mov %i0, %o0 ret restore .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,144
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/mips64_eabi.s
/* Not only MIPS has 20 different ABIs... nobody tells anybody what specific variant of which ABI is used where. This is an "EABI" implementation based on the following page: http://www.cygwin.com/ml/binutils/2003-06/msg00436.html */ #include "psm.h" .set noreorder /* we’ll manage the delay slots on our own, thanks! */ .text .globl rust_psm_stack_direction .p2align 3 .type rust_psm_stack_direction,@function .ent rust_psm_stack_direction /* extern "C" fn() -> u8 */ rust_psm_stack_direction: .cfi_startproc jr $31 addiu $2, $zero, STACK_DIRECTION_DESCENDING .end rust_psm_stack_direction .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 3 .type rust_psm_stack_pointer,@function .ent rust_psm_stack_pointer /* extern "C" fn() -> *mut u8 */ rust_psm_stack_pointer: .cfi_startproc jr $31 move $2, $29 .end rust_psm_stack_pointer .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 3 .type rust_psm_replace_stack,@function .ent rust_psm_replace_stack /* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */ rust_psm_replace_stack: .cfi_startproc move $25, $5 jr $5 move $29, $6 .end rust_psm_replace_stack .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc .globl rust_psm_on_stack .p2align 3 .type rust_psm_on_stack,@function .ent rust_psm_on_stack /* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */ rust_psm_on_stack: .cfi_startproc sd $29, -8($7) sd $31, -16($7) .cfi_def_cfa 7, 0 .cfi_offset 31, -16 .cfi_offset 29, -8 move $25, $6 jalr $31, $6 daddiu $29, $7, -16 .cfi_def_cfa 29, 16 ld $31, 0($29) .cfi_restore 31 ld $29, 8($29) .cfi_restore 29 jr $31 nop .end rust_psm_on_stack .rust_psm_on_stack_end: .size 
rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
2,094
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/x86_64.s
#include "psm.h" /* NOTE: sysv64 calling convention is used on all x86_64 targets, including Windows! */ .text #if CFG_TARGET_OS_darwin || CFG_TARGET_OS_macos || CFG_TARGET_OS_ios #define GLOBL(fnname) .globl _##fnname #define TYPE(fnname) #define FUNCTION(fnname) _##fnname #define END_FUNCTION(fnname) #else #define GLOBL(fnname) .globl fnname #define TYPE(fnname) .type fnname,@function #define FUNCTION(fnname) fnname #define END_FUNCTION(fnname) .size fnname,.-fnname #endif GLOBL(rust_psm_stack_direction) .p2align 4 TYPE(rust_psm_stack_direction) FUNCTION(rust_psm_stack_direction): /* extern "sysv64" fn() -> u8 (%al) */ .cfi_startproc movb $STACK_DIRECTION_DESCENDING, %al # always descending on x86_64 retq END_FUNCTION(rust_psm_stack_direction) .cfi_endproc GLOBL(rust_psm_stack_pointer) .p2align 4 TYPE(rust_psm_stack_pointer) FUNCTION(rust_psm_stack_pointer): /* extern "sysv64" fn() -> *mut u8 (%rax) */ .cfi_startproc leaq 8(%rsp), %rax retq .rust_psm_stack_pointer_end: END_FUNCTION(rust_psm_stack_pointer) .cfi_endproc GLOBL(rust_psm_replace_stack) .p2align 4 TYPE(rust_psm_replace_stack) FUNCTION(rust_psm_replace_stack): /* extern "sysv64" fn(%rdi: usize, %rsi: extern "sysv64" fn(usize), %rdx: *mut u8) */ .cfi_startproc /* All we gotta do is set the stack pointer to %rdx & tail-call the callback in %rsi. 8-byte offset necessary to account for the "return" pointer that would otherwise be placed onto stack with a regular call */ leaq -8(%rdx), %rsp jmpq *%rsi .rust_psm_replace_stack_end: END_FUNCTION(rust_psm_replace_stack) .cfi_endproc GLOBL(rust_psm_on_stack) .p2align 4 TYPE(rust_psm_on_stack) FUNCTION(rust_psm_on_stack): /* extern "sysv64" fn(%rdi: usize, %rsi: usize, %rdx: extern "sysv64" fn(usize, usize), %rcx: *mut u8) */ .cfi_startproc pushq %rbp .cfi_def_cfa %rsp, 16 .cfi_offset %rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register %rbp movq %rcx, %rsp callq *%rdx movq %rbp, %rsp popq %rbp .cfi_def_cfa %rsp, 8 retq END_FUNCTION(rust_psm_on_stack) .cfi_endproc
lwx270901/registryintern
2,149
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/mips_eabi.s
/* Not only MIPS has 20 different ABIs... nobody tells anybody what specific variant of which ABI is used where. This is an "EABI" implementation based on the following page: http://www.cygwin.com/ml/binutils/2003-06/msg00436.html */ #include "psm.h" .set noreorder /* we’ll manage the delay slots on our own, thanks! */ .text .abicalls .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function .ent rust_psm_stack_direction /* extern "C" fn() -> u8 */ rust_psm_stack_direction: .cfi_startproc jr $31 addiu $2, $zero, STACK_DIRECTION_DESCENDING .end rust_psm_stack_direction .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function .ent rust_psm_stack_pointer /* extern "C" fn() -> *mut u8 */ rust_psm_stack_pointer: .cfi_startproc jr $31 move $2, $29 .end rust_psm_stack_pointer .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function .ent rust_psm_replace_stack /* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */ rust_psm_replace_stack: .cfi_startproc move $25, $5 jr $5 move $29, $6 .end rust_psm_replace_stack .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function .ent rust_psm_on_stack /* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize), r7: *mut u8) */ rust_psm_on_stack: .cfi_startproc sw $29, -4($7) sw $31, -8($7) .cfi_def_cfa 7, 0 .cfi_offset 31, -8 .cfi_offset 29, -4 move $25, $6 jalr $31, $6 addiu $29, $7, -8 .cfi_def_cfa 29, 8 lw $31, 0($29) .cfi_restore 31 lw $29, 4($29) .cfi_restore 29 jr $31 nop .end rust_psm_on_stack .rust_psm_on_stack_end: .size 
rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,568
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/loongarch64.s
#include "psm.h" .text .globl rust_psm_stack_direction .align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc li.w $r4, STACK_DIRECTION_DESCENDING jr $r1 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc move $r4, $r3 jr $r1 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(r4: usize, r5: extern "C" fn(usize), r6: *mut u8) */ .cfi_startproc move $r3, $r6 jr $r5 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(r4: usize, r5: usize, r6: extern "C" fn(usize, usize), r7: *mut u8) */ .cfi_startproc st.d $r1, $r7, -8 st.d $r3, $r7, -16 addi.d $r3, $r7, -16 .cfi_def_cfa 3, 16 .cfi_offset 1, -8 .cfi_offset 3, -16 jirl $r1, $r6, 0 ld.d $r1, $r3, 8 .cfi_restore 1 ld.d $r3, $r3, 0 .cfi_restore 3 jr $r1 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,722
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/sparc_sysv.s
#include "psm.h" /* FIXME: this ABI has definitely not been verified at all */ .text .globl rust_psm_stack_direction .p2align 2 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov STACK_DIRECTION_DESCENDING, %o0 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 2 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc jmpl %o7 + 8, %g0 mov %o6, %o0 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 2 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(%i0: usize, %i1: extern "C" fn(usize), %i2: *mut u8) */ .cfi_startproc .cfi_def_cfa 0, 0 .cfi_return_column 0 jmpl %o1, %g0 /* WEIRD: Why is the LSB set for the %sp and %fp on SPARC?? */ add %o2, -0x3ff, %o6 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 2 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(%i0: usize, %i1: usize, %i2: extern "C" fn(usize, usize), %i3: *mut u8) */ .cfi_startproc save %o3, -0x43f, %o6 .cfi_def_cfa_register %fp .cfi_window_save .cfi_register %r15, %r31 mov %i1, %o1 jmpl %i2, %o7 mov %i0, %o0 ret restore .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,694
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/wasm32.s
#include "psm.h" # Note that this function is not compiled when this package is uploaded to # crates.io, this source is only here as a reference for how the corresponding # wasm32.o was generated. This file can be compiled with: # # cpp psm/src/arch/wasm32.s | llvm-mc -o psm/src/arch/wasm32.o --arch=wasm32 -filetype=obj # # where you'll want to ensure that `llvm-mc` is from a relatively recent # version of LLVM. .globaltype __stack_pointer, i32 .globl rust_psm_stack_direction .type rust_psm_stack_direction,@function rust_psm_stack_direction: .functype rust_psm_stack_direction () -> (i32) i32.const STACK_DIRECTION_DESCENDING end_function .globl rust_psm_stack_pointer .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: .functype rust_psm_stack_pointer () -> (i32) global.get __stack_pointer end_function .globl rust_psm_on_stack .type rust_psm_on_stack,@function rust_psm_on_stack: .functype rust_psm_on_stack (i32, i32, i32, i32) -> () # get our new stack argument, then save the old stack # pointer into that local local.get 3 global.get __stack_pointer local.set 3 global.set __stack_pointer # Call our indirect function specified local.get 0 local.get 1 local.get 2 call_indirect (i32, i32) -> () # restore the stack pointer before returning local.get 3 global.set __stack_pointer end_function .globl rust_psm_replace_stack .type rust_psm_replace_stack,@function rust_psm_replace_stack: .functype rust_psm_replace_stack (i32, i32, i32) -> () local.get 2 global.set __stack_pointer local.get 0 local.get 1 call_indirect (i32) -> () unreachable end_function
lwx270901/registryintern
2,080
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/zseries_linux.s
/* Implementation of stack swtiching routines for zSeries LINUX ABI. This ABI is used by the s390x-unknown-linux-gnu target. Documents used: * LINUX for zSeries: ELF Application Binary Interface Supplement (1st ed., 2001) (LNUX-1107-01) * z/Architecture: Principles of Operation (4th ed., 2004) (SA22-7832-03) */ #include "psm.h" .text .globl rust_psm_stack_direction .p2align 4 .type rust_psm_stack_direction,@function rust_psm_stack_direction: /* extern "C" fn() -> u8 */ .cfi_startproc lghi %r2, STACK_DIRECTION_DESCENDING br %r14 .rust_psm_stack_direction_end: .size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction .cfi_endproc .globl rust_psm_stack_pointer .p2align 4 .type rust_psm_stack_pointer,@function rust_psm_stack_pointer: /* extern "C" fn() -> *mut u8 */ .cfi_startproc la %r2, 0(%r15) br %r14 .rust_psm_stack_pointer_end: .size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer .cfi_endproc .globl rust_psm_replace_stack .p2align 4 .type rust_psm_replace_stack,@function rust_psm_replace_stack: /* extern "C" fn(r2: usize, r3: extern "C" fn(usize), r4: *mut u8) */ .cfi_startproc /* FIXME: backtrace does not terminate cleanly for some reason */ lay %r15, -160(%r4) /* FIXME: this is `basr` instead of `br` purely to remove the backtrace link to the caller */ basr %r14, %r3 .rust_psm_replace_stack_end: .size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack .cfi_endproc .globl rust_psm_on_stack .p2align 4 .type rust_psm_on_stack,@function rust_psm_on_stack: /* extern "C" fn(r2: usize, r3: usize, r4: extern "C" fn(usize, usize), r5: *mut u8) */ .cfi_startproc stmg %r14, %r15, -16(%r5) lay %r15, -176(%r5) .cfi_def_cfa %r15, 176 .cfi_offset %r14, -16 .cfi_offset %r15, -8 basr %r14, %r4 lmg %r14, %r15, 160(%r15) .cfi_restore %r14 .cfi_restore %r15 br %r14 .rust_psm_on_stack_end: .size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack .cfi_endproc
lwx270901/registryintern
1,963
home/minh/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/psm-0.1.23/src/arch/powerpc32.s
#include "psm.h"
/* FIXME: this probably does not cover all ABIs? Tested with sysv only, possibly works for AIX as well? */
/* PowerPC 32-bit SysV ABI notes: r1 is the stack pointer, r3..r10 carry
   integer arguments, lr holds the return address, ctr is used for the
   indirect jumps below. */

.text

.globl rust_psm_stack_direction
.p2align 2
.type rust_psm_stack_direction,@function
rust_psm_stack_direction:
/* extern "C" fn() -> u8 */
/* Stacks grow downwards; return the constant in r3. */
.cfi_startproc
	li 3, STACK_DIRECTION_DESCENDING
	blr
.rust_psm_stack_direction_end:
.size rust_psm_stack_direction,.rust_psm_stack_direction_end-rust_psm_stack_direction
.cfi_endproc

.globl rust_psm_stack_pointer
.p2align 2
.type rust_psm_stack_pointer,@function
rust_psm_stack_pointer:
/* extern "C" fn() -> *mut u8 */
/* Return the current stack pointer (r1) in r3. */
.cfi_startproc
	mr 3, 1
	blr
.rust_psm_stack_pointer_end:
.size rust_psm_stack_pointer,.rust_psm_stack_pointer_end-rust_psm_stack_pointer
.cfi_endproc

.globl rust_psm_replace_stack
.p2align 2
.type rust_psm_replace_stack,@function
rust_psm_replace_stack:
/* extern "C" fn(3: usize, 4: extern "C" fn(usize), 5: *mut u8) */
/* Point r1 at the new stack (16 bytes below its top for the initial frame)
   and tail-jump to the callback in r4 via ctr; argument is already in r3.
   The callback must never return. */
.cfi_startproc
	/* NOTE: perhaps add a debug-assertion for stack alignment? */
	addi 5, 5, -16
	mr 1, 5
	mtctr 4
	bctr
.rust_psm_replace_stack_end:
.size rust_psm_replace_stack,.rust_psm_replace_stack_end-rust_psm_replace_stack
.cfi_endproc

.globl rust_psm_on_stack
.p2align 2
.type rust_psm_on_stack,@function
rust_psm_on_stack:
/* extern "C" fn(3: usize, 4: usize, 5: extern "C" fn(usize, usize), 6: *mut u8) */
/* Run the callback in r5 (args r3/r4) on the stack whose top is r6, then
   restore lr and the caller's r1 (saved via the stwux back-chain) and
   return. */
.cfi_startproc
	mflr 0
	stw 0, -24(6)
	sub 6, 6, 1
	addi 6, 6, -32
	stwux 1, 1, 6
	.cfi_def_cfa r1, 32
	.cfi_offset r1, -32
	.cfi_offset lr, -24
	mtctr 5
	bctrl
	lwz 0, 8(1)
	mtlr 0
	.cfi_restore lr
	/* FIXME: after this instruction backtrace breaks until control returns to the caller
	   That being said compiler-generated code has the same issue, so I guess that is fine for now? */
	lwz 1, 0(1)
	.cfi_restore r1
	blr
.rust_psm_on_stack_end:
.size rust_psm_on_stack,.rust_psm_on_stack_end-rust_psm_on_stack
.cfi_endproc
lythesia/rcore-os
2,790
os/src/trap/trap.S
.altmacro
# Save/load general-purpose register x\n to/from its TrapContext slot.
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .globl __alltraps_k
    .globl __restore_k
    # __alltraps must be 4-byte aligned, as required by the RISC-V
    # privileged specification for trap vector addresses.
    .align 2
# User-mode trap entry: sp/sscratch are swapped so that sp points at the
# TrapContext in user space; save all state there, then switch to the
# kernel address space and jump to trap_handler.
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save general-purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they were saved on kernel stack
    # `csrr rd, csr` reads the CSR into rd: here t0 <- sstatus, t1 <- sepc.
    # t0-t2 are x5-x7, which were already saved above, so they are free here.
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    # Why can't we use `call trap_handler` here?  This .text.trampoline
    # section and trap_handler live in the same segment, so trap_handler's
    # offset from here is a fixed value; `call` is a pseudo-instruction that
    # assembles into a direct pc-relative jump by that offset.  But the
    # virtual address we actually need to jump to lives in the highest page,
    # which is not the address that pc + offset yields.
    # Q: if trap_handler's code is identical either way, isn't the logic the
    #    same no matter where we jump?
    # A: note that the two instructions above `jr` switch the address space;
    #    after the switch, is pc + offset still trap_handler?  Clearly not.
    jr t1
# Return to user mode: a0 points at the user-space TrapContext, a1 is the
# user address-space token.  Switch satp back, restore all state, sret.
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    # restore the CSRs first so that t0-t2 (x5-x7) can be restored correctly
    # afterwards
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general-purpuse registers except sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # restore sp directly: x[2] -> sp, i.e. back to the user stack
    ld sp, 2*8(sp)
    sret
    .align 2
# Kernel-mode (S-mode) trap entry: state is saved on the current kernel
# stack; the handler address was stashed in sscratch beforehand.
__alltraps_k:
    addi sp, sp, -34*8
    sd x1, 1*8(sp)
    sd x3, 3*8(sp)
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    mv a0, sp
    csrr t2, sscratch
    jalr t2
# Kernel-mode trap return: restore CSRs and GPRs from the kernel stack.
__restore_k:
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    addi sp, sp, 34*8
    sret
MabaKalox/srop_task
1,505
main.S
#include <asm/unistd.h>
.intel_syntax noprefix
.global _start
# calling convention here is:
# args in RSI, RDX, result in RAX
# because it is convenient with syscalls
# which registers are preserved is not really defined, I'm lazy
#
# Program: print a greeting, then echo stdin to stdout line by line until a
# line consisting of only a newline is read, then exit(0).
.text
_start:
    mov RSI, offset greeting
    mov RDX, 7
    call write_n
    call main
    # exit(0)
    mov RAX, __NR_exit
    xor RDI, RDI
    syscall

# main: allocate a 16-byte line buffer on the stack and loop over step
# until a line of length <= 1 (just the newline) is read.
main:
    sub RSP, 16              # reserve string buffer
    mov RSI, RSP
main_loop:
    call step
    cmp RAX, 1
    jg main_loop
    add RSP, 16
    ret

# step: read one line into the buffer at RSI and echo it back; returns the
# line length in RAX (preserved across the write).
step:
    call getline             # read line
    mov RDX, RAX
    push RAX                 # preserve RAX, we don't expect write to stdout to fail
    call write_n             # write line
    pop RAX
    ret

write_n:
    # void (char *RSI_in, uint64_t RDX_in_size)
    mov RAX, __NR_write
    mov RDI, 1               # stdout
    syscall
    ret

getline:
    # uint64_t (char *RSI_out), returns RAX = string length
    # Reads one byte per __NR_read syscall until a newline (10) is stored.
    push RSI                 # save old RSI
    xor RDI, RDI             # stdin
    mov RDX, 1               # read 1 byte
    dec RSI                  # will inc
getline_loop:
    mov RAX, __NR_read
    inc RSI
    syscall
    # NOTE(review): AT&T-style suffixed mnemonic under .intel_syntax —
    # presumably accepted by the assembler used here; `cmp byte ptr [RSI], 10`
    # would be the canonical Intel-syntax spelling — confirm before changing.
    cmpb [RSI], 10           # newline
    jne getline_loop
    mov RAX, RSI
    pop RSI
    sub RAX, RSI             # RAX = new_RSI - old_RSI
    inc RAX
    ret

greeting:
    .ascii "Hello!\n"
mag1c1an1/hypercraft
4,928
src/arch/riscv/guest.S
/// Enter the guest given in `VmCpuRegisters` from `a0`
/// Saves the hypervisor's GPRs/CSRs into the structure, loads the guest's
/// state from it, and `sret`s into the guest.  Control returns (via stvec)
/// at `_guest_exit` below, which undoes the swap and `ret`s to the caller
/// of `_run_guest`.  The `{hyp_*}`/`{guest_*}` placeholders are field
/// offsets substituted by the Rust `global_asm!`/format machinery.
.global _run_guest
_run_guest:
    /* Save hypervisor state */

    /* Save hypervisor GPRs (except T0-T6 and a0, which is GuestInfo and
       stashed in sscratch) */
    sd ra, ({hyp_ra})(a0)
    sd gp, ({hyp_gp})(a0)
    sd tp, ({hyp_tp})(a0)
    sd s0, ({hyp_s0})(a0)
    sd s1, ({hyp_s1})(a0)
    sd a1, ({hyp_a1})(a0)
    sd a2, ({hyp_a2})(a0)
    sd a3, ({hyp_a3})(a0)
    sd a4, ({hyp_a4})(a0)
    sd a5, ({hyp_a5})(a0)
    sd a6, ({hyp_a6})(a0)
    sd a7, ({hyp_a7})(a0)
    sd s2, ({hyp_s2})(a0)
    sd s3, ({hyp_s3})(a0)
    sd s4, ({hyp_s4})(a0)
    sd s5, ({hyp_s5})(a0)
    sd s6, ({hyp_s6})(a0)
    sd s7, ({hyp_s7})(a0)
    sd s8, ({hyp_s8})(a0)
    sd s9, ({hyp_s9})(a0)
    sd s10, ({hyp_s10})(a0)
    sd s11, ({hyp_s11})(a0)
    sd sp, ({hyp_sp})(a0)

    /* Swap in guest CSRs.  `csrrw` simultaneously reads the hypervisor's
       value (saved to the struct) and writes the guest's value. */
    ld t1, ({guest_sstatus})(a0)
    csrrw t1, sstatus, t1
    sd t1, ({hyp_sstatus})(a0)

    ld t1, ({guest_hstatus})(a0)
    csrrw t1, hstatus, t1
    sd t1, ({hyp_hstatus})(a0)

    ld t1, ({guest_scounteren})(a0)
    csrrw t1, scounteren, t1
    sd t1, ({hyp_scounteren})(a0)

    ld t1, ({guest_sepc})(a0)
    csrw sepc, t1

    /* Set stvec so that hypervisor resumes after the sret when the guest
       exits. */
    la t1, _guest_exit
    csrrw t1, stvec, t1
    sd t1, ({hyp_stvec})(a0)

    /* Save sscratch and replace with pointer to GuestInfo. */
    csrrw t1, sscratch, a0
    sd t1, ({hyp_sscratch})(a0)

    /* Restore the gprs from this GuestInfo; a0 itself is loaded last since
       it holds the struct pointer. */
    ld ra, ({guest_ra})(a0)
    ld gp, ({guest_gp})(a0)
    ld tp, ({guest_tp})(a0)
    ld s0, ({guest_s0})(a0)
    ld s1, ({guest_s1})(a0)
    ld a1, ({guest_a1})(a0)
    ld a2, ({guest_a2})(a0)
    ld a3, ({guest_a3})(a0)
    ld a4, ({guest_a4})(a0)
    ld a5, ({guest_a5})(a0)
    ld a6, ({guest_a6})(a0)
    ld a7, ({guest_a7})(a0)
    ld s2, ({guest_s2})(a0)
    ld s3, ({guest_s3})(a0)
    ld s4, ({guest_s4})(a0)
    ld s5, ({guest_s5})(a0)
    ld s6, ({guest_s6})(a0)
    ld s7, ({guest_s7})(a0)
    ld s8, ({guest_s8})(a0)
    ld s9, ({guest_s9})(a0)
    ld s10, ({guest_s10})(a0)
    ld s11, ({guest_s11})(a0)
    ld t0, ({guest_t0})(a0)
    ld t1, ({guest_t1})(a0)
    ld t2, ({guest_t2})(a0)
    ld t3, ({guest_t3})(a0)
    ld t4, ({guest_t4})(a0)
    ld t5, ({guest_t5})(a0)
    ld t6, ({guest_t6})(a0)
    ld sp, ({guest_sp})(a0)
    ld a0, ({guest_a0})(a0)

    sret

.align 2
/* Trap target while the guest runs (installed into stvec above): save the
   guest's state back into GuestInfo, then restore hypervisor state and
   return from `_run_guest`. */
_guest_exit:
    /* Pull GuestInfo out of sscratch, swapping with guest's a0 */
    csrrw a0, sscratch, a0

    /* Save guest GPRs. */
    sd ra, ({guest_ra})(a0)
    sd gp, ({guest_gp})(a0)
    sd tp, ({guest_tp})(a0)
    sd s0, ({guest_s0})(a0)
    sd s1, ({guest_s1})(a0)
    sd a1, ({guest_a1})(a0)
    sd a2, ({guest_a2})(a0)
    sd a3, ({guest_a3})(a0)
    sd a4, ({guest_a4})(a0)
    sd a5, ({guest_a5})(a0)
    sd a6, ({guest_a6})(a0)
    sd a7, ({guest_a7})(a0)
    sd s2, ({guest_s2})(a0)
    sd s3, ({guest_s3})(a0)
    sd s4, ({guest_s4})(a0)
    sd s5, ({guest_s5})(a0)
    sd s6, ({guest_s6})(a0)
    sd s7, ({guest_s7})(a0)
    sd s8, ({guest_s8})(a0)
    sd s9, ({guest_s9})(a0)
    sd s10, ({guest_s10})(a0)
    sd s11, ({guest_s11})(a0)
    sd t0, ({guest_t0})(a0)
    sd t1, ({guest_t1})(a0)
    sd t2, ({guest_t2})(a0)
    sd t3, ({guest_t3})(a0)
    sd t4, ({guest_t4})(a0)
    sd t5, ({guest_t5})(a0)
    sd t6, ({guest_t6})(a0)
    sd sp, ({guest_sp})(a0)

    /* Save Guest a0 after recovering from sscratch. */
    csrr t0, sscratch
    sd t0, ({guest_a0})(a0)

_restore_csrs:
    /* Swap in hypervisor CSRs. */
    ld t1, ({hyp_sstatus})(a0)
    csrrw t1, sstatus, t1
    sd t1, ({guest_sstatus})(a0)

    ld t1, ({hyp_hstatus})(a0)
    csrrw t1, hstatus, t1
    sd t1, ({guest_hstatus})(a0)

    ld t1, ({hyp_scounteren})(a0)
    csrrw t1, scounteren, t1
    sd t1, ({guest_scounteren})(a0)

    ld t1, ({hyp_stvec})(a0)
    csrw stvec, t1

    ld t1, ({hyp_sscratch})(a0)
    csrw sscratch, t1

    /* Save guest EPC. */
    csrr t1, sepc
    sd t1, ({guest_sepc})(a0)

    /* Restore hypervisor GPRs. */
    ld ra, ({hyp_ra})(a0)
    ld gp, ({hyp_gp})(a0)
    ld tp, ({hyp_tp})(a0)
    ld s0, ({hyp_s0})(a0)
    ld s1, ({hyp_s1})(a0)
    ld a1, ({hyp_a1})(a0)
    ld a2, ({hyp_a2})(a0)
    ld a3, ({hyp_a3})(a0)
    ld a4, ({hyp_a4})(a0)
    ld a5, ({hyp_a5})(a0)
    ld a6, ({hyp_a6})(a0)
    ld a7, ({hyp_a7})(a0)
    ld s2, ({hyp_s2})(a0)
    ld s3, ({hyp_s3})(a0)
    ld s4, ({hyp_s4})(a0)
    ld s5, ({hyp_s5})(a0)
    ld s6, ({hyp_s6})(a0)
    ld s7, ({hyp_s7})(a0)
    ld s8, ({hyp_s8})(a0)
    ld s9, ({hyp_s9})(a0)
    ld s10, ({hyp_s10})(a0)
    ld s11, ({hyp_s11})(a0)
    ld sp, ({hyp_sp})(a0)

    ret
mag1c1an1/hypercraft
3,397
src/arch/riscv/mem_extable.S
// Copyright (c) 2022 by Rivos Inc.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

// Very unoptimized memcpy() to/from guest memory functions, using the
// HLV/HSV instructions.

// Adds the instruction at 'lbl' to the exception table.
.macro add_extable lbl
.pushsection .extable, "a"
.balign 8
.quad \lbl
.popsection
.endm

.section .text

// memcpy() to a guest physical address using HSV.
// a0 = guest dest, a1 = source, a2 = byte count; returns bytes copied in a0.
.global _copy_to_guest
_copy_to_guest:
    // handle_trap assumes t0 holds the address of where we want to jump to
    // when we encounter a fault and will stick SCAUSE in t1.
    la t0, _ret_from_copy
    // _ret_from_copy assumes the return value is in t2.
    mv t2, zero
1:
    beq t2, a2, _ret_from_copy
    lb t3, (a1)
2:
    hsv.b t3, (a0)
    add_extable 2b
    addi a0, a0, 1
    addi a1, a1, 1
    addi t2, t2, 1
    j 1b

// memcpy() from a guest physical address using HLV.
// a0 = dest, a1 = guest source, a2 = byte count; returns bytes copied in a0.
.global _copy_from_guest
_copy_from_guest:
    // handle_trap assumes t0 holds the address of where we want to jump to
    // when we encounter a fault and will stick SCAUSE in t1.
    la t0, _ret_from_copy
    // _ret_from_copy assumes the return value is in t2.
    mv t2, zero
1:
    beq t2, a2, _ret_from_copy
2:
    hlv.b t3, (a1)
    add_extable 2b
    sb t3, (a0)
    addi a0, a0, 1
    addi a1, a1, 1
    addi t2, t2, 1
    j 1b

// Fetch an instruction from guest memory using HLVX. Only supports 2 or 4
// byte instructions.
//
// Arguments:
//  A0: Guest address of the instruction to fetch, using the translation
//      modes/tables currently programmed in HGATP and VSATP.
//  A1: Pointer to a u32 where the instruction will be written.
//
// Returns -1 on error.
.global _fetch_guest_instruction
_fetch_guest_instruction:
    // handle_trap assumes t0 holds the address of where we want to jump to
    // when we encounter a fault and will stick SCAUSE in t1.
    la t0, 4f
1:
    hlvx.hu t2, (a0)
    add_extable 1b
    sh t2, (a1)
    addi a0, a0, 2
    addi a1, a1, 2
    // If it's a compressed instrution (bits [1:0] != 'b11) then we're done.
    li t3, 3
    and t2, t2, t3
    bne t2, t3, 3f
    // Load the next half-word.
2:
    hlvx.hu t2, (a0)
    add_extable 2b
    sh t2, (a1)
3:
    mv a0, zero
    ret
4:
    // Took a fault, return -1.
    not a0, zero
    ret

// memcpy() to a user address.
// a0 = user dest, a1 = source, a2 = byte count; returns bytes copied in a0.
.global _copy_to_user
_copy_to_user:
    // handle_trap assumes t0 holds the address of where we want to jump to
    // when we encounter a fault and will stick SCAUSE in t1.
    la t0, _ret_from_copy
    // _ret_from_copy assumes the return value is in t2.
    mv t2, zero
1:
    beq t2, a2, _ret_from_copy
    lb t3, (a1)
2:
    sb t3, (a0)
    add_extable 2b
    addi a0, a0, 1
    addi a1, a1, 1
    addi t2, t2, 1
    j 1b

// memcpy() from a user address.
// a0 = dest, a1 = user source, a2 = byte count; returns bytes copied in a0.
.global _copy_from_user
_copy_from_user:
    // handle_trap assumes t0 holds the address of where we want to jump to
    // when we encounter a fault and will stick SCAUSE in t1.
    la t0, _ret_from_copy
    // _ret_from_copy assumes the return value is in t2.
    mv t2, zero
1:
    beq t2, a2, _ret_from_copy
2:
    lb t3, (a1)
    add_extable 2b
    sb t3, (a0)
    addi a0, a0, 1
    addi a1, a1, 1
    addi t2, t2, 1
    j 1b

.align 2
// Common exit: return the number of bytes copied so far (t2) in a0.
_ret_from_copy:
    mv a0, t2
    ret
mag1c1an1/hypercraft
5,858
src/arch/aarch64/memset.S
/* Copyright (c) 2012-2013, Linaro Limited
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
       * Redistributions of source code must retain the above copyright
         notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above copyright
         notice, this list of conditions and the following disclaimer in the
         documentation and/or other materials provided with the distribution.
       * Neither the name of the Linaro nor the
         names of its contributors may be used to endorse or promote products
         derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */

/* Assumptions:
 *
 * ARMv8-a, AArch64
 * Unaligned accesses
 *
 */

/* By default we assume that the DC instruction can be used to zero
   data blocks more efficiently.  In some circumstances this might be
   unsafe, for example in an asymmetric multiprocessor environment with
   different DC clear lengths (neither the upper nor lower lengths are
   safe to use).  The feature can be disabled by defining DONT_USE_DC.

   If code may be run in a virtualized environment, then define
   MAYBE_VIRT.  This will cause the code to cache the system register
   values rather than re-reading them each call. */

/* Register roles: x0/x8 = dst (x8 preserves the return value),
   w1 = fill byte, x2 = count, x7 = fill byte replicated to 64 bits. */

.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm

def_fn memset p2align=6

	mov	x8, x0		/* Preserve return value.  */
	/* NOTE(review): `ands` sets flags but no branch to .Lzero_mem follows;
	   the DC-ZVA zeroing path below appears unreachable in this copy —
	   presumably a `b.eq .Lzero_mem` was dropped when vendoring.  Confirm
	   against the upstream Linaro/cortex-strings source before changing. */
	ands	w7, w1, #255
	orr	w7, w7, w7, lsl #8
	orr	w7, w7, w7, lsl #16
	orr	x7, x7, x7, lsl #32

.Ltail_maybe_long:
	cmp	x2, #64
	b.ge	.Lnot_short
.Ltail_maybe_tiny:
	cmp	x2, #15
	b.le	.Ltail15tiny
.Ltail63:
	ands	x3, x2, #0x30
	b.eq	.Ltail15
	add	x8, x8, x3
	cmp	w3, #0x20
	b.eq	1f
	b.lt	2f
	stp	x7, x7, [x8, #-48]
1:
	stp	x7, x7, [x8, #-32]
2:
	stp	x7, x7, [x8, #-16]

.Ltail15:
	and	x2, x2, #15
	add	x8, x8, x2
	stp	x7, x7, [x8, #-16]	/* Repeat some/all of last store.  */
	ret

.Ltail15tiny:
	/* Set up to 15 bytes.  Does not assume earlier memory
	   being set.  */
	tbz	x2, #3, 1f
	str	x7, [x8], #8
1:
	tbz	x2, #2, 1f
	str	w7, [x8], #4
1:
	tbz	x2, #1, 1f
	strh	w7, [x8], #2
1:
	tbz	x2, #0, 1f
	strb	w7, [x8]
1:
	ret

	/* Critical loop.  Start at a new cache line boundary.  Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line.  */
	.p2align 6
.Lnot_short:
	neg	x4, x8
	ands	x4, x4, #15
	b.eq	2f
	/* Bring x8 to 128-bit (16-byte) alignment.  We know that there's
	 * more than that to set, so we simply store 16 bytes and advance by
	 * the amount required to reach alignment.  */
	sub	x2, x2, x4
	stp	x7, x7, [x8]
	add	x8, x8, x4
	/* There may be less than 63 bytes to go now.  */
	cmp	x2, #63
	b.le	.Ltail63
2:
	sub	x8, x8, #16		/* Pre-bias.  */
	sub	x2, x2, #64
1:
	stp	x7, x7, [x8, #16]
	stp	x7, x7, [x8, #32]
	stp	x7, x7, [x8, #48]
	stp	x7, x7, [x8, #64]!
	subs	x2, x2, #64
	b.ge	1b
	tst	x2, #0x3f
	add	x8, x8, #16
	b.ne	.Ltail63
	ret

#ifndef DONT_USE_DC
	/* For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines.  */
.Lzero_mem:
	mov	x7, #0
	cmp	x2, #63
	b.le	.Ltail_maybe_tiny
	neg	x4, x8
	ands	x4, x4, #15
	b.eq	1f
	sub	x2, x2, x4
	stp	x7, x7, [x8]
	add	x8, x8, x4
	cmp	x2, #63
	b.le	.Ltail63
1:
	/* For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code.  */
	cmp	x2, #128
	b.lt	.Lnot_short
#ifdef MAYBE_VIRT
	/* For efficiency when virtualized, we cache the ZVA capability.  */
	adrp	x4, .Lcache_clear
	ldr	w5, [x4, #:lo12:.Lcache_clear]
	tbnz	w5, #31, .Lnot_short
	cbnz	w5, .Lzero_by_line
	mrs	x3, dczid_el0
	tbz	x3, #4, 1f
	/* ZVA not available.  Remember this for next time.  */
	mov	w5, #~0
	str	w5, [x4, #:lo12:.Lcache_clear]
	b	.Lnot_short
1:
	mov	w9, #4
	and	w5, w3, #15	/* Safety: other bits reserved.  */
	lsl	w5, w9, w5
	str	w5, [x4, #:lo12:.Lcache_clear]
#else
	mrs	x3, dczid_el0
	tbnz	x3, #4, .Lnot_short
	mov	w9, #4
	and	w5, w3, #15	/* Safety: other bits reserved.  */
	lsl	w5, w9, w5
#endif

.Lzero_by_line:
	/* Compute how far we need to go to become suitably aligned.  We're
	 * already at quad-word alignment.  */
	cmp	x2, x5
	b.lt	.Lnot_short		/* Not enough to reach alignment.  */
	sub	x6, x5, #1
	neg	x4, x8
	ands	x4, x4, x6
	b.eq	1f			/* Already aligned.  */
	/* Not aligned, check that there's enough to copy after alignment.  */
	sub	x3, x2, x4
	cmp	x3, #64
	ccmp	x3, x5, #8, ge	/* NZCV=0b1000 */
	b.lt	.Lnot_short
	/* We know that there's at least 64 bytes to zero and that it's safe
	 * to overrun by 64 bytes.  */
	mov	x2, x3
2:
	stp	x7, x7, [x8]
	stp	x7, x7, [x8, #16]
	stp	x7, x7, [x8, #32]
	subs	x4, x4, #64
	stp	x7, x7, [x8, #48]
	add	x8, x8, #64
	b.ge	2b
	/* We've overrun a bit, so adjust x8 downwards.  */
	add	x8, x8, x4
1:
	sub	x2, x2, x5
3:
	dc	zva, x8
	add	x8, x8, x5
	subs	x2, x2, x5
	b.ge	3b
	ands	x2, x2, x6
	b.ne	.Ltail_maybe_long
	ret
	.size	memset, .-memset
#ifdef MAYBE_VIRT
	.bss
	.p2align 2
.Lcache_clear:
	.space 4
#endif
#endif /* DONT_USE_DC */
mag1c1an1/hypercraft
3,611
src/arch/aarch64/memcpy.S
/*
 * memcpy - copy memory area
 *
 * Copyright (c) 2012-2020, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses.
 *
 */

/* This implementation of memcpy uses unaligned accesses and branchless
   sequences to keep the code small, simple and improve performance.

   Copies are split into 3 main cases: small copies of up to 32 bytes,
   medium copies of up to 128 bytes, and large copies.  The overhead of the
   overlap check is negligible since it is only required for large copies.

   Large copies use a software pipelined loop processing 64 bytes per
   iteration.  The destination pointer is 16-byte aligned to minimize
   unaligned accesses.  The loop tail is handled by always copying 64 bytes
   from the end.

   Register roles: x0 = dst, x1 = src, x2 = count,
   x4 = src end (srcend), x5 = dst end (dstend). */

.text
.global memcpy
// .type memcpy,%function
memcpy:
	add	x4, x1, x2
	add	x5, x0, x2
	cmp	x2, 128
	b.hi	.Lcopy_long
	cmp	x2, 32
	b.hi	.Lcopy32_128

	/* Small copies: 0..32 bytes.  */
	cmp	x2, 16
	b.lo	.Lcopy16
	ldp	x6, x7, [x1]
	ldp	x12, x13, [x4, -16]
	stp	x6, x7, [x0]
	stp	x12, x13, [x5, -16]
	ret

	/* Copy 8-15 bytes.  */
.Lcopy16:
	tbz	x2, 3, .Lcopy8
	ldr	x6, [x1]
	ldr	x7, [x4, -8]
	str	x6, [x0]
	str	x7, [x5, -8]
	ret

	.p2align 3
	/* Copy 4-7 bytes.  */
.Lcopy8:
	tbz	x2, 2, .Lcopy4
	ldr	w6, [x1]
	ldr	w8, [x4, -4]
	str	w6, [x0]
	str	w8, [x5, -4]
	ret

	/* Copy 0..3 bytes using a branchless sequence.  */
.Lcopy4:
	cbz	x2, .Lcopy0
	lsr	x14, x2, 1
	ldrb	w6, [x1]
	ldrb	w10, [x4, -1]
	ldrb	w8, [x1, x14]
	strb	w6, [x0]
	strb	w8, [x0, x14]
	strb	w10, [x5, -1]
.Lcopy0:
	ret

	.p2align 4
	/* Medium copies: 33..128 bytes.  */
.Lcopy32_128:
	ldp	x6, x7, [x1]
	ldp	x8, x9, [x1, 16]
	ldp	x10, x11, [x4, -32]
	ldp	x12, x13, [x4, -16]
	cmp	x2, 64
	b.hi	.Lcopy128
	stp	x6, x7, [x0]
	stp	x8, x9, [x0, 16]
	stp	x10, x11, [x5, -32]
	stp	x12, x13, [x5, -16]
	ret

	.p2align 4
	/* Copy 65..128 bytes.  */
.Lcopy128:
	ldp	x14, x15, [x1, 32]
	ldp	x16, x17, [x1, 48]
	cmp	x2, 96
	b.ls	.Lcopy96
	ldp	x2, x3, [x4, -64]
	ldp	x1, x4, [x4, -48]
	stp	x2, x3, [x5, -64]
	stp	x1, x4, [x5, -48]
.Lcopy96:
	stp	x6, x7, [x0]
	stp	x8, x9, [x0, 16]
	stp	x14, x15, [x0, 32]
	stp	x16, x17, [x0, 48]
	stp	x10, x11, [x5, -32]
	stp	x12, x13, [x5, -16]
	ret

	.p2align 4
	/* Copy more than 128 bytes.  */
.Lcopy_long:
	/* Copy 16 bytes and then align x3 to 16-byte alignment.  */
	ldp	x12, x13, [x1]
	and	x14, x0, 15
	bic	x3, x0, 15
	sub	x1, x1, x14
	add	x2, x2, x14	/* x2 is now 16 too large.  */
	ldp	x6, x7, [x1, 16]
	stp	x12, x13, [x0]
	ldp	x8, x9, [x1, 32]
	ldp	x10, x11, [x1, 48]
	ldp	x12, x13, [x1, 64]!
	subs	x2, x2, 128 + 16	/* Test and readjust x2.  */
	b.ls	.Lcopy64_from_end
.Lloop64:
	stp	x6, x7, [x3, 16]
	ldp	x6, x7, [x1, 16]
	stp	x8, x9, [x3, 32]
	ldp	x8, x9, [x1, 32]
	stp	x10, x11, [x3, 48]
	ldp	x10, x11, [x1, 48]
	stp	x12, x13, [x3, 64]!
	ldp	x12, x13, [x1, 64]!
	subs	x2, x2, 64
	b.hi	.Lloop64

	/* Write the last iteration and copy 64 bytes from the end.  */
.Lcopy64_from_end:
	ldp	x14, x15, [x4, -64]
	stp	x6, x7, [x3, 16]
	ldp	x6, x7, [x4, -48]
	stp	x8, x9, [x3, 32]
	ldp	x8, x9, [x4, -32]
	stp	x10, x11, [x3, 48]
	ldp	x10, x11, [x4, -16]
	stp	x12, x13, [x3, 64]
	stp	x14, x15, [x5, -64]
	stp	x6, x7, [x5, -48]
	stp	x8, x9, [x5, -32]
	stp	x10, x11, [x5, -16]
	ret

	.size	memcpy,.-memcpy
mag1c1an1/arceos
1,376
modules/axhal/linker.lds.S
/* Kernel linker script template; %ARCH%, %KERNEL_BASE% and %SMP% are
   substituted by the build script before linking. */
OUTPUT_ARCH(%ARCH%)

BASE_ADDRESS = %KERNEL_BASE%;

ENTRY(_start)
SECTIONS
{
    . = BASE_ADDRESS;
    skernel = .;

    /* Code: boot code first so _start sits at the image start. */
    .text : ALIGN(4K) {
        stext = .;
        *(.text.boot)
        *(.text .text.*)
        . = ALIGN(4K);
        etext = .;
    }

    .rodata : ALIGN(4K) {
        srodata = .;
        *(.rodata .rodata.*)
        *(.srodata .srodata.*)
        *(.sdata2 .sdata2.*)
        . = ALIGN(4K);
        erodata = .;
    }

    .data : ALIGN(4K) {
        sdata = .;
        /* Boot page table must stay page-aligned at the start of .data. */
        *(.data.boot_page_table)
        . = ALIGN(4K);
        *(.data .data.*)
        *(.sdata .sdata.*)
        *(.got .got.*)
        . = ALIGN(4K);
        edata = .;
    }

    /* Per-CPU data: one template copy in the image, replicated %SMP%
       times in the address space (link address 0x0, load at percpu_start). */
    percpu_start = .;
    .percpu 0x0 : AT(percpu_start) ALIGN(4K) {
        __percpu_offset_start = .;
        *(.percpu .percpu.*)
        __percpu_offset_end = .;
        . = ALIGN(4K);
        __percpu_size_aligned = .;

        . = __percpu_offset_start + __percpu_size_aligned * %SMP%;
    }
    . = percpu_start + SIZEOF(.percpu);
    percpu_end = .;

    .bss : ALIGN(4K) {
        /* Boot stack lives at the bottom of .bss. */
        boot_stack = .;
        *(.bss.stack)
        . = ALIGN(4K);
        boot_stack_top = .;

        sbss = .;
        *(.bss .bss.*)
        *(.sbss .sbss.*)
        *(COMMON)
        . = ALIGN(4K);
        ebss = .;
    }

    ekernel = .;

    /DISCARD/ : {
        *(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
    }
}
mag1c1an1/arceos
4,307
modules/axhal/src/platform/pc_x86/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
#
# Intel-syntax GAS; `{...}` placeholders are substituted by the Rust build.
# Flow: _start (32-bit, entered by the bootloader) -> bsp_entry32 ->
# long mode -> bsp_entry64 -> rust_entry.  APs enter via ap_entry32.

.section .text.boot
.code32
.global _start
_start:
    mov     edi, eax        # arg1: magic: 0x2BADB002
    mov     esi, ebx        # arg2: multiboot info
    jmp     bsp_entry32

.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
    .int    {mb_hdr_magic}                      # magic: 0x1BADB002
    .int    {mb_hdr_flags}                      # flags
    .int    -({mb_hdr_magic} + {mb_hdr_flags})  # checksum
    .int    multiboot_header - {offset}         # header_addr
    .int    skernel - {offset}                  # load_addr
    .int    edata - {offset}                    # load_end
    .int    ebss - {offset}                     # bss_end_addr
    .int    _start - {offset}                   # entry_addr

# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
    # set data segment selectors
    mov     ax, 0x18
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax

    # set PAE, PGE bit in CR4
    mov     eax, {cr4}
    mov     cr4, eax

    # load the temporary page table
    lea     eax, [.Ltmp_pml4 - {offset}]
    mov     cr3, eax

    # set LME, NXE bit in IA32_EFER
    mov     ecx, {efer_msr}
    mov     edx, 0
    mov     eax, {efer}
    wrmsr

    # set protected mode, write protect, paging bit in CR0
    mov     eax, {cr0}
    mov     cr0, eax
.endm

# Common code in 64-bit
.macro ENTRY64_COMMON
    # clear segment selectors
    xor     ax, ax
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
.endm

.code32
bsp_entry32:
    lgdt    [.Ltmp_gdt_desc - {offset}]         # load the temporary GDT
    ENTRY32_COMMON
    ljmp    0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment

.code32
.global ap_entry32
ap_entry32:
    ENTRY32_COMMON
    ljmp    0x10, offset ap_entry64 - {offset}  # 0x10 is code64 segment

.code64
bsp_entry64:
    ENTRY64_COMMON

    # set RSP to boot stack
    movabs  rsp, offset {boot_stack}
    add     rsp, {boot_stack_size}

    # call rust_entry(magic, mbi)
    movabs  rax, offset {entry}
    call    rax
    jmp     .Lhlt

.code64
ap_entry64:
    ENTRY64_COMMON

    # set RSP to high address (already set in ap_start.S)
    mov     rax, {offset}
    add     rsp, rax

    # call rust_entry_secondary(magic)
    mov     rdi, {mb_magic}
    movabs  rax, offset {entry_secondary}
    call    rax
    jmp     .Lhlt

.Lhlt:
    hlt
    jmp     .Lhlt

.section .rodata
.balign 8
.Ltmp_gdt_desc:
    .short  .Ltmp_gdt_end - .Ltmp_gdt - 1       # limit
    .long   .Ltmp_gdt - {offset}                # base

.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:

.balign 4096
.Ltmp_pml4:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad .Ltmp_pdpt_low - {offset} + 0x3       # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 510
    # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
    .quad .Ltmp_pdpt_high - {offset} + 0x3      # PRESENT | WRITABLE | paddr(tmp_pdpt)

# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508

.Ltmp_pdpt_high:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
mag1c1an1/arceos
1,965
modules/axhal/src/platform/pc_x86/ap_start.S
# Boot application processors into the protected mode.

# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU.  Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.

# Physical addresses of labels inside this relocated startup page;
# {start_page_paddr} is substituted by the build.
.equ pa_ap_start32,     ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt,         .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc,    .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
# The BSP writes the AP's stack pointer and 32-bit entry point into the
# last 16 bytes of the startup page before sending the STARTUP IPI.
.equ stack_ptr,         {start_page_paddr} + 0xff0
.equ entry_ptr,         {start_page_paddr} + 0xff8

# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
    cli
    wbinvd

    # zero the real-mode segment registers
    xor     ax, ax
    mov     ds, ax
    mov     es, ax
    mov     ss, ax
    mov     fs, ax
    mov     gs, ax

    # load the 64-bit GDT
    lgdt    [pa_ap_gdt_desc]

    # switch to protected-mode
    mov     eax, cr0
    or      eax, (1 << 0)
    mov     cr0, eax

    # far jump to 32-bit code. 0x8 is code32 segment selector
    ljmp    0x8, offset pa_ap_start32

.code32
ap_start32:
    # pick up the stack and entry point published by the BSP
    mov     esp, [stack_ptr]
    mov     eax, [entry_ptr]
    jmp     eax

.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
    .short  .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1     # limit
    .long   pa_ap_gdt                               # base

.balign 16
.Lap_tmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:

# 0x7000
.p2align 12
.global ap_end
ap_end:
mag1c1an1/arceos
1,672
modules/axhal/src/arch/riscv/trap.S
# RISC-V S-mode trap entry/exit.  Trap frames live on the (supervisor)
# stack; {trapframe_size} and the STR/LDR/PUSH_GENERAL_REGS/
# POP_GENERAL_REGS macros are provided by the Rust side.

# Save all trap state into a freshly allocated TrapFrame on sp.
# from_user == 1 additionally swaps user/supervisor gp and tp.
.macro SAVE_REGS, from_user
    addi    sp, sp, -{trapframe_size}
    PUSH_GENERAL_REGS

    csrr    t0, sepc
    csrr    t1, sstatus
    csrrw   t2, sscratch, zero          // save sscratch (sp) and zero it
    STR     t0, sp, 31                  // tf.sepc
    STR     t1, sp, 32                  // tf.sstatus
    STR     t2, sp, 1                   // tf.regs.sp

.if \from_user == 1
    LDR     t0, sp, 3                   // load supervisor tp
    STR     gp, sp, 2                   // save user gp and tp
    STR     tp, sp, 3
    mv      tp, t0
.endif
.endm

# Restore trap state from the TrapFrame at sp and pop it.
.macro RESTORE_REGS, from_user
.if \from_user == 1
    LDR     gp, sp, 2                   // load user gp and tp
    LDR     t0, sp, 3
    STR     tp, sp, 3                   // save supervisor tp
    mv      tp, t0
    addi    t0, sp, {trapframe_size}    // put supervisor sp to scratch
    csrw    sscratch, t0
.endif

    LDR     t0, sp, 31
    LDR     t1, sp, 32
    csrw    sepc, t0
    csrw    sstatus, t1

    POP_GENERAL_REGS
    LDR     sp, sp, 1                   // load sp from tf.regs.sp
.endm

.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
    // sscratch == 0: trap from S mode
    // sscratch != 0: trap from U mode
    csrrw   sp, sscratch, sp            // switch sscratch and sp
    bnez    sp, .Ltrap_entry_u

    csrr    sp, sscratch                // put supervisor sp back
    j       .Ltrap_entry_s

.Ltrap_entry_s:
    SAVE_REGS 0
    mv      a0, sp
    li      a1, 0
    call    riscv_trap_handler
    RESTORE_REGS 0
    sret

.Ltrap_entry_u:
    SAVE_REGS 1
    mv      a0, sp
    li      a1, 1
    call    riscv_trap_handler
    RESTORE_REGS 1
    sret
mag1c1an1/arceos
1,505
modules/axhal/src/arch/x86_64/trap.S
# x86_64 interrupt/exception entry stubs: one tiny handler per vector that
# normalizes the stack layout (error code + vector) and falls into a common
# path, plus a table of their addresses for IDT setup.
.equ NUM_INT, 256

.altmacro
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
    # error code pushed by CPU
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.else
    push    0           # fill in error code in TrapFrame
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.endif
.endm

.macro DEF_TABLE_ENTRY, i
    .quad .Ltrap_handler_\i
.endm

.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
    DEF_HANDLER %i
    .set i, i + 1
.endr

# Common trap path: build a TrapFrame of all GPRs on the stack, call the
# Rust handler with a pointer to it, then unwind and iretq.
.Ltrap_common:
    test    byte ptr [rsp + 3 * 8], 3   # swap GS if it comes from user space
    jz      1f
    swapgs
1:
    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax

    mov     rdi, rsp
    call    x86_trap_handler

    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15

    test    byte ptr [rsp + 3 * 8], 3   # swap GS back if return to user space
    jz      2f
    swapgs
2:
    add     rsp, 16                     # pop vector, error_code
    iretq

.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
    DEF_TABLE_ENTRY %i
    .set i, i + 1
.endr
mag1c1an1/arceos
2,585
modules/axhal/src/arch/aarch64/trap_el2.S
// AArch64 EL2 exception vectors.  Each vector entry is 128 bytes
// (.p2align 7); the table itself is 2KB-aligned (.p2align 11).  A trap
// frame of 34 * 8 bytes (x0-x30, sp, elr_el2, spsr_el2) is built on the
// stack and passed to the Rust handlers in x0.

.macro SAVE_REGS_EL2
    sub     sp, sp, 34 * 8
    stp     x0, x1, [sp]
    stp     x2, x3, [sp, 2 * 8]
    stp     x4, x5, [sp, 4 * 8]
    stp     x6, x7, [sp, 6 * 8]
    stp     x8, x9, [sp, 8 * 8]
    stp     x10, x11, [sp, 10 * 8]
    stp     x12, x13, [sp, 12 * 8]
    stp     x14, x15, [sp, 14 * 8]
    stp     x16, x17, [sp, 16 * 8]
    stp     x18, x19, [sp, 18 * 8]
    stp     x20, x21, [sp, 20 * 8]
    stp     x22, x23, [sp, 22 * 8]
    stp     x24, x25, [sp, 24 * 8]
    stp     x26, x27, [sp, 26 * 8]
    stp     x28, x29, [sp, 28 * 8]

    // store x30 plus the pre-trap stack pointer (sp + 0x110, i.e. the value
    // before the 34*8-byte frame was carved out)
    mov     x1, sp
    add     x1, x1, #(0x110)
    stp     x30, x1, [sp, 30 * 8]

    mrs     x10, elr_el2
    mrs     x11, spsr_el2
    stp     x10, x11, [sp, 32 * 8]
.endm

.macro RESTORE_REGS_EL2
    ldp     x10, x11, [sp, 32 * 8]
    msr     elr_el2, x10
    msr     spsr_el2, x11

    ldr     x30, [sp, 30 * 8]
    ldp     x28, x29, [sp, 28 * 8]
    ldp     x26, x27, [sp, 26 * 8]
    ldp     x24, x25, [sp, 24 * 8]
    ldp     x22, x23, [sp, 22 * 8]
    ldp     x20, x21, [sp, 20 * 8]
    ldp     x18, x19, [sp, 18 * 8]
    ldp     x16, x17, [sp, 16 * 8]
    ldp     x14, x15, [sp, 14 * 8]
    ldp     x12, x13, [sp, 12 * 8]
    ldp     x10, x11, [sp, 10 * 8]
    ldp     x8, x9, [sp, 8 * 8]
    ldp     x6, x7, [sp, 6 * 8]
    ldp     x4, x5, [sp, 4 * 8]
    ldp     x2, x3, [sp, 2 * 8]
    ldp     x0, x1, [sp]
    add     sp, sp, 34 * 8
.endm

// Vector entry for exception classes we do not handle: report and return.
.macro INVALID_EXCP_EL2, kind, source
.p2align 7
    SAVE_REGS_EL2
    mov     x0, sp
    mov     x1, \kind
    mov     x2, \source
    bl      invalid_exception_el2
    b       .Lexception_return_el2
.endm

.macro HANDLE_IRQ_EL2
.p2align 7
    SAVE_REGS_EL2
    mov     x0, sp
    bl      handle_irq_exception_el2
    b       .Lexception_return_el2
.endm

// Synchronous exception from a lower EL running AArch64 (i.e. the guest).
.macro HANDLE_LOWER_SYNC
.p2align 7
    SAVE_REGS_EL2
    mov     x0, sp
    bl      lower_aarch64_synchronous
    b       .Lexception_return_el2
.endm

.section .text
# .section .el2code
.p2align 11
.global exception_vector_base_el2
exception_vector_base_el2:
    // current EL, with SP_EL0
    INVALID_EXCP_EL2 0 0
    INVALID_EXCP_EL2 1 0
    INVALID_EXCP_EL2 2 0
    INVALID_EXCP_EL2 3 0

    // current EL, with SP_ELx
    INVALID_EXCP_EL2 1 1
    HANDLE_IRQ_EL2
    INVALID_EXCP_EL2 2 1
    INVALID_EXCP_EL2 3 1

    // lower EL, aarch64
    HANDLE_LOWER_SYNC
    HANDLE_IRQ_EL2
    INVALID_EXCP_EL2 2 2
    INVALID_EXCP_EL2 3 2

    // lower EL, aarch32
    INVALID_EXCP_EL2 0 3
    INVALID_EXCP_EL2 1 3
    INVALID_EXCP_EL2 2 3
    INVALID_EXCP_EL2 3 3

.Lexception_return_el2:
    RESTORE_REGS_EL2
    eret
mag1c1an1/arceos
2,416
modules/axhal/src/arch/aarch64/trap.S
// AArch64 EL1 exception vectors.  Each vector entry is 128 bytes
// (.p2align 7); the table is 2KB-aligned (.p2align 11).  A trap frame of
// 34 * 8 bytes (x0-x30, sp_el0, elr_el1, spsr_el1) is built on the stack
// and passed to the Rust handlers in x0.

.macro SAVE_REGS
    sub     sp, sp, 34 * 8
    stp     x0, x1, [sp]
    stp     x2, x3, [sp, 2 * 8]
    stp     x4, x5, [sp, 4 * 8]
    stp     x6, x7, [sp, 6 * 8]
    stp     x8, x9, [sp, 8 * 8]
    stp     x10, x11, [sp, 10 * 8]
    stp     x12, x13, [sp, 12 * 8]
    stp     x14, x15, [sp, 14 * 8]
    stp     x16, x17, [sp, 16 * 8]
    stp     x18, x19, [sp, 18 * 8]
    stp     x20, x21, [sp, 20 * 8]
    stp     x22, x23, [sp, 22 * 8]
    stp     x24, x25, [sp, 24 * 8]
    stp     x26, x27, [sp, 26 * 8]
    stp     x28, x29, [sp, 28 * 8]

    mrs     x9, sp_el0
    mrs     x10, elr_el1
    mrs     x11, spsr_el1
    stp     x30, x9, [sp, 30 * 8]
    stp     x10, x11, [sp, 32 * 8]
.endm

.macro RESTORE_REGS
    ldp     x10, x11, [sp, 32 * 8]
    ldp     x30, x9, [sp, 30 * 8]
    msr     sp_el0, x9
    msr     elr_el1, x10
    msr     spsr_el1, x11

    ldp     x28, x29, [sp, 28 * 8]
    ldp     x26, x27, [sp, 26 * 8]
    ldp     x24, x25, [sp, 24 * 8]
    ldp     x22, x23, [sp, 22 * 8]
    ldp     x20, x21, [sp, 20 * 8]
    ldp     x18, x19, [sp, 18 * 8]
    ldp     x16, x17, [sp, 16 * 8]
    ldp     x14, x15, [sp, 14 * 8]
    ldp     x12, x13, [sp, 12 * 8]
    ldp     x10, x11, [sp, 10 * 8]
    ldp     x8, x9, [sp, 8 * 8]
    ldp     x6, x7, [sp, 6 * 8]
    ldp     x4, x5, [sp, 4 * 8]
    ldp     x2, x3, [sp, 2 * 8]
    ldp     x0, x1, [sp]
    add     sp, sp, 34 * 8
.endm

// Vector entry for exception classes we do not handle: report and return.
.macro INVALID_EXCP, kind, source
.p2align 7
    SAVE_REGS
    mov     x0, sp
    mov     x1, \kind
    mov     x2, \source
    bl      invalid_exception
    b       .Lexception_return
.endm

.macro HANDLE_SYNC
.p2align 7
    SAVE_REGS
    mov     x0, sp
    bl      handle_sync_exception
    b       .Lexception_return
.endm

.macro HANDLE_IRQ
.p2align 7
    SAVE_REGS
    mov     x0, sp
    bl      handle_irq_exception
    b       .Lexception_return
.endm

.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
    // current EL, with SP_EL0
    INVALID_EXCP 0 0
    INVALID_EXCP 1 0
    INVALID_EXCP 2 0
    INVALID_EXCP 3 0

    // current EL, with SP_ELx
    HANDLE_SYNC
    HANDLE_IRQ
    INVALID_EXCP 2 1
    INVALID_EXCP 3 1

    // lower EL, aarch64
    HANDLE_SYNC
    HANDLE_IRQ
    INVALID_EXCP 2 2
    INVALID_EXCP 3 2

    // lower EL, aarch32
    INVALID_EXCP 0 3
    INVALID_EXCP 1 3
    INVALID_EXCP 2 3
    INVALID_EXCP 3 3

.Lexception_return:
    RESTORE_REGS
    eret