repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
chyyuu/arceos-for-starry | 2,747 | modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
debug_abbrev : {
. += SIZEOF(.debug_abbrev);
}
debug_addr : {
. += SIZEOF(.debug_addr);
}
debug_aranges : {
. += SIZEOF(.debug_aranges);
}
debug_info : {
. += SIZEOF(.debug_info);
}
debug_line : {
. += SIZEOF(.debug_line);
}
debug_line_str : {
. += SIZEOF(.debug_line_str);
}
debug_ranges : {
. += SIZEOF(.debug_ranges);
}
debug_rnglists : {
. += SIZEOF(.debug_rnglists);
}
debug_str : {
. += SIZEOF(.debug_str);
}
debug_str_offsets : {
. += SIZEOF(.debug_str_offsets);
}
. = ALIGN(4K);
_erodata = .;
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
. = ALIGN(0x10);
_ex_table_start = .;
KEEP(*(__ex_table))
_ex_table_end = .;
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %CPU_NUM%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
scope_local : { *(scope_local) }
}
INSERT AFTER .tbss;
|
chyyuu/arceos-for-starry | 2,544 | tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
chyyuu/oscamp_tour | 4,857 | tour/h_1_0/src/guest.S |
/// Enter the guest given in `VmCpuRegisters` from `a0`
.global _run_guest
_run_guest:
/* Save hypervisor state */
/* Save hypervisor GPRs (except T0-T6 and a0, which is GuestInfo and stashed in sscratch) */
sd ra, ({hyp_ra})(a0)
sd gp, ({hyp_gp})(a0)
sd tp, ({hyp_tp})(a0)
sd s0, ({hyp_s0})(a0)
sd s1, ({hyp_s1})(a0)
sd a1, ({hyp_a1})(a0)
sd a2, ({hyp_a2})(a0)
sd a3, ({hyp_a3})(a0)
sd a4, ({hyp_a4})(a0)
sd a5, ({hyp_a5})(a0)
sd a6, ({hyp_a6})(a0)
sd a7, ({hyp_a7})(a0)
sd s2, ({hyp_s2})(a0)
sd s3, ({hyp_s3})(a0)
sd s4, ({hyp_s4})(a0)
sd s5, ({hyp_s5})(a0)
sd s6, ({hyp_s6})(a0)
sd s7, ({hyp_s7})(a0)
sd s8, ({hyp_s8})(a0)
sd s9, ({hyp_s9})(a0)
sd s10, ({hyp_s10})(a0)
sd s11, ({hyp_s11})(a0)
sd sp, ({hyp_sp})(a0)
/* Swap in guest CSRs. */
ld t1, ({guest_sstatus})(a0)
csrrw t1, sstatus, t1
sd t1, ({hyp_sstatus})(a0)
ld t1, ({guest_hstatus})(a0)
csrrw t1, hstatus, t1
ld t1, ({guest_scounteren})(a0)
csrrw t1, scounteren, t1
sd t1, ({hyp_scounteren})(a0)
ld t1, ({guest_sepc})(a0)
csrw sepc, t1
/* Set stvec so that hypervisor resumes after the sret when the guest exits. */
la t1, _guest_exit
csrrw t1, stvec, t1
sd t1, ({hyp_stvec})(a0)
/* Save sscratch and replace with pointer to GuestInfo. */
csrrw t1, sscratch, a0
sd t1, ({hyp_sscratch})(a0)
/* Restore the gprs from this GuestInfo */
ld ra, ({guest_ra})(a0)
ld gp, ({guest_gp})(a0)
ld tp, ({guest_tp})(a0)
ld s0, ({guest_s0})(a0)
ld s1, ({guest_s1})(a0)
ld a1, ({guest_a1})(a0)
ld a2, ({guest_a2})(a0)
ld a3, ({guest_a3})(a0)
ld a4, ({guest_a4})(a0)
ld a5, ({guest_a5})(a0)
ld a6, ({guest_a6})(a0)
ld a7, ({guest_a7})(a0)
ld s2, ({guest_s2})(a0)
ld s3, ({guest_s3})(a0)
ld s4, ({guest_s4})(a0)
ld s5, ({guest_s5})(a0)
ld s6, ({guest_s6})(a0)
ld s7, ({guest_s7})(a0)
ld s8, ({guest_s8})(a0)
ld s9, ({guest_s9})(a0)
ld s10, ({guest_s10})(a0)
ld s11, ({guest_s11})(a0)
ld t0, ({guest_t0})(a0)
ld t1, ({guest_t1})(a0)
ld t2, ({guest_t2})(a0)
ld t3, ({guest_t3})(a0)
ld t4, ({guest_t4})(a0)
ld t5, ({guest_t5})(a0)
ld t6, ({guest_t6})(a0)
ld sp, ({guest_sp})(a0)
ld a0, ({guest_a0})(a0)
sret
.align 2
_guest_exit:
/* Pull GuestInfo out of sscratch, swapping with guest's a0 */
csrrw a0, sscratch, a0
/* Save guest GPRs. */
sd ra, ({guest_ra})(a0)
sd gp, ({guest_gp})(a0)
sd tp, ({guest_tp})(a0)
sd s0, ({guest_s0})(a0)
sd s1, ({guest_s1})(a0)
sd a1, ({guest_a1})(a0)
sd a2, ({guest_a2})(a0)
sd a3, ({guest_a3})(a0)
sd a4, ({guest_a4})(a0)
sd a5, ({guest_a5})(a0)
sd a6, ({guest_a6})(a0)
sd a7, ({guest_a7})(a0)
sd s2, ({guest_s2})(a0)
sd s3, ({guest_s3})(a0)
sd s4, ({guest_s4})(a0)
sd s5, ({guest_s5})(a0)
sd s6, ({guest_s6})(a0)
sd s7, ({guest_s7})(a0)
sd s8, ({guest_s8})(a0)
sd s9, ({guest_s9})(a0)
sd s10, ({guest_s10})(a0)
sd s11, ({guest_s11})(a0)
sd t0, ({guest_t0})(a0)
sd t1, ({guest_t1})(a0)
sd t2, ({guest_t2})(a0)
sd t3, ({guest_t3})(a0)
sd t4, ({guest_t4})(a0)
sd t5, ({guest_t5})(a0)
sd t6, ({guest_t6})(a0)
sd sp, ({guest_sp})(a0)
/* Save Guest a0 after recovering from sscratch. */
csrr t0, sscratch
sd t0, ({guest_a0})(a0)
_restore_csrs:
/* Swap in hypervisor CSRs. */
ld t1, ({hyp_sstatus})(a0)
csrrw t1, sstatus, t1
sd t1, ({guest_sstatus})(a0)
csrr t1, hstatus
sd t1, ({guest_hstatus})(a0)
ld t1, ({hyp_scounteren})(a0)
csrrw t1, scounteren, t1
sd t1, ({guest_scounteren})(a0)
ld t1, ({hyp_stvec})(a0)
csrw stvec, t1
ld t1, ({hyp_sscratch})(a0)
csrw sscratch, t1
/* Save guest EPC. */
csrr t1, sepc
sd t1, ({guest_sepc})(a0)
/* Restore hypervisor GPRs. */
ld ra, ({hyp_ra})(a0)
ld gp, ({hyp_gp})(a0)
ld tp, ({hyp_tp})(a0)
ld s0, ({hyp_s0})(a0)
ld s1, ({hyp_s1})(a0)
ld a1, ({hyp_a1})(a0)
ld a2, ({hyp_a2})(a0)
ld a3, ({hyp_a3})(a0)
ld a4, ({hyp_a4})(a0)
ld a5, ({hyp_a5})(a0)
ld a6, ({hyp_a6})(a0)
ld a7, ({hyp_a7})(a0)
ld s2, ({hyp_s2})(a0)
ld s3, ({hyp_s3})(a0)
ld s4, ({hyp_s4})(a0)
ld s5, ({hyp_s5})(a0)
ld s6, ({hyp_s6})(a0)
ld s7, ({hyp_s7})(a0)
ld s8, ({hyp_s8})(a0)
ld s9, ({hyp_s9})(a0)
ld s10, ({hyp_s10})(a0)
ld s11, ({hyp_s11})(a0)
ld sp, ({hyp_sp})(a0)
ret
|
chyyuu/arceos-starry-axvisor-all | 668 | axvisor/src/utils/arch/aarch64/cache.S | // void cache_invalidate_d(u64 start, u64 length);
.global cache_invalidate_d
cache_invalidate_d:
add x2, x0, x1 /* calculate the end address */
bic x0, x0, #(64 - 1) /* align the start with a cache line */
1:
dc ivac, x0 /* invalidate cache to PoC by VA */
add x0, x0, #64
cmp x0, x2
blt 1b
mov x0, xzr
dsb sy
ret
// void cache_clean_invalidate_d(u64 start, u64 length);
.global cache_clean_invalidate_d
cache_clean_invalidate_d:
add x2, x0, x1 /* calculate the end address */
bic x0, x0, #(64 - 1) /* align the start with a cache line */
1:
dc civac, x0 /* invalidate cache to PoC by VA */
add x0, x0, #64
cmp x0, x2
blt 1b
mov x0, xzr
dsb sy
ret
|
chyyuu/arceos-starry-axvisor-all | 1,868 | axvisor/scripts/lds/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
.rodata : ALIGN(4K) {
_srodata = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
. = ALIGN(4K);
_erodata = .;
}
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %SMP%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
chyyuu/arceos-starry-axvisor-all | 171 | starry-next/apps/nimbos/c/lib/arch/riscv/crt.S | .text
.globl _start
_start:
.option push
.option norelax
lla gp, __global_pointer$
.option pop
mv a0, sp
and sp, sp, -16
tail __start_main
|
chyyuu/arceos-starry-axvisor-all | 511 | starry-next/apps/nimbos/c/lib/arch/riscv/clone.S | // __clone(func, arg, stack)
// a0, a1, a2
// syscall(SYS_clone, stack)
// a7, a0
.global __clone
.hidden __clone
__clone:
andi a2, a2, -16
addi a2, a2, -16
sd a0, 0(a2)
sd a1, 8(a2)
// syscall(SYSCALL_CLONE, newsp)
mv a0, a2
li a7, 56
ecall
beqz a0, 1f
// parent
ret
1:
// child
ld a0, 8(sp)
ld a1, 0(sp)
jalr a1
// syscall(SYSCALL_EXIT, ret)
li a7, 60
ecall
|
chyyuu/arceos-starry-axvisor-all | 117 | starry-next/apps/nimbos/c/lib/arch/aarch64/crt.S | .text
.globl _start
_start:
mov x29, #0
mov x30, #0
mov x0, sp
and sp, x0, #-16
b __start_main
|
chyyuu/arceos-starry-axvisor-all | 434 | starry-next/apps/nimbos/c/lib/arch/aarch64/clone.S | // __clone(func, arg, stack)
// x0, x1, x2
// syscall(SYS_clone, stack)
// x8, x0
.global __clone
.hidden __clone
__clone:
and x2, x2, #-16
stp x0, x1, [x2, #-16]!
// syscall(SYSCALL_CLONE, newsp)
mov x0, x2
mov x8, #56
svc #0
cbz x0, 1f
// parent
ret
1:
// child
ldp x1, x0, [sp], #16
blr x1
// syscall(SYSCALL_EXIT, ret)
mov x8, #60
svc #0
|
chyyuu/arceos-starry-axvisor-all | 198 | starry-next/apps/nimbos/c/lib/arch/loongarch64/crt.S | .section .text.entry
.globl _start
_start:
move $fp, $zero
move $a0, $sp
.weak _DYNAMIC
.hidden _DYNAMIC
la.local $a1, _DYNAMIC
bstrins.d $sp, $zero, 3, 0
b __start_main
|
chyyuu/arceos-starry-axvisor-all | 961 | starry-next/apps/nimbos/c/lib/arch/loongarch64/clone.S | #__clone(func, stack, flags, arg, ptid, tls, ctid)
# a0, a1, a2, a3, a4, a5, a6
# sys_clone(flags, stack, ptid, ctid, tls)
# a0, a1, a2, a3, a4
.global __clone
.hidden __clone
.type __clone,@function
__clone:
bstrins.d $a1, $zero, 3, 0 #stack to 16 align
# Save function pointer and argument pointer on new thread stack
addi.d $a1, $a1, -16
st.d $a0, $a1, 0 # save function pointer
st.d $a3, $a1, 8 # save argument pointer
or $a0, $a2, $zero
or $a2, $a4, $zero
or $a3, $a6, $zero
or $a4, $a5, $zero
ori $a7, $zero, 220
syscall 0 # call clone
beqz $a0, 1f # whether child process
jirl $zero, $ra, 0 # parent process return
1:
ld.d $t8, $sp, 0 # function pointer
ld.d $a0, $sp, 8 # argument pointer
jirl $ra, $t8, 0 # call the user's function
ori $a7, $zero, 93
syscall 0 # child process exit
|
chyyuu/arceos-starry-axvisor-all | 2,095 | starry-next/arceos/modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
. = ALIGN(4K);
_erodata = .;
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %SMP%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
linkme_POST_TRAP : { *(linkme_POST_TRAP) }
linkm2_POST_TRAP : { *(linkm2_POST_TRAP) }
axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
chyyuu/arceos-starry-axvisor-all | 2,358 | starry-next/arceos/modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 2 // load supervisor gp
LDR t1, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t0
mv tp, t1
.endif
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR t1, sp, 2 // load user gp and tp
LDR t0, sp, 3
STR gp, sp, 2 // save supervisor gp
STR tp, sp, 3 // save supervisor gp and tp
mv gp, t1
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
// restore sepc
LDR t0, sp, 31
csrw sepc, t0
// restore sstatus, but don't change FS
LDR t0, sp, 32 // t0 = sstatus to restore
csrr t1, sstatus // t1 = current sstatus
li t2, 0x6000 // t2 = mask for FS
and t1, t1, t2 // t1 = current FS
not t2, t2 // t2 = ~(mask for FS)
and t0, t0, t2 // t0 = sstatus to restore(cleared FS)
or t0, t0, t1 // t0 = sstatus to restore with current FS
csrw sstatus, t0 // restore sstatus
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // swap sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
|
chyyuu/arceos-starry-axvisor-all | 2,989 | starry-next/arceos/modules/axhal/src/arch/aarch64/trap.S | .macro SAVE_REGS
sub sp, sp, {trapframe_size}
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
str x30, [sp, 30 * 8]
mrs x9, sp_el0
mrs x10, tpidr_el0
mrs x11, elr_el1
mrs x12, spsr_el1
stp x9, x10, [sp, 31 * 8]
stp x11, x12, [sp, 33 * 8]
# restore kernel tpidr_el0
mrs x1, tpidrro_el0
msr tpidr_el0, x1
# We may have interrupted userspace, or a guest, or exit-from or
# return-to either of those. So we can't trust sp_el0, and need to
# restore it.
bl {cache_current_task_ptr}
.endm
.macro RESTORE_REGS
# backup kernel tpidr_el0
mrs x1, tpidr_el0
msr tpidrro_el0, x1
ldp x11, x12, [sp, 33 * 8]
ldp x9, x10, [sp, 31 * 8]
msr sp_el0, x9
msr tpidr_el0, x10
msr elr_el1, x11
msr spsr_el1, x12
ldr x30, [sp, 30 * 8]
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, {trapframe_size}
.endm
.macro INVALID_EXCP, kind, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \kind
mov x2, \source
bl invalid_exception
b .Lexception_return
.endm
.macro HANDLE_SYNC, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \source
bl handle_sync_exception
b .Lexception_return
.endm
.macro HANDLE_IRQ, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \source
bl handle_irq_exception
b .Lexception_return
.endm
.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
// current EL, with SP_EL0
INVALID_EXCP 0 0
INVALID_EXCP 1 0
INVALID_EXCP 2 0
INVALID_EXCP 3 0
// current EL, with SP_ELx
HANDLE_SYNC 1
HANDLE_IRQ 1
INVALID_EXCP 2 1
INVALID_EXCP 3 1
// lower EL, aarch64
HANDLE_SYNC 2
HANDLE_IRQ 2
INVALID_EXCP 2 2
INVALID_EXCP 3 2
// lower EL, aarch32
INVALID_EXCP 0 3
INVALID_EXCP 1 3
INVALID_EXCP 2 3
INVALID_EXCP 3 3
.Lexception_return:
RESTORE_REGS
eret
|
chyyuu/arceos-starry-axvisor-all | 1,791 | starry-next/arceos/modules/axhal/src/arch/loongarch64/trap.S | .macro SAVE_REGS, from_user
move $t0, $sp
.if \from_user == 1
csrrd $sp, KSAVE_KSP // restore kernel sp
addi.d $sp, $sp, -{trapframe_size}
STD $tp, $sp, 2
STD $r21, $sp, 21
csrrd $tp, KSAVE_TP
csrrd $r21, KSAVE_R21
.else
addi.d $sp, $sp, -{trapframe_size}
.endif
STD $t0, $sp, 3
csrrd $t0, KSAVE_TEMP
PUSH_GENERAL_REGS
csrrd $t1, LA_CSR_PRMD
csrrd $t2, LA_CSR_ERA
STD $t1, $sp, 32 // prmd
STD $t2, $sp, 33 // era
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
csrwr $tp, KSAVE_TP
csrwr $r21, KSAVE_R21
LDD $tp, $sp, 2
LDD $r21, $sp, 21
addi.d $t1, $sp, {trapframe_size}
csrwr $t1, KSAVE_KSP // save kernel sp
.endif
LDD $t1, $sp, 33 // era
LDD $t2, $sp, 32 // prmd
csrwr $t1, LA_CSR_ERA
csrwr $t2, LA_CSR_PRMD
POP_GENERAL_REGS
LDD $sp, $sp, 3
.endm
.section .text
.balign 4096
.global exception_entry_base
exception_entry_base:
csrwr $t0, KSAVE_TEMP
csrrd $t0, LA_CSR_PRMD
andi $t0, $t0, 0x3
bnez $t0, .Lfrom_userspace
.Lfrom_kernel:
SAVE_REGS 0
move $a0, $sp
addi.d $a1, $zero, 0
bl loongarch64_trap_handler
RESTORE_REGS 0
ertn
.Lfrom_userspace:
SAVE_REGS 1
move $a0, $sp
addi.d $a1, $zero, 1
bl loongarch64_trap_handler
RESTORE_REGS 1
ertn
.section .text
.balign 4096
.global handle_tlb_refill
handle_tlb_refill:
csrwr $t0, LA_CSR_TLBRSAVE
csrrd $t0, LA_CSR_PGD
lddir $t0, $t0, 3
lddir $t0, $t0, 2
lddir $t0, $t0, 1
ldpte $t0, 0
ldpte $t0, 1
tlbfill
csrrd $t0, LA_CSR_TLBRSAVE
ertn
|
chyyuu/arceos-starry-axvisor-all | 1,965 | starry-next/arceos/modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0)
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr]
mov eax, [entry_ptr]
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
chyyuu/arceos-starry-axvisor-all | 4,325 | starry-next/arceos/modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
chyyuu/arceos-starry-axvisor-all | 2,544 | starry-next/arceos/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
chyyuu/arceos-starry-axvisor-all | 2,001 | arceos/modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
. = ALIGN(4K);
_erodata = .;
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %SMP%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
chyyuu/arceos-starry-axvisor-all | 1,839 | arceos/modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
// SAVE_REGS: build a TrapFrame on the kernel stack.
// \from_user == 1 when the trap came from U-mode; then the user gp/tp are
// swapped with the supervisor gp/tp previously stashed in the frame.
// NOTE(review): {trapframe_size}, STR/LDR and PUSH/POP_GENERAL_REGS are
// provided by the including Rust/asm context (not visible in this file).
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 2 // load supervisor gp
LDR t1, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t0
mv tp, t1
.endif
.endm
// RESTORE_REGS: mirror of SAVE_REGS; for a return to U-mode it also re-arms
// sscratch with the supervisor sp so the next trap entry can detect U-mode.
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR t1, sp, 2 // load user gp and tp
LDR t0, sp, 3
STR gp, sp, 2 // save supervisor gp
STR tp, sp, 3 // save supervisor gp and tp
mv gp, t1
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
LDR t0, sp, 31 // restore sepc and sstatus from the frame
LDR t1, sp, 32
csrw sepc, t0
csrw sstatus, t1
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
// Single trap entry (stvec in Direct mode, hence the 4-byte alignment).
// Dispatches to the S-mode or U-mode path based on the sscratch convention
// below, then calls riscv_trap_handler(tf, from_user).
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // swap sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
|
chyyuu/arceos-starry-axvisor-all | 2,616 | arceos/modules/axhal/src/arch/aarch64/trap.S | .macro SAVE_REGS
// SAVE_REGS: push a 34-slot (34 * 8 bytes) TrapFrame:
// x0..x29 pairs, then x30 + sp_el0, then elr_el1 + spsr_el1.
sub sp, sp, 34 * 8
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
mrs x9, sp_el0
mrs x10, elr_el1
mrs x11, spsr_el1
stp x30, x9, [sp, 30 * 8]
stp x10, x11, [sp, 32 * 8]
# We may have interrupted userspace, or a guest, or exit-from or
# return-to either of those. So we can't trust sp_el0, and need to
# restore it.
bl {cache_current_task_ptr}
.endm
// RESTORE_REGS: pop the TrapFrame in reverse order and rearm the EL1
// return state (sp_el0/elr_el1/spsr_el1) before eret.
.macro RESTORE_REGS
ldp x10, x11, [sp, 32 * 8]
ldp x30, x9, [sp, 30 * 8]
msr sp_el0, x9
msr elr_el1, x10
msr spsr_el1, x11
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, 34 * 8
.endm
// Each handler macro below must fit inside one 128-byte (.p2align 7)
// vector slot, as required by the AArch64 VBAR_EL1 vector table layout.
.macro INVALID_EXCP, kind, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \kind
mov x2, \source
bl invalid_exception
b .Lexception_return
.endm
.macro HANDLE_SYNC
.p2align 7
SAVE_REGS
mov x0, sp
bl handle_sync_exception
b .Lexception_return
.endm
.macro HANDLE_IRQ
.p2align 7
SAVE_REGS
mov x0, sp
bl handle_irq_exception
b .Lexception_return
.endm
.section .text
.p2align 11
.global exception_vector_base
// 16-entry vector table (4 exception types x 4 sources), 2 KiB aligned
// for VBAR_EL1. Only sync/IRQ from current-EL-SPx and lower-EL-aarch64
// are handled; everything else reports an invalid exception.
exception_vector_base:
// current EL, with SP_EL0
INVALID_EXCP 0 0
INVALID_EXCP 1 0
INVALID_EXCP 2 0
INVALID_EXCP 3 0
// current EL, with SP_ELx
HANDLE_SYNC
HANDLE_IRQ
INVALID_EXCP 2 1
INVALID_EXCP 3 1
// lower EL, aarch64
HANDLE_SYNC
HANDLE_IRQ
INVALID_EXCP 2 2
INVALID_EXCP 3 2
// lower EL, aarch32
INVALID_EXCP 0 3
INVALID_EXCP 1 3
INVALID_EXCP 2 3
INVALID_EXCP 3 3
// Shared exception epilogue: restore the frame and return from EL1.
.Lexception_return:
RESTORE_REGS
eret
|
chyyuu/arceos-starry-axvisor-all | 1,705 | arceos/modules/axhal/src/arch/loongarch64/trap.S | .macro SAVE_REGS, from_user
// SAVE_REGS: build a TrapFrame. For a U-mode trap the kernel sp/tp/r21 are
// recovered from the KSAVE_* scratch CSRs; $t0 was stashed in KSAVE_TEMP by
// the entry stub below before this macro runs.
move $t0, $sp
.if \from_user == 1
csrrd $sp, KSAVE_KSP // restore kernel sp
addi.d $sp, $sp, -{trapframe_size}
STD $tp, $sp, 2
STD $r21, $sp, 21
csrrd $tp, KSAVE_TP
csrrd $r21, KSAVE_R21
.else
addi.d $sp, $sp, -{trapframe_size}
.endif
STD $t0, $sp, 3 // tf.regs.sp (pre-trap sp)
csrrd $t0, KSAVE_TEMP // recover the original $t0
PUSH_GENERAL_REGS
csrrd $t1, LA_CSR_PRMD
csrrd $t2, LA_CSR_ERA
STD $t1, $sp, 32 // prmd
STD $t2, $sp, 33 // era
.endm
// RESTORE_REGS: mirror of SAVE_REGS; re-stash user tp/r21 into the scratch
// CSRs before restoring return state and popping the frame.
.macro RESTORE_REGS, from_user
.if \from_user == 1
csrwr $tp, KSAVE_TP
csrwr $r21, KSAVE_R21
LDD $tp, $sp, 2
LDD $r21, $sp, 21
.endif
LDD $t1, $sp, 33 // era
LDD $t2, $sp, 32 // prmd
csrwr $t1, LA_CSR_ERA
csrwr $t2, LA_CSR_PRMD
POP_GENERAL_REGS
LDD $sp, $sp, 3
.endm
.section .text
.balign 4096
.global exception_entry_base
// Exception entry (EENTRY). PRMD.PPLV (low 2 bits) != 0 means the trap came
// from user mode; dispatch accordingly to loongarch64_trap_handler(tf, from_user).
exception_entry_base:
csrwr $t0, KSAVE_TEMP
csrrd $t0, LA_CSR_PRMD
andi $t0, $t0, 0x3 // extract PPLV (previous privilege level)
bnez $t0, .Lfrom_userspace
.Lfrom_kernel:
SAVE_REGS 0
move $a0, $sp
addi.d $a1, $zero, 0
bl loongarch64_trap_handler
RESTORE_REGS 0
ertn
.Lfrom_userspace:
SAVE_REGS 1
move $a0, $sp
addi.d $a1, $zero, 1
bl loongarch64_trap_handler
RESTORE_REGS 1
ertn
.section .text
.balign 4096
.global handle_tlb_refill
// TLB refill handler: hardware page-table walk via lddir/ldpte (3-level),
// fills both even/odd PTEs, then returns. $t0 is preserved via TLBRSAVE.
handle_tlb_refill:
csrwr $t0, LA_CSR_TLBRSAVE
csrrd $t0, LA_CSR_PGD
lddir $t0, $t0, 3
lddir $t0, $t0, 2
lddir $t0, $t0, 1
ldpte $t0, 0
ldpte $t0, 1
tlbfill
csrrd $t0, LA_CSR_TLBRSAVE
ertn
|
chyyuu/arceos-starry-axvisor-all | 1,965 | arceos/modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
#
# pa_* symbols translate link-time offsets into physical addresses at the
# {start_page_paddr} page where the boot CPU copies this code.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
# The boot CPU writes the AP's stack pointer and entry address at fixed
# offsets near the end of the start page before sending the STARTUP IPI.
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0) # CR0.PE
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr] # per-AP stack prepared by the BSP
mov eax, [entry_ptr] # 32-bit entry (ap_entry32) prepared by the BSP
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
chyyuu/arceos-starry-axvisor-all | 4,325 | arceos/modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
#
# {offset} is the kernel's virtual-to-physical offset; subtracting it turns a
# link-time (virtual) address into the physical address the bootloader uses.
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
# Multiboot v1 header; checksum makes magic+flags+checksum sum to zero.
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
# Entry functions never return; park the CPU.
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
# Temporary identity + high-half mapping using 1 GiB pages (4 GiB each half).
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
chyyuu/arceos-starry-axvisor-all | 2,544 | arceos/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
// Chainloader entry: zero .bss, copy the binary from its load address
// (ADR_REL, PC-relative) to its link address (ADR_ABS), then jump to the
// relocated Rust entry point. Only the boot core runs; others are parked.
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
chyyuu/starry-mix | 2,747 | arceos/modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
/* Kernel linker script template; %ARCH%, %KERNEL_BASE% and %CPU_NUM% are
 * substituted by the build system before linking. */
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
/* Reserve address space matching the size of each DWARF debug section.
 * NOTE(review): presumably so debug info can be mapped/located by the
 * kernel itself — verify against the consumer of these sections. */
debug_abbrev : {
. += SIZEOF(.debug_abbrev);
}
debug_addr : {
. += SIZEOF(.debug_addr);
}
debug_aranges : {
. += SIZEOF(.debug_aranges);
}
debug_info : {
. += SIZEOF(.debug_info);
}
debug_line : {
. += SIZEOF(.debug_line);
}
debug_line_str : {
. += SIZEOF(.debug_line_str);
}
debug_ranges : {
. += SIZEOF(.debug_ranges);
}
debug_rnglists : {
. += SIZEOF(.debug_rnglists);
}
debug_str : {
. += SIZEOF(.debug_str);
}
debug_str_offsets : {
. += SIZEOF(.debug_str_offsets);
}
. = ALIGN(4K);
_erodata = .;
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
. = ALIGN(0x10);
_ex_table_start = .;
KEEP(*(__ex_table))
_ex_table_end = .;
}
/* Thread-local storage initialization image and zero-init template. */
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
/* Per-CPU data: one cache-line-aligned copy of the template per CPU.
 * The section has VMA 0 (offsets) but loads at _percpu_start. */
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %CPU_NUM%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
/* linkme/scope_local distributed-slice sections, inserted after .tbss. */
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
scope_local : { *(scope_local) }
}
INSERT AFTER .tbss;
|
chyyuu/starry-mix | 2,544 | arceos/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
// Chainloader entry: zero .bss, copy the binary from its load address
// (ADR_REL, PC-relative) to its link address (ADR_ABS), then jump to the
// relocated Rust entry point. Only the boot core runs; others are parked.
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
chyyuu/starry-next-all | 171 | apps/nimbos/c/lib/arch/riscv/crt.S | .text
// C runtime entry point: set up gp, hand the original sp to __start_main,
// and align the stack per the RISC-V psABI before tail-calling into C.
.globl _start
_start:
.option push
.option norelax
lla gp, __global_pointer$ // gp must be loaded without linker relaxation
.option pop
mv a0, sp // arg0 for __start_main: the original stack pointer
and sp, sp, -16 // psABI: 16-byte stack alignment
tail __start_main
|
chyyuu/starry-next-all | 511 | apps/nimbos/c/lib/arch/riscv/clone.S | // __clone(func, arg, stack)
// a0, a1, a2
// syscall(SYS_clone, stack)
// a7, a0
//
// The child's stack gets func at offset 0 and arg at offset 8; after the
// syscall the child runs with sp == that stack and picks both back up.
.global __clone
.hidden __clone
__clone:
andi a2, a2, -16 // align child stack to 16 bytes
addi a2, a2, -16 // reserve a slot for func/arg
sd a0, 0(a2)
sd a1, 8(a2)
// syscall(SYSCALL_CLONE, newsp)
mv a0, a2
li a7, 56 // nimbos syscall number for clone
ecall
beqz a0, 1f
// parent
ret
1:
// child
ld a0, 8(sp) // arg
ld a1, 0(sp) // func
jalr a1
// syscall(SYSCALL_EXIT, ret)
li a7, 60 // nimbos syscall number for exit
ecall
|
chyyuu/starry-next-all | 117 | apps/nimbos/c/lib/arch/aarch64/crt.S | .text
// C runtime entry point: clear the frame pointer and link register so
// backtraces terminate here, pass the original sp to __start_main, and
// align the stack to 16 bytes per AAPCS64.
.globl _start
_start:
mov x29, #0 // frame pointer = 0 (backtrace terminator)
mov x30, #0 // link register = 0
mov x0, sp // arg0 for __start_main: the original stack pointer
and sp, x0, #-16 // AAPCS64: 16-byte stack alignment
b __start_main
|
chyyuu/starry-next-all | 434 | apps/nimbos/c/lib/arch/aarch64/clone.S | // __clone(func, arg, stack)
// x0, x1, x2
// syscall(SYS_clone, stack)
// x8, x0
//
// func/arg are pushed onto the child stack; the child pops them back off
// its own sp after the syscall returns zero.
.global __clone
.hidden __clone
__clone:
and x2, x2, #-16 // align child stack to 16 bytes
stp x0, x1, [x2, #-16]! // push func (offset 0) and arg (offset 8)
// syscall(SYSCALL_CLONE, newsp)
mov x0, x2
mov x8, #56 // nimbos syscall number for clone
svc #0
cbz x0, 1f
// parent
ret
1:
// child
ldp x1, x0, [sp], #16 // x1 = func, x0 = arg
blr x1
// syscall(SYSCALL_EXIT, ret)
mov x8, #60 // nimbos syscall number for exit
svc #0
|
chyyuu/starry-next-all | 121 | apps/nimbos/c/lib/arch/x86_64/crt.S | .text
# C runtime entry point (AT&T syntax): clear the frame pointer so
# backtraces terminate here, pass the original rsp to __start_main, and
# align the stack to 16 bytes per the SysV ABI.
.globl _start
_start:
xor %rbp, %rbp
mov %rsp, %rdi # arg0 for __start_main: the original stack pointer
andq $-16, %rsp # SysV: 16-byte stack alignment
call __start_main
|
chyyuu/starry-next-all | 574 | apps/nimbos/c/lib/arch/x86_64/clone.S | // __clone(func, arg, stack)
// rdi, rsi, rdx
// syscall(SYS_clone, stack)
// rax, rdi
//
// arg is pushed onto the child stack; func is carried across the syscall
// in r9 (caller-visible clobber of a scratch register is fine here).
.global __clone
.hidden __clone
__clone:
// push arg (%rsi) to stack, set func (%rdi) to %r9
and $-16, %rdx // align child stack to 16 bytes
sub $8, %rdx
mov %rsi, (%rdx)
mov %rdi, %r9
// syscall(SYSCALL_CLONE, newsp)
mov %rdx, %rdi
mov $56, %rax // nimbos syscall number for clone
syscall
test %rax, %rax
jz 1f
// parent
ret
1:
// child
xor %rbp, %rbp // terminate backtraces in the child
pop %rdi // arg
call *%r9 // func(arg)
// syscall(SYSCALL_EXIT, ret)
mov %rax, %rdi
mov $60, %rax // nimbos syscall number for exit
syscall
|
chyyuu/starry-next-all | 198 | apps/nimbos/c/lib/arch/loongarch64/crt.S | .section .text.entry
# C runtime entry point: clear fp for backtrace termination, pass the
# original sp and the (weak) _DYNAMIC address to __start_main, and align
# the stack to 16 bytes.
.globl _start
_start:
move $fp, $zero # frame pointer = 0 (backtrace terminator)
move $a0, $sp # arg0 for __start_main: the original stack pointer
.weak _DYNAMIC
.hidden _DYNAMIC
la.local $a1, _DYNAMIC # arg1: dynamic section (0 when statically linked)
bstrins.d $sp, $zero, 3, 0 # clear low 4 bits: 16-byte stack alignment
b __start_main
|
chyyuu/starry-next-all | 961 | apps/nimbos/c/lib/arch/loongarch64/clone.S | #__clone(func, stack, flags, arg, ptid, tls, ctid)
# a0, a1, a2, a3, a4, a5, a6
# sys_clone(flags, stack, ptid, ctid, tls)
# a0, a1, a2, a3, a4
.global __clone
.hidden __clone
.type __clone,@function
__clone:
bstrins.d $a1, $zero, 3, 0 #stack to 16 align
# Save function pointer and argument pointer on new thread stack
addi.d $a1, $a1, -16
st.d $a0, $a1, 0 # save function pointer
st.d $a3, $a1, 8 # save argument pointer
# Shuffle arguments from the __clone signature into the kernel's
# sys_clone argument order (flags, stack, ptid, ctid, tls).
or $a0, $a2, $zero
or $a2, $a4, $zero
or $a3, $a6, $zero
or $a4, $a5, $zero
ori $a7, $zero, 220 # syscall number for clone
syscall 0 # call clone
beqz $a0, 1f # whether child process
jirl $zero, $ra, 0 # parent process return
1:
# Child: sp is the new stack; pop func/arg pushed above.
ld.d $t8, $sp, 0 # function pointer
ld.d $a0, $sp, 8 # argument pointer
jirl $ra, $t8, 0 # call the user's function
ori $a7, $zero, 93 # syscall number for exit
syscall 0 # child process exit
|
chyyuu/starry-next-all | 2,095 | arceos/modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
/* Kernel linker script template; %ARCH%, %KERNEL_BASE% and %SMP% are
 * substituted by the build system before linking. */
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
_srodata = .;
.rodata : ALIGN(4K) {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
}
.init_array : ALIGN(0x10) {
__init_array_start = .;
*(.init_array .init_array.*)
__init_array_end = .;
}
. = ALIGN(4K);
_erodata = .;
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
/* Thread-local storage initialization image and zero-init template. */
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
/* Per-CPU data: one cache-line-aligned copy of the template per CPU.
 * The section has VMA 0 (offsets) but loads at _percpu_start. */
. = ALIGN(4K);
_percpu_start = .;
_percpu_end = _percpu_start + SIZEOF(.percpu);
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = _percpu_load_start + ALIGN(64) * %SMP%;
}
. = _percpu_end;
. = ALIGN(4K);
_edata = .;
.bss : AT(.) ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
/* linkme/axns distributed-slice sections, inserted after .tbss. */
SECTIONS {
linkme_IRQ : { *(linkme_IRQ) }
linkm2_IRQ : { *(linkm2_IRQ) }
linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
linkme_SYSCALL : { *(linkme_SYSCALL) }
linkm2_SYSCALL : { *(linkm2_SYSCALL) }
linkme_POST_TRAP : { *(linkme_POST_TRAP) }
linkm2_POST_TRAP : { *(linkm2_POST_TRAP) }
axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
chyyuu/starry-next-all | 2,358 | arceos/modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
// SAVE_REGS: build a TrapFrame on the kernel stack.
// \from_user == 1 when the trap came from U-mode; then the user gp/tp are
// swapped with the supervisor gp/tp previously stashed in the frame.
// NOTE(review): {trapframe_size}, STR/LDR and PUSH/POP_GENERAL_REGS are
// provided by the including Rust/asm context (not visible in this file).
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 2 // load supervisor gp
LDR t1, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t0
mv tp, t1
.endif
.endm
// RESTORE_REGS: mirror of SAVE_REGS. The saved sstatus is restored with the
// live FS (floating-point dirty/clean state) field preserved, so FPU state
// tracking done during the trap is not clobbered by the stale saved value.
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR t1, sp, 2 // load user gp and tp
LDR t0, sp, 3
STR gp, sp, 2 // save supervisor gp
STR tp, sp, 3 // save supervisor gp and tp
mv gp, t1
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
// restore sepc
LDR t0, sp, 31
csrw sepc, t0
// restore sstatus, but don't change FS
LDR t0, sp, 32 // t0 = sstatus to restore
csrr t1, sstatus // t1 = current sstatus
li t2, 0x6000 // t2 = mask for FS
and t1, t1, t2 // t1 = current FS
not t2, t2 // t2 = ~(mask for FS)
and t0, t0, t2 // t0 = sstatus to restore(cleared FS)
or t0, t0, t1 // t0 = sstatus to restore with current FS
csrw sstatus, t0 // restore sstatus
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
// Single trap entry (stvec in Direct mode, hence the 4-byte alignment).
// Dispatches to the S-mode or U-mode path based on the sscratch convention
// below, then calls riscv_trap_handler(tf, from_user).
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // swap sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
|
chyyuu/starry-next-all | 2,989 | arceos/modules/axhal/src/arch/aarch64/trap.S | .macro SAVE_REGS
// SAVE_REGS: push a {trapframe_size}-byte TrapFrame: x0..x29 pairs, x30,
// then sp_el0/tpidr_el0 and elr_el1/spsr_el1. The kernel's tpidr_el0 is
// kept in tpidrro_el0 while user code runs and is swapped back in here.
sub sp, sp, {trapframe_size}
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
str x30, [sp, 30 * 8]
mrs x9, sp_el0
mrs x10, tpidr_el0
mrs x11, elr_el1
mrs x12, spsr_el1
stp x9, x10, [sp, 31 * 8]
stp x11, x12, [sp, 33 * 8]
# restore kernel tpidr_el0
mrs x1, tpidrro_el0
msr tpidr_el0, x1
# We may have interrupted userspace, or a guest, or exit-from or
# return-to either of those. So we can't trust sp_el0, and need to
# restore it.
bl {cache_current_task_ptr}
.endm
// RESTORE_REGS: mirror of SAVE_REGS; stashes the kernel tpidr_el0 into
// tpidrro_el0 and rearms the EL1 return state before eret.
.macro RESTORE_REGS
# backup kernel tpidr_el0
mrs x1, tpidr_el0
msr tpidrro_el0, x1
ldp x11, x12, [sp, 33 * 8]
ldp x9, x10, [sp, 31 * 8]
msr sp_el0, x9
msr tpidr_el0, x10
msr elr_el1, x11
msr spsr_el1, x12
ldr x30, [sp, 30 * 8]
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, {trapframe_size}
.endm
// Each handler macro below must fit inside one 128-byte (.p2align 7)
// vector slot, as required by the AArch64 VBAR_EL1 vector table layout.
// \source: 0 = current EL/SP_EL0, 1 = current EL/SP_ELx, 2 = lower EL
// aarch64, 3 = lower EL aarch32 (passed through to the Rust handler).
.macro INVALID_EXCP, kind, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \kind
mov x2, \source
bl invalid_exception
b .Lexception_return
.endm
.macro HANDLE_SYNC, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \source
bl handle_sync_exception
b .Lexception_return
.endm
.macro HANDLE_IRQ, source
.p2align 7
SAVE_REGS
mov x0, sp
mov x1, \source
bl handle_irq_exception
b .Lexception_return
.endm
.section .text
.p2align 11
.global exception_vector_base
// 16-entry vector table (4 exception types x 4 sources), 2 KiB aligned
// for VBAR_EL1.
exception_vector_base:
// current EL, with SP_EL0
INVALID_EXCP 0 0
INVALID_EXCP 1 0
INVALID_EXCP 2 0
INVALID_EXCP 3 0
// current EL, with SP_ELx
HANDLE_SYNC 1
HANDLE_IRQ 1
INVALID_EXCP 2 1
INVALID_EXCP 3 1
// lower EL, aarch64
HANDLE_SYNC 2
HANDLE_IRQ 2
INVALID_EXCP 2 2
INVALID_EXCP 3 2
// lower EL, aarch32
INVALID_EXCP 0 3
INVALID_EXCP 1 3
INVALID_EXCP 2 3
INVALID_EXCP 3 3
// Shared exception epilogue: restore the frame and return from EL1.
.Lexception_return:
RESTORE_REGS
eret
|
chyyuu/starry-next-all | 1,397 | arceos/modules/axhal/src/arch/x86_64/syscall.S | .section .text
.code64
# Fast-syscall entry (reached via the IA32_LSTAR MSR after `syscall`).
# On entry: rcx = user rip, r11 = user rflags, rsp = user stack.
# Builds a full TrapFrame on the kernel stack (laid out like the interrupt
# frame: GPRs, then a gap, then rip/cs/rflags/rsp/ss) and calls
# x86_syscall_handler(tf); returns to user mode with sysretq.
syscall_entry:
swapgs // switch to kernel gs
mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp // save user rsp
mov rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] // switch to kernel stack
sub rsp, 8 // skip user ss
push gs:[offset __PERCPU_USER_RSP_OFFSET] // user rsp
push r11 // rflags
push {ucode64} // cs
push rcx // rip
sub rsp, 4 * 8 // skip until general registers
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp // arg0: pointer to the TrapFrame
call x86_syscall_handler
# Pop the (possibly modified) GPRs back; rax now carries the return value.
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
add rsp, 9 * 8 // drop gap + rip/cs/rflags/rsp/ss
mov rcx, [rsp - 5 * 8] // rip
mov r11, [rsp - 3 * 8] // rflags
mov rsp, [rsp - 2 * 8] // user rsp
swapgs
sysretq
|
chyyuu/starry-next-all | 1,627 | arceos/modules/axhal/src/arch/x86_64/trap.S | .equ NUM_INT, 256
.altmacro
# DEF_HANDLER: emit one per-vector stub. Vectors 8, 10-14 and 17 already
# have an error code pushed by the CPU; all others push a dummy 0 so the
# TrapFrame layout is uniform.
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
# error code pushed by CPU
push \i # interrupt vector
jmp .Ltrap_common
.else
push 0 # fill in error code in TrapFrame
push \i # interrupt vector
jmp .Ltrap_common
.endif
.endm
.macro DEF_TABLE_ENTRY, i
.quad .Ltrap_handler_\i
.endm
.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
DEF_HANDLER %i
.set i, i + 1
.endr
# Common trap tail: saves GPRs around x86_trap_handler(tf). The saved CS at
# [rsp + 3*8] is checked: RPL != 0 means we came from user space and must
# swapgs on entry and again before iretq.
.Ltrap_common:
test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space
jz 1f
swapgs
1:
sub rsp, 16 # reserve space for fs_base
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbx
push rdx
push rcx
push rax
mov rdi, rsp # arg0: pointer to the TrapFrame
call x86_trap_handler
pop rax
pop rcx
pop rdx
pop rbx
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
add rsp, 16 # pop fs_base
test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space
jz 2f
swapgs
2:
add rsp, 16 # pop vector, error_code
iretq
.section .rodata
.global trap_handler_table
# 256-entry table of stub addresses, consumed when building the IDT.
trap_handler_table:
.set i, 0
.rept NUM_INT
DEF_TABLE_ENTRY %i
.set i, i + 1
.endr
|
chyyuu/starry-next-all | 1,791 | arceos/modules/axhal/src/arch/loongarch64/trap.S | .macro SAVE_REGS, from_user
// SAVE_REGS: build a TrapFrame. For a U-mode trap the kernel sp/tp/r21 are
// recovered from the KSAVE_* scratch CSRs; $t0 was stashed in KSAVE_TEMP by
// the entry stub below before this macro runs.
move $t0, $sp
.if \from_user == 1
csrrd $sp, KSAVE_KSP // restore kernel sp
addi.d $sp, $sp, -{trapframe_size}
STD $tp, $sp, 2
STD $r21, $sp, 21
csrrd $tp, KSAVE_TP
csrrd $r21, KSAVE_R21
.else
addi.d $sp, $sp, -{trapframe_size}
.endif
STD $t0, $sp, 3 // tf.regs.sp (pre-trap sp)
csrrd $t0, KSAVE_TEMP // recover the original $t0
PUSH_GENERAL_REGS
csrrd $t1, LA_CSR_PRMD
csrrd $t2, LA_CSR_ERA
STD $t1, $sp, 32 // prmd
STD $t2, $sp, 33 // era
.endm
// RESTORE_REGS: mirror of SAVE_REGS. When returning to U-mode, KSAVE_KSP is
// re-armed with the kernel sp (frame base) for the next trap entry.
.macro RESTORE_REGS, from_user
.if \from_user == 1
csrwr $tp, KSAVE_TP
csrwr $r21, KSAVE_R21
LDD $tp, $sp, 2
LDD $r21, $sp, 21
addi.d $t1, $sp, {trapframe_size}
csrwr $t1, KSAVE_KSP // save kernel sp
.endif
LDD $t1, $sp, 33 // era
LDD $t2, $sp, 32 // prmd
csrwr $t1, LA_CSR_ERA
csrwr $t2, LA_CSR_PRMD
POP_GENERAL_REGS
LDD $sp, $sp, 3
.endm
.section .text
.balign 4096
.global exception_entry_base
// Exception entry (EENTRY). PRMD.PPLV (low 2 bits) != 0 means the trap came
// from user mode; dispatch accordingly to loongarch64_trap_handler(tf, from_user).
exception_entry_base:
csrwr $t0, KSAVE_TEMP
csrrd $t0, LA_CSR_PRMD
andi $t0, $t0, 0x3 // extract PPLV (previous privilege level)
bnez $t0, .Lfrom_userspace
.Lfrom_kernel:
SAVE_REGS 0
move $a0, $sp
addi.d $a1, $zero, 0
bl loongarch64_trap_handler
RESTORE_REGS 0
ertn
.Lfrom_userspace:
SAVE_REGS 1
move $a0, $sp
addi.d $a1, $zero, 1
bl loongarch64_trap_handler
RESTORE_REGS 1
ertn
.section .text
.balign 4096
.global handle_tlb_refill
// TLB refill handler: hardware page-table walk via lddir/ldpte (3-level),
// fills both even/odd PTEs, then returns. $t0 is preserved via TLBRSAVE.
handle_tlb_refill:
csrwr $t0, LA_CSR_TLBRSAVE
csrrd $t0, LA_CSR_PGD
lddir $t0, $t0, 3
lddir $t0, $t0, 2
lddir $t0, $t0, 1
ldpte $t0, 0
ldpte $t0, 1
tlbfill
csrrd $t0, LA_CSR_TLBRSAVE
ertn
|
chyyuu/starry-next-all | 1,965 | arceos/modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
#
# pa_* symbols translate link-time offsets into physical addresses at the
# {start_page_paddr} page where the boot CPU copies this code.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
# The boot CPU writes the AP's stack pointer and entry address at fixed
# offsets near the end of the start page before sending the STARTUP IPI.
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0) # CR0.PE
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr] # per-AP stack prepared by the BSP
mov eax, [entry_ptr] # 32-bit entry (ap_entry32) prepared by the BSP
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
chyyuu/starry-next-all | 4,325 | arceos/modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
#
# {offset} is the kernel's virtual-to-physical offset; subtracting it turns a
# link-time (virtual) address into the physical address the bootloader uses.
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
# Multiboot v1 header; checksum makes magic+flags+checksum sum to zero.
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
# Entry functions never return; park the CPU.
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
# Temporary identity + high-half mapping using 1 GiB pages (4 GiB each half).
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 255
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
chyyuu/starry-next-all | 2,544 | arceos/tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol // page (4 KiB) base of \symbol, PC-relative
add \register, \register, #:lo12:\symbol // plus the low 12 bits of the symbol address
.endm
// Load the address of a symbol into a register, absolute.
//
// Materializes the link-time address with movz/movk, independent of where
// the code is currently executing (usable both before and after relocation).
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol // bits [47:32]
movk \register, #:abs_g1_nc:\symbol // bits [31:16], no overflow check
movk \register, #:abs_g0_nc:\symbol // bits [15:0], no overflow check
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK} // keep only the core-id bits
ldr x1, BOOT_CORE_ID // literal load; provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM: zero .bss at its *linked* (absolute) address, 16 bytes
// per iteration. Assumes the region size is a multiple of 16 -- TODO
// confirm against the linker script.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary from its load address to its link address.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
// Do-while copy, 8 bytes per iteration; stops when the destination reaches
// the exclusive end (b.lo = unsigned compare). Assumes the region size is
// a multiple of 8 -- TODO confirm against the linker script.
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer (absolute address, valid after relocation).
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the *relocated* Rust code, hence the absolute address.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
cics-syslab/umass-os-class-materials | 1,412 | assignments-complete/02/src/entry.S | /*
qemu -kernel loads the kernel at 0x80000000
and causes each hart (i.e. CPU) to jump there.
kernel.ld causes the following code to
be placed at 0x80000000.
*/
.section .text
/*
Make the entry label (memory location) visible to ld.
This is necessary so it can put entry at 0x80000000.
*/
.global entry
/*
stack0 names a 4096-byte kernel stack reserved elsewhere (it is not
defined in this file; see start.c / the linker script).
*/
entry:
/*
Point sp at stack0 so C code has a working stack.
NOTE(review): stacks grow down on RISC-V and no offset is added here,
so this assumes stack0 labels the *top* (highest address) of the
region -- confirm against stack0's definition; if it marks the bottom,
sp would need to be stack0 + 4096.
*/
la sp, stack0
/*
TODO:
Call `start` from `start.c`.
Once the C stack is set up, we can jump to a C routine to handle the
rest of the OS operation. You can still call the assembly functions
you already wrote, but for the rest of this course you will mostly
be writing in C.
*/
/* BEGIN DELETE BLOCK */
call start
/* END DELETE BLOCK */
/*
This is an endless spin 🤔? What's the point of that. Remember
that a computer has no idea what it's executing, even though
our program ends here, the CPU will happily keep executing
whatever random instructions it finds after this, possibly
damaging itself in the process. If something goes wrong and
the code leaves the input loop above, it will hit this and
hang here, preventing the CPU from doing anything dangerous.
*/
spin:
j spin
|
cics-syslab/umass-os-class-materials | 1,321 | assignments-complete/03/src/entry.S | /*
qemu -kernel loads the kernel at 0x80000000
and causes each hart (i.e. CPU) to jump there.
kernel.ld causes the following code to
be placed at 0x80000000.
*/
.section .text
/*
Make the entry label (memory location) visible to ld.
This is necessary so it can put entry at 0x80000000.
*/
.global entry
/*
stack0 names a 4096-byte kernel stack reserved elsewhere (it is not
defined in this file; see start.c / the linker script).
*/
entry:
/*
Point sp at stack0 so C code has a working stack.
NOTE(review): stacks grow down on RISC-V and no offset is added here,
so this assumes stack0 labels the *top* (highest address) of the
region -- confirm against stack0's definition; if it marks the bottom,
sp would need to be stack0 + 4096.
*/
la sp, stack0
/*
Once the C stack is set up, we jump to a C routine to handle the
rest of the OS operation. You can still call the assembly functions
you already wrote, but for the rest of this course you will mostly
be writing in C.
*/
call start
/*
This is an endless spin 🤔? What's the point of that. Remember
that a computer has no idea what it's executing, even though
our program ends here, the CPU will happily keep executing
whatever random instructions it finds after this, possibly
damaging itself in the process. If something goes wrong and
the code leaves the input loop above, it will hit this and
hang here, preventing the CPU from doing anything dangerous.
*/
spin:
j spin
|
cics-syslab/umass-os-class-materials | 1,788 | assignments-complete/03/src/machinevec.S | #
# interrupts and exceptions while in machine
# mode come here.
#
.global machinevec
# mtvec requires the handler address to be aligned (low bits of mtvec
# select the vectoring mode).
.align 4
machinevec:
# make room to save registers: 32 slots x 8 bytes = 256, which also keeps
# sp 16-byte aligned as the ABI requires.
addi sp, sp, -256
# save the registers (all caller- and callee-saved integer registers,
# since we interrupted arbitrary code).
sd ra, 0(sp)
# NOTE(review): sp is stored *after* the addi above, so the matching
# "ld sp, 8(sp)" in the restore path reloads the value sp already
# holds -- effectively a no-op kept for slot symmetry.
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
# call the C trap handler in trap.c
call trap_devintr
# restore registers.
ld ra, 0(sp)
ld sp, 8(sp) # same value sp already has (see note above)
ld gp, 16(sp)
ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
# return to whatever we were doing (mret resumes at mepc)
mret
|
cics-syslab/umass-os-class-materials | 1,321 | assignments-complete/05/src/entry.S | /*
qemu -kernel loads the kernel at 0x80000000
and causes each hart (i.e. CPU) to jump there.
kernel.ld causes the following code to
be placed at 0x80000000.
*/
.section .text
/*
Make the entry label (memory location) visible to ld.
This is necessary so it can put entry at 0x80000000.
*/
.global entry
/*
stack0 names a 4096-byte kernel stack reserved elsewhere (it is not
defined in this file; see start.c / the linker script).
*/
entry:
/*
Point sp at stack0 so C code has a working stack.
NOTE(review): stacks grow down on RISC-V and no offset is added here,
so this assumes stack0 labels the *top* (highest address) of the
region -- confirm against stack0's definition; if it marks the bottom,
sp would need to be stack0 + 4096.
*/
la sp, stack0
/*
Once the C stack is set up, we jump to a C routine to handle the
rest of the OS operation. You can still call the assembly functions
you already wrote, but for the rest of this course you will mostly
be writing in C.
*/
call start
/*
This is an endless spin 🤔? What's the point of that. Remember
that a computer has no idea what it's executing, even though
our program ends here, the CPU will happily keep executing
whatever random instructions it finds after this, possibly
damaging itself in the process. If something goes wrong and
the code leaves the input loop above, it will hit this and
hang here, preventing the CPU from doing anything dangerous.
*/
spin:
j spin
|
cics-syslab/umass-os-class-materials | 3,657 | assignments-complete/05/src/uservec.S | #
# low-level code to handle traps from user space into
# the kernel, and returns from kernel to user.
#
.align 4
.globl uservec
uservec:
#
# trap_usertrap_return sets mtvec to point here, so
# traps from user space start here,
# in machine mode
#
# mscratch holds a pointer to proc.kernel_context
# swap with a0 to get the pointer into a
# general purpose register.
# After this: a0 = context pointer, mscratch = user a0
# (parked there until it is saved below).
csrrw a0, mscratch, a0
# save the user registers in proc's user_context
# (slot layout as used here: 0 = kernel sp, 16..256 = user registers,
# 88 = user a0 -- NOTE(review): the comments name the struct both
# kernel_context and user_context; confirm offsets against the C struct)
sd ra, 16(a0)
sd sp, 24(a0)
sd gp, 32(a0)
sd tp, 40(a0)
sd t0, 48(a0)
sd t1, 56(a0)
sd t2, 64(a0)
sd s0, 72(a0)
sd s1, 80(a0)
# leave blank for a0
sd a1, 96(a0)
sd a2, 104(a0)
sd a3, 112(a0)
sd a4, 120(a0)
sd a5, 128(a0)
sd a6, 136(a0)
sd a7, 144(a0)
sd s2, 152(a0)
sd s3, 160(a0)
sd s4, 168(a0)
sd s5, 176(a0)
sd s6, 184(a0)
sd s7, 192(a0)
sd s8, 200(a0)
sd s9, 208(a0)
sd s10, 216(a0)
sd s11, 224(a0)
sd t3, 232(a0)
sd t4, 240(a0)
sd t5, 248(a0)
sd t6, 256(a0)
# save the user a0 in kernel_context->a0
# (mscratch still holds the user a0 from the csrrw swap above)
csrr t0, mscratch
sd t0, 88(a0)
# restore mscratch to the context pointer, ready for the next trap
csrw mscratch, a0
# initialize kernel stack pointer, from user_context->kernel_sp
ld sp, 0(a0)
# jump to usertrap(), which does not return
j trap_usertrap
.globl uservec_ret
uservec_ret:
# mscratch holds a pointer to proc.kernel_context
# We don't need to swap with mscratch as above
# because we are exiting kernel mode and all current
# registers can be junked. The only important part
# is that the user state is restored properly.
# Kernel context is not preserved across.
# Note that this preserves mscratch, so we don't
# have to restore it after loading the registers.
csrr a0, mscratch
# Note that we don't save the kernel sp at this point.
# This may seem a little counterintuitive because the
# switching code relies on having certain values on the
# stack, right? That is only true while it is in kernel
# mode. Recall that switching always happens in kernel
# mode, so a process that has been saved in the kernel
# and is waiting to be run is always in kernel mode.
# The kernel context will save the kernel sp while in
# kernel mode so that when it is switched back to, it
# can access the stack values it relies on. If we are
# reaching this point, then the process must be about
# to run (in user mode), therefore we don't need to
# preserve the kernel stack position and can safely
# junk the kernel context. The next time we enter kernel
# mode we will start at the first address on the kernel
# stack (highest address) and the switching code will
# save our kernel stack pointer when we call
# switch_to_process.
# restore all but a0 from TRAPFRAME
ld ra, 16(a0)
ld sp, 24(a0)
ld gp, 32(a0)
ld tp, 40(a0)
ld t0, 48(a0)
ld t1, 56(a0)
ld t2, 64(a0)
ld s0, 72(a0)
ld s1, 80(a0)
# leave blank for a0
ld a1, 96(a0)
ld a2, 104(a0)
ld a3, 112(a0)
ld a4, 120(a0)
ld a5, 128(a0)
ld a6, 136(a0)
ld a7, 144(a0)
ld s2, 152(a0)
ld s3, 160(a0)
ld s4, 168(a0)
ld s5, 176(a0)
ld s6, 184(a0)
ld s7, 192(a0)
ld s8, 200(a0)
ld s9, 208(a0)
ld s10, 216(a0)
ld s11, 224(a0)
ld t3, 232(a0)
ld t4, 240(a0)
ld t5, 248(a0)
ld t6, 256(a0)
# restore user a0 last (it was our context pointer), mscratch is untouched
ld a0, 88(a0)
# return to user mode and user pc.
# trap_usertrap_return() set up mstatus and mepc.
mret
|
cics-syslab/umass-os-class-materials | 726 | assignments-complete/05/src/switch.S | .global switch_to_process
# Context switch
#
# void switch_to_process(struct proc_register_set *old, struct proc_register_set *new);
#
# Save current registers in old. Load from new.
# Only ra, sp and the callee-saved s-registers are switched: this routine
# is entered via an ordinary C call, so the compiler has already dealt
# with caller-saved registers. Layout of proc_register_set as used here:
# [0]=ra, [8]=sp, [16..104]=s0..s11.
switch_to_process:
sd ra, 0(a0)
sd sp, 8(a0)
sd s0, 16(a0)
sd s1, 24(a0)
sd s2, 32(a0)
sd s3, 40(a0)
sd s4, 48(a0)
sd s5, 56(a0)
sd s6, 64(a0)
sd s7, 72(a0)
sd s8, 80(a0)
sd s9, 88(a0)
sd s10, 96(a0)
sd s11, 104(a0)
ld ra, 0(a1)
ld sp, 8(a1)
ld s0, 16(a1)
ld s1, 24(a1)
ld s2, 32(a1)
ld s3, 40(a1)
ld s4, 48(a1)
ld s5, 56(a1)
ld s6, 64(a1)
ld s7, 72(a1)
ld s8, 80(a1)
ld s9, 88(a1)
ld s10, 96(a1)
ld s11, 104(a1)
# "returns" through the ra just loaded -- resumes the new context
ret
cics-syslab/umass-os-class-materials | 1,791 | assignments-complete/05/src/machinevec.S | #
# interrupts and exceptions while in machine
# mode come here.
#
.global machinevec
# mtvec requires the handler address to be aligned (low bits of mtvec
# select the vectoring mode).
.align 4
machinevec:
# make room to save registers: 32 slots x 8 bytes = 256, which also keeps
# sp 16-byte aligned as the ABI requires.
addi sp, sp, -256
# save the registers (all caller- and callee-saved integer registers,
# since we interrupted arbitrary code).
sd ra, 0(sp)
# NOTE(review): sp is stored *after* the addi above, so the matching
# "ld sp, 8(sp)" in the restore path reloads the value sp already
# holds -- effectively a no-op kept for slot symmetry.
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
# call the C trap handler in trap.c
call trap_kerneltrap
# restore registers.
ld ra, 0(sp)
ld sp, 8(sp) # same value sp already has (see note above)
ld gp, 16(sp)
ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
# return to whatever we were doing (mret resumes at mepc)
mret
|
cics-syslab/umass-os-class-materials | 3,016 | assignments-complete/01/src/entry.S | /*
qemu -kernel loads the kernel at 0x80000000
and causes each hart (i.e. CPU) to jump there.
kernel.ld causes the following code to
be placed at 0x80000000.
*/
.section .text
/*
Make the entry label (memory location) visible to ld.
This is necessary so it can put entry at 0x80000000.
*/
.global entry
entry:
/*
Load the stack pointer into $sp
*/
la sp, stack0
/*
Reserve space on the stack for the most recent (printable)
character read in. Riscv calling convention says the stack
must be 16 byte aligned. stack0 is aligned by the linker,
we are responsible for aligning it during runtime.
Remember the stack grows downward!
*/
addi sp, sp, -16
/*
Always initialize variables!
*/
sb zero, 0(sp)
/*
Jump to subroutine to initialize UART.
Pay attention to the function prologue and epilogue!
*/
call uart_init
/*
This is the top of the input loop.
*/
_prompt:
call uart_print_prompt
/*
Here we process input until we get a newline or carriage return.
Each new non-newline-carriage-return character goes into
the variable we made on the stack above, overwriting each other.
It also gets printed back to the screen so the user can see
what they're typing. Who knew such a basic part of the computer
was so complex! Nothing to do next, so back to reading input. Printing
the null byte does nothing so this is okay, but think about how you could
add more error handling to correct this?
Once we get a new line we jump to _got_new_line to handle
printing the most recently typed character. After that print
the prompt and wait for the next character.
*/
_wait_for_input:
/*
Returns the character in $a0
*/
call uart_read
/*
These next four lines compare the byte to see if it is a
line end character, if so jump to _got_new_line to
process it.
*/
li t0, '\r'
beq a0, t0, _got_new_line
li t0, '\n'
beq a0, t0, _got_new_line
/*
Implicitly takes the output of uart_read in as input
because we never changed the value of $a0.
Note: $a0 is caller saved, so this only works because
we haven't called any functions.
This is where the character is echoed back to the user.
*/
call uart_write
/*
Store it for use later.
*/
sb a0, 0(sp)
/*
Jump back to waiting for input.
*/
j _wait_for_input
_got_new_line:
/*
Print a new line to display the output on.
*/
li a0, '\n'
call uart_write
/*
Fetch the most recently typed character.
*/
lb a0, 0(sp)
/*
If there is no most recently typed character then reprint the
prompt. Otherwise print the character.
*/
beqz a0, _prompt
call uart_write
/*
Clear the character we just printed.
*/
sb zero, 0(sp)
/*
Write a new line for the prompt and then reprint
the prompt.
*/
li a0, '\n'
call uart_write
j _prompt
/*
This is an endless spin 🤔? What's the point of that.
Well if something goes wrong and the code leaves the
input loop above, it will hit this and hang here, letting
the programmer see there is an error without writing
all over any variables that might help you debug what happened.
*/
spin:
j spin
|
cics-syslab/umass-os-class-materials | 6,155 | assignments-complete/01/src/uart.S | .section .text
/* TODO: Expose uart_init, uart_read, uart_write, and uart_print_prompt */
/* BEGIN DELETE BLOCK */
.global uart_init, uart_read, uart_write, uart_print_prompt
/* END DELETE BLOCK */
/*
Always add documentation (especially in assembly).
Initialize the UART, which is placed at 0x10000000 by qemu.
parameters:
None
return:
None
*/
uart_init:
/*
This is VERY important, you are an assembly programmer
now, you have to manage your own stack. The following
prologue is the calling convention. You can see examples
by using objdump on riscv executables like the xv6 kernel.
Note that the epilogue undoes the exact same operations.
*/
addi sp, sp, -16 # Prologue: make room on the stack for local vars
sd ra, 8(sp) # Prologue: store the return address (necessary if you call subroutines)
sd fp, 0(sp) # Prologue: store the previous frame pointer
add fp, sp, 16 # Prologue: move the frame pointer to the bottom of the new stack
/*
Location of uart memory mapped registers.
*/
li t0, 0x10000000
/*
These comments are taken from the xv6 documentation
but you can confirm them and get more information at
http://byterunner.com/16550.html
This page has a longer but more thorough explanation
https://www.lammertbies.nl/comm/info/serial-uart
*/
sb zero, 1(t0) # disable interrupts (IER = 0)
li t1, 1<<7
sb t1, 3(t0) # special mode to set baud rate (set DLAB in the LCR)
li t1, 0x3
sb t1, 0(t0) # LSB for baud rate of 38.4K
sb zero, 1(t0) # MSB for baud rate of 38.4K
li t1, 0x3
sb t1, 3(t0) # leave set-baud mode, and set word length to 8 bits, no parity
li t1, 0x7
sb t1, 2(t0) # reset and enable FIFOs (FCR)
li t1, 0x3
sb t1, 1(t0) # enable transmit and receive interrupts (IER)
ld ra, 8(sp) # Epilogue: restore the return address
ld s0, 0(sp) # Epilogue: restore the previous frame pointer (s0 == fp)
addi sp, sp, 16 # Epilogue: pop the stack frame
ret # Epilogue: return to caller
/*
Loop until the uart is ready to send a byte.
Once it is, return. It should NOT consume
the byte in the receive holding register.
What register holds the status of the UART
line?
It should follow all RISC-V calling conventions.
parameters:
None
return:
None
*/
uart_wait_for_write:
/* BEGIN DELETE BLOCK */
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
li t0, 0x10000000 # Address of UART base
1:
lb t1, 5(t0) # Read the LSR (Line Status Register)
andi t1, t1, 0x20 # Isolate the THR-empty (ready to transmit) bit
beqz t1, 1b # Not ready yet: poll the LSR again
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/* END DELETE BLOCK */
/*
Wait until the uart has a byte to read.
Once there is, return.
parameters:
None
return:
None
*/
uart_wait_for_read:
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
li t0, 0x10000000 # Address of UART base
1:
lb t1, 5(t0) # Read the LSR (Line Status Register)
andi t1, t1, 0x01 # Isolate the receive-data-ready bit
beqz t1, 1b # Nothing received yet: poll the LSR again
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/*
TODO:
Wait until the uart has room for a byte, using
uart_wait_for_write. Once there is room, write
the first argument to the uart using uart_put_c.
It should follow all RISC-V calling conventions.
parameters:
a0: the character to write
return:
None
*/
uart_write:
/* BEGIN DELETE BLOCK */
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
call uart_wait_for_write # Wait until the THR has room for a byte
call uart_put_c # Write the parameter to the uart, it is already stored in a0
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/* END DELETE BLOCK */
/*
Wait until the uart has a byte to read.
Once there is, read it and return.
parameters:
None
return:
a0: the character read
*/
uart_read:
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
call uart_wait_for_read # Wait for data to be available
call uart_get_c # Read the data, implicitly stored in a0
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/*
TODO:
Write the character in the first parameter to the uart.
What register can you use to transmit the byte and where
is it located (what memory address)? Remember the UART
register layout and the memory layout of the virt board
we're running this on with QEMU.
It should follow all RISC-V calling conventions.
parameters:
a0: the character to write
return:
None
*/
uart_put_c:
/* BEGIN DELETE BLOCK */
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
li t0, 0x10000000 # Address of the THR (Transmit Holding Register)
sb a0, 0(t0) # Write the byte into the THR
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/* END DELETE BLOCK */
/*
Read a character from the uart.
It should follow all RISC-V calling conventions.
parameters:
None
return:
a0: the character read
*/
uart_get_c:
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
li t0, 0x10000000 # Address of the RHR (Receive Holding Register)
lb a0, 0(t0) # Read (and consume) the byte from the RHR
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/*
TODO:
Use uart_write to print your prompt. Your prompt
can be anything reasonable. Remember that each
call to uart_write only writes one byte.
It should follow all RISC-V calling conventions.
parameters:
None
return:
None
*/
uart_print_prompt:
/* BEGIN DELETE BLOCK */
addi sp, sp, -16 # Begin prologue
sd ra, 8(sp)
sd fp, 0(sp)
add fp, sp, 16 # End prologue
li a0, '>' # Put '>' into the first parameter
call uart_write # Print it
li a0, ' ' # Put ' ' into the first parameter
call uart_write # Print it
ld ra, 8(sp) # Begin epilogue
ld s0, 0(sp)
addi sp, sp, 16
ret # End epilogue
/* END DELETE BLOCK */
|
cics-syslab/umass-os-class-materials | 1,321 | assignments-complete/04/src/entry.S | /*
qemu -kernel loads the kernel at 0x80000000
and causes each hart (i.e. CPU) to jump there.
kernel.ld causes the following code to
be placed at 0x80000000.
*/
.section .text
/*
Make the entry label (memory location) visible to ld.
This is necessary so it can put entry at 0x80000000.
*/
.global entry
/*
stack0 names a 4096-byte kernel stack reserved elsewhere (it is not
defined in this file; see start.c / the linker script).
*/
entry:
/*
Point sp at stack0 so C code has a working stack.
NOTE(review): stacks grow down on RISC-V and no offset is added here,
so this assumes stack0 labels the *top* (highest address) of the
region -- confirm against stack0's definition; if it marks the bottom,
sp would need to be stack0 + 4096.
*/
la sp, stack0
/*
Once the C stack is set up, we jump to a C routine to handle the
rest of the OS operation. You can still call the assembly functions
you already wrote, but for the rest of this course you will mostly
be writing in C.
*/
call start
/*
This is an endless spin 🤔? What's the point of that. Remember
that a computer has no idea what it's executing, even though
our program ends here, the CPU will happily keep executing
whatever random instructions it finds after this, possibly
damaging itself in the process. If something goes wrong and
the code leaves the input loop above, it will hit this and
hang here, preventing the CPU from doing anything dangerous.
*/
spin:
j spin
|
cics-syslab/umass-os-class-materials | 768 | assignments-complete/04/src/switch.S | .global switch_to_process
# Context switch
#
# void switch_to_process(struct proc_register_set *old, struct proc_register_set *new);
#
# Save current registers in old. Load from new.
# Only ra, sp and the callee-saved s-registers are switched: this routine
# is entered via an ordinary C call, so the compiler has already dealt
# with caller-saved registers. Layout of proc_register_set as used here:
# [0]=ra, [8]=sp, [16..104]=s0..s11.
switch_to_process:
sd ra, 0(a0)
sd sp, 8(a0)
sd s0, 16(a0)
sd s1, 24(a0)
sd s2, 32(a0)
sd s3, 40(a0)
sd s4, 48(a0)
sd s5, 56(a0)
sd s6, 64(a0)
sd s7, 72(a0)
sd s8, 80(a0)
sd s9, 88(a0)
sd s10, 96(a0)
sd s11, 104(a0)
/* BEGIN DELETE BLOCK */
ld ra, 0(a1)
ld sp, 8(a1)
ld s0, 16(a1)
ld s1, 24(a1)
ld s2, 32(a1)
ld s3, 40(a1)
ld s4, 48(a1)
ld s5, 56(a1)
ld s6, 64(a1)
ld s7, 72(a1)
ld s8, 80(a1)
ld s9, 88(a1)
ld s10, 96(a1)
ld s11, 104(a1)
/* END DELETE BLOCK */
ret
cics-syslab/umass-os-class-materials | 1,788 | assignments-complete/04/src/machinevec.S | #
# interrupts and exceptions while in machine
# mode come here.
#
.global machinevec
# mtvec requires the handler address to be aligned (low bits of mtvec
# select the vectoring mode).
.align 4
machinevec:
# make room to save registers: 32 slots x 8 bytes = 256, which also keeps
# sp 16-byte aligned as the ABI requires.
addi sp, sp, -256
# save the registers (all caller- and callee-saved integer registers,
# since we interrupted arbitrary code).
sd ra, 0(sp)
# NOTE(review): sp is stored *after* the addi above, so the matching
# "ld sp, 8(sp)" in the restore path reloads the value sp already
# holds -- effectively a no-op kept for slot symmetry.
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
# call the C trap handler in trap.c
call trap_devintr
# restore registers.
ld ra, 0(sp)
ld sp, 8(sp) # same value sp already has (see note above)
ld gp, 16(sp)
ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
# return to whatever we were doing (mret resumes at mepc)
mret
|
civil-king/rCore-2024s | 676 | os/src/task/switch.S | .altmacro
# Store callee-saved register s\n into TaskContext slot \n+2 (requires
# .altmacro, enabled above, for the %n numeric expansion used below).
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
# Load callee-saved register s\n from TaskContext slot \n+2.
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# TaskContext layout: [0]=ra, [8]=sp, [16..104]=s0..s11. Only ra/sp and
# the callee-saved registers need switching: __switch is entered by an
# ordinary call, so caller-saved registers are handled by the compiler.
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
# "return" into the next task's saved ra
ret
|
civil-king/rCore-2024s | 1,488 | os/src/trap/trap.S | .altmacro
# Save/load general-purpose register x\n to/from TrapContext slot \n
# (requires .altmacro, enabled above, for the %n numeric expansion).
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
# stvec requires a 4-byte-aligned handler address.
.align 2
__alltraps:
# swap sp <-> sscratch: sscratch held the kernel stack top while running
# user code, and now holds the user stack pointer.
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
# (layout: x0..x31 in slots 0..31, sstatus in 32, sepc in 33)
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
# falls through into __restore after trap_handler returns
__restore:
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
# sret: pc <- sepc, privilege per sstatus.SPP
sret
|
CJacob314/rust-hypervisor | 324 | guest.S | # Assembly for the guest to run
.code16                     # the guest starts in 16-bit real mode
.text
.globl _start
# Tiny guest workload: count %ax from 0 up to 10, then halt the VM.
_start:
xor %ax, %ax                # ax = 0 (accumulator, also clears via idiom)
mov $10, %cx                # cx = iterations remaining
count_up:
inc %ax                     # ax += 1
dec %cx                     # one fewer iteration to go
jnz count_up                # keep counting while cx != 0
nop
hlt                         # stop the VM; ax == 10, cx == 0 here
|
cjsjz/final-rcore-os | 676 | os/src/task/switch.S | .altmacro
# Store callee-saved register s\n into TaskContext slot \n+2 (requires
# .altmacro, enabled above, for the %n numeric expansion used below).
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
# Load callee-saved register s\n from TaskContext slot \n+2.
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# TaskContext layout: [0]=ra, [8]=sp, [16..104]=s0..s11. Only ra/sp and
# the callee-saved registers need switching: __switch is entered by an
# ordinary call, so caller-saved registers are handled by the compiler.
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
# "return" into the next task's saved ra
ret
|
cjsjz/final-rcore-os | 1,640 | os/src/trap/trap.S | .altmacro
# Save/load general-purpose register x\n to/from TrapContext slot \n
# (requires .altmacro, enabled above, for the %n numeric expansion).
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
# The trampoline is mapped at the same virtual address in every address
# space, so execution survives the satp switches below.
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
# swap: sp <- &TrapContext (parked in sscratch), sscratch <- user sp
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# TrapContext extra slots: 34 = kernel_satp, 35 = kernel_sp, 36 = trap_handler
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
# flush stale user-space translations from the TLB
sfence.vma
# jump to trap_handler (jr, not call: the handler address was stored in
# the context because this code runs at the trampoline mapping)
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
# park the TrapContext pointer back in sscratch for the next trap
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
cjsjz/rrrcore | 676 | os/src/task/switch.S | .altmacro
# Store callee-saved register s\n into TaskContext slot \n+2 (requires
# .altmacro, enabled above, for the %n numeric expansion used below).
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
# Load callee-saved register s\n from TaskContext slot \n+2.
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# TaskContext layout: [0]=ra, [8]=sp, [16..104]=s0..s11. Only ra/sp and
# the callee-saved registers need switching: __switch is entered by an
# ordinary call, so caller-saved registers are handled by the compiler.
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
# "return" into the next task's saved ra
ret
|
cjsjz/rrrcore | 2,218 | os/src/trap/trap.S | .altmacro
# Save/load general-purpose register x\n to/from TrapContext slot \n
# (requires .altmacro, enabled above, for the %n numeric expansion).
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
# The trampoline is mapped at the same virtual address in every address
# space, so execution survives the satp switches below.
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
__alltraps:
# swap: sp <- &TrapContext (parked in sscratch), sscratch <- user sp
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# TrapContext extra slots: 34 = kernel_satp, 35 = kernel_sp, 36 = trap_handler
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler (jr, not call: the handler address was stored in
# the context because this code runs at the trampoline mapping)
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
# park the TrapContext pointer back in sscratch for the next trap
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
# Kernel-mode trap entry: the context (34 slots) is built on the current
# kernel stack; no address-space switch is needed.
.align 2
__alltraps_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# a0 = &context; while in kernel mode, sscratch holds the address of the
# kernel trap handler, which is called indirectly here.
mv a0, sp
csrr t2, sscratch
jalr t2
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret
|
cjsjz/hhh | 676 | os/src/task/switch.S | .altmacro
# __switch(current_task_cx_ptr: *mut TaskContext /* a0 */,
#          next_task_cx_ptr: *const TaskContext /* a1 */)
# TaskContext layout (8-byte slots): [0]=ra, [1]=sp, [2..13]=s0..s11.
# Only callee-saved state is exchanged; the compiler preserves the rest
# around the call.
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm
    .section .text
    .globl __switch
__switch:
    # __switch(
    #     current_task_cx_ptr: *mut TaskContext,
    #     next_task_cx_ptr: *const TaskContext
    # )
    # save kernel stack of current task
    sd sp, 8(a0)
    # save ra & s0~s11 of current execution
    sd ra, 0(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n + 1
    .endr
    # restore ra & s0~s11 of next execution
    ld ra, 0(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n + 1
    .endr
    # restore kernel stack of next task
    ld sp, 8(a1)
    # ret through the restored ra resumes the next task where it last
    # called __switch
    ret
|
cjsjz/hhh | 2,218 | os/src/trap/trap.S | .altmacro
# rCore trampoline (mapped at the same virtual address in every address
# space): user-mode trap entry/exit plus kernel-mode trap entry/exit.
# SAVE_GP/LOAD_GP access 8-byte slot n of the TrapContext at sp.
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .globl __alltraps_k
    .globl __restore_k
    .align 2
# Trap entry for traps taken in U mode.
# Precondition: sscratch holds the user-space address of the current
# task's TrapContext; slots 34/35/36 of that context hold kernel satp,
# kernel sp and the trap-handler address, filled in by the kernel.
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    jr t1
# Return path to U mode: reinstalls the user address space, then
# restores every register from the TrapContext and sret's.
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret
    .align 2
# Trap entry for traps taken while already in S mode: save a 34-slot
# frame on the current kernel stack instead of switching stacks.
# Precondition: sscratch holds the address of the kernel trap handler.
__alltraps_k:
    addi sp, sp, -34*8
    sd x1, 1*8(sp)
    sd x3, 3*8(sp)
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # call the kernel trap handler with a0 = &frame, then fall through
    mv a0, sp
    csrr t2, sscratch
    jalr t2
# Restore the kernel-mode trap frame pushed by __alltraps_k.
__restore_k:
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    addi sp, sp, 34*8
    sret
|
cjsjz/rcore-os-new | 676 | os/src/task/switch.S | .altmacro
# __switch(current_task_cx_ptr: *mut TaskContext /* a0 */,
#          next_task_cx_ptr: *const TaskContext /* a1 */)
# TaskContext layout (8-byte slots): [0]=ra, [1]=sp, [2..13]=s0..s11.
# Only callee-saved state is exchanged; the compiler preserves the rest
# around the call.
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm
    .section .text
    .globl __switch
__switch:
    # __switch(
    #     current_task_cx_ptr: *mut TaskContext,
    #     next_task_cx_ptr: *const TaskContext
    # )
    # save kernel stack of current task
    sd sp, 8(a0)
    # save ra & s0~s11 of current execution
    sd ra, 0(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n + 1
    .endr
    # restore ra & s0~s11 of next execution
    ld ra, 0(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n + 1
    .endr
    # restore kernel stack of next task
    ld sp, 8(a1)
    # ret through the restored ra resumes the next task where it last
    # called __switch
    ret
|
cjsjz/rcore-os-new | 2,218 | os/src/trap/trap.S | .altmacro
# rCore trampoline (mapped at the same virtual address in every address
# space): user-mode trap entry/exit plus kernel-mode trap entry/exit.
# SAVE_GP/LOAD_GP access 8-byte slot n of the TrapContext at sp.
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .globl __alltraps_k
    .globl __restore_k
    .align 2
# Trap entry for traps taken in U mode.
# Precondition: sscratch holds the user-space address of the current
# task's TrapContext; slots 34/35/36 of that context hold kernel satp,
# kernel sp and the trap-handler address, filled in by the kernel.
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    jr t1
# Return path to U mode: reinstalls the user address space, then
# restores every register from the TrapContext and sret's.
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret
    .align 2
# Trap entry for traps taken while already in S mode: save a 34-slot
# frame on the current kernel stack instead of switching stacks.
# Precondition: sscratch holds the address of the kernel trap handler.
__alltraps_k:
    addi sp, sp, -34*8
    sd x1, 1*8(sp)
    sd x3, 3*8(sp)
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # call the kernel trap handler with a0 = &frame, then fall through
    mv a0, sp
    csrr t2, sscratch
    jalr t2
# Restore the kernel-mode trap frame pushed by __alltraps_k.
__restore_k:
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    addi sp, sp, 34*8
    sret
|
clairechingching/sbpf-asm-debugger | 148 | test_file/hello.s | .globl e
# Entry point "e": log a constant string through the sol_log_ syscall.
# Calling convention: r1 = pointer to message, r2 = length in bytes
# (14 = strlen("Hello, Solana!")).
e:
    lddw r1, message
    lddw r2, 14
    call sol_log_
    exit
.extern sol_log_
.section .rodata
message:
    .ascii "Hello, Solana!"
|
clairechingching/sbpf-asm-debugger | 30 | test_file/noop.s | .globl entry
# Minimal no-op program: return immediately without touching anything.
entry:
    exit
|
clairechingching/sbpf-asm-debugger | 1,027 | test_file/fib.s | .globl e
# Entry point "e": iteratively compute Fibonacci F(n), where n is a
# single byte read out of the serialized instruction data, then log the
# result via sol_log_64_ (or log an error string for n > 93, the largest
# n whose F(n) fits in a u64).
# Register roles: r8 = n / rounds remaining, r6 = f(k-2), r7 = f(k-1).
e:
    // Grab the Fibonacci sequence target from our instruction data
    ldxb r8, [r1+8+8+80+10240+8+8] // 8 accounts length + 8 flags + 80 account data + 10240 realloc padding + 8 padding + 8 ix length
    mov64 r6, 0
    mov64 r7, 1 // Skip first sequence and return 0 if n<1
    jgt r8, 93, overflow // handle overflow
    jlt r8, 2, exceptions // handle 1 and 0
    ja step
step:
    sub64 r8, 1 // -1 from r8 - subtract first as we already pre-calculated f(1)
    jlt r8, 1, finalize // If there are <1 rounds left, finish it
    mov64 r1, r7 // set to f(n-1)
    add64 r1, r6 // add f(n-2)
    mov64 r6, r7 // set f(n-2) to f(n-1)
    mov64 r7, r1 // set f(n-1) to f(n)
    ja step
exceptions:
    mov64 r1, r8 // 0 = 0 and 1 = 1
    ja finalize
overflow:
    lddw r0, 1 // error
    lddw r1, e1
    lddw r2, 32
    call sol_log_
    exit
finalize:
    call sol_log_64_ // r1 holds the result; sol_log_64_ presumably logs r1..r5
    exit
.extern sol_log_ sol_log_64_
.rodata
e1: .ascii "Sorry, u64 maxes out at F(93) :("
// e1: "Sorry, u64 maxes out at F(93) :("
|
cleve82s/solana_smartcontract | 980 | transfer-lamports/asm/main.s | .globl entrypoint
# Entry point: transfer 5 lamports from the first account to the second.
# r1 points at the serialized program input; all offsets below walk the
# SBF loader's account-serialization layout (dup marker, 32B owner,
# 32B key, 8B lamports, 8B data len, data, 10240B realloc pad, 8B rent
# epoch, then 8-byte alignment padding).
# Errors (wrong account count or duplicate-account markers) return 1.
entrypoint:
    ldxdw r2, [r1 + 0] # get number of accounts
    jne r2, 2, error # error if not 2 accounts
    ldxb r2, [r1 + 8] # get first account
    jne r2, 0xff, error # shouldn't be a duplicate, but check
    ldxdw r2, [r1 + 8 + 8 + 32 + 32] # get source lamports
    ldxdw r3, [r1 + 8 + 8 + 32 + 32 + 8] # get account data size
    mov64 r4, r1
    add64 r4, 8 + 8 + 32 + 32 + 8 + 8 + 10240 + 8 # calculate end of account data
    add64 r4, r3
    mov64 r5, r4 # check how much padding we need to add
    and64 r5, 7 # clear high bits
    jeq r5, 0, 1 # no low bits set, jump ahead
    add64 r4, 8 # add 8 for truncation if needed
    and64 r4, -8 # clear low bits
    ldxb r5, [r4 + 0] # get second account
    jne r5, 0xff, error # we don't allow duplicates
    ldxdw r5, [r4 + 8 + 32 + 32] # get destination lamports
    sub64 r2, 5 # subtract lamports
    add64 r5, 5 # add lamports
    stxdw [r1 + 8 + 8 + 32 + 32], r2 # write the new values back
    stxdw [r4 + 8 + 32 + 32], r5
    exit
error:
    mov64 r0, 1
    exit
|
cleve82s/solana_smartcontract | 125 | helloworld/asm/main.s | .globl entrypoint
# Entry point: log the 12-byte string "Hello world!" via the sol_log_
# syscall (r1 = pointer to message, r2 = byte length).
entrypoint:
    lddw r1, message # fix: the label defined below is `message`, not `.message`
    mov64 r2, 12
    call sol_log_
    exit
.rodata
message: .ascii "Hello world!"
|
cleverfox/ch32v-rt | 3,133 | asm.S | # define STORE sw
# define LOAD lw
# define LOG_REGBYTES 2
#define REGBYTES (1 << LOG_REGBYTES)
/*
Entry point of all programs (_start).
It initializes DWARF call frame information, the stack pointer, the
frame pointer (needed for closures to work in start_rust) and the global
pointer. Then it calls _start_rust.
*/
.section .init, "ax"
.global _start
# Reset entry: jump to the absolute address of _abs_start so execution
# continues at the linker-assigned location regardless of aliasing of
# the boot flash mapping.
_start:
    /* Jump to the absolute address defined by the linker script. */
    // for 32bit
    // make executable from 8000_0004
    nop
    nop
    lui ra, %hi(_abs_start)
    jr %lo(_abs_start)(ra)

# Real startup: mask interrupts, zero the register file, set gp/sp,
# park extra harts, then enter the Rust runtime.
_abs_start:
    .cfi_startproc
    .cfi_undefined ra
    csrw mie, 0 // interrupt disable
    csrw mip, 0 // no pending interrupts
    # Zero the register file (a0..a2 are preserved: boot arguments)
    li x1, 0
    li x2, 0
    li x3, 0
    li x4, 0
    li x5, 0
    li x6, 0
    li x7, 0
    li x8, 0
    li x9, 0
    // a0..a2 (x10..x12) skipped
    li x13,0
    li x14,0
    li x15,0
    li x16,0
    li x17,0
    li x18,0
    li x19,0
    li x20,0
    li x21,0
    li x22,0
    li x23,0
    li x24,0
    li x25,0
    li x26,0
    li x27,0
    li x28,0
    li x29,0
    li x30,0
    li x31,0

    # Load gp without relaxation (gp is not valid yet)
    .option push
    .option norelax
    la gp, __global_pointer$
    .option pop

    # Park any hart with an ID above _max_hart_id
    csrr t2, mhartid
    lui t0, %hi(_max_hart_id)
    add t0, t0, %lo(_max_hart_id)
    bgtu t2, t0, abort

    // Allocate stacks: sp = _stack_start - hartid * _hart_stack_size
    la sp, _stack_start
    lui t0, %hi(_hart_stack_size)
    add t0, t0, %lo(_hart_stack_size)
#ifdef __riscv_mul
    mul t0, t2, t0
#else
    # No M extension: multiply by repeated addition
    beqz t2, 2f  // Jump if single-hart
    mv t1, t2
    mv t3, t0
1:
    add t0, t0, t3
    addi t1, t1, -1
    bnez t1, 1b
2:
#endif
    sub sp, sp, t0

    // Set frame pointer
    add s0, sp, zero

    jal zero, _start_rust

    .cfi_endproc
/*
Trap entry point (_start_trap)
Saves caller saved registers ra, t0..6, a0..7, calls _start_trap_rust,
restores caller saved registers and then returns.
*/
.section .trap, "ax"
.global default_start_trap
# Default trap entry: spill the 16 caller-saved registers (REGBYTES
# each), call _start_trap_rust with a0 = &frame, restore, and mret.
default_start_trap:
    addi sp, sp, -16*REGBYTES

    STORE ra, 0*REGBYTES(sp)
    STORE t0, 1*REGBYTES(sp)
    STORE t1, 2*REGBYTES(sp)
    STORE t2, 3*REGBYTES(sp)
    STORE t3, 4*REGBYTES(sp)
    STORE t4, 5*REGBYTES(sp)
    STORE t5, 6*REGBYTES(sp)
    STORE t6, 7*REGBYTES(sp)
    STORE a0, 8*REGBYTES(sp)
    STORE a1, 9*REGBYTES(sp)
    STORE a2, 10*REGBYTES(sp)
    STORE a3, 11*REGBYTES(sp)
    STORE a4, 12*REGBYTES(sp)
    STORE a5, 13*REGBYTES(sp)
    STORE a6, 14*REGBYTES(sp)
    STORE a7, 15*REGBYTES(sp)

    # a0 = pointer to the saved register frame
    add a0, sp, zero
    jal ra, _start_trap_rust

    LOAD ra, 0*REGBYTES(sp)
    LOAD t0, 1*REGBYTES(sp)
    LOAD t1, 2*REGBYTES(sp)
    LOAD t2, 3*REGBYTES(sp)
    LOAD t3, 4*REGBYTES(sp)
    LOAD t4, 5*REGBYTES(sp)
    LOAD t5, 6*REGBYTES(sp)
    LOAD t6, 7*REGBYTES(sp)
    LOAD a0, 8*REGBYTES(sp)
    LOAD a1, 9*REGBYTES(sp)
    LOAD a2, 10*REGBYTES(sp)
    LOAD a3, 11*REGBYTES(sp)
    LOAD a4, 12*REGBYTES(sp)
    LOAD a5, 13*REGBYTES(sp)
    LOAD a6, 14*REGBYTES(sp)
    LOAD a7, 15*REGBYTES(sp)

    addi sp, sp, 16*REGBYTES
    mret
/* Make sure there is an abort when linking */
.section .text.abort
.globl abort
# Fallback abort symbol so linking always succeeds: spin forever.
abort:
    j abort
|
cloudgamingrage/copysh-v86-precompiled | 1,319 | tests/qemu/test-i386-code16.S | .code16
.globl code16_start
.globl code16_end

# Selector of the 16-bit code segment the 32-bit caller set up for us.
CS_SEG = 0xf

# 16-bit test payload; each code16_funcN constant is the function's
# offset from code16_start, so the caller can far-call into it.
code16_start:

.globl code16_func1
/* basic test */
code16_func1 = . - code16_start
    mov $1, %eax
    data32 lret  # 32-bit operand far return back to the 32-bit caller

/* test push/pop in 16 bit mode: eax = number of bytes a 16-bit push
   moves esp by (expected 2) */
.globl code16_func2
code16_func2 = . - code16_start
    xor %eax, %eax
    mov $0x12345678, %ebx
    movl %esp, %ecx
    push %bx
    subl %esp, %ecx
    pop %ax
    data32 lret

/* test various jmp opcodes: accumulates a distinctive sum in ax as the
   near/far direct/indirect jumps and calls are exercised */
.globl code16_func3
code16_func3 = . - code16_start
    jmp 1f
    nop
1:
    mov $4, %eax
    mov $0x12345678, %ebx
    xor %bx, %bx
    jz 2f
    add $2, %ax
2:

    call myfunc
    lcall $CS_SEG, $(myfunc2 - code16_start)
    ljmp $CS_SEG, $(myjmp1 - code16_start)
myjmp1_next:

    cs lcall *myfunc2_addr - code16_start
    cs ljmp *myjmp2_addr - code16_start
myjmp2_next:

    data32 lret

# far pointers (offset, segment) for the indirect lcall/ljmp above
myfunc2_addr:
    .short myfunc2 - code16_start
    .short CS_SEG

myjmp2_addr:
    .short myjmp2 - code16_start
    .short CS_SEG

myjmp1:
    add $8, %ax
    jmp myjmp1_next

myjmp2:
    add $16, %ax
    jmp myjmp2_next

myfunc:
    add $1, %ax
    ret

myfunc2:
    add $4, %ax
    lret

code16_end:
|
cloudgamingrage/copysh-v86-precompiled | 1,816 | tests/qemu/test-i386-vm86.S | .code16
.globl vm86_code_start
.globl vm86_code_end

# The payload is copied to linear address 0x100, so translate
# link-time offsets into run-time offsets.
#define GET_OFFSET(x) ((x) - vm86_code_start + 0x100)

# VM86 test payload. int $0x21 services are emulated by the test
# harness (DOS-style: AH=0x09 prints a '$'-terminated string at DS:DX;
# AH=0xff appears to dump DX/EDX; AH=0x00 exits — per harness, confirm).
vm86_code_start:
    movw $GET_OFFSET(hello_world), %dx
    movb $0x09, %ah
    int $0x21

    /* prepare int 0x90 vector */
    xorw %ax, %ax
    movw %ax, %es
    es movw $GET_OFFSET(int90_test), 0x90 * 4
    es movw %cs, 0x90 * 4 + 2

    /* launch int 0x90 */
    int $0x90

    /* test IF support: dump FLAGS around cli/sti in 16- and 32-bit form */
    movw $GET_OFFSET(IF_msg), %dx
    movb $0x09, %ah
    int $0x21

    pushf
    popw %dx
    movb $0xff, %ah
    int $0x21

    cli
    pushf
    popw %dx
    movb $0xff, %ah
    int $0x21

    sti
    pushfl
    popl %edx
    movb $0xff, %ah
    int $0x21

#if 0
    movw $GET_OFFSET(IF_msg1), %dx
    movb $0x09, %ah
    int $0x21

    pushf
    movw %sp, %bx
    andw $~0x200, (%bx)
    popf
#else
    cli
#endif

    pushf
    popw %dx
    movb $0xff, %ah
    int $0x21

    # setting IF in the on-stack image must be honored by popfl
    pushfl
    movw %sp, %bx
    orw $0x200, (%bx)
    popfl

    pushfl
    popl %edx
    movb $0xff, %ah
    int $0x21

    movb $0x00, %ah
    int $0x21

# int 0x90 handler: dump FLAGS, the FLAGS image pushed by int, and a
# message, then iret back
int90_test:
    pushf
    pop %dx
    movb $0xff, %ah
    int $0x21

    movw %sp, %bx
    movw 4(%bx), %dx
    movb $0xff, %ah
    int $0x21

    movw $GET_OFFSET(int90_msg), %dx
    movb $0x09, %ah
    int $0x21
    iret

int90_msg:
    .string "INT90 started\n$"

hello_world:
    .string "Hello VM86 world\n$"

IF_msg:
    .string "VM86 IF test\n$"

IF_msg1:
    .string "If you see a diff here, your Linux kernel is buggy, please update to 2.4.20 kernel\n$"

vm86_code_end:
|
cloudgamingrage/copysh-v86-precompiled | 3,647 | tests/kvm-unit-tests/x86/cstart.S |
#include "apic-defs.h"
# 32-bit startup for kvm-unit-tests: per-CPU stacks and TSS, identity
# paging with 4 MB PSE pages, multiboot entry, and SMP bring-up via
# INIT/SIPI broadcast.
.globl boot_idt
boot_idt = 0  # IDT lives at physical address 0

ipi_vector = 0x20

max_cpus = 64

.bss

    # one 4 KB boot stack per CPU, growing down from stacktop
    . = . + 4096 * max_cpus
    .align 16
stacktop:

    # shared ring-0 stack area used by the per-CPU TSSes below
    . = . + 4096
    .align 16
ring0stacktop:

.data

# page directory: 1024 PDEs, each a 4 MB page (PSE), identity-mapped,
# flags 0x1e7 = present/write/user/accessed/dirty/PS/global
.align 4096
pt:
i = 0
.rept 1024
    .long 0x1e7 | (i << 22)
    i = i + 1
.endr

.globl gdt32
gdt32:
    .quad 0
    .quad 0x00cf9b000000ffff // flat 32-bit code segment
    .quad 0x00cf93000000ffff // flat 32-bit data segment
    .quad 0x00cf1b000000ffff // flat 32-bit code segment, not present
    .quad 0 // TSS for task gates
    .quad 0x008f9b000000FFFF // 16-bit code segment
    .quad 0x008f93000000FFFF // 16-bit data segment
    .quad 0x00cffb000000ffff // 32-bit code segment (user)
    .quad 0x00cff3000000ffff // 32-bit data segment (user)
    .quad 0 // unused

    .quad 0 // 6 spare selectors
    .quad 0
    .quad 0
    .quad 0
    .quad 0
    .quad 0

# one TSS descriptor per CPU; the base address is patched in load_tss
tss_descr:
    .rept max_cpus
    .quad 0x000089000000ffff // 32-bit avail tss
    .endr
gdt32_end:

i = 0
.globl tss
# one 32-bit TSS per CPU: esp0 = private slice of ring0stacktop,
# ss0 = 16 (flat data)
tss:
    .rept max_cpus
    .long 0
    .long ring0stacktop - i * 4096
    .long 16
    .quad 0, 0
    .quad 0, 0, 0, 0, 0, 0, 0, 0
    .long 0, 0, 0
    i = i + 1
    .endr
tss_end:

idt_descr:
    .word 16 * 256 - 1
    .long boot_idt

.section .init

.code32

mb_magic = 0x1BADB002
mb_flags = 0x0
# multiboot header
.long mb_magic, mb_flags, 0 - (mb_magic + mb_flags)
mb_cmdline = 16

MSR_GS_BASE = 0xc0000101

# point GS_BASE at this CPU's private 4 KB area just below its stack
.macro setup_percpu_area
    lea -4096(%esp), %eax
    mov $0, %edx
    mov $MSR_GS_BASE, %ecx
    wrmsr
.endm

.globl start
# Multiboot entry (BSP): %ebx = multiboot info pointer.
start:
    push %ebx
    call setup_get_initrd
    call setup_environ
    mov mb_cmdline(%ebx), %eax
    mov %eax, __args
    call __setup_args

    mov $stacktop, %esp
    setup_percpu_area
    call prepare_32
    jmpl $8, $start32

# load GDT, enable PSE, point cr3 at the page directory, turn on
# protection + paging
prepare_32:
    lgdtl gdt32_descr

    mov %cr4, %eax
    bts $4, %eax // pse
    mov %eax, %cr4

    mov $pt, %eax
    mov %eax, %cr3

    mov %cr0, %eax
    bts $0, %eax
    bts $31, %eax
    mov %eax, %cr0
    ret

# next free AP stack top; each AP atomically claims 4 KB below it
smp_stacktop: .long 0xa0000

# AP entry after the SIPI trampoline switched to protected mode
ap_start32:
    mov $0x10, %ax
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %fs
    mov %ax, %gs
    mov %ax, %ss
    mov $-4096, %esp
    lock/xaddl %esp, smp_stacktop  # esp = old smp_stacktop - 4096
    setup_percpu_area
    call prepare_32
    call load_tss
    call enable_apic
    call enable_x2apic
    sti
    nop
    lock incw cpu_online_count  # signal BSP we are up

1:  hlt
    jmp 1b

start32:
    call load_tss
    call mask_pic_interrupts
    call enable_apic
    call smp_init
    call enable_x2apic
    push $__environ
    push $__argv
    push __argc
    call main
    push %eax
    call exit

# patch this CPU's TSS descriptor (indexed by APIC ID) with the address
# of its TSS, then load the task register
load_tss:
    lidt idt_descr
    mov $16, %eax
    mov %ax, %ss
    mov $(APIC_DEFAULT_PHYS_BASE + APIC_ID), %eax
    mov (%eax), %eax
    shr $24, %eax          # eax = APIC ID
    mov %eax, %ebx
    shl $3, %ebx           # ebx = byte offset into tss_descr (8 B/entry)
    mov $((tss_end - tss) / max_cpus), %edx
    imul %edx
    add $tss, %eax         # eax = address of this CPU's TSS
    mov %ax, tss_descr+2(%ebx)
    shr $16, %eax
    mov %al, tss_descr+4(%ebx)
    shr $8, %eax
    mov %al, tss_descr+7(%ebx)
    lea tss_descr-gdt32(%ebx), %eax
    ltr %ax
    ret

# copy the real-mode trampoline to physical 0, broadcast
# INIT/deassert/SIPI, then wait until all CPUs have checked in
smp_init:
    cld
    lea sipi_entry, %esi
    xor %edi, %edi
    mov $(sipi_end - sipi_entry), %ecx
    rep/movsb
    mov $APIC_DEFAULT_PHYS_BASE, %eax
    movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT), APIC_ICR(%eax)
    movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT), APIC_ICR(%eax)
    movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_STARTUP), APIC_ICR(%eax)
    call fwcfg_get_nb_cpus
1:  pause
    cmpw %ax, cpu_online_count
    jne 1b
smp_init_done:
    ret

cpu_online_count: .word 1

# real-mode trampoline executed by APs at the SIPI vector (phys 0)
.code16
sipi_entry:
    mov %cr0, %eax
    or $1, %eax
    mov %eax, %cr0
    lgdtl gdt32_descr - sipi_entry
    ljmpl $8, $ap_start32

gdt32_descr:
    .word gdt32_end - gdt32 - 1
    .long gdt32

sipi_end:
|
cloudgamingrage/copysh-v86-precompiled | 4,163 | tests/kvm-unit-tests/x86/cstart64.S |
#include "apic-defs.h"
# 64-bit startup for kvm-unit-tests: identity paging with 2 MB pages
# (4 GB via 4 PDs), per-CPU stacks and 64-bit TSSes, multiboot entry in
# 32-bit mode, long-mode switch, and SMP bring-up via INIT/SIPI.
.globl boot_idt
boot_idt = 0  # IDT lives at physical address 0

.globl idt_descr
.globl tss_descr
.globl gdt64_desc

ipi_vector = 0x20

max_cpus = 64

.bss

    # one 4 KB boot stack per CPU, growing down from stacktop
    . = . + 4096 * max_cpus
    .align 16
stacktop:

    # shared ring-0 stack area used by the per-CPU TSSes below
    . = . + 4096
    .align 16
ring0stacktop:

.data

# 4 page directories of 2 MB pages: identity-map the first 4 GB,
# flags 0x1e7 = present/write/user/accessed/dirty/PS/global
.align 4096
.globl ptl2
ptl2:
i = 0
.rept 512 * 4
    .quad 0x1e7 | (i << 21)
    i = i + 1
.endr

.align 4096
ptl3:
    .quad ptl2 + 7 + 0 * 4096
    .quad ptl2 + 7 + 1 * 4096
    .quad ptl2 + 7 + 2 * 4096
    .quad ptl2 + 7 + 3 * 4096

.align 4096
ptl4:
    .quad ptl3 + 7

.align 4096

gdt64_desc:
    .word gdt64_end - gdt64 - 1
    .quad gdt64

gdt64:
    .quad 0
    .quad 0x00af9b000000ffff // 64-bit code segment
    .quad 0x00cf93000000ffff // 32/64-bit data segment
    .quad 0x00af1b000000ffff // 64-bit code segment, not present
    .quad 0x00cf9b000000ffff // 32-bit code segment
    .quad 0x008f9b000000FFFF // 16-bit code segment
    .quad 0x008f93000000FFFF // 16-bit data segment
    .quad 0x00cffb000000ffff // 32-bit code segment (user)
    .quad 0x00cff3000000ffff // 32/64-bit data segment (user)
    .quad 0x00affb000000ffff // 64-bit code segment (user)

    .quad 0 // 6 spare selectors
    .quad 0
    .quad 0
    .quad 0
    .quad 0
    .quad 0

# one 16-byte (two-quad) 64-bit TSS descriptor per CPU; the base
# address is patched in load_tss
tss_descr:
    .rept max_cpus
    .quad 0x000089000000ffff // 64-bit avail tss
    .quad 0 // tss high addr
    .endr
gdt64_end:

i = 0
.globl tss
# one 64-bit TSS per CPU: rsp0 = private slice of ring0stacktop
tss:
    .rept max_cpus
    .long 0
    .quad ring0stacktop - i * 4096
    .quad 0, 0
    .quad 0, 0, 0, 0, 0, 0, 0, 0
    .long 0, 0, 0
    i = i + 1
    .endr
tss_end:

mb_boot_info: .quad 0

.section .init

.code32

mb_magic = 0x1BADB002
mb_flags = 0x0
# multiboot header
.long mb_magic, mb_flags, 0 - (mb_magic + mb_flags)
mb_cmdline = 16

MSR_GS_BASE = 0xc0000101

# point GS_BASE at this CPU's private 4 KB area just below its stack
.macro setup_percpu_area
    lea -4096(%esp), %eax
    mov $0, %edx
    mov $MSR_GS_BASE, %ecx
    wrmsr
.endm

.globl start
# Multiboot entry (BSP, still 32-bit): %ebx = multiboot info pointer.
start:
    mov %ebx, mb_boot_info
    mov $stacktop, %esp
    setup_percpu_area
    call prepare_64
    jmpl $8, $start64

# load GDT, enable PAE, point cr3 at PML4, set EFER.LME, enable
# protection + paging (completes the long-mode switch on far jump)
prepare_64:
    lgdt gdt64_desc

    mov %cr4, %eax
    bts $5, %eax  // pae
    mov %eax, %cr4

    mov $ptl4, %eax
    mov %eax, %cr3

efer = 0xc0000080
    mov $efer, %ecx
    rdmsr
    bts $8, %eax
    wrmsr

    mov %cr0, %eax
    bts $0, %eax
    bts $31, %eax
    mov %eax, %cr0
    ret

# next free AP stack top; each AP atomically claims 4 KB below it
smp_stacktop: .long 0xa0000

.align 16

# minimal flat GDT for the real-mode SIPI trampoline
gdt32:
    .quad 0
    .quad 0x00cf9b000000ffff // flat 32-bit code segment
    .quad 0x00cf93000000ffff // flat 32-bit data segment
gdt32_end:

# real-mode trampoline executed by APs at the SIPI vector (phys 0)
.code16
sipi_entry:
    mov %cr0, %eax
    or $1, %eax
    mov %eax, %cr0
    lgdtl gdt32_descr - sipi_entry
    ljmpl $8, $ap_start32

gdt32_descr:
    .word gdt32_end - gdt32 - 1
    .long gdt32

sipi_end:

.code32
ap_start32:
    mov $0x10, %ax
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %fs
    mov %ax, %gs
    mov %ax, %ss
    mov $-4096, %esp
    lock/xaddl %esp, smp_stacktop  # esp = old smp_stacktop - 4096
    setup_percpu_area
    call prepare_64
    ljmpl $8, $ap_start64

.code64
ap_start64:
    call load_tss
    call enable_apic
    call enable_x2apic
    sti
    nop
    lock incw cpu_online_count  # signal BSP we are up

1:  hlt
    jmp 1b

start64:
    call load_tss
    call mask_pic_interrupts
    call enable_apic
    call smp_init
    call enable_x2apic
    mov mb_boot_info(%rip), %rbx
    mov %rbx, %rdi
    call setup_get_initrd
    call setup_environ
    mov mb_cmdline(%rbx), %eax
    mov %rax, __args(%rip)
    call __setup_args
    mov __argc(%rip), %edi
    lea __argv(%rip), %rsi
    lea __environ(%rip), %rdx
    call main
    mov %eax, %edi
    call exit

idt_descr:
    .word 16 * 256 - 1
    .quad boot_idt

# patch this CPU's 16-byte TSS descriptor (indexed by APIC ID) with the
# full 64-bit TSS base, then load the task register
load_tss:
    lidtq idt_descr
    mov $(APIC_DEFAULT_PHYS_BASE + APIC_ID), %eax
    mov (%rax), %eax
    shr $24, %eax          # eax = APIC ID
    mov %eax, %ebx
    shl $4, %ebx           # ebx = byte offset (16 B per 64-bit descriptor)
    mov $((tss_end - tss) / max_cpus), %edx
    imul %edx
    add $tss, %rax         # rax = address of this CPU's TSS
    mov %ax, tss_descr+2(%rbx)
    shr $16, %rax
    mov %al, tss_descr+4(%rbx)
    shr $8, %rax
    mov %al, tss_descr+7(%rbx)
    shr $8, %rax
    mov %eax, tss_descr+8(%rbx)  # bits 63:32 of the base
    lea tss_descr-gdt64(%rbx), %rax
    ltr %ax
    ret

# copy the real-mode trampoline to physical 0, broadcast
# INIT/deassert/SIPI, then wait until all CPUs have checked in
smp_init:
    cld
    lea sipi_entry, %rsi
    xor %rdi, %rdi
    mov $(sipi_end - sipi_entry), %rcx
    rep/movsb
    mov $APIC_DEFAULT_PHYS_BASE, %eax
    movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT), APIC_ICR(%rax)
    movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT), APIC_ICR(%rax)
    movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_STARTUP), APIC_ICR(%rax)
    call fwcfg_get_nb_cpus
1:  pause
    cmpw %ax, cpu_online_count
    jne 1b
smp_init_done:
    ret

cpu_online_count: .word 1
|
cloudgamingrage/copysh-v86-precompiled | 512 | tests/kvm-unit-tests/lib/x86/setjmp32.S | .globl setjmp
# int setjmp(jmp_buf env) — 32-bit cdecl.
# Saves return EIP, ESP, EBP and the callee-saved registers
# (ebx/esi/edi) into env; returns 0.
setjmp:
    mov (%esp), %ecx  // get return EIP
    mov 4(%esp), %eax // get jmp_buf
    mov %ecx, (%eax)
    mov %esp, 4(%eax)
    mov %ebp, 8(%eax)
    mov %ebx, 12(%eax)
    mov %esi, 16(%eax)
    mov %edi, 20(%eax)
    xor %eax, %eax
    ret

# void longjmp(jmp_buf env, int val) — 32-bit cdecl.
# Restores the registers saved by setjmp and resumes right after the
# setjmp call with eax = val (note: val is passed through unchanged).
.globl longjmp
longjmp:
    mov 8(%esp), %eax // get return value
    mov 4(%esp), %ecx // get jmp_buf
    mov 20(%ecx), %edi
    mov 16(%ecx), %esi
    mov 12(%ecx), %ebx
    mov 8(%ecx), %ebp
    mov 4(%ecx), %esp
    mov (%ecx), %ecx  // get saved EIP
    mov %ecx, (%esp)  // and store it on the stack, so ret jumps there
    ret
|
cloudgamingrage/copysh-v86-precompiled | 467 | tests/kvm-unit-tests/lib/x86/setjmp64.S | .globl setjmp
# int setjmp(jmp_buf env) — SysV AMD64, rdi = env.
# Saves return RIP, RSP and the callee-saved registers
# (rbp/rbx/r12-r15); returns 0.
setjmp:
    mov (%rsp), %rsi      # rsi = return RIP
    mov %rsi, (%rdi)
    mov %rsp, 0x8(%rdi)
    mov %rbp, 0x10(%rdi)
    mov %rbx, 0x18(%rdi)
    mov %r12, 0x20(%rdi)
    mov %r13, 0x28(%rdi)
    mov %r14, 0x30(%rdi)
    mov %r15, 0x38(%rdi)
    xor %eax, %eax
    ret

# void longjmp(jmp_buf env, int val) — rdi = env, esi = val.
# Restores the saved registers and resumes right after the setjmp call
# with eax = val (note: val is passed through unchanged).
.globl longjmp
longjmp:
    mov %esi, %eax
    mov 0x38(%rdi), %r15
    mov 0x30(%rdi), %r14
    mov 0x28(%rdi), %r13
    mov 0x20(%rdi), %r12
    mov 0x18(%rdi), %rbx
    mov 0x10(%rdi), %rbp
    mov 0x8(%rdi), %rsp
    mov (%rdi), %rsi      # rsi = saved RIP
    mov %rsi, (%rsp)      # overwrite return slot so ret jumps there
    ret
|
clstatham/kados-ng | 437 | crates/chainloader/src/start.S | .section ".text.boot"
.global _start
// Chainloader entry: park secondary cores, relocate the loader image
// from LOAD_ADDR to its link address (_start), then call recv.
_start:
    // Stash the four boot arguments (x0-x3, e.g. DTB pointer).
    // NOTE(review): x20-x23 are not obviously consumed in this file —
    // presumably read by later code; confirm.
    mov x20, x0
    mov x21, x1
    mov x22, x2
    mov x23, x3
    ldr x1, =_stack_top
    mov sp, x1
    // Park every core except core 0 (mpidr_el1 Aff0 != 0)
    mrs x1, mpidr_el1
    and x1, x1, #3
    cbnz x1, hang
    // Copy loop: x1 = source (LOAD_ADDR), x2 = destination (_start)
    ldr x1, =LOAD_ADDR
    mov x0, x1
    ldr x2, =_start
    mov sp, x2     // NOTE(review): repoints sp at _start (the copy
                   // destination) — looks suspicious; confirm intended
    ldr w3, =__loader_size
1:
    ldr x4, [x1], #8
    str x4, [x2], #8
    sub w3, w3, #1 // NOTE(review): decremented once per 8 bytes, so
                   // __loader_size is presumably in doublewords — verify
    cbnz w3, 1b
    bl recv-0x60000 // NOTE(review): calls recv at its pre-relocation
                    // address (link addr minus 0x60000) — confirm offset
hang:
    wfe
    b hang
|
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 726 | src/trusted_kernel/src/arch/riscv64/switch.S | .altmacro
# __switch(current_task_cx_ptr: *mut TaskContext /* a0 */,
#          next_task_cx_ptr: *const TaskContext /* a1 */)
# TaskContext layout (8-byte slots): [0]=ra, [1]=sp, [2..13]=s0..s11.
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm
    .section .text
    .globl __switch
__switch:
    # Step 1
    # __switch(
    #     current_task_cx_ptr: *mut TaskContext,
    #     next_task_cx_ptr: *const TaskContext
    # )
    # Step 2
    # save kernel stack of current task
    sd sp, 8(a0)
    # save ra & s0~s11 of current execution
    sd ra, 0(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n + 1
    .endr
    # Step 3
    # restore ra & s0~s11 of next execution
    ld ra, 0(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n + 1
    .endr
    # restore kernel stack of next task
    ld sp, 8(a1)
    # Step 4: return through the restored ra, resuming the next task
ret |
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 837 | src/trusted_kernel/src/arch/aarch64/switch.S | .altmacro
// __switch(current_task_cx_ptr: *mut TaskContext /* x0 */,
//          next_task_cx_ptr: *const TaskContext /* x1 */)
// TaskContext layout (8-byte slots): [0]=lr(x30), [1]=sp,
// [2..12]=x19..x29 (the AAPCS64 callee-saved registers).
.macro SAVE_XN n
    str x\n, [x0, (\n+1)*8]
.endm
.macro LOAD_XN n
    ldr x\n, [x1, (\n+1)*8]
.endm
    .section .text
    .globl __switch
__switch:
    // step [1]
    // __switch(
    //     current_task_cx_ptr: *mut TaskContext (x0),
    //     next_task_cx_ptr: *const TaskContext (x1)
    // )
    // step [2]
    // save kernel stack of current task (sp cannot be stored directly)
    mov x2, sp
    str x2, [x0, #8]
    // save lr (x30) and x19~x29 of current execution
    str x30, [x0, #0]
    .set n, 19
    .rept 11
        SAVE_XN %n
        .set n, n + 1
    .endr
    // step [3]
    // restore lr (x30) and x19~x29 of next execution
    ldr x30, [x1, #0]
    .set n, 19
    .rept 11
        LOAD_XN %n
        .set n, n + 1
    .endr
    // restore kernel stack of next task
    ldr x2, [x1, #8]
    mov sp, x2
    // step [4]: ret through the restored x30 resumes the next task
ret |
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 5,146 | src/trusted_kernel/src/arch/riscv64/boot/entry.S | #include "timer.h"
#include "page.h"
#include "intr.h"
# .altmacro also allows macro bodies to reference parameters without a
# leading backslash (used by PT_MAP*/PTE_LOAD below).
.altmacro

# load large 64 bit imm
.macro LA_FAR, reg, imm
    lui \reg, %hi(\imm)
    addi \reg, \reg, %lo(\imm)
.endm

# initialize an empty well aligned pagetable
# sign-extended lui, msb of the first word should be 1
.macro DEFINE_PT, name
.align PAGE_SHIFT
\name:
    .rept (1 << PAGE_SHIFT)
    .byte 0
    .endr
.endm

# map a virtual addr to a physical addr for pagetable with level `lvl`
# convention: root pagetable is noted as level 3
#             leaf pagetable is noted as level 1
# pt_addr passed in reg t0, other as imm
.macro PT_MAP_FAR, pt_addr, vaddr, paddr, lvl, flags
    # find index into the pagetable given the vaddr and lvl
    LA_FAR t1, \vaddr
    srli t1, t1, (PAGE_SHIFT + BITS_PER_LEVEL * (lvl - 1))
    andi t1, t1, (1 << BITS_PER_LEVEL) - 1
    la t2, \paddr
    srli t2, t2, PAGE_SHIFT
    PTE_LOAD \pt_addr, t1, t2, flags
.endm

.macro PT_MAP, pt_addr, vaddr, paddr, lvl, flags
    # find index into the pagetable given the vaddr and lvl
    la t1, \vaddr
    srli t1, t1, (PAGE_SHIFT + BITS_PER_LEVEL * (lvl - 1))
    andi t1, t1, (1 << BITS_PER_LEVEL) - 1
    la t2, \paddr
    srli t2, t2, PAGE_SHIFT
    PTE_LOAD \pt_addr, t1, t2, flags
.endm

# macro for writing to a pte
# write phy_reg with flags onto entry of pt_root(imm) indexed by index_reg
# NOTE(review): `sw` stores only the low 32 bits of the 64-bit PTE —
# fine while ppn+flags fit in 32 bits; confirm for high physical addrs.
.macro PTE_LOAD, pt_addr, index_reg, ppn_reg, flags
    slli \index_reg, \index_reg, 3
    add \pt_addr, \pt_addr, \index_reg
    slli \ppn_reg, \ppn_reg, 10
    addi \ppn_reg, \ppn_reg, \flags
    sw \ppn_reg, 0(\pt_addr)
.endm

# install pt_addr as the root pagetable: satp = MODE(Sv39=8) | PPN
.macro SET_SATP pt_addr
    li t0, 8 << 60
    la t1, \pt_addr
    srli t1, t1, PAGE_SHIFT
    or t0, t0, t1
    csrw satp, t0
.endm

.section .data.init_pt
.global __init_pt_lvl3
DEFINE_PT __init_pt_lvl3

.section .text.head
.global _m_entry
# Machine-mode entry: set M/S trap vectors, program the CLINT timer
# scratch area, delegate traps to S mode, enable interrupts, open PMP,
# then mret into _entry in S mode.
_m_entry:
    # set m_trap_vector, which currently captures M timer intr
    # and dispatch it to S mode code by raising a S mode software intr (xv6 way)
    # m_trap_vector is loaded as part of .text.head section, the symbol binds to a phy addr
    la t0, m_trap_vector
    csrw mtvec, t0
    la t0, m_scratch
    li t1, CLINT_MTIMECMP
    li t2, CLINT_INTERVAL
    # contract of m_trap_vector
    sd t1, 3*8(t0)
    sd t2, 4*8(t0)
    csrw mscratch, t0

    # S model trap handler, virtual addr
    LA_FAR t0, __kernel_trap_vector
    csrw stvec, t0

    # i/e delegation, delegation U ecall to S and keep S ecall
    li t0, 0xF1FF
    csrw medeleg, t0
    li t0, 0xFFFF
    csrw mideleg, t0

    # enable intr
    # M mode
    csrr t0, mie
    li t1, MIE_MEIE | MIE_MSIE | MIE_MTIE
    or t0, t0, t1
    csrw mie, t0
    # S mode
    csrr t0, sie
    li t1, SIE_SEIE | SIE_SSIE | SIE_STIE
    or t0, t0, t1
    csrw sie, t0

    # prepare returning
    # set MPP to S
    csrr t0, mstatus
    li t1, MSTATUS_MPP_S
    or t0, t0, t1
    csrw mstatus, t0

    # set pmpcfg0 & pmpaddr0 let S mode access all memory
    li t0, 0x3fffffffffffff
    csrw pmpaddr0, t0
    li t1, 0xf
    csrw pmpcfg0, t1

    la a0, _entry
    csrw mepc, a0
    mret

# S-mode entry: build the bootstrap page table, enable paging, zero the
# register file, set sp, and jump into the Rust kernel.
_entry:
    # jump here in S mode
    # procedures prior to jumping to higher half of rust kernel code
    # by default we use SV39, three level paging
    # prematurely mapped the entire elf with all rights & device mapped region
    # no fine-grained rights control over sections in the elf
    # after jumping out of .boot, one should replace this premature init pagetable
    # with their own well-designed kernel page table

    # macro takes pt addr in t0
    # identity mapping of elf (1G)
    la t0, __init_pt_lvl3
    PT_MAP t0, RAM_BASE, RAM_BASE, 3, 0xf
    la t0, __init_pt_lvl3
    PT_MAP_FAR t0, KERNEL_BASE_VIR, RAM_BASE, 3, 0xf
    # identity mapping of lower device region from [0x0, 0x4000_0000]
    la t0, __init_pt_lvl3
    PT_MAP t0, 0x0, 0x0, 3, 0xf
    # load init table
    SET_SATP __init_pt_lvl3
    sfence.vma zero, zero

    # set up a stack for C.
    # kernel_stack is declared in start_rust.c,
    # with a 4096-byte stack per CPU.
    # sp = kernel_stack + (hartid * 4096)
    li x1, 0
    li x2, 0
    li x3, 0
    li x4, 0
    li x5, 0
    li x6, 0
    li x7, 0
    li x8, 0
    li x9, 0
    # skip a0 and a1; arguments from previous boot loader stage:
    li x12, 0
    li x13, 0
    li x14, 0
    li x15, 0
    li x16, 0
    li x17, 0
    li x18, 0
    li x19, 0
    li x20, 0
    li x21, 0
    li x22, 0
    li x23, 0
    li x24, 0
    li x25, 0
    li x26, 0
    li x27, 0
    li x28, 0
    li x29, 0
    li x30, 0
    li x31, 0
    LA_FAR sp, kernel_stack_end

    # jump to rust code
    LA_FAR a0, bootstrap
    jalr x0, 0(a0)

spin:
    j spin
|
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 498 | src/trusted_kernel/src/arch/riscv64/boot/m_trap.S | #include "bits.h"
#include "intr.h"
.option norvc
.section .text.head
.global m_trap_vector
.align 4
# M-mode trap vector (machine timer path).
# Reprograms the CLINT compare register for the next tick, then injects a
# supervisor software interrupt so the real handling happens in the
# delegated S-mode handler after mret.
# mscratch holds a pointer to a per-hart scratch frame; slots used here:
#   3*REGBYTES  : address of this hart's CLINT mtimecmp -- assumed, TODO confirm
#   4*REGBYTES  : timer interval to add               -- assumed, TODO confirm
#   10..12*REGBYTES : spill area for a0-a2
m_trap_vector:
csrrw sp, mscratch, sp          # swap: sp -> scratch frame, mscratch -> pre-trap sp
STORE a0, 10*REGBYTES(sp)       # spill the registers we are about to use
STORE a1, 11*REGBYTES(sp)
STORE a2, 12*REGBYTES(sp)
# clint cmp addr
ld a0, 3*REGBYTES(sp)
ld a1, 4*REGBYTES(sp)
ld a2, 0(a0)                    # *mtimecmp += interval: schedule the next tick
add a2, a2, a1
sd a2, 0(a0)
li a0, MIP_SSIP                 # raise the supervisor software interrupt pending bit
csrs mip, a0
.Lmret:
LOAD a0, 10*REGBYTES(sp)        # restore spilled registers
LOAD a1, 11*REGBYTES(sp)
LOAD a2, 12*REGBYTES(sp)
csrrw sp, mscratch, sp          # swap back: sp -> pre-trap sp
mret
1:
# NOTE(review): unreachable -- nothing visible branches here (a `1f`
# reference could only come from a macro in bits.h; confirm before deleting)
mret
|
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 2,709 | src/trusted_kernel/src/arch/riscv64/boot/s_trap.S | #include "bits.h"
.attribute arch, "rv64gc"
.altmacro
# Trap-frame layout shared by the vectors below (offsets in REGBYTES units):
#   0..31  : x0..x31 (slot 2 holds the pre-trap sp)
#   32..36 : sstatus, sepc, stval, scause, stvec
#   37..68 : f0..f31
# load general purpose register x\n from its frame slot
.macro LOAD_GR n
LOAD x\n, \n*REGBYTES(sp)
.endm
# store general purpose register x\n into its frame slot
.macro SAVE_GR n
STORE x\n, \n*REGBYTES(sp)
.endm
# load floating point register f\n (assumes REGBYTES == 8 for fld/fsd)
.macro LOAD_FR n
fld f\n, (\n + 37)*REGBYTES(sp) # Offset adjusted for floating-point register area
.endm
# store floating point register f\n
.macro SAVE_FR n
fsd f\n, (\n + 37)*REGBYTES(sp) # Offset adjusted for floating-point register area
.endm
.option norvc
.section .text
.global __kernel_trap_vector
.global __user_trap_vector
.global __restore_context
.align 4
# Entry for traps taken while already in S mode (delegated from M mode).
# The current stack IS the kernel stack, so the frame is carved directly
# out of sp instead of going through sscratch.
__kernel_trap_vector:
# Handle trap issued from S mode (delegated from M mode)
# Use current sp to store context instead of sscratch pointer
addi sp, sp, -(REGBYTES * (37 + 32)) # 32 general registers + 5 csrs + 32 floating-point registers
SAVE_GR 1
.set n, 3
.rept 29                        # save x3..x31 (x2 == sp is reconstructed below)
SAVE_GR %n
.set n, n+1
.endr
csrr s1, sstatus                # s1..s5 are free to clobber: originals saved above
csrr s2, sepc
csrr s3, stval
csrr s4, scause
csrr s5, stvec
addi a0, sp, REGBYTES*(37 + 32) # recompute the pre-trap sp ...
STORE a0, 2*REGBYTES(sp)        # ... and record it in the x2 slot
STORE s1, 32*REGBYTES(sp)
STORE s2, 33*REGBYTES(sp)
STORE s3, 34*REGBYTES(sp)
STORE s4, 35*REGBYTES(sp)
STORE s5, 36*REGBYTES(sp)
# Save floating-point registers
.set n, 0
.rept 32
SAVE_FR %n # Save floating-point registers from f0 to f31
.set n, n+1
.endr
mv a0, sp                       # kernel_trap(frame pointer)
call kernel_trap
j __restore_context
__user_trap_vector:
# Entry for traps from U mode; sscratch was pre-loaded with the kernel
# stack pointer, so the first csrrw switches stacks and parks the user sp.
csrrw sp, sscratch, sp          # sp -> kernel stack, sscratch -> user sp
# Allocate space for trap context
addi sp, sp, -(REGBYTES * (37 + 32))
SAVE_GR 1
# Skip sp (x2), save x3-x31
.set n, 3
.rept 29
SAVE_GR %n
.set n, n+1
.endr
csrr s0, sscratch               # the user sp swapped out above
csrr s1, sstatus
csrr s2, sepc
csrr s3, stval
csrr s4, scause
csrr s5, stvec
STORE s0, 2*REGBYTES(sp)        # user sp goes in the x2 slot
STORE s1, 32*REGBYTES(sp)
STORE s2, 33*REGBYTES(sp)
STORE s3, 34*REGBYTES(sp)
STORE s4, 35*REGBYTES(sp)
STORE s5, 36*REGBYTES(sp)
# Save floating-point registers
.set n, 0
.rept 32
SAVE_FR %n # Save floating-point registers from f0 to f31
.set n, n+1
.endr
mv a0, sp                       # user_trap(frame pointer)
call user_trap
# falls through into __restore_context
__restore_context:
# Common trap exit: unwind the frame at sp and sret.
# Restore floating-point registers f0..f31
# (same ascending order as the save loop; the original comment claiming
# "reverse order" was stale)
.set n, 0
.rept 32
LOAD_FR %n # Restore floating-point registers from f0 to f31
.set n, n+1
.endr
# Restore sstatus, sepc, stvec (scause/stval are informational only)
LOAD t0, 32*REGBYTES(sp)
LOAD t1, 33*REGBYTES(sp)
LOAD t2, 36*REGBYTES(sp)
LOAD t3, 2*REGBYTES(sp)         # saved pre-trap sp (user sp for U-mode traps)
csrw sstatus, t0
csrw sepc, t1
csrw stvec, t2
csrw sscratch, t3               # staged here; swapped into sp at the very end
# Restore general-purpose registers
LOAD_GR 1
# Skip sp (x2), restore x3-x31
.set n, 3
.rept 29
LOAD_GR %n
.set n, n+1
.endr
# Release trapcontext on kernel stack
addi sp, sp, (REGBYTES * (37 + 32))
csrrw sp, sscratch, sp          # sp -> pre-trap sp, sscratch -> kernel stack top
sret
|
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 3,882 | src/trusted_kernel/src/arch/aarch64/boot/entry.S | #include "page.h"
.altmacro
// Write one descriptor into the table at \pt_addr. \lvl counts from 4 =
// root: the entry index is (\vaddr >> (PAGE_SHIFT + BITS_PER_LEVEL*(lvl-1)))
// masked to one level's worth of bits; the entry is \paddr | \flags with
// bits 8 and 9 forced on.
// NOTE(review): bits 8-9 are the SH[1:0] shareability field of a block/page
// descriptor; the access flag (bit 10) presumably comes from \flags in
// page.h -- confirm.
// Clobbers x0-x2.
.macro PT_MAP pt_addr, vaddr, paddr, lvl, flags
mov x0, \vaddr
lsr x0, x0, (PAGE_SHIFT + BITS_PER_LEVEL * (\lvl - 1))
and x0, x0, (1 << BITS_PER_LEVEL) - 1
lsl x0, x0, #3                          // slot index -> byte offset
mov x1, \paddr
mov x2, \flags
orr x1, x1, x2
orr x1, x1, #(1 << 8)
orr x1, x1, #(1 << 9)
# ldr x2, =\pt_addr
add x2, \pt_addr, x0
str x1, [x2]
.endm
// Build the boot tables: the lvl4 root points at lvl3 for both the identity
// range and the higher half; lvl3 maps 1G blocks (device attributes for the
// low identity range, normal attributes for the higher-half kernel).
.macro PT_INIT
ldr x5, =page_table_lvl4
ldr x6, =page_table_lvl3
PT_MAP x5, 0, x6, 4, PTEF_PT
PT_MAP x6, 0, 0, 3, PTEF_BLOCK_DEVICE
PT_MAP x5, 0xFFFFFFFF80000000, x6, 4, PTEF_PT
PT_MAP x6, 0xFFFFFFFF80000000, 0, 3, PTEF_BLOCK_NORMAL
.endm
// Point both TTBRs at the boot table, program TCR/MAIR, then turn on the
// MMU plus instruction/data caches in SCTLR_EL1.
.macro ENABLE_MMU
ldr x0, =page_table_lvl4
lsr x0, x0, #1                          // clear bit 0 of the base address
lsl x0, x0, #1                          // (TTBRn bit 0 is CnP -- TODO confirm intent)
msr ttbr0_el1, x0
msr ttbr1_el1, x0                       // same root table serves both VA halves
ldr x0, =0x00000072b5103510             // TCR_EL1: T0SZ = T1SZ = 16 (48-bit VA), 4K granule
msr tcr_el1, x0
bl init_mair                            // program MAIR_EL1 memory attributes
mrs x0, sctlr_el1
orr x0, x0, #0x1                        // M: MMU enable
orr x0, x0, #(1 << 12)                  // I: instruction cache enable
orr x0, x0, #(1 << 2)                   // C: data cache enable
msr sctlr_el1, x0
isb
ic iallu                                // invalidate I-cache after the mapping change
dsb ish
isb
.endm
.section .text.head
.global _start
// Image entry point: mask interrupts, park every core except core 0, drop
// to EL1, install the exception vectors, enable the MMU, then hand over to
// the Rust `bootstrap` routine.
_start:
bl _turn_off_all_intr
# read cpu affinity, start core 0, halt rest
mrs x19, mpidr_el1
and x19, x19, #3                        // keep Aff0[1:0] as the core id
# compare and branch if non zero
cbnz x19, halt
# switch to el1
bl el_setup
# init vbar_el1
ldr x0, =__vectors
msr vbar_el1, x0
# init mmu
PT_INIT
ENABLE_MMU
b bootstrap
halt:
# secondary cores (and any fallthrough) park here forever
wfe
b halt
# switch to EL1, setup system registers of other EL
# Works from EL3, EL2 or EL1 and always returns to the caller running at
# EL1 (the EL3 path erets into switch_to_el1, the EL2 path erets to lr).
# Clobbers x0; expects x19 = cpu id.
el_setup:
# use SP_ELx for Exception level ELx
msr SPsel, #1
# read the current exception level into x0 (ref: C5.2.1)
mrs x0, CurrentEL
and x0, x0, #0b1100
lsr x0, x0, #2
switch_to_el2:
# switch to EL2 if we're in EL3. otherwise switch to EL1
cmp x0, #2
beq switch_to_el1
# at EL3
# set-up SCR_EL3 (bits 0, 4, 5, 7, 8, 10) (A53: 4.3.42)
mov x0, #0x5b1
msr scr_el3, x0
# set-up SPSR_EL3 (bits 0, 3, 6, 7, 8, 9) (ref: C5.2.20)
mov x0, #0x3c9
msr spsr_el3, x0
# switch to EL2
adr x0, switch_to_el1
msr elr_el3, x0
eret
switch_to_el1:
# switch to EL1 if we're not already in EL1. otherwise continue with start
cmp x0, #1
beq el_setup_end
# at EL2
# set the temporary stack for EL1 in lower VA range
# x19 is cpu id
adrp x0, _start
sub x0, x0, x19, lsl #16                // 64 KiB per-core region below _start
msr sp_el1, x0
# set-up HCR_EL2, enable AArch64 in EL1 (bits 1, 31) (ref: D10.2.45)
mov x0, #0x0002
movk x0, #0x8000, lsl #16
msr hcr_el2, x0
# don't trap accessing SVE registers (ref: D10.2.30)
msr cptr_el2, xzr
# enable floating point and SVE (SIMD) (bits 20, 21) (ref: D10.2.29)
mrs x0, cpacr_el1
orr x0, x0, #(0x3 << 20)
msr cpacr_el1, x0
# Set SCTLR to known state (RES1: 11, 20, 22, 23, 28, 29) (ref: D10.2.100)
mov x0, #0x0800
movk x0, #0x30d0, lsl #16
msr sctlr_el1, x0
# set-up SPSR_EL2 (bits 0, 2, 6, 7, 8, 9) (ref: C5.2.19)
mov x0, #0x3c5
msr spsr_el2, x0
# enable CNTP for EL1/EL0 (ref: D7.5.2, D7.5.13)
# NOTE: This doesn't actually enable the counter stream.
mrs x0, cnthctl_el2
orr x0, x0, #3
msr cnthctl_el2, x0
msr cntvoff_el2, xzr
# init kernel stack
# NOTE(review): this overwrites the temporary sp_el1 set a few lines up,
# making that earlier write redundant -- confirm before removing it.
ldr x0, =kernel_stack_end
msr sp_el1, x0
# switch to EL1
msr elr_el2, lr                         // eret "returns" to our caller, now in EL1
eret
el_setup_end:
# at EL1
# x19 is cpu id
# adrp x0, _start
# sub x0, x0, x19, lsl #16
# mov sp, x0
ret
.section .data.init_pt
.align 12                               // 4 KiB alignment: required for a translation-table base
.global page_table_lvl4
.global page_table_lvl3
.global page_table_lvl2
// Statically allocated boot translation tables (512 x 8-byte entries each).
// Only lvl4 and lvl3 are written by PT_INIT; lvl2 is not referenced in
// this file.
page_table_lvl4:
.space 0x1000 // 4K
page_table_lvl3:
.space 0x1000 // 4K
page_table_lvl2:
.space 0x1000 // 4K
Clxxx1819/https-github.com-KLoCSD-safeL4-kernel-tree-toolchain-upgrade | 3,167 | src/trusted_kernel/src/arch/aarch64/boot/trap.S | .section .text
.global __alltraps
// Common trap entry, branched to from the HANDLER stubs in __vectors.
// On entry lr and x0 are already pushed (by HANDLER) and x0 holds the
// packed trap number (source | kind << 16).  Builds a TrapFrame on the
// EL1 stack, low to high:
//   [trapnum, pad][elr, spsr][sp, tpidr][x1..x28][x29, pad][lr, x0]
// (38 slots, 38*8 bytes), dispatches to kernel_trap / user_trap, and
// returns through __restore_context.
__alltraps:
# x30 and x0 are saved in __vectors
# x0 is trap num now
# skip __reversed
str x29, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
stp x17, x18, [sp, #-16]!
stp x15, x16, [sp, #-16]!
stp x13, x14, [sp, #-16]!
stp x11, x12, [sp, #-16]!
stp x9, x10, [sp, #-16]!
stp x7, x8, [sp, #-16]!
stp x5, x6, [sp, #-16]!
stp x3, x4, [sp, #-16]!
stp x1, x2, [sp, #-16]!
# reserve the [sp, tpidr] pair; filled in per trap source below
add sp, sp, #-16
# read spsr and elr
mrs x2, spsr_el1
mrs x1, elr_el1
stp x1, x2, [sp, #-16]!
# save trap num
str x0, [sp, #-16]!
# low 2 bits of the trap num are the source; 2 = lower EL, AArch64 (user)
mov x1, #0x3
and x1, x1, x0
cmp x1, #2
beq trap_from_user
trap_from_kernel:
# record the pre-trap kernel sp (frame base + 38 slots) and tpidr_el1
mrs x2, tpidr_el1
add x1, sp, #38*8
stp x1, x2, [sp, #32]
# kernel_trap(frame: *mut TrapFrame)
mov x0, sp
bl kernel_trap
# write back tpidr_el1 (the handler may have changed the frame slot)
ldr x1, [sp, #40]
msr tpidr_el1, x1
b __restore_context
trap_from_user:
# record the user sp_el0 and tpidr_el0
mrs x2, tpidr_el0
mrs x1, sp_el0
stp x1, x2, [sp, #32]
# user_trap(frame: *mut TrapFrame)
mov x0, sp
bl user_trap
b __restore_context
// Removed: an unreachable run of callee-saved-register pops and a `ret`
// used to follow here with no label, after the unconditional branch above
// -- dead code left over from an earlier context-switch routine.
// One 128-byte vector slot (.align 7): save lr/x0, pack the trap identity
// into x0 (source in bits [15:0], kind in bits [31:16]), then branch to
// the shared __alltraps entry.
.macro HANDLER source kind
.align 7
# sp is set to SP_EL1 upon trap
stp lr, x0, [sp, #-16]!
mov x0, #\source
movk x0, #\kind, lsl #16
b __alltraps
.endm
.global __vectors
// EL1 exception vector table (installed into VBAR_EL1, 2 KiB aligned).
// 16 entries = 4 sources (current-EL/SP0, current-EL/SPx, lower-EL/AArch64,
// lower-EL/AArch32) x 4 kinds (synchronous, IRQ, FIQ, SError), 128 B apart.
.align 11
__vectors:
HANDLER 0 0
HANDLER 0 1
HANDLER 0 2
HANDLER 0 3
HANDLER 1 0
HANDLER 1 1
HANDLER 1 2
HANDLER 1 3
HANDLER 2 0
HANDLER 2 1
HANDLER 2 2
HANDLER 2 3
HANDLER 3 0
HANDLER 3 1
HANDLER 3 2
HANDLER 3 3
.global __restore_context
// Unwind the TrapFrame at sp (layout built by __alltraps) and eret.
__restore_context:
# load the saved [sp, tpidr] pair into the EL0 registers
ldp x1, x2, [sp, #32]
msr sp_el0, x1
msr tpidr_el0, x2
# NOTE(review): for a kernel-mode trap these slots hold the kernel sp and
# tpidr_el1 value, which are written into sp_el0/tpidr_el0 here -- confirm
# clobbering the EL0 registers on a kernel-trap return is intended.
# sp points to TrapFrame
# skip trap num, don't restore
add sp, sp, #16
# elr and spsr
ldp x1, x2, [sp], #16
msr elr_el1, x1
msr spsr_el1, x2
# skip sp and tpidr (already consumed above)
add sp, sp, #16
# general purpose registers
ldp x1, x2, [sp], #16
ldp x3, x4, [sp], #16
ldp x5, x6, [sp], #16
ldp x7, x8, [sp], #16
ldp x9, x10, [sp], #16
ldp x11, x12, [sp], #16
ldp x13, x14, [sp], #16
ldp x15, x16, [sp], #16
ldp x17, x18, [sp], #16
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldr x29, [sp], #16
ldp lr, x0, [sp], #16
# return
eret
cncf-hacks/atlanticode | 7,219 | smpc/islet/rmm/src/exception/vectors.s | .equ VCPU_GP_REGS, 0
.equ VCPU_SYS_REGS, 264
.equ VCPU_FP_REGS, 472
/* Push the EL2 volatile context onto the stack, producing (low -> high):
 * [xzr, elr_el2][spsr_el2, x0][x1..x18][gap for x19-x28][x29, x30].
 * The 8*12 pre-decrement both stores x17/x18 and reserves the slots for
 * the callee-saved registers that are NOT saved here, so the layout
 * presumably matches the Rust TrapFrame (see rmm_exit's comment).
 * x18 is used as scratch for the CSR reads after its slot is written. */
.macro save_volatile_to_stack
stp x29, x30, [SP, #-16]!
stp x17, x18, [SP, #-(8*12)]!
stp x15, x16, [SP, #-16]!
stp x13, x14, [SP, #-16]!
stp x11, x12, [SP, #-16]!
stp x9, x10, [SP, #-16]!
stp x7, x8, [SP, #-16]!
stp x5, x6, [SP, #-16]!
stp x3, x4, [SP, #-16]!
stp x1, x2, [SP, #-16]!
mrs x18, spsr_el2
stp x18, x0, [SP, #-16]!
mrs x18, elr_el2
stp xzr, x18, [SP, #-16]!
.endm
/* Save the trapped realm's general-purpose context into the vCPU
 * structure pointed to by tpidr_el2. x18 is parked on the stack first so
 * it can serve as the base register, then its real value is stored via x0.
 * elr_el2/spsr_el2 are recorded as the return-address/pstate pair at
 * GP_REGS slot 31. */
.macro save_volatile_to_vcpu
/* Save all general purpose registers */
str x18, [sp, #-16]!
mrs x18, tpidr_el2
stp x0, x1, [x18, #VCPU_GP_REGS + 8 * 0]
stp x2, x3, [x18, #VCPU_GP_REGS + 8 * 2]
stp x4, x5, [x18, #VCPU_GP_REGS + 8 * 4]
stp x6, x7, [x18, #VCPU_GP_REGS + 8 * 6]
stp x8, x9, [x18, #VCPU_GP_REGS + 8 * 8]
stp x10, x11, [x18, #VCPU_GP_REGS + 8 * 10]
stp x12, x13, [x18, #VCPU_GP_REGS + 8 * 12]
stp x14, x15, [x18, #VCPU_GP_REGS + 8 * 14]
stp x16, x17, [x18, #VCPU_GP_REGS + 8 * 16]
stp x19, x20, [x18, #VCPU_GP_REGS + 8 * 19]
stp x21, x22, [x18, #VCPU_GP_REGS + 8 * 21]
stp x23, x24, [x18, #VCPU_GP_REGS + 8 * 23]
stp x25, x26, [x18, #VCPU_GP_REGS + 8 * 25]
stp x27, x28, [x18, #VCPU_GP_REGS + 8 * 27]
stp x29, x30, [x18, #VCPU_GP_REGS + 8 * 29]
ldr x0, [sp], #16
str x0, [x18, #VCPU_GP_REGS + 8 * 18]   /* the parked x18 value */
/* Save return address & mode */
mrs x1, elr_el2
mrs x2, spsr_el2
stp x1, x2, [x18, #VCPU_GP_REGS + 8 * 31]
.endm
.global restore_all_from_vcpu_and_run
/* Reload the realm's EL1 (and a few EL2) system registers from the vCPU
 * structure in tpidr_el2, then fall through to restore the GPRs and eret
 * into the realm. x28 walks the VCPU_SYS_REGS area in the same order
 * rmm_enter saved it; commented-out msr lines keep the slot walk in sync
 * with save slots that are intentionally not restored. */
restore_all_from_vcpu_and_run:
mrs x0, tpidr_el2
/* Restore system registers */
/* Use x28 as the base */
add x28, x0, #VCPU_SYS_REGS
ldr x3, [x28], #8
/* msr sctlr_el2, x2 */
msr sp_el1, x3
ldp x2, x3, [x28], #16
msr sp_el0, x2
msr esr_el1, x3
ldp x2, x3, [x28], #16
msr vbar_el1, x2
msr ttbr0_el1, x3
ldp x2, x3, [x28], #16
msr ttbr1_el1, x2
msr mair_el1, x3
ldp x2, x3, [x28], #16
msr amair_el1, x2
msr tcr_el1, x3
ldp x2, x3, [x28], #16
msr tpidr_el1, x2
msr tpidr_el0, x3
ldp x2, x3, [x28], #16
msr tpidrro_el0, x2
msr actlr_el1, x3
ldp x2, x3, [x28], #16
//msr vmpidr_el2, x2
msr csselr_el1, x3
ldp x2, x3, [x28], #16
msr cpacr_el1, x2
msr afsr0_el1, x3
ldp x2, x3, [x28], #16
msr afsr1_el1, x2
msr far_el1, x3
ldp x2, x3, [x28], #16
msr contextidr_el1, x2
msr cntkctl_el1, x3
ldp x2, x3, [x28], #16
msr par_el1, x2
msr vttbr_el2, x3
ldp x2, x3, [x28], #16
msr esr_el2, x2
msr hpfar_el2, x3
ldr x3, [x28], #8
msr sctlr_el1, x3
/* TODO: invalidate TLB */
/* Intentional fallthrough */
.global restore_nonvolatile_from_vcpu_and_run
/* x0 = vCPU pointer. Restore callee-saved x19-x28, then fall through. */
restore_nonvolatile_from_vcpu_and_run:
/* Restore non-volatile registers. */
ldp x19, x20, [x0, #VCPU_GP_REGS + 8 * 19]
ldp x21, x22, [x0, #VCPU_GP_REGS + 8 * 21]
ldp x23, x24, [x0, #VCPU_GP_REGS + 8 * 23]
ldp x25, x26, [x0, #VCPU_GP_REGS + 8 * 25]
ldp x27, x28, [x0, #VCPU_GP_REGS + 8 * 27]
/* Intentional fallthrough */
.global restore_volatile_from_vcpu_and_run
/* x0 = vCPU pointer. Restore the remaining GPRs plus elr/spsr_el2 and
 * enter the realm. x0/x1 are reloaded last because x0 is the base. */
restore_volatile_from_vcpu_and_run:
ldp x4, x5, [x0, #VCPU_GP_REGS + 8 * 4]
ldp x6, x7, [x0, #VCPU_GP_REGS + 8 * 6]
ldp x8, x9, [x0, #VCPU_GP_REGS + 8 * 8]
ldp x10, x11, [x0, #VCPU_GP_REGS + 8 * 10]
ldp x12, x13, [x0, #VCPU_GP_REGS + 8 * 12]
ldp x14, x15, [x0, #VCPU_GP_REGS + 8 * 14]
ldp x16, x17, [x0, #VCPU_GP_REGS + 8 * 16]
ldr x18, [x0, #VCPU_GP_REGS + 8 * 18]
ldp x29, x30, [x0, #VCPU_GP_REGS + 8 * 29]
ldp x1, x2, [x0, #VCPU_GP_REGS + 8 * 31]
msr elr_el2, x1
msr spsr_el2, x2
ldp x2, x3, [x0, #VCPU_GP_REGS + 8 * 2]
ldp x0, x1, [x0, #VCPU_GP_REGS + 8 * 0]
eret
dsb nsh                                 /* barrier against straight-line speculation */
isb                                     /* past the eret; architecturally unreachable */
.global restore_volatile_from_stack_and_return
/* Pop the frame pushed by save_volatile_to_stack, reload elr/spsr_el2,
 * and eret back into RMM code at the same EL. */
restore_volatile_from_stack_and_return:
ldp xzr, x18, [SP], #16                 /* first slot is padding: xzr discards it */
msr ELR_EL2, x18
ldp x18, x0, [SP], #16
msr SPSR_EL2, x18
ldp x1, x2, [SP], #16
ldp x3, x4, [SP], #16
ldp x5, x6, [SP], #16
ldp x7, x8, [SP], #16
ldp x9, x10, [SP], #16
ldp x11, x12, [SP], #16
ldp x13, x14, [SP], #16
ldp x15, x16, [SP], #16
ldp x17, x18, [SP], #(8*12)             /* skip the unsaved x19-x28 gap */
ldp x29, x30, [SP], #16
eret
/* Vector stub for exceptions taken from EL2 itself: save the volatile
 * state on the stack, call handle_exception(trap_id, esr, frame), then
 * unwind and eret back to the interrupted RMM code. */
.macro HANDLER source, kind
.align 7
save_volatile_to_stack
mov x0, \source
movk x0, \kind, LSL #16                 /* trap_id = source | kind << 16 */
mrs x1, ESR_EL2
mov x2, SP
bl handle_exception
b restore_volatile_from_stack_and_return
.endm
/* Vector stub for exceptions from a lower EL (the realm): save the realm
 * context into the vCPU, call
 * handle_lower_exception(trap_id, esr, vcpu, sp); a non-zero return means
 * exit into RMM proper, otherwise resume the (possibly switched) vCPU. */
.macro HANDLER_LOWER source, kind
.align 7
save_volatile_to_vcpu
/* Setup arguments to exception handler */
mov x0, \source
movk x0, \kind, LSL #16
mrs x1, ESR_EL2
mrs x2, TPIDR_EL2
mov x3, SP
bl handle_lower_exception
/* Enter to rmm */
/* vcpu will be switched by rmm if needed */
cbnz x0, rmm_enter
mrs x0, tpidr_el2
b restore_nonvolatile_from_vcpu_and_run
.endm
.global rmm_enter
/* Enter RMM from the realm: finish saving the realm's state (non-volatile
 * GPRs plus EL1/EL2 system registers) into the vCPU at tpidr_el2, then pop
 * the RMM frame that rmm_exit pushed and return to rmm_exit's caller. */
rmm_enter:
/* Save non-volatile registers */
mrs x1, tpidr_el2
stp x19, x20, [x1, #VCPU_GP_REGS + 8 * 19]
stp x21, x22, [x1, #VCPU_GP_REGS + 8 * 21]
stp x23, x24, [x1, #VCPU_GP_REGS + 8 * 23]
stp x25, x26, [x1, #VCPU_GP_REGS + 8 * 25]
stp x27, x28, [x1, #VCPU_GP_REGS + 8 * 27]
/* Save system registers (same slot order that
 * restore_all_from_vcpu_and_run reads back) */
/* Use x28 as the base */
add x28, x1, #VCPU_SYS_REGS
/* mrs x2, sctlr_el2 */
mrs x3, sp_el1
str x3, [x28], #8
mrs x2, sp_el0
mrs x3, esr_el1
stp x2, x3, [x28], #16
mrs x2, vbar_el1
mrs x3, ttbr0_el1
stp x2, x3, [x28], #16
mrs x2, ttbr1_el1
mrs x3, mair_el1
stp x2, x3, [x28], #16
mrs x2, amair_el1
mrs x3, tcr_el1
stp x2, x3, [x28], #16
mrs x2, tpidr_el1
mrs x3, tpidr_el0
stp x2, x3, [x28], #16
mrs x2, tpidrro_el0
mrs x3, actlr_el1
stp x2, x3, [x28], #16
mrs x2, vmpidr_el2
mrs x3, csselr_el1
stp x2, x3, [x28], #16
mrs x2, cpacr_el1
mrs x3, afsr0_el1
stp x2, x3, [x28], #16
mrs x2, afsr1_el1
mrs x3, far_el1
stp x2, x3, [x28], #16
mrs x2, contextidr_el1
mrs x3, cntkctl_el1
stp x2, x3, [x28], #16
mrs x2, par_el1
mrs x3, vttbr_el2
stp x2, x3, [x28], #16
mrs x2, esr_el2
mrs x3, hpfar_el2
stp x2, x3, [x28], #16
mrs x3, sctlr_el1
str x3, [x28], #8
/* TODO: FP_REGS */
/* discard the three padding slots rmm_exit pushed to match TrapFrame */
ldr xzr, [SP], #8
ldr xzr, [SP], #8
ldr xzr, [SP], #8
ldr x0, [SP], #8
ldp x1, x2, [SP], #16
ldp x3, x4, [SP], #16
ldp x5, x6, [SP], #16
ldp x7, x8, [SP], #16
ldp x9, x10, [SP], #16
ldp x11, x12, [SP], #16
ldp x13, x14, [SP], #16
ldp x15, x16, [SP], #16
ldp x17, x18, [SP], #16
ldp x19, x20, [SP], #16
ldp x21, x22, [SP], #16
ldp x23, x24, [SP], #16
ldp x25, x26, [SP], #16
ldp x27, x28, [SP], #16
ldp x29, x30, [SP], #16
ret
.global rmm_exit
/* Exit RMM into the realm: push ALL of RMM's GPRs (plus three padding
 * slots so the frame matches TrapFrame) onto the EL2 stack -- rmm_enter
 * pops this exact frame -- then reload the realm context from the vCPU
 * and eret into it. */
rmm_exit:
stp x29, x30, [SP, #-16]!
stp x27, x28, [SP, #-16]!
stp x25, x26, [SP, #-16]!
stp x23, x24, [SP, #-16]!
stp x21, x22, [SP, #-16]!
stp x19, x20, [SP, #-16]!
stp x17, x18, [SP, #-16]!
stp x15, x16, [SP, #-16]!
stp x13, x14, [SP, #-16]!
stp x11, x12, [SP, #-16]!
stp x9, x10, [SP, #-16]!
stp x7, x8, [SP, #-16]!
stp x5, x6, [SP, #-16]!
stp x3, x4, [SP, #-16]!
stp x1, x2, [SP, #-16]!
str x0, [SP, #-8]!
/* store three more registers to match with TrapFrame */
str xzr, [SP, #-8]!
str xzr, [SP, #-8]!
str xzr, [SP, #-8]!
b restore_all_from_vcpu_and_run
.align 11
.global vectors
/* EL2 exception vector table (2 KiB aligned): same-EL entries use the
 * stack-based HANDLER; lower-EL entries (the realm) use HANDLER_LOWER,
 * which saves into the vCPU. Layout: 4 sources x 4 kinds, 128 B apart. */
vectors:
HANDLER 0, 0
HANDLER 0, 1
HANDLER 0, 2
HANDLER 0, 3
HANDLER 1, 0
HANDLER 1, 1
HANDLER 1, 2
HANDLER 1, 3
HANDLER_LOWER 2, 0
HANDLER_LOWER 2, 1
HANDLER_LOWER 2, 2
HANDLER_LOWER 2, 3
HANDLER_LOWER 3, 0
HANDLER_LOWER 3, 1
HANDLER_LOWER 3, 2
HANDLER_LOWER 3, 3
|
cndoit18/rCore-Tutorial | 1,588 | os/src/trap/trap.S | .altmacro
# Store / load general-purpose register x\n to/from the TrapContext at sp
# (one 8-byte slot per register, indexed by register number).
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
# Trap entry from U mode. TrapContext layout on the kernel stack
# (8-byte slots): 0..31 = x0..x31 (slot 2 holds the user sp),
# 32 = sstatus, 33 = sepc.
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
# falls through into __restore; a0 carries the context pointer back
__restore:
# case1: start running app by __restore (a0 = freshly built TrapContext)
# case2: back to U after handling trap (a0 = same context trap_handler got)
mv sp, a0
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)                  # saved user sp
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2               # staged; swapped into sp at the end
# restore general-purpuse registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
cocobricko/usb_device_01 | 524 | Firmware-Development/Rust/tock-bootloader-master/legacy/src/jumpfunc.s |
.syntax unified
.section .text.jumpfunc
.global jump_into_user_code
.thumb_func
// Hand control from the bootloader to the application (Cortex-M):
//  - point VTOR at the app's vector table,
//  - load the app's initial stack pointer from vector entry 0,
//  - branch to its reset handler from vector entry 1 (Thumb bit is part
//    of the stored vector).
// Never returns.
// NOTE(review): 0x10000 is a magic flash offset -- keep it in sync with
// the application's linker script.
jump_into_user_code:
ldr r0, =0x10000 //The address of the payload's .vectors
ldr r1, =0xe000ed08 //The address of the VTOR register (0xE000E000(SCS) + 0xD00(SCB) + 0x8(VTOR))
str r0, [r1] //Move the payload's VT address into the VTOR register
ldr r1, [r0] //Move the payload's initial SP into r1
mov sp, r1 //Set our SP to that
ldr r0, [r0, #4] //Load the payload's ENTRY into r0
bx r0 //Whoopee
|
codygunton/openvm | 8,423 | crates/toolchain/openvm/src/memset.s | // This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memset.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contributors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memset.c"
.globl memset
.p2align 2
.type memset,@function
# void *memset(void *dest /*a0*/, int c /*a1*/, size_t n /*a2*/)
# Compiler-generated (clang -O3) from musl's C memset -- do not hand-edit.
# Strategy: fill head and tail bytes directly (taking an early exit for
# small n at each step), then word-fill the 4-byte-aligned interior:
# head/tail words straight-line, remainder via a 32-byte unrolled loop.
# a0 (dest) is never written, so it doubles as the return value.
memset:
beqz a2, .LBB0_9memset
sb a1, 0(a0)                    # first and last byte
add a3, a2, a0                  # a3 = dest + n (one past the end)
li a4, 3
sb a1, -1(a3)
bltu a2, a4, .LBB0_9memset      # n < 3: done
sb a1, 1(a0)                    # bytes 1,2 and n-2,n-3
sb a1, 2(a0)
sb a1, -2(a3)
li a4, 7
sb a1, -3(a3)
bltu a2, a4, .LBB0_9memset      # n < 7: done
sb a1, 3(a0)
li a5, 9
sb a1, -4(a3)
bltu a2, a5, .LBB0_9memset      # n < 9: done
neg a3, a0                      # a4 = (-dest) & 3: bytes to 4-byte alignment
andi a4, a3, 3
add a3, a0, a4                  # a3 = aligned start
sub a2, a2, a4
andi a2, a2, -4                 # a2 = aligned byte count
andi a1, a1, 255
lui a4, 4112                    # a4 = 0x01010101 ...
addi a4, a4, 257
mul a1, a1, a4                  # ... replicate fill byte into all 4 lanes
sw a1, 0(a3)                    # first and last aligned word
add a4, a3, a2                  # a4 = one past the aligned end
sw a1, -4(a4)
bltu a2, a5, .LBB0_9memset      # < 9 aligned bytes: done
sw a1, 4(a3)                    # words 1,2 and tail words
sw a1, 8(a3)
sw a1, -12(a4)
li a5, 25
sw a1, -8(a4)
bltu a2, a5, .LBB0_9memset      # < 25 aligned bytes: done
sw a1, 12(a3)                   # next 4 head words / 4 tail words
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, -28(a4)
sw a1, -24(a4)
sw a1, -20(a4)
andi a5, a3, 4                  # a5 = 24 or 28: head bytes already written,
ori a5, a5, 24                  # keeping the loop pointer 8-byte aligned
sub a2, a2, a5
li a6, 32
sw a1, -16(a4)
bltu a2, a6, .LBB0_9memset      # < 32 bytes left: tail stores covered them
add a3, a3, a5
li a4, 31
.LBB0_8memset:                  # main loop: 32 bytes per iteration
sw a1, 0(a3)
sw a1, 4(a3)
sw a1, 8(a3)
sw a1, 12(a3)
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, 28(a3)
addi a2, a2, -32
addi a3, a3, 32
bltu a4, a2, .LBB0_8memset
.LBB0_9memset:
ret
.Lfunc_end0memset:
.size memset, .Lfunc_end0memset-memset
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig
|
codygunton/openvm | 11,828 | crates/toolchain/openvm/src/memcpy.s | // This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memcpy.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contributors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/*, arch/(arch)/bits/*) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/(arch)/crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memcpy.c"
# =============================================================================
# void *memcpy(void *dest, const void *src, size_t n)
# RV32IM, ilp32 ABI. Compiler output (clang 14) from musl's memcpy.c.
# In:   a0 = dest, a1 = src, a2 = n
# Out:  a0 = dest (a0 is never written below)
# Clobbers: a1-a7, t0, flags none (RISC-V has no flags)
# Strategy:
#   1. byte-copy until src is word-aligned (or n == 0)
#   2. if dest is also word-aligned: copy 16 bytes per iteration with lw/sw
#   3. if dest % 4 is 1/2/3 and n >= 32: read aligned words from src and
#      realign them into dest with srli/slli/or merging (one loop per case)
#   4. finish the remaining tail with byte copies
# Throughout: a3 = dest cursor, a4 = src cursor, a2 = bytes remaining.
# =============================================================================
.globl memcpy
.p2align 2
.type memcpy,@function
memcpy:
# Skip the head loop when src is already aligned or n == 0.
andi a3, a1, 3
seqz a3, a3
seqz a4, a2
or a3, a3, a4
bnez a3, .LBBmemcpy0_11
addi a5, a1, 1
mv a6, a0
# Head loop: copy one byte at a time while (src % 4 != 0 && n != 0).
# a5 tracks src+1 so the alignment test sees the *next* src address.
.LBBmemcpy0_2:
lb a7, 0(a1)
addi a4, a1, 1
addi a3, a6, 1
sb a7, 0(a6)
addi a2, a2, -1
andi a1, a5, 3
snez a1, a1
snez a6, a2
and a7, a1, a6
addi a5, a5, 1
mv a1, a4
mv a6, a3
bnez a7, .LBBmemcpy0_2
# a1 = dest misalignment after the head copy.
andi a1, a3, 3
beqz a1, .LBBmemcpy0_12
# Misaligned dest: only worth the shifted word loops when n >= 32.
# Dispatch on dest % 4 (3 -> _19, 2 -> _22, 1 -> fall through).
.LBBmemcpy0_4:
li a5, 32
bltu a2, a5, .LBBmemcpy0_26
li a5, 3
beq a1, a5, .LBBmemcpy0_19
li a5, 2
beq a1, a5, .LBBmemcpy0_22
li a5, 1
bne a1, a5, .LBBmemcpy0_26
# dest % 4 == 1: emit 3 bytes of the first src word to align dest,
# then merge subsequent words as (prev >> 24) | (next << 8).
lw a5, 0(a4)
sb a5, 0(a3)
srli a1, a5, 8
sb a1, 1(a3)
srli a6, a5, 16
addi a1, a3, 3
sb a6, 2(a3)
addi a2, a2, -3
addi a3, a4, 16
li a4, 16
# a1 = dest cursor (now aligned), a3 = src cursor + 16, a4 = loop threshold.
.LBBmemcpy0_9:
lw a6, -12(a3)
srli a5, a5, 24
slli a7, a6, 8
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 24
slli a6, t0, 8
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 24
slli t0, a7, 8
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 24
slli a7, a5, 8
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_9
# Rewind src cursor to the first unconsumed byte (16 - 3 = 13 behind).
addi a4, a3, -13
j .LBBmemcpy0_25
# Entry path when the head loop was skipped: set up cursors from the args.
.LBBmemcpy0_11:
mv a3, a0
mv a4, a1
andi a1, a3, 3
bnez a1, .LBBmemcpy0_4
# Both cursors word-aligned: copy 16 bytes per iteration while n > 16.
.LBBmemcpy0_12:
li a1, 16
bltu a2, a1, .LBBmemcpy0_15
li a1, 15
.LBBmemcpy0_14:
lw a5, 0(a4)
lw a6, 4(a4)
lw a7, 8(a4)
lw t0, 12(a4)
sw a5, 0(a3)
sw a6, 4(a3)
sw a7, 8(a3)
sw t0, 12(a3)
addi a4, a4, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a1, a2, .LBBmemcpy0_14
# Aligned tails: 8 bytes, then 4 bytes, then the byte tail at _30.
.LBBmemcpy0_15:
andi a1, a2, 8
beqz a1, .LBBmemcpy0_17
lw a1, 0(a4)
lw a5, 4(a4)
sw a1, 0(a3)
sw a5, 4(a3)
addi a3, a3, 8
addi a4, a4, 8
.LBBmemcpy0_17:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
lw a1, 0(a4)
sw a1, 0(a3)
addi a3, a3, 4
addi a4, a4, 4
j .LBBmemcpy0_30
# dest % 4 == 3: emit 1 byte to align dest, then merge as
# (prev >> 8) | (next << 24); loop threshold 18 keeps >= 2 bytes for exit.
.LBBmemcpy0_19:
lw a5, 0(a4)
addi a1, a3, 1
sb a5, 0(a3)
addi a2, a2, -1
addi a3, a4, 16
li a4, 18
.LBBmemcpy0_20:
lw a6, -12(a3)
srli a5, a5, 8
slli a7, a6, 24
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 8
slli a6, t0, 24
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 8
slli t0, a7, 24
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 8
slli a7, a5, 24
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_20
addi a4, a3, -15
j .LBBmemcpy0_25
# dest % 4 == 2: emit 2 bytes to align dest, then merge as
# (prev >> 16) | (next << 16); loop threshold 17.
.LBBmemcpy0_22:
lw a5, 0(a4)
sb a5, 0(a3)
srli a6, a5, 8
addi a1, a3, 2
sb a6, 1(a3)
addi a2, a2, -2
addi a3, a4, 16
li a4, 17
.LBBmemcpy0_23:
lw a6, -12(a3)
srli a5, a5, 16
slli a7, a6, 16
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 16
slli a6, t0, 16
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 16
slli t0, a7, 16
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 16
slli a7, a5, 16
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_23
addi a4, a3, -14
# Common exit of the three shifted loops: a1 holds the dest cursor.
.LBBmemcpy0_25:
mv a3, a1
# Byte tail: handle the remaining 0..31 bytes by testing bits of n.
.LBBmemcpy0_26:
andi a1, a2, 16
bnez a1, .LBBmemcpy0_35
andi a1, a2, 8
bnez a1, .LBBmemcpy0_36
.LBBmemcpy0_28:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
# Copy 4 bytes.
.LBBmemcpy0_29:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
addi a4, a4, 4
addi a5, a3, 4
sb a1, 3(a3)
mv a3, a5
.LBBmemcpy0_30:
andi a1, a2, 2
bnez a1, .LBBmemcpy0_33
andi a1, a2, 1
bnez a1, .LBBmemcpy0_34
.LBBmemcpy0_32:
ret
# Copy 2 bytes, then fall into the last-byte check.
.LBBmemcpy0_33:
lb a1, 0(a4)
lb a5, 1(a4)
sb a1, 0(a3)
addi a4, a4, 2
addi a1, a3, 2
sb a5, 1(a3)
mv a3, a1
andi a1, a2, 1
beqz a1, .LBBmemcpy0_32
# Copy the final byte.
.LBBmemcpy0_34:
lb a1, 0(a4)
sb a1, 0(a3)
ret
# Copy 16 bytes (interleaved loads/stores to hide load latency).
.LBBmemcpy0_35:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
lb a1, 8(a4)
lb a6, 9(a4)
sb a5, 7(a3)
lb a5, 10(a4)
sb a1, 8(a3)
sb a6, 9(a3)
lb a1, 11(a4)
sb a5, 10(a3)
lb a5, 12(a4)
lb a6, 13(a4)
sb a1, 11(a3)
lb a1, 14(a4)
sb a5, 12(a3)
sb a6, 13(a3)
lb a5, 15(a4)
sb a1, 14(a3)
addi a4, a4, 16
addi a1, a3, 16
sb a5, 15(a3)
mv a3, a1
andi a1, a2, 8
beqz a1, .LBBmemcpy0_28
# Copy 8 bytes, then check the 4-byte bit.
.LBBmemcpy0_36:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
addi a4, a4, 8
addi a1, a3, 8
sb a5, 7(a3)
mv a3, a1
andi a1, a2, 4
bnez a1, .LBBmemcpy0_29
j .LBBmemcpy0_30
.Lfuncmemcpy_end0:
.size memcpy, .Lfuncmemcpy_end0-memcpy
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig
|
codygunton/openvm | 312 | crates/toolchain/tests/tests/data/fib.S | .text
.global _start
# Iterative Fibonacci test input (RISC-V, openvm custom ISA).
# a0 = remaining iterations (15), a1/a2 = consecutive Fibonacci values.
# After the loop a1 = fib(15), a2 = fib(16).
_start:
li a0, 15
li a1, 0
li a2, 1
# jal only falls through to the next instruction here; its sole effect is
# writing the return address into x1 (which is never used).
jal x1, loop
loop:
beq a0, zero, exit
addi a0, a0, -1
add a3, a1, a2
add a1, zero, a2
add a2, zero, a3
# Back-edge: again via jal, clobbering x1 each iteration.
jal x1, loop
exit:
# Exit program
# .insn i encodes an I-type instruction on the custom-0 opcode (0x0b),
# funct3 0, rd = rs1 = x0, imm 0 -- the VM's terminate instruction.
.insn i 0x0b, 0, x0, x0, 0
|
codygunton/openvm | 593 | crates/toolchain/tests/tests/data/intrin.S | #define CUSTOM_0 0x0b
#define CUSTOM_1 0x2b
# Modular-arithmetic intrinsic test input (RISC-V, openvm custom ISA).
# Each macro emits an R-type instruction on the custom-1 opcode (0x2b);
# the funct7 field (third .insn operand) selects the operation and the
# modulus bank (0/1/2/4 for modulus 1; 8 for addmod on modulus 2).
.macro addmod_1 rd, rs1, rs2
.insn r CUSTOM_1, 0, 0, \rd, \rs1, \rs2
.endm
.macro submod_1 rd, rs1, rs2
.insn r CUSTOM_1, 0, 1, \rd, \rs1, \rs2
.endm
.macro mulmod_1 rd, rs1, rs2
.insn r CUSTOM_1, 0, 2, \rd, \rs1, \rs2
.endm
.macro iseqmod_1 rd, rs1, rs2
.insn r CUSTOM_1, 0, 4, \rd, \rs1, \rs2
.endm
.macro addmod_2 rd, rs1, rs2
.insn r CUSTOM_1, 0, 8, \rd, \rs1, \rs2
.endm
.global _start
# Exercise each intrinsic once on a0/a1, then terminate via custom-0.
_start:
addmod_1 a2, a0, a1
addmod_2 a2, a0, a1
submod_1 a3, a0, a1
mulmod_1 a4, a0, a1
iseqmod_1 a6, a0, a1
# Terminate (custom-0 opcode, imm 0 = exit code 0).
.insn i 0x0b, 0, x0, x0, 0
|
codygunton/openvm | 355 | crates/toolchain/tests/tests/data/terminate.S | #define CUSTOM_0 0x0b
#define CUSTOM_1 0x2b
# Terminate-instruction test input (RISC-V, openvm custom ISA).
# NOTE(review): funct7 = 4 is labelled addmod_2 here but iseqmod_1 in the
# sibling intrin.S -- the macros are unused below, so only the encodings
# matter; confirm intended funct7 assignments against the VM definition.
.macro addmod_1 rd, rs1, rs2
.insn r CUSTOM_1, 0, 0, \rd, \rs1, \rs2
.endm
.macro addmod_2 rd, rs1, rs2
.insn r CUSTOM_1, 0, 4, \rd, \rs1, \rs2
.endm
# terminate: I-type on the custom-0 opcode; the immediate is the exit code.
.macro terminate ec
.insn i CUSTOM_0, 0, x0, x0, \ec
.endm
.global _start
_start:
# Write to x0 -- architecturally a no-op; present to test x0 handling.
li zero, 1
add a0, a0, zero
# Numeric branch target: jumps to absolute address 8 when a0 != a1
# (presumably an intentional skip/fault path -- verify against the harness).
bne a0, a1, 8
terminate 0
# Fallback exit code 1 (reached only if the terminate above is skipped).
terminate 1
|
codygunton/openvm | 337 | crates/toolchain/tests/tests/data/exp.S | .global _start
# Modular exponentiation test input (RISC-V, uses M-extension mul/rem).
# Computes 57^(p-2) mod p for p = 10007 (prime) by square-and-multiply,
# i.e. the modular inverse of 57 (Fermat), then checks 57 * inverse == 1.
_start:
li a0, 57
li a2, 10007
# a1 = exponent p - 2; a4 = accumulator.
addi a1, a2, -2
li a4, 1
loop:
beqz a1, finish
# t3 = low bit of exponent; shift exponent right.
andi t3, a1, 1
srli a1, a1, 1
beqz t3, tmp
# Odd bit set: acc = acc * base mod p.
mul a4, a4, a0
rem a4, a4, a2
tmp:
# Square the base each round: base = base^2 mod p.
mul a0, a0, a0
rem a0, a0, a2
# Back-edge; jal clobbers t4 with the return address (never used).
jal t4, loop
finish:
# Verify: 57 * 57^(p-2) mod p must be 1.
li a0, 57
mul a0, a0, a4
rem a0, a0, a2
li a1, 1
# Numeric target: on failure branch to absolute address 228
# (presumably an intentional fault path -- verify against the harness).
bne a0, a1, 228
# Terminate (custom-0 opcode, exit code 0).
.insn i 0x0b, 0, x0, x0, 0
|
CommanderCortex/CortexARM_OS | 715 | src/boot.s | .section .text._start
.global _start
.type _start, @function
# AArch64 EL1 entry point: set up the stack, enable FP/SIMD, apply
# load-address relocations, then call the kernel's main().
# NOTE(review): the `{}` fields below are template placeholders filled in
# by a build/generation step -- this file is not assemblable as-is; confirm
# the substitution mechanism.
_start:
adr x7, {} // stack start
mov x8, {} // stack size
add x7, x7, x8 // stack end
mov sp, x7 // set stack pointer (stacks grow down, so start at the top)
// Enable FP/SIMD: set CPACR_EL1.FPEN (bits 21:20) so EL0/EL1 FP traps off.
mrs x7, cpacr_el1 // Read CPACR_EL1
orr x7, x7, #(3 << 20) // Set bits 20 and 21
msr cpacr_el1, x7 // Write CPACR_EL1
// Relocation args: x0 = actual load address of the image,
// x1/x2 = start/end of the relocation table (linker-provided symbols).
adr x0, _start
adr x1, _rela_start
adr x2, _rela_end
bl _relocate_binary
// NOTE(review): if main() ever returns, execution falls through into
// _relocate_binary -- consider an infinite loop (wfe) after this call.
bl main // call main
// _relocate_binary: add the runtime load address to every word in the
// relocation table.
// In:  x0 = load base, x1 = table start, x2 = table end (exclusive).
// Out: table entries patched in place; x1 == x2.
// Clobbers: x1, x3.
// NOTE(review): entries are treated as bare 8-byte address slots, not full
// 24-byte Elf64_Rela records -- confirm against the linker script.
// Fix: the original was a do-while and patched one word even when the table
// was empty (x1 == x2 on entry), corrupting memory at _rela_start. The loop
// now tests the bound before the first iteration.
_relocate_binary:
b 2f // enter at the condition so an empty table does nothing
1:
ldr x3, [x1] // load stored address
add x3, x3, x0 // rebase by the load address
str x3, [x1] // write it back
add x1, x1, #8 // next 8-byte entry
2:
cmp x1, x2
b.ne 1b
ret
|
comsec-group/milesan-meta | 459 | pocs/milesan/init/init-mds.S | # User mode initialization for MDS.
#
# Perform some operations in user mode to make sure that microarchitectural
# buffers are filled with valid entries before we run the test case.
# This page should not be touched by the rest of the program.
#define FORBIDDEN_ADDR 0x8000C000
.section .u_text , "ax"
init_uarch:
li s0, FORBIDDEN_ADDR
# 16 unrolled load/store pairs walking FORBIDDEN_ADDR upward with a 32-byte
# stride, so microarchitectural buffers (load/store/fill) hold entries from
# this page before the test case runs.
# NOTE(review): t1 is stored without being initialized here -- presumably
# deliberate for this MDS PoC (its stale value is the point); verify.
.rep 16
ld t0, 0(s0)
sd t1, 16(s0)
addi s0, s0, 32
.endr
# Padding for alignment
nop
|
comsec-group/milesan-meta | 5,124 | pocs/milesan/init/init.S | /* PMP configuration */
#define PMP_R 0x01
#define PMP_W 0x02
#define PMP_X 0x04
#define PMP_A 0x18
#define PMP_A_TOR 0x08
#define PMP_A_NA4 0x10
#define PMP_A_NAPOT 0x18
#define PMP_L 0x80
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
#define CSR_MCAUSE 0x342
#define CSR_MTVAL 0x343
#define CSR_MIP 0x344
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPADDR0 0x3b0
#define CSR_MHARTID 0xf14
#include "memcfg.h"
// ------------------ Machine-Mode Code
.section .init
# Machine-mode boot: install the trap vector, configure mstatus/PMP,
# then mret into supervisor mode at s_code.
init_trap_vector:
// all traps will end up in the pc
// being set to trap_handler (direct-mode mtvec)
lla t0, trap_handler
csrw mtvec, t0
change_to_S:
// clear Trap Virtual Memory(TVM) bit (mstatus bit 20) so S-mode may
// write satp / execute sfence.vma without trapping
li s1, 0x00100000
csrc mstatus, s1
// set MPP such that we return to S mode:
// MPP is mstatus[12:11]; clear bit 12, set bit 11 -> MPP = 01 (S)
li s1, 0x00001000
csrc mstatus, s1
li s1, 0x00000800
csrs mstatus, s1
// Setup a PMP to permit access to all of memory
// (pmpaddr0 = all ones + NAPOT covers the whole address space, RWX)
li a0, -1
csrw CSR_PMPADDR0, a0
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
// load address of S mode code
lla s2, s_code
csrw mepc, s2
// return to S mode code (mret jumps to mepc with privilege MPP)
mret
.align 4
# Any M-mode trap lands here (mtvec, direct mode): dump all GPRs, then
# raise the stop signal so the simulation/testbench ends the run.
# NOTE(review): every sd targets offset 0(a1) -- presumably REGDUMP_ADDR is
# an MMIO port that captures each write in sequence rather than a memory
# buffer; if it is plain memory, these should be offsets 0,8,...,248.
# Confirm against the testbench. Also note a1 itself is clobbered by the
# initial li, so the dumped a1 value is REGDUMP_ADDR, not the trapped value.
trap_handler:
li a1, REGDUMP_ADDR
# Store all registers to the address in a1
sd x0, 0(a1)
sd ra, 0(a1)
sd sp, 0(a1)
sd gp, 0(a1)
sd tp, 0(a1)
sd t0, 0(a1)
sd t1, 0(a1)
sd t2, 0(a1)
sd s0, 0(a1)
sd s1, 0(a1)
sd a0, 0(a1)
sd a1, 0(a1)
sd a2, 0(a1)
sd a3, 0(a1)
sd a4, 0(a1)
sd a5, 0(a1)
sd a6, 0(a1)
sd a7, 0(a1)
sd s2, 0(a1)
sd s3, 0(a1)
sd s4, 0(a1)
sd s5, 0(a1)
sd s6, 0(a1)
sd s7, 0(a1)
sd s8, 0(a1)
sd s9, 0(a1)
sd s10, 0(a1)
sd s11, 0(a1)
sd t3, 0(a1)
sd t4, 0(a1)
sd t5, 0(a1)
sd t6, 0(a1)
# Write STOPSIG_ADDR to itself: signals the harness to stop the run.
# There is no mret -- the handler is terminal by design (presumably the
# simulation halts on this store; verify).
li a0, STOPSIG_ADDR
sd a0, 0(a0)
// ------------------ Supervisor Code
.align 4
# Supervisor-mode stage: enable paging, warm the cache with tainted data,
# then prepare the sret into user mode (init_regs follows).
s_code:
set_page_table:
// set the satp register and page table accordingly
// page table address is 0x00000080002 (actual address 0x80002000)
// ASID is 0x0004 (random value)
// MODE is 0x8 (for Sv39)
// satp layout: MODE[63:60] | ASID[59:44] | PPN[43:0]
li s0, 0x8000400000080002
#ifndef NO_MMU
csrw satp, s0
#endif
init_cache:
// Load taint into the cache hierarchy.
// (one dummy load from s_mem+8; the loaded value is discarded)
lla s0, s_mem
addi s0, s0, 8
ld s0, 0(s0)
prepare_change_to_U:
// set SPP such that we return to U mode (clear sstatus bit 8)
li s1, 0x00000100
csrc sstatus, s1
// load address of user mode code
lla s2, u_code
# li s2, 0x80008000
csrw sepc, s2
init_regs:
// Seed every fuzzing-visible GPR (x1-x24) with a known value before
// entering U mode, in groups of four pointing at the relevant pages.
// Fix: the "User code" group wrote x15 twice and never initialized x14;
// the first duplicate is corrected to x14 so x13-x16 all hold 0x8000A000.
// Basic regs
li x1, 0x80008000
li x2, 0x80008000
li x3, 0x80008000
li x4, 0x80008000
// User data
li x5, 0x80009000
li x6, 0x80009000
li x7, 0x80009000
li x8, 0x80009000
// Supervisor data
li x9, 0x80001000
li x10, 0x80001000
li x11, 0x80001000
li x12, 0x80001000
// User code
li x13, 0x8000A000
li x14, 0x8000A000
li x15, 0x8000A000
li x16, 0x8000A000
// Other user data
li x17, 0x8000B000
li x18, 0x8000B000
li x19, 0x8000B000
li x20, 0x8000B000
// Power of two constants
li x21, 8
li x22, 16
li x23, 32
li x24, 64
change_to_u:
// return to U mode code (sret jumps to sepc with privilege SPP = U)
sret
// ------------------ Supervisor Data
.section .s_data , "adw"
// Supervisor data page: 128 zeroed bytes (init_cache loads from s_mem+8).
s_mem:
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
.dword 0x0000000000000000
// ------------------ Page Tables ---------------------
// Sv39 PTE format (RISC-V privileged spec): PPN = entry >> 10, phys addr =
// PPN << 12; low 8 bits are flags D A G U X W R V. An entry with only V set
// is a pointer to the next-level table. E.g. 0x20001401 -> PPN 0x80005 ->
// points at 0x80005000 (table2_addr0); 0x2000004b -> leaf at 0x80000000
// with V|R|X|A (executable text).
// table1@0x80002000 (root; satp.PPN = 0x80002)
.section .table1 , "adw"
table1:
.dword 0x20001401 // -> table2_addr0 @ 0x80005000 (non-leaf, V)
.dword 0x0 // empty
.dword 0x20000c01 // -> table2 @ 0x80003000 (non-leaf, V)
// table2@0x80003000
.section .table2 , "adw"
table2:
.dword 0x20001001 // -> table3 @ 0x80004000 (non-leaf, V)
// table3@0x80004000 (leaf level, 4 KiB pages)
.section .table3 , "adw"
table3:
.dword 0x2000004b // text @ 0x80000000 (V|R|X|A)
.dword 0x200004c7 // s_data @ 0x80001000 (V|R|W|A|D)
.dword 0x0 // table1 (unmapped)
.dword 0x0 // table2 (unmapped)
.dword 0x0 // table3 (unmapped)
.dword 0x0 // table2_addr0 (unmapped)
.dword 0x0 // table3_addr0 (unmapped)
.dword 0x0 // empty
.dword 0x2000205b // u_text @ 0x80008000 (V|R|X|U|A: user-executable)
.dword 0x200024d7 // u_data @ 0x80009000 (V|R|W|U|A|D)
.dword 0x200028d7 // u_data @ 0x8000A000
.dword 0x20002cd7 // u_data @ 0x8000B000
.dword 0x200030d7 // u_data @ 0x8000C000 (MDS / FORBIDDEN_ADDR page)
// table2_addr0@0x80005000 (second mapping rooted at VA 0)
.section .table2_addr0 , "adw"
table2_addr0:
.dword 0x20001801 // -> table3_addr0 @ 0x80006000 (non-leaf, V)
// table3_addr0@0x80006000
.section .table3_addr0 , "adw"
table3_addr0:
.dword 0x2000205b // u_text also mapped at low VA (V|R|X|U|A)
// empty (for binary alignment): 0x200 * 8 = 0x1000 bytes = one page of pad
.section .empty , "ax"
empty:
.rep 0x200
.dword 0x0
.endr
// ---------------- End Page Tables -------------------
// this is the code section for user mode code
.section .u_text , "ax"
// User-mode payload entry (sepc target). Empty in this template --
// presumably the fuzzer-generated test case is placed/linked here; verify.
u_code:
// user mode code
|
comsec-group/milesan-meta | 424 | pocs/milesan/boot/bootrom.S | #define BOOTADDR_REG 0x4000
#define DRAM_BASE 0x0000000
# Minimal boot ROM: three entry points (normal start, reset-vector hang,
# and the 0x80-offset variant) that all jump to DRAM_BASE.
# NOTE(review): each `auipc t0,0x0` result is immediately overwritten by the
# following li -- dead code, presumably retained for parity with a reference
# bootrom that used PC-relative addressing; verify before removing.
.section .text.start, "ax", @progbits
.globl _start
_start:
auipc t0,0x0
li t0, DRAM_BASE
jr t0
.section .text.hang, "ax", @progbits
.globl _hang
_hang: // reset vector
auipc t0,0x0
li t0, DRAM_BASE
jr t0
.section .text.hang80, "ax", @progbits
.globl _hang80
_hang80:
auipc t0,0x0
li t0, DRAM_BASE
jr t0
.align 3
# Device-tree blob is appended here (8-byte aligned); empty in this source.
_dtb:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.