repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
jontindal/pyriscv
607
firmware/start.S
.section ._start _start: # Set stack pointer la sp, _estack # Copy data section from ROM to RAM la a0, _sidata la a1, _sdata la a2, _edata bge a1, a2, end_init_data loop_init_data: lw a3, 0(a0) sw a3, 0(a1) addi a0, a0, 4 addi a1, a1, 4 blt a1, a2, loop_init_data end_init_data: # Initialise BSS section with zeros la a0, _sbss la a1, _ebss bge a0, a1, end_init_bss loop_init_bss: sw zero, 0(a0) addi a0, a0, 4 blt a0, a1, loop_init_bss end_init_bss: # Call main function call main # Stop execution ebreak
Josen-B/ci_arceos
2,001
modules/axhal/linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) *(.text .text.*) . = ALIGN(4K); _etext = .; } _srodata = .; .rodata : ALIGN(4K) { *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) } .init_array : ALIGN(0x10) { __init_array_start = .; *(.init_array .init_array.*) __init_array_end = .; } . = ALIGN(4K); _erodata = .; .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; _percpu_end = _percpu_start + SIZEOF(.percpu); .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = _percpu_load_start + ALIGN(64) * %SMP%; } . = _percpu_end; . = ALIGN(4K); _edata = .; .bss : AT(.) ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } } SECTIONS { linkme_IRQ : { *(linkme_IRQ) } linkm2_IRQ : { *(linkm2_IRQ) } linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) } linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) } linkme_SYSCALL : { *(linkme_SYSCALL) } linkm2_SYSCALL : { *(linkm2_SYSCALL) } axns_resource : { *(axns_resource) } } INSERT AFTER .tbss;
Josen-B/ci_arceos
1,839
modules/axhal/src/arch/riscv/trap.S
.macro SAVE_REGS, from_user addi sp, sp, -{trapframe_size} PUSH_GENERAL_REGS csrr t0, sepc csrr t1, sstatus csrrw t2, sscratch, zero // save sscratch (sp) and zero it STR t0, sp, 31 // tf.sepc STR t1, sp, 32 // tf.sstatus STR t2, sp, 1 // tf.regs.sp .if \from_user == 1 LDR t0, sp, 2 // load supervisor gp LDR t1, sp, 3 // load supervisor tp STR gp, sp, 2 // save user gp and tp STR tp, sp, 3 mv gp, t0 mv tp, t1 .endif .endm .macro RESTORE_REGS, from_user .if \from_user == 1 LDR t1, sp, 2 // load user gp and tp LDR t0, sp, 3 STR gp, sp, 2 // save supervisor gp STR tp, sp, 3 // save supervisor gp and tp mv gp, t1 mv tp, t0 addi t0, sp, {trapframe_size} // put supervisor sp to scratch csrw sscratch, t0 .endif LDR t0, sp, 31 LDR t1, sp, 32 csrw sepc, t0 csrw sstatus, t1 POP_GENERAL_REGS LDR sp, sp, 1 // load sp from tf.regs.sp .endm .section .text .balign 4 .global trap_vector_base trap_vector_base: // sscratch == 0: trap from S mode // sscratch != 0: trap from U mode csrrw sp, sscratch, sp // swap sscratch and sp bnez sp, .Ltrap_entry_u csrr sp, sscratch // put supervisor sp back j .Ltrap_entry_s .Ltrap_entry_s: SAVE_REGS 0 mv a0, sp li a1, 0 call riscv_trap_handler RESTORE_REGS 0 sret .Ltrap_entry_u: SAVE_REGS 1 mv a0, sp li a1, 1 call riscv_trap_handler RESTORE_REGS 1 sret
Josen-B/ci_arceos
2,616
modules/axhal/src/arch/aarch64/trap.S
.macro SAVE_REGS sub sp, sp, 34 * 8 stp x0, x1, [sp] stp x2, x3, [sp, 2 * 8] stp x4, x5, [sp, 4 * 8] stp x6, x7, [sp, 6 * 8] stp x8, x9, [sp, 8 * 8] stp x10, x11, [sp, 10 * 8] stp x12, x13, [sp, 12 * 8] stp x14, x15, [sp, 14 * 8] stp x16, x17, [sp, 16 * 8] stp x18, x19, [sp, 18 * 8] stp x20, x21, [sp, 20 * 8] stp x22, x23, [sp, 22 * 8] stp x24, x25, [sp, 24 * 8] stp x26, x27, [sp, 26 * 8] stp x28, x29, [sp, 28 * 8] mrs x9, sp_el0 mrs x10, elr_el1 mrs x11, spsr_el1 stp x30, x9, [sp, 30 * 8] stp x10, x11, [sp, 32 * 8] # We may have interrupted userspace, or a guest, or exit-from or # return-to either of those. So we can't trust sp_el0, and need to # restore it. bl {cache_current_task_ptr} .endm .macro RESTORE_REGS ldp x10, x11, [sp, 32 * 8] ldp x30, x9, [sp, 30 * 8] msr sp_el0, x9 msr elr_el1, x10 msr spsr_el1, x11 ldp x28, x29, [sp, 28 * 8] ldp x26, x27, [sp, 26 * 8] ldp x24, x25, [sp, 24 * 8] ldp x22, x23, [sp, 22 * 8] ldp x20, x21, [sp, 20 * 8] ldp x18, x19, [sp, 18 * 8] ldp x16, x17, [sp, 16 * 8] ldp x14, x15, [sp, 14 * 8] ldp x12, x13, [sp, 12 * 8] ldp x10, x11, [sp, 10 * 8] ldp x8, x9, [sp, 8 * 8] ldp x6, x7, [sp, 6 * 8] ldp x4, x5, [sp, 4 * 8] ldp x2, x3, [sp, 2 * 8] ldp x0, x1, [sp] add sp, sp, 34 * 8 .endm .macro INVALID_EXCP, kind, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \kind mov x2, \source bl invalid_exception b .Lexception_return .endm .macro HANDLE_SYNC .p2align 7 SAVE_REGS mov x0, sp bl handle_sync_exception b .Lexception_return .endm .macro HANDLE_IRQ .p2align 7 SAVE_REGS mov x0, sp bl handle_irq_exception b .Lexception_return .endm .section .text .p2align 11 .global exception_vector_base exception_vector_base: // current EL, with SP_EL0 INVALID_EXCP 0 0 INVALID_EXCP 1 0 INVALID_EXCP 2 0 INVALID_EXCP 3 0 // current EL, with SP_ELx HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 1 INVALID_EXCP 3 1 // lower EL, aarch64 HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 2 INVALID_EXCP 3 2 // lower EL, aarch32 INVALID_EXCP 0 3 INVALID_EXCP 1 3 INVALID_EXCP 2 3 
INVALID_EXCP 3 3 .Lexception_return: RESTORE_REGS eret
Josen-B/ci_arceos
1,339
modules/axhal/src/arch/x86_64/syscall.S
.section .text .code64 syscall_entry: swapgs // switch to kernel gs mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp // save user rsp mov rsp, gs:[offset __PERCPU_TSS + {tss_rsp0_offset}] // switch to kernel stack sub rsp, 8 // skip user ss push gs:[offset __PERCPU_USER_RSP_OFFSET] // user rsp push r11 // rflags mov [rsp - 2 * 8], rcx // rip sub rsp, 4 * 8 // skip until general registers push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_syscall_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 add rsp, 7 * 8 mov rcx, [rsp - 5 * 8] // rip mov r11, [rsp - 3 * 8] // rflags mov rsp, [rsp - 2 * 8] // user rsp swapgs sysretq
Josen-B/ci_arceos
1,505
modules/axhal/src/arch/x86_64/trap.S
.equ NUM_INT, 256 .altmacro .macro DEF_HANDLER, i .Ltrap_handler_\i: .if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17 # error code pushed by CPU push \i # interrupt vector jmp .Ltrap_common .else push 0 # fill in error code in TrapFrame push \i # interrupt vector jmp .Ltrap_common .endif .endm .macro DEF_TABLE_ENTRY, i .quad .Ltrap_handler_\i .endm .section .text .code64 _trap_handlers: .set i, 0 .rept NUM_INT DEF_HANDLER %i .set i, i + 1 .endr .Ltrap_common: test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space jz 1f swapgs 1: push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_trap_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space jz 2f swapgs 2: add rsp, 16 # pop vector, error_code iretq .section .rodata .global trap_handler_table trap_handler_table: .set i, 0 .rept NUM_INT DEF_TABLE_ENTRY %i .set i, i + 1 .endr
Josen-B/ci_arceos
1,705
modules/axhal/src/arch/loongarch64/trap.S
.macro SAVE_REGS, from_user move $t0, $sp .if \from_user == 1 csrrd $sp, KSAVE_KSP // restore kernel sp addi.d $sp, $sp, -{trapframe_size} STD $tp, $sp, 2 STD $r21, $sp, 21 csrrd $tp, KSAVE_TP csrrd $r21, KSAVE_R21 .else addi.d $sp, $sp, -{trapframe_size} .endif STD $t0, $sp, 3 csrrd $t0, KSAVE_TEMP PUSH_GENERAL_REGS csrrd $t1, LA_CSR_PRMD csrrd $t2, LA_CSR_ERA STD $t1, $sp, 32 // prmd STD $t2, $sp, 33 // era .endm .macro RESTORE_REGS, from_user .if \from_user == 1 csrwr $tp, KSAVE_TP csrwr $r21, KSAVE_R21 LDD $tp, $sp, 2 LDD $r21, $sp, 21 .endif LDD $t1, $sp, 33 // era LDD $t2, $sp, 32 // prmd csrwr $t1, LA_CSR_ERA csrwr $t2, LA_CSR_PRMD POP_GENERAL_REGS LDD $sp, $sp, 3 .endm .section .text .balign 4096 .global exception_entry_base exception_entry_base: csrwr $t0, KSAVE_TEMP csrrd $t0, LA_CSR_PRMD andi $t0, $t0, 0x3 bnez $t0, .Lfrom_userspace .Lfrom_kernel: SAVE_REGS 0 move $a0, $sp addi.d $a1, $zero, 0 bl loongarch64_trap_handler RESTORE_REGS 0 ertn .Lfrom_userspace: SAVE_REGS 1 move $a0, $sp addi.d $a1, $zero, 1 bl loongarch64_trap_handler RESTORE_REGS 1 ertn .section .text .balign 4096 .global handle_tlb_refill handle_tlb_refill: csrwr $t0, LA_CSR_TLBRSAVE csrrd $t0, LA_CSR_PGD lddir $t0, $t0, 3 lddir $t0, $t0, 2 lddir $t0, $t0, 1 ldpte $t0, 0 ldpte $t0, 1 tlbfill csrrd $t0, LA_CSR_TLBRSAVE ertn
Josen-B/ci_arceos
1,965
modules/axhal/src/platform/x86_pc/ap_start.S
# Boot application processors into the protected mode. # Each non-boot CPU ("AP") is started up in response to a STARTUP # IPI from the boot CPU. Section B.4.2 of the Multi-Processor # Specification says that the AP will start in real mode with CS:IP # set to XY00:0000, where XY is an 8-bit value sent with the # STARTUP. Thus this code must start at a 4096-byte boundary. # # Because this code sets DS to zero, it must sit # at an address in the low 2^16 bytes. .equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} .equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} .equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} .equ stack_ptr, {start_page_paddr} + 0xff0 .equ entry_ptr, {start_page_paddr} + 0xff8 # 0x6000 .section .text .code16 .p2align 12 .global ap_start ap_start: cli wbinvd xor ax, ax mov ds, ax mov es, ax mov ss, ax mov fs, ax mov gs, ax # load the 64-bit GDT lgdt [pa_ap_gdt_desc] # switch to protected-mode mov eax, cr0 or eax, (1 << 0) mov cr0, eax # far jump to 32-bit code. 0x8 is code32 segment selector ljmp 0x8, offset pa_ap_start32 .code32 ap_start32: mov esp, [stack_ptr] mov eax, [entry_ptr] jmp eax .balign 8 # .type multiboot_header, STT_OBJECT .Lap_tmp_gdt_desc: .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit .long pa_ap_gdt # base .balign 16 .Lap_tmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Lap_tmp_gdt_end: # 0x7000 .p2align 12 .global ap_end ap_end:
Josen-B/ci_arceos
4,325
modules/axhal/src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp 
.Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 255 # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508
Josen-B/ci_arceos
2,544
tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com> //-------------------------------------------------------------------------------------------------- // Definitions //-------------------------------------------------------------------------------------------------- // Load the address of a symbol into a register, PC-relative. // // The symbol must lie within +/- 4 GiB of the Program Counter. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_REL register, symbol adrp \register, \symbol add \register, \register, #:lo12:\symbol .endm // Load the address of a symbol into a register, absolute. // // # Resources // // - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html .macro ADR_ABS register, symbol movz \register, #:abs_g2:\symbol movk \register, #:abs_g1_nc:\symbol movk \register, #:abs_g0_nc:\symbol .endm //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- .section .text._start //------------------------------------------------------------------------------ // fn _start() //------------------------------------------------------------------------------ _start: // Only proceed on the boot core. Park it otherwise. mrs x0, MPIDR_EL1 and x0, x0, {CONST_CORE_ID_MASK} ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs cmp x0, x1 b.ne .L_parking_loop // If execution reaches here, it is the boot core. // Initialize DRAM. ADR_ABS x0, __bss_start ADR_ABS x1, __bss_end_exclusive .L_bss_init_loop: cmp x0, x1 b.eq .L_relocate_binary stp xzr, xzr, [x0], #16 b .L_bss_init_loop // Next, relocate the binary. .L_relocate_binary: ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to. 
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to. ADR_ABS x2, __binary_nonzero_end_exclusive .L_copy_loop: ldr x3, [x0], #8 str x3, [x1], #8 cmp x1, x2 b.lo .L_copy_loop // Prepare the jump to Rust code. // Set the stack pointer. ADR_ABS x0, __boot_core_stack_end_exclusive mov sp, x0 // Jump to the relocated Rust code. ADR_ABS x1, _start_rust br x1 // Infinitely wait for events (aka "park the core"). .L_parking_loop: wfe b .L_parking_loop .size _start, . - _start .type _start, function .global _start
jprochazk/hebi4
9,742
src/codegen/opcodes.s
/* Do nothing. */ nop; /* Move value from register `src` to register `dst`. */ mov dst:reg src:reg; # Module variables, captures, array indices and table keys. /* Load module variable `src` to register `dst`. */ lmvar dst:reg src:mvar; # NOTE: `dst` comes 2nd here due to being 16-bit # it's still disassembled as 1st operand for consistency /* Store register `dst` into module variable `mvar`. */ smvar src:reg dst:mvar; /* Load current closure's capture `src` to register `dst`. */ lcap dst:reg src:cap; /* Store register `src` to current closure's capture `dst`. */ scap dst:cap src:reg; /* Load index `idx` (register) from `target` (array) to register `dst`. */ lidx dst:reg target:reg idx:reg; /* Load index `idx` (literal) from `target` (array) to register `dst`. */ lidxn dst:reg target:reg idx:lit8; /* Store value from `src` into `idx` (register) in `target` (array). */ sidx target:reg idx:reg src:reg; /* Store value from `src` into `idx` (register) in `target` (array). */ sidxn target:reg idx:lit8 src:reg; /* Load `key` (register) from `target` (object) to register `dst` (object). */ lkey dst:reg target:reg key:reg; /* Load `key` (literal) from `target` (object) to register `dst` (object). */ lkeyc dst:reg target:reg key:lit8; /* Store value from `src` into `key` (register) in `target` (object). */ skey target:reg key:reg src:reg; /* Store value from `src` into `key` (literal) in `target` (object). */ skeyc target:reg key:lit8 src:reg; # Value instructions /* Load `nil` value into register `dst`. */ lnil dst:reg; /* Load 16-bit integer `v` into register `dst`. */ lsmi dst:reg v:imm16s; /* Load literal `true` into register `dst`. */ ltrue dst:reg; /* Load literal `false` into register `dst`. */ lfalse dst:reg; /* * Load literal by `id` into register `dst`. * * `id` holds a 64-bit integer. */ lint dst:reg id:lit; /* * Load literal by `id` into register `dst`. * * `id` holds a 64-bit float. */ lnum dst:reg id:lit; /* * Load literal by `id` into register `dst`. 
* * `id` holds a string literal. */ lstr dst:reg id:lit; /* * Load closure function by `id` into register `dst`. * * `id` holds a ClosureInfo. */ lclosure dst:reg id:lit; /* * Load function by `id` into register `dst`. * * `id` holds a FuncInfo. * * This implies no captures, but the resulting object * is still a `Closure`. */ lfunc dst:reg id:lit; # TODO: constant lists/tables don't need to use stack space at all /* * Allocate a list with `capacity` into register `dst`. */ llist dst:reg cap:imm16; /* * Allocate an object with `capacity` into register `dst`. */ ltable dst:reg cap:imm16; # In hebi4's VM, there is only one `jmp` instruction with # a signed offset (stored as u24 with a bias). # # To make this instruction conditional, it may be prefixed with # various other instructions which perform value comparisons. # If a given comparison yields `true`, they skip the `jmp`. /* Adjust instruction pointer by `rel`. */ jmp rel:imm24s; /* Skip `jmp` if `v` coerced to bool is `true` */ istrue v:reg; /* Skip `jmp` if `v` coerced to bool is `false` */ isfalse v:reg; # Variants of `istrue`/`isfalse` which preserve values, used # for "ternary" expressions. /** * If `v` coerced to bool is `true`: * - Set `dst` to original `v` * - Skip `jmp` */ istruec dst:reg v:reg; /** * If `v` coerced to bool is `false`: * - Set `dst` to original `v` * - Skip `jmp` */ isfalsec dst:reg v:reg; /* Skip `jmp` if `lhs < rhs` (register, register) */ islt lhs:reg rhs:reg; /* Skip `jmp` if `lhs <= rhs` (register, register) */ isle lhs:reg rhs:reg; /* Skip `jmp` if `lhs > rhs` (register, register) */ isgt lhs:reg rhs:reg; /* Skip `jmp` if `lhs >= rhs` (register, register) */ isge lhs:reg rhs:reg; /* Skip `jmp` if `lhs == rhs` (register, register) */ iseq lhs:reg rhs:reg; /* Skip `jmp` if `lhs != rhs` (register, register) */ isne lhs:reg rhs:reg; # Specialized for certain kinds of constants: strings, numbers, "primitives" # Primitives are values which can be compared by bit pattern: bools and nils. 
# # This specialization reduces the number of type checks we have to do for # comparisons against constant values. /* Skip `jmp` if `lhs == rhs` (register, literal string) */ iseqs lhs:reg rhs:lit; /* Skip `jmp` if `lhs != rhs` (register, literal string) */ isnes lhs:reg rhs:lit; /* Skip `jmp` if `lhs == rhs` (register, literal number) */ iseqn lhs:reg rhs:lit; /* Skip `jmp` if `lhs != rhs` (register, literal number) */ isnen lhs:reg rhs:lit; /* Skip `jmp` if `lhs == rhs` (register, primitive) */ iseqp lhs:reg rhs:imm8; /* Skip `jmp` if `lhs != rhs` (register, primitive) */ isnep lhs:reg rhs:imm8; /* `dst = lhs < rhs` (register, register) */ isltv dst:reg lhs:reg rhs:reg; /* `dst = lhs <= rhs` (register, register) */ islev dst:reg lhs:reg rhs:reg; /* `dst = lhs > rhs` (register, register) */ isgtv dst:reg lhs:reg rhs:reg; /* `dst = lhs >= rhs` (register, register) */ isgev dst:reg lhs:reg rhs:reg; /* `dst = lhs == rhs` (register, register) */ iseqv dst:reg lhs:reg rhs:reg; /* `dst = lhs != rhs` (register, register) */ isnev dst:reg lhs:reg rhs:reg; # Arithmetic instructions # # LHS and RHS may be either in a register, or a constant. # # Using a constant typically avoids having to execute a few # instructions to materialize the value at runtime. # # When the compiler runs out of 8-bit literal slots, it falls back # to using `vv` variants by emitting a load of the literal first. 
/* `dst = lhs + rhs` (register, register) */ addvv dst:reg lhs:reg rhs:reg; /* `dst = lhs + rhs` (register, literal) */ addvn dst:reg lhs:reg rhs:lit8; /* `dst = lhs + rhs` (literal, register) */ addnv dst:reg lhs:lit8 rhs:reg; /* `dst = lhs - rhs` (register, register) */ subvv dst:reg lhs:reg rhs:reg; /* `dst = lhs - rhs` (register, literal) */ subvn dst:reg lhs:reg rhs:lit8; /* `dst = lhs - rhs` (literal, register) */ subnv dst:reg lhs:lit8 rhs:reg; /* `dst = lhs * rhs` (register, register) */ mulvv dst:reg lhs:reg rhs:reg; /* `dst = lhs * rhs` (register, literal) */ mulvn dst:reg lhs:reg rhs:lit8; /* `dst = lhs * rhs` (literal, register) */ mulnv dst:reg lhs:lit8 rhs:reg; /* `dst = lhs / rhs` (register, register) */ divvv dst:reg lhs:reg rhs:reg; /* `dst = lhs / rhs` (register, literal) */ divvn dst:reg lhs:reg rhs:lit8; /* `dst = lhs / rhs` (literal, register) */ divnv dst:reg lhs:lit8 rhs:reg; # Unary instructions /* `dst = -rhs` */ unm dst:reg rhs:reg; /* `dst = not rhs` */ not dst:reg rhs:reg; # Function calls # # Hebi4's VM uses overlapping stacks for function calls. # That means the closure and arguments passed to it during # a call are required to be not only contiguous, but also # top of the stack at the time of the call. # # This isn't the same as needing them at the end of the # current stack frame, there just need to be no live values # above the arguments. # # The layout for a stack frame is: # # [ret, arg0, arg1, .., argN, local0, local1, .., localN] # ^ # enum Value { tag, union value } # # Information about call frames is stored in a separate array: # # [frame, ..] # ^ # struct CallFrame { func_id:16, stack_size:8, <padding:16>, return_addr:32 } # # The return value is always at r0, followed by the arguments, then # the function's locals/intermediates. 
# # To perform a call, the arguments are evaluated and placed into their # corresponding registers: # # [.., callee, arg0, arg1, .., argN, .., <dead intermediates>] # # When the `call` instruction is dispatched, it performs various checks, # then constructs a new stack frame with its base at `callee`: # # old: [.., callee, arg0, arg1, .., argN] # new: [ret, arg0, arg1, .., argN, local0, local1, .., localN] # # Information about the previous stack frame is pushed into the # call frame array, and the interpreter dispatches the next instruction # at the start of the callee's code. # # This kind of layout is possible due to two invariants: # - Variables are below intermediate values on the stack # - Only the most recent intermediate is considered live # # This means values after and including `callee` can be safely discarded, # so the new stack can re-use those slots. This greatly reduces the total # number of stack slots needed for function calls, and avoids a memcpy of # call arguments before a call. # Due to (intentional) limitations of Hebi's syntax and semantics, it is # possible to statically know that a given variable may only ever contain # a function, and also exactly which function. The latter means we can # retrieve the required number of arguments whenever emitting this kind of # function call. # # The resulting call is named "fastcall", because it's much faster than # a regular call, as it requires no type or arity checking. # Regular `call` instructions are only generated for calling functions # stored in variables. # TODO: specialize for differing number of arguments? /* * `dst = dst(dst+1..dst+1+args)` */ call dst:reg args:imm8; /* * `dst = funcs[id](dst..dst+funcs[id].args)` */ fastcall dst:reg id:fnid; # Note that `ret` and `stop` are separate, to avoid a branch in `ret` which # would otherwise be required to check if there are any call frames left to # return to. There is always at least one call frame on the call frame stack. 
# # The VM initiates execution by first stepping into a "trampoline". # That's a bit of handwritten bytecode, which calls the module's main entrypoint # with zero arguments. When the main entrypoint returns to the trampoline, it # executes a `stop` which tells the VM to break out of the dispatch loop. /* * Return from the current call. */ ret; /* * Stop execution, and yield to the VM's caller. * Never generated by the compiler. */ stop;
jrmoulton/probe-rs
461
smoke-tester/test_function_arm.s
.global test_func .thumb_func test_func: mov r0, #0 /* 0x00 */ mov r1, #128 /* 0x02 */ mov r2, #0 /* 0x04 */ bkpt /* 0x06 */ loop: add r0, r0, #1 /* 0x08 */ cmp r0, r1 /* 0x0a */ ble loop /* 0x0c */ mov r2, #1 /* 0x0e */ bkpt /* 0x10 */ finish: b finish /* 0x12 */
jrusso440/rustps4
6,429
arch/arm/boot/aeabi_runtime.s
//===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// .syntax unified .cpu arm926ej-s .globl __aeabi_memset __aeabi_memset: mov r3, r1 mov r1, r2 mov r2, r3 b memset .globl __aeabi_memcpy __aeabi_memcpy: b memcpy .globl __aeabi_memmove __aeabi_memmove: b memmove .align 2 .globl __aeabi_ldivmod __aeabi_ldivmod: push {r11, lr} sub sp, sp, #16 add r12, sp, #8 str r12, [sp] bl __divmoddi4 ldr r2, [sp, #8] ldr r3, [sp, #12] add sp, sp, #16 pop {r11, pc} .align 2 .globl __aeabi_uldivmod __aeabi_uldivmod: push {r11, lr} sub sp, sp, #16 add r12, sp, #8 str r12, [sp] bl __udivmoddi4 ldr r2, [sp, #8] ldr r3, [sp, #12] add sp, sp, #16 pop {r11, pc} .align 3 .globl __aeabi_uidiv .globl __udivsi3 __aeabi_uidiv: __udivsi3: # 51 "udivsi3.S" push {r7, lr} ; mov r7, sp clz r2, r0 tst r1, r1 clz r3, r1 mov ip, #0 beq .L_return mov lr, #1 subs r3, r3, r2 blt .L_return .L_mainLoop: # 75 "udivsi3.S" subs r2, r0, r1, lsl r3 itt hs orrhs ip, ip,lr, lsl r3 movhs r0, r2 it ne subsne r3, r3, #1 bhi .L_mainLoop subs r2, r0, r1 it hs orrhs ip, #1 .L_return: mov r0, ip pop {r7, pc} .align 3 .globl __umodsi3 __umodsi3: # 39 "umodsi3.S" clz r2, r0 tst r1, r1 clz r3, r1 bxeq lr subs r3, r3, r2 bxlt lr .L_mainLoop2: # 59 "umodsi3.S" subs r2, r0, r1, lsl r3 it hs movhs r0, r2 it ne subsne r3, r3, #1 bhi .L_mainLoop2 subs r2, r0, r1 it hs movhs r0, r2 bx lr .align 3 .globl __aeabi_idiv __aeabi_idiv: __divsi3: # 37 "divsi3.S" push {r4, r7, lr} ; add r7, sp, #4 eor r4, r0, r1 eor r2, r0, r0, asr #31 eor r3, r1, r1, asr #31 sub r0, r2, r0, asr #31 sub r1, r3, r1, asr #31 bl __udivsi3 eor r0, r0, r4, asr #31 sub r0, r0, r4, asr #31 pop {r4, r7, pc} .align 3 ; .globl __modsi3 ; __modsi3: # 36 "modsi3.S" push {r4, r7, lr} ; add 
r7, sp, #4 mov r4, r0 eor r2, r0, r0, asr #31 eor r3, r1, r1, asr #31 sub r0, r2, r0, asr #31 sub r1, r3, r1, asr #31 bl __umodsi3 eor r0, r0, r4, asr #31 sub r0, r0, r4, asr #31 pop {r4, r7, pc} // https://android.googlesource.com/platform/bionic/+/884e4f8/libc/arch-arm/bionic/memset.S /* * Copyright (C) 2008 The Android Open Source Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Optimized memset() for ARM. * * memset() returns its first argument. 
*/ memset: /* compute the offset to align the destination * offset = (4-(src&3))&3 = -src & 3 */ .fnstart .save {r0, r4-r7, lr} stmfd sp!, {r0, r4-r7, lr} rsb r3, r0, #0 ands r3, r3, #3 cmp r3, r2 movhi r3, r2 /* splat r1 */ mov r1, r1, lsl #24 orr r1, r1, r1, lsr #8 orr r1, r1, r1, lsr #16 movs r12, r3, lsl #31 strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */ strcsb r1, [r0], #1 strmib r1, [r0], #1 subs r2, r2, r3 ldmlsfd sp!, {r0, r4-r7, lr} /* return */ bxls lr /* align the destination to a cache-line */ mov r12, r1 mov lr, r1 mov r4, r1 mov r5, r1 mov r6, r1 mov r7, r1 rsb r3, r0, #0 ands r3, r3, #0x1C beq 3f cmp r3, r2 andhi r3, r2, #0x1C sub r2, r2, r3 /* conditionnaly writes 0 to 7 words (length in r3) */ movs r3, r3, lsl #28 stmcsia r0!, {r1, lr} stmcsia r0!, {r1, lr} stmmiia r0!, {r1, lr} movs r3, r3, lsl #2 strcs r1, [r0], #4 3: subs r2, r2, #32 mov r3, r1 bmi 2f 1: subs r2, r2, #32 stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr} bhs 1b 2: add r2, r2, #32 /* conditionnaly stores 0 to 31 bytes */ movs r2, r2, lsl #28 stmcsia r0!, {r1,r3,r12,lr} stmmiia r0!, {r1, lr} movs r2, r2, lsl #2 strcs r1, [r0], #4 strmih r1, [r0], #2 movs r2, r2, lsl #2 strcsb r1, [r0] ldmfd sp!, {r0, r4-r7, lr} bx lr
jrusso440/rustps4
168
arch/arm/boot/loader.s
.text .code 32 .syntax unified .cpu arm926ej-s .fpu softvfp .global start .global abort .type start, %function start: mov sp, 0x18000 bl main abort: b .
js2xxx/aster-priv
1,106
framework/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S
/* SPDX-License-Identifier: MPL-2.0 */ .section ".setup", "ax" .code64 // start_of_setup32 should be loaded at CODE32_START, which is our base. .global start_of_setup32 start_of_setup32: // `efi_handover_setup_entry64` should be at efi_handover_setup_entry32 + 0x200, but // we could provide the 32 bit dummy entry point as the 64 bit entry point - 0x200 // since we do not provide 32-bit entry point in the x86_64 specific implementation. .org 0x210 .global efi_handover_setup_entry efi_handover_setup_entry: // The 3 parameters of is stored in rdi, rsi and rdx (sysv64). // Do not use them. // Setup the stack. lea rsp, [rip + setup_stack_top] lea rax, [rip + halt] push rax # the return address mov rbp, rsp add rbp, -4 push rbp mov rbp, rsp .extern efi_handover_entry lea rax, [rip + efi_handover_entry] call rax // Unreachable here. halt: hlt jmp halt // A small stack for the setup code. .section .data .align 0x1000 / 8 .global setup_stack setup_stack: .skip 0x1000 .global setup_stack_top setup_stack_top:
js2xxx/aster-priv
2,407
framework/libs/linux-bzimage/setup/src/x86/amd64_efi/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // The compatibility file for the Linux x86 Boot Protocol. // See https://www.kernel.org/doc/html/v5.6/x86/boot.html for // more information on the Linux x86 Boot Protocol. // Some of the fields filled with a 0xab* values should be filled // by the torjan builder. // Asterinas will use only a few of these fields, and some of them // are filled by the loader and will be read by Asterinas. .section ".header", "a" CODE32_START = 0x100000 SETUP_SECTS = 7 # so that the legacy setup could occupy a page SETUP_SECTS_SIZE = 0x200 * (SETUP_SECTS + 1) .code16 .org 0x01f1 hdr_start: setup_sects: .byte SETUP_SECTS root_flags: .word 1 syssize: .long 0 ram_size: .word 0 vid_mode: .word 0xfffd root_dev: .word 0 boot_flag: .word 0xAA55 jump: .byte 0xeb jump_addr: .byte hdr_end-jump_addr magic: .ascii "HdrS" .word 0x020f realmode_swtch: .word 0, 0 start_sys_seg: .word 0 .word 0 type_of_loader: .byte 0 loadflags: .byte (1 << 0) setup_move_size: .word 0 code32_start: .long CODE32_START ramdisk_image: .long 0 ramdisk_size: .long 0 bootsect_kludge: .long 0 heap_end_ptr: .word 65535 ext_loader_ver: .byte 0 ext_loader_type: .byte 0 cmd_line_ptr: .long 0 initrd_addr_max: .long 0x7fffffff kernel_alignment: .long 0x1000000 relocatable_kernel: .byte 0 min_alignment: .byte 0x10 xloadflags: .word 0b01111 # all handover protocols except kexec cmdline_size: .long 4096-1 hardware_subarch: .long 0 hardware_subarch_data: .quad 0 payload_offset: .long 0xabababab # at 0x248/4, to be filled by the builder payload_length: .long 0xabababab # at 0x24c/4, to be filled by the builder setup_data: .quad 0 pref_address: .quad CODE32_START - SETUP_SECTS_SIZE init_size: .long 0xabababab # at 0x260/4, to be filled by the builder # The handover_offset should be efi_handover_setup_entry - CODE32_START - 0x200 # But we use ABI workaround to avoid the relocation of efi_handover_setup_entry handover_offset: .long 0x10 kernel_info_offset: .long 0 hdr_end:
js2xxx/aster-priv
689
framework/libs/linux-bzimage/setup/src/x86/legacy_i386/setup.S
/* SPDX-License-Identifier: MPL-2.0 */ // 32-bit setup code starts here, and will be loaded at CODE32_START. .section ".setup", "ax" .code32 .global start_of_setup32 start_of_setup32: mov eax, offset __stack_top mov esp, eax mov eax, offset halt push eax # the return address mov ebp, esp add ebp, -4 push ebp mov ebp, esp // The rust entrypoint of the bzImage .extern _bzimage_entry_32 push esi # the boot_params pointer call _bzimage_entry_32 // Unreachable here. halt: hlt jmp halt // A small stack for the 32-bit code. .section ".stack", "aw" .align 8 .space 0x1000 __stack_top:
js2xxx/aster-priv
2,165
framework/libs/linux-bzimage/setup/src/x86/legacy_i386/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // The compatibility file for the Linux x86 Boot Protocol. // See https://www.kernel.org/doc/html/v5.6/x86/boot.html for // more information on the Linux x86 Boot Protocol. // Some of the fields filled with a 0xab* values should be filled // by the torjan builder. // Asterinas will use only a few of these fields, and some of them // are filled by the loader and will be read by Asterinas. .section ".header", "a" CODE32_START = 0x100000 SETUP_SECTS = 7 # so that the legacy setup could occupy a page .code16 .org 0x01f1 hdr_start: setup_sects: .byte SETUP_SECTS root_flags: .word 1 syssize: .long 0 ram_size: .word 0 vid_mode: .word 0xfffd root_dev: .word 0 boot_flag: .word 0xAA55 jump: .byte 0xeb jump_addr: .byte hdr_end-jump_addr magic: .ascii "HdrS" .word 0x020f realmode_swtch: .word 0, 0 start_sys_seg: .word 0 .word 0 type_of_loader: .byte 0 loadflags: .byte (1 << 0) setup_move_size: .word 0 code32_start: .long CODE32_START ramdisk_image: .long 0 ramdisk_size: .long 0 bootsect_kludge: .long 0 heap_end_ptr: .word 65535 ext_loader_ver: .byte 0 ext_loader_type: .byte 0 cmd_line_ptr: .long 0 initrd_addr_max: .long 0x7fffffff kernel_alignment: .long 0x1000000 relocatable_kernel: .byte 0 min_alignment: .byte 0x10 xloadflags: .word 0 cmdline_size: .long 4096-1 hardware_subarch: .long 0 hardware_subarch_data: .quad 0 payload_offset: .long 0xabababab # at 0x248/4, to be filled by the builder payload_length: .long 0xabababab # at 0x24c/4, to be filled by the builder setup_data: .quad 0 pref_address: .quad CODE32_START - 0x200 * (SETUP_SECTS + 1); init_size: .long 0xabababab # at 0x260/4, to be filled by the builder handover_offset: .long 0 kernel_info_offset: .long 0 hdr_end:
js2xxx/aster-priv
700
framework/aster-frame/src/task/switch.S
/* SPDX-License-Identifier: MPL-2.0 */ .text .global context_switch .code64 context_switch: # (cur: *mut TaskContext, nxt: *TaskContext) # Save cur's register mov rax, [rsp] # return address mov [rdi + 56], rax # 56 = offsetof(Context, rip) mov [rdi + 0], rsp mov [rdi + 8], rbx mov [rdi + 16], rbp mov [rdi + 24], r12 mov [rdi + 32], r13 mov [rdi + 40], r14 mov [rdi + 48], r15 # Restore nxt's registers mov rsp, [rsi + 0] mov rbx, [rsi + 8] mov rbp, [rsi + 16] mov r12, [rsi + 24] mov r13, [rsi + 32] mov r14, [rsi + 40] mov r15, [rsi + 48] mov rax, [rsi + 56] # restore return address mov [rsp], rax # for stack balance, must use mov instead of push ret
js2xxx/aster-priv
10,035
framework/aster-frame/src/arch/x86/boot/boot.S
/* SPDX-License-Identifier: MPL-2.0 */ // The boot header, initial boot setup code, temporary GDT and page tables are // in the boot section. The boot section is mapped writable since kernel may // modify the initial page table. .section ".boot", "awx" .code32 // With every entry types we could go through common paging or machine // state setup routines. Thus we make a mark of protocol used in each entrypoint // on the stack. ENTRYTYPE_MULTIBOOT = 1 ENTRYTYPE_MULTIBOOT2 = 2 ENTRYTYPE_LINUX_32 = 3 ENTRYTYPE_LINUX_64 = 4 MULTIBOOT_ENTRY_MAGIC = 0x2BADB002 MULTIBOOT2_ENTRY_MAGIC = 0x36D76289 // The Linux 32-bit Boot Protocol entry point. // Must be located at 0x8001000, ABI immutable! .code32 .org 0x000 .global __linux32_boot __linux32_boot: cli cld // Set the kernel call stack. mov esp, offset boot_stack_top push 0 // upper 32-bits push esi // boot_params ptr push 0 // upper 32-bits push ENTRYTYPE_LINUX_32 jmp initial_boot_setup // The Linux 64-bit Boot Protocol entry point. // Must be located at 0x8001200, ABI immutable! .code64 .org 0x200 .global __linux64_boot_tag __linux64_boot_tag: // Set the kernel call stack. lea rsp, [boot_stack_top] push rsi // boot_params ptr from the loader push ENTRYTYPE_LINUX_64 // Here RSP/RIP are still using low address. jmp long_mode_in_low_address // The multiboot & multiboot2 entry point. .code32 .global __multiboot_boot __multiboot_boot: cli cld // Set the kernel call stack. mov esp, offset boot_stack_top push 0 // Upper 32-bits. push eax // multiboot magic ptr push 0 // Upper 32-bits. push ebx // multiboot info ptr // Tell the entry type from eax cmp eax, MULTIBOOT_ENTRY_MAGIC je magic_is_mb cmp eax, MULTIBOOT2_ENTRY_MAGIC je magic_is_mb2 jmp halt // Should not be reachable! magic_is_mb: push 0 // Upper 32-bits. push ENTRYTYPE_MULTIBOOT jmp initial_boot_setup magic_is_mb2: push 0 // Upper 32-bits. push ENTRYTYPE_MULTIBOOT2 jmp initial_boot_setup initial_boot_setup: // Prepare for far return. 
We use a far return as a fence after setting GDT. mov eax, 24 push eax lea edx, [protected_mode] push edx // Switch to our own temporary GDT. lgdt [boot_gdtr] retf protected_mode: mov ax, 16 mov ds, ax mov ss, ax mov es, ax mov fs, ax mov gs, ax page_table_setup: // Zero out the page table. mov al, 0x00 lea edi, [boot_page_table_start] lea ecx, [boot_page_table_end] sub ecx, edi cld rep stosb // PTE flags used in this file. PTE_PRESENT = (1) PTE_WRITE = (1 << 1) PTE_HUGE = (1 << 7) PTE_GLOBAL = (1 << 8) // PML4: 0x00000000_00000000 ~ 0x00000000_3fffffff // 0x00000000_40000000 ~ 0x00000000_7fffffff // 0x00000000_80000000 ~ 0x00000000_bfffffff // 0x00000000_c0000000 ~ 0x00000000_ffffffff lea edi, [boot_pml4] lea eax, [boot_pdpt + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PML4: 0xffff8000_00000000 ~ 0xffff8000_3fffffff // 0xffff8000_40000000 ~ 0xffff8000_7fffffff // 0xffff8000_80000000 ~ 0xffff8000_bfffffff // 0xffff8000_c0000000 ~ 0xffff8000_ffffffff // 0xffff8008_00000000 ~ 0xffff8008_3fffffff lea edi, [boot_pml4 + 0x100 * 8] lea eax, [boot_pdpt + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PML4: 0xffffffff_80000000 ~ 0xffffffff_bfffffff // 0xffffffff_c0000000 ~ 0xffffffff_ffffffff lea edi, [boot_pml4 + 0x1ff * 8] lea eax, [boot_pdpt + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_00000000 ~ 0x00000000_3fffffff lea edi, [boot_pdpt] lea eax, [boot_pd_0g_1g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_40000000 ~ 0x00000000_7fffffff lea edi, [boot_pdpt + 0x1 * 8] lea eax, [boot_pd_1g_2g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_80000000 ~ 0x00000000_bfffffff lea edi, [boot_pdpt + 0x2 * 8] lea eax, [boot_pd_2g_3g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword 
ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0x00000000_c0000000 ~ 0x00000000_ffffffff lea edi, [boot_pdpt + 0x3 * 8] lea eax, [boot_pd_3g_4g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // 1000 00000|000 100000|00 0000000|0 00000000 000 // PDPT: 0xffff8008_00000000 ~ 0xffff8008_3fffffff lea edi, [boot_pdpt + 0x20 * 8] lea eax, [boot_pd_32g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0xffffffff_80000000 ~ 0xffffffff_bfffffff lea edi, [boot_pdpt + 0x1fe * 8] lea eax, [boot_pd_0g_1g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // PDPT: 0xffffffff_c0000000 ~ 0xffffffff_ffffffff lea edi, [boot_pdpt + 0x1ff * 8] lea eax, [boot_pd_1g_2g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 // Page Directory: map to low 1 GiB * 4 space lea edi, [boot_pd] lea eax, [boot_pt + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov ecx, 512 * 4 // (of entries in PD) * (number of PD) write_pd_entry: mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 add eax, 0x1000 // +4KiB to next table add edi, 8 loop write_pd_entry // Page Directory: map to 1 GiB space offset 32GiB lea edi, [boot_pd_32g] lea eax, [boot_pt_32g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)] mov ecx, 512 // (of entries in PD) write_pd_32g_entry: mov dword ptr [edi], eax mov dword ptr [edi + 4], 0x0 add eax, 0x1000 // +4KiB to next table add edi, 8 loop write_pd_32g_entry // Page Table: map to low 1 GiB * 4 space lea edi, [boot_pt] mov eax, (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL) // Offset 0 mov ecx, 512 * 512 * 4 // (of entries in PT) * (number of PT) * (number of PD) write_pt_entry: mov dword ptr [edi], eax mov dword ptr [edi + 4], 0 add eax, 0x1000 // +4KiB add edi, 8 loop write_pt_entry // Page Table: map to 1 GiB space offset 32GiB lea edi, [boot_pt_32g] mov eax, (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL) // Offset 
0x8_00000000 but should write to high 32bits mov ecx, 512 * 512 // (of entries in PT) * (number of PT) write_pt_32g_entry: mov dword ptr [edi], eax mov dword ptr [edi + 4], 0x8 // Offset 0x8_00000000 add eax, 0x1000 // +4KiB add edi, 8 loop write_pt_32g_entry jmp enable_long_mode enable_long_mode: // Enable PAE and PGE. mov eax, cr4 or eax, 0xa0 mov cr4, eax // Set the page table address. lea eax, [boot_pml4] mov cr3, eax // Enable long mode. mov ecx, 0xc0000080 rdmsr or eax, 0x0100 wrmsr // Prepare for far return. mov eax, 8 push eax lea edx, [long_mode_in_low_address] push edx // Enable paging. mov eax, cr0 or eax, 0x80000000 mov cr0, eax retf // Temporary GDTR/GDT entries. This must be located in the .boot section as its // address (gdt) must be physical to load. .align 16 .global boot_gdtr boot_gdtr: .word gdt_end - gdt - 1 .quad gdt .align 16 gdt: .quad 0x0000000000000000 // 0: null descriptor .quad 0x00af9a000000ffff // 8: 64-bit code segment (kernel) .quad 0x00cf92000000ffff // 16: 64-bit data segment (kernel) .quad 0x00cf9a000000ffff // 24: 32-bit code segment (kernel) gdt_end: // The page tables and the stack .align 4096 .global boot_page_table_start boot_page_table_start: .global boot_pml4 boot_pml4: .skip 4096 boot_pdpt: .skip 4096 boot_pd: boot_pd_0g_1g: .skip 4096 boot_pd_1g_2g: .skip 4096 boot_pd_2g_3g: .skip 4096 boot_pd_3g_4g: .skip 4096 boot_pt: .skip 4096 * 512 * 4 boot_pd_32g: .skip 4096 boot_pt_32g: .skip 4096 * 512 boot_page_table_end: .global boot_stack_top boot_stack_bottom: .skip 0x40000 boot_stack_top: .code64 long_mode_in_low_address: mov ax, 0 mov ds, ax mov ss, ax mov es, ax mov fs, ax mov gs, ax // Update RSP/RIP to use the virtual address. mov rbx, 0xffffffff80000000 or rsp, rbx lea rax, [long_mode - 0xffffffff80000000] or rax, rbx jmp rax // From here, we're in the .text section: we no longer use physical address. .code64 .text long_mode: // Clear .bss section. 
mov al, 0x00 lea rdi, [rip + __bss] lea rcx, [rip + __bss_end] sub rcx, rdi cld rep stosb // Call the corresponding Rust entrypoint according to the boot entrypoint pop rax cmp rax, ENTRYTYPE_MULTIBOOT je entry_type_multiboot cmp rax, ENTRYTYPE_MULTIBOOT2 je entry_type_multiboot2 cmp rax, ENTRYTYPE_LINUX_32 je entry_type_linux cmp rax, ENTRYTYPE_LINUX_64 je entry_type_linux // Unreachable! jmp halt .extern __linux_boot .extern __multiboot_entry .extern __multiboot2_entry entry_type_linux: pop rdi // boot_params ptr xor rbp, rbp lea rax, [rip + __linux_boot] // jump into Rust code call rax jmp halt entry_type_multiboot: pop rsi // the address of multiboot info pop rdi // multiboot magic xor rbp, rbp lea rax, [rip + __multiboot_entry] // jump into Rust code call rax jmp halt entry_type_multiboot2: pop rsi // the address of multiboot info pop rdi // multiboot magic xor rbp, rbp lea rax, [rip + __multiboot2_entry] // jump into Rust code call rax jmp halt halt: cli hlt jmp halt
js2xxx/aster-priv
2,669
framework/aster-frame/src/arch/x86/smp/smp.S
.extern boot_pml4 .extern ap_early_entry .section ".smp", "awx" .align 4096 .code16 IA32_APIC_BASE = 0x1B IA32_X2APIC_APICID = 0x802 MMIO_XAPIC_APICID = 0xFEE00020 ap_boot: cli // disable interrupts cld xor ax, ax // clear ax mov ds, ax // clear ds lgdt [ap_gdtr] // load gdt mov eax, cr0 or eax, 1 mov cr0, eax // enable protected mode ljmp 8, offset ap_protect_entry // 32-bit ap gdt .align 16 ap_gdt: .quad 0x0000000000000000 .quad 0x00cf9a000000ffff .quad 0x00cf92000000ffff .quad 0x00af9a000000ffff ap_gdt_end: .align 16 ap_gdtr: .word ap_gdt_end - ap_gdt - 1 .quad ap_gdt .align 4 .code32 ap_protect_entry: mov ax, 0x10 mov ds, ax mov ss, ax // Get local apic id from xapic or x2apic // IA32_APIC_BASE register: // bit 8: BSP—Processor is BSP // bit 10: EXTD—Enable x2APIC mode // bit 11: EN—xAPIC global enable/disable // bit 12-35: APIC Base—Base physical address // It is best to get this information in protected mode. // After entering long mode, we need to set additional // page table mapping for xapic mode mmio region. mov ecx, IA32_APIC_BASE rdmsr and eax, 0x400 // check EXTD bit cmp eax, 0x400 je x2apic_mode xapic_mode: // In xapic mode, local apic id is stored in // mmio region mov eax, [MMIO_XAPIC_APICID] shr eax, 24 jmp ap_protect x2apic_mode: // In x2apic mode, local apic id is stored in // IA32_X2APIC_APICID MSR mov ecx, IA32_X2APIC_APICID rdmsr jmp ap_protect .code32 ap_protect: // Save the local apic id in an unused register. // We will calculate the stack pointer of this core // by taking the local apic id as the offset. 
mov edi, eax // prepare page table lea eax, [boot_pml4] mov cr3, eax // enable PAE and PGE mov eax, cr4 or eax, 0xa0 mov cr4, eax // enable long mode mov ecx, 0xc0000080 rdmsr // load EFER MSR or eax, 1<<8 wrmsr // set long bit // enable paging mov eax, cr0 or eax, 1<<31 mov cr0, eax ljmp 0x18, offset ap_long .code64 .global __ap_boot_stack_pointer_array .align 8 // Use two pages to place stack pointers of all aps, thus support up to 1024 aps. // stack_pointer = *(__ap_boot_stack_pointer_array + local_apic_id*8) __ap_boot_stack_pointer_array: .skip 8 * 1024 ap_long: mov ax, 0 mov ds, ax mov ss, ax mov es, ax mov fs, ax mov gs, ax mov rax, rdi shl rax, 3 mov rsp, [__ap_boot_stack_pointer_array + rax] mov rax, offset ap_early_entry call rax hlt
js2xxx/aster-priv
364
framework/aster-frame/src/arch/x86/boot/multiboot/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // This is the GNU Multiboot header. // Reference: https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section ".multiboot_header", "a" MB_MAGIC = 0x1BADB002 MB_FLAGS = 0 MB_CHECKSUM = -(MB_MAGIC + MB_FLAGS) .code32 multiboot_header: .align 8 .long MB_MAGIC .long MB_FLAGS .long MB_CHECKSUM
js2xxx/aster-priv
1,252
framework/aster-frame/src/arch/x86/boot/multiboot2/header.S
/* SPDX-License-Identifier: MPL-2.0 */ // This is the GNU Multiboot 2 header. // Reference: https://www.gnu.org/software/grub/manual/multiboot2/html_node/Index.html//Index .section ".multiboot2_header", "a" .code32 // Macros for cleaner code in the header fields. MB2_MAGIC = 0xE85250D6 MB2_ARCHITECTURE = 0 // 32-bit (protected) mode of i386 MB2_HEADERLEN = header_end - header_start MB2_CHECKSUM = -(MB2_MAGIC + MB2_ARCHITECTURE + MB2_HEADERLEN) header_start: .align 8 .long MB2_MAGIC .long MB2_ARCHITECTURE .long MB2_HEADERLEN .long MB2_CHECKSUM // Tag: entry address entry_address_tag_start: .short 3 .short 1 // Optional .long entry_address_tag_end - entry_address_tag_start .extern __multiboot_boot .long __multiboot_boot // entry_addr entry_address_tag_end: // Tag: information request .align 8 info_request: .short 1 .short 0 // Required .long info_request_end - info_request .long 6 // Memory map request .long 15 // ACPI (new) request info_request_end: // Tag: header end .align 8 .short 0 // type: tags end .short 0 // flags .long 8 // size header_end:
js2xxx/aster-priv
2,102
regression/apps/fork/fork.s
# SPDX-License-Identifier: MPL-2.0 .global _start .section .text _start: call print_hello_world mov $57, %rax # syscall number of fork syscall cmp $0, %rax je _child # child process jmp _parent # parent process _parent: call wait_child call get_pid call print_parent_message call exit _child: call get_pid call print_child_message call exit wait_child: mov %rax, %rdi # child process id _loop: mov $61, %rax # syscall number of wait4 mov $0, %rsi # exit status address mov $1, %rdx # WNOHANG syscall cmp %rdi, %rax # The return value is the pid of child jne _loop ret exit: mov $60, %rax # syscall number of exit mov $0, %rdi # exit code syscall get_pid: mov $39, %rax syscall ret print_hello_world: mov $message, %rsi # address of message mov $message_end, %rdx sub %rsi, %rdx # calculate message len jmp _print_message print_parent_message: mov $message_parent, %rsi # address of message mov $message_parent_end, %rdx sub %rsi, %rdx # calculate message len jmp _print_message print_child_message: mov $message_child, %rsi # address of message mov $message_child_end, %rdx sub %rsi, %rdx # calculate message len jmp _print_message # never directly call _print_message _print_message: mov $1, %rax # syscall number of write mov $1, %rdi # stdout syscall ret .section .rodata message: .ascii "Hello, world in fork\n" message_end: message_parent: .ascii "Hello world from parent\n" message_parent_end: message_child: .ascii "Hello world from child\n" message_child_end:
js2xxx/aster-priv
710
regression/apps/hello_world/hello_world.s
# SPDX-License-Identifier: MPL-2.0 .global _start .section .text _start: call print_message call print_message call print_message mov $60, %rax # syscall number of exit mov $0, %rdi # exit code syscall get_pid: mov $39, %rax syscall ret print_message: mov $1, %rax # syscall number of write mov $1, %rdi # stdout mov $message, %rsi # address of message mov $message_end, %rdx sub %rsi, %rdx # calculate message len syscall ret .section .rodata message: .ascii "Hello, world\n" message_end:
jtzhpf/starry
1,604
modules/axhal/linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) *(.text .text.*) . = ALIGN(4K); _etext = .; } .rodata : ALIGN(4K) { _srodata = .; *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) . = ALIGN(4K); _erodata = .; } .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = ALIGN(64); _percpu_size_aligned = .; . = _percpu_load_start + _percpu_size_aligned * %SMP%; } . = _percpu_start + SIZEOF(.percpu); _percpu_end = .; . = ALIGN(4K); _edata = .; .bss : AT(.) ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } }
jtzhpf/starry
121
modules/axdriver/image.S
.section .data .global img_start .global img_end .align 16 img_start: .incbin "./disk.img" img_end:
jtzhpf/starry
2,034
modules/axhal/src/arch/riscv/trap.S
.macro SAVE_REGS, from_user addi sp, sp, -{trapframe_size} PUSH_GENERAL_REGS csrr t0, sepc csrr t1, sstatus csrrw t2, sscratch, zero // save sscratch (sp) and zero it STR t0, sp, 31 // tf.sepc STR t1, sp, 32 // tf.sstatus STR t2, sp, 1 // tf.regs.sp .short 0xa622 // fsd fs0,264(sp) .short 0xaa26 // fsd fs1,272(sp) .if \from_user == 1 LDR t1, sp, 2 // load user gp with CPU ID LDR t0, sp, 3 // load supervisor tp STR gp, sp, 2 // save user gp and tp STR tp, sp, 3 mv gp, t1 mv tp, t0 .endif .endm .macro RESTORE_REGS, from_user .if \from_user == 1 LDR t1, sp, 2 LDR t0, sp, 3 STR gp, sp, 2 // load user gp and tp STR tp, sp, 3 // save supervisor tp mv gp, t1 mv tp, t0 addi t0, sp, {trapframe_size} // put supervisor sp to scratch csrw sscratch, t0 .endif LDR t0, sp, 31 LDR t1, sp, 32 csrw sepc, t0 csrw sstatus, t1 .short 0x2432 // fld fs0,264(sp) .short 0x24d2 // fld fs1,272(sp) POP_GENERAL_REGS LDR sp, sp, 1 // load sp from tf.regs.sp .endm .section .text .balign 4 .global trap_vector_base trap_vector_base: // sscratch == 0: trap from S mode // sscratch != 0: trap from U mode csrrw sp, sscratch, sp // switch sscratch and sp bnez sp, .Ltrap_entry_u csrr sp, sscratch // put supervisor sp back j .Ltrap_entry_s .Ltrap_entry_s: SAVE_REGS 0 mv a0, sp li a1, 0 call riscv_trap_handler RESTORE_REGS 0 sret .Ltrap_entry_u: SAVE_REGS 1 mv a0, sp li a1, 1 call riscv_trap_handler RESTORE_REGS 1 sret
jtzhpf/starry
2,415
modules/axhal/src/arch/aarch64/trap.S
.macro SAVE_REGS sub sp, sp, 34 * 8 stp x0, x1, [sp] stp x2, x3, [sp, 2 * 8] stp x4, x5, [sp, 4 * 8] stp x6, x7, [sp, 6 * 8] stp x8, x9, [sp, 8 * 8] stp x10, x11, [sp, 10 * 8] stp x12, x13, [sp, 12 * 8] stp x14, x15, [sp, 14 * 8] stp x16, x17, [sp, 16 * 8] stp x18, x19, [sp, 18 * 8] stp x20, x21, [sp, 20 * 8] stp x22, x23, [sp, 22 * 8] stp x24, x25, [sp, 24 * 8] stp x26, x27, [sp, 26 * 8] stp x28, x29, [sp, 28 * 8] mrs x9, sp_el0 mrs x10, elr_el1 mrs x11, spsr_el1 stp x30, x9, [sp, 30 * 8] stp x10, x11, [sp, 32 * 8] .endm .macro RESTORE_REGS ldp x10, x11, [sp, 32 * 8] ldp x30, x9, [sp, 30 * 8] msr sp_el0, x9 msr elr_el1, x10 msr spsr_el1, x11 ldp x28, x29, [sp, 28 * 8] ldp x26, x27, [sp, 26 * 8] ldp x24, x25, [sp, 24 * 8] ldp x22, x23, [sp, 22 * 8] ldp x20, x21, [sp, 20 * 8] ldp x18, x19, [sp, 18 * 8] ldp x16, x17, [sp, 16 * 8] ldp x14, x15, [sp, 14 * 8] ldp x12, x13, [sp, 12 * 8] ldp x10, x11, [sp, 10 * 8] ldp x8, x9, [sp, 8 * 8] ldp x6, x7, [sp, 6 * 8] ldp x4, x5, [sp, 4 * 8] ldp x2, x3, [sp, 2 * 8] ldp x0, x1, [sp] add sp, sp, 34 * 8 .endm .macro INVALID_EXCP, kind, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \kind mov x2, \source bl invalid_exception b .Lexception_return .endm .macro HANDLE_SYNC .p2align 7 SAVE_REGS mov x0, sp bl handle_sync_exception b .Lexception_return .endm .macro HANDLE_IRQ .p2align 7 SAVE_REGS mov x0, sp bl handle_irq_exception b .Lexception_return .endm .section .text .p2align 11 .global exception_vector_base exception_vector_base: // current EL, with SP_EL0 INVALID_EXCP 0 0 INVALID_EXCP 1 0 INVALID_EXCP 2 0 INVALID_EXCP 3 0 // current EL, with SP_ELx HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 1 INVALID_EXCP 3 1 // lower EL, aarch64 HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 2 INVALID_EXCP 3 2 // lower EL, aarch32 INVALID_EXCP 0 3 INVALID_EXCP 1 3 INVALID_EXCP 2 3 INVALID_EXCP 3 3 .Lexception_return: RESTORE_REGS eret
jtzhpf/starry
1,505
modules/axhal/src/arch/x86_64/trap.S
.equ NUM_INT, 256 .altmacro .macro DEF_HANDLER, i .Ltrap_handler_\i: .if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17 # error code pushed by CPU push \i # interrupt vector jmp .Ltrap_common .else push 0 # fill in error code in TrapFrame push \i # interrupt vector jmp .Ltrap_common .endif .endm .macro DEF_TABLE_ENTRY, i .quad .Ltrap_handler_\i .endm .section .text .code64 _trap_handlers: .set i, 0 .rept NUM_INT DEF_HANDLER %i .set i, i + 1 .endr .Ltrap_common: test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space jz 1f swapgs 1: push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_trap_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space jz 2f swapgs 2: add rsp, 16 # pop vector, error_code iretq .section .rodata .global trap_handler_table trap_handler_table: .set i, 0 .rept NUM_INT DEF_TABLE_ENTRY %i .set i, i + 1 .endr
jtzhpf/starry
4,399
modules/axhal/src/arch/loongarch64/trap.S
.equ KSAVE_KSP, 0x30 .equ KSAVE_T0, 0x31 .equ KSAVE_USP, 0x32 .macro SAVE_REGS st.d $ra, $sp, 8 csrrd $t0, KSAVE_T0 st.d $t0, $sp, 12*8 st.d $a0, $sp, 4*8 st.d $a1, $sp, 5*8 st.d $a2, $sp, 6*8 st.d $a3, $sp, 7*8 st.d $a4, $sp, 8*8 st.d $a5, $sp, 9*8 st.d $a6, $sp, 10*8 st.d $a7, $sp, 11*8 st.d $t1, $sp, 13*8 st.d $t2, $sp, 14*8 st.d $t3, $sp, 15*8 st.d $t4, $sp, 16*8 st.d $t5, $sp, 17*8 st.d $t6, $sp, 18*8 st.d $t7, $sp, 19*8 st.d $t8, $sp, 20*8 st.d $fp, $sp, 22*8 st.d $s0, $sp, 23*8 st.d $s1, $sp, 24*8 st.d $s2, $sp, 25*8 st.d $s3, $sp, 26*8 st.d $s4, $sp, 27*8 st.d $s5, $sp, 28*8 st.d $s6, $sp, 29*8 st.d $s7, $sp, 30*8 st.d $s8, $sp, 31*8 .endm .macro RESTORE_REGS csrrd $t0, 0x1 andi $t0, $t0, 0x3 bnez $t0, .Ltmp_user .Ltmp_kernel: b .Ltmp_common .Ltmp_user: st.d $tp, $sp, 36*8 ld.d $tp, $sp, 2*8 st.d $r21, $sp, 37*8 ld.d $r21, $sp, 21*8 .Ltmp_common: ld.d $ra, $sp, 1*8 ld.d $a0, $sp, 4*8 ld.d $a1, $sp, 5*8 ld.d $a2, $sp, 6*8 ld.d $a3, $sp, 7*8 ld.d $a4, $sp, 8*8 ld.d $a5, $sp, 9*8 ld.d $a6, $sp, 10*8 ld.d $a7, $sp, 11*8 ld.d $t0, $sp, 12*8 ld.d $t1, $sp, 13*8 ld.d $t2, $sp, 14*8 ld.d $t3, $sp, 15*8 ld.d $t4, $sp, 16*8 ld.d $t5, $sp, 17*8 ld.d $t6, $sp, 18*8 ld.d $t7, $sp, 19*8 ld.d $t8, $sp, 20*8 ld.d $fp, $sp, 22*8 ld.d $s0, $sp, 23*8 ld.d $s1, $sp, 24*8 ld.d $s2, $sp, 25*8 ld.d $s3, $sp, 26*8 ld.d $s4, $sp, 27*8 ld.d $s5, $sp, 28*8 ld.d $s6, $sp, 29*8 ld.d $s7, $sp, 30*8 ld.d $s8, $sp, 31*8 .endm .section .text .balign 4096 .global trap_vector_base trap_vector_base: csrwr $t0, KSAVE_T0 csrrd $t0, 0x1 andi $t0, $t0, 0x3 bnez $t0, .Lfrom_userspace .Lfrom_kernel: move $t0, $sp addi.d $sp, $sp, -{trapframe_size} // allocate space // save kernel sp st.d $t0, $sp, 3*8 b .Lcommon .Lfrom_userspace: csrwr $sp, KSAVE_USP // save user sp into SAVE1 CSR csrrd $sp, KSAVE_KSP // restore kernel sp addi.d $sp, $sp, -{trapframe_size} // allocate space // switch tp and r21 st.d $tp, $sp, 2*8 ld.d $tp, $sp, 36*8 st.d $r21, $sp, 21*8 ld.d $r21, $sp, 37*8 // save user sp csrrd 
$t0, KSAVE_USP st.d $t0, $sp, 3*8 // sp .Lcommon: // save the registers. SAVE_REGS csrrd $t2, 0x1 st.d $t2, $sp, 8*32 // prmd csrrd $t1, 0x6 st.d $t1, $sp, 8*33 // era csrrd $t1, 0x7 st.d $t1, $sp, 8*34 // badv csrrd $t1, 0x0 st.d $t1, $sp, 8*35 // crmd move $a0, $sp csrrd $t0, 0x1 andi $a1, $t0, 0x3 // if user or kernel bl loongarch64_trap_handler // restore the registers. ld.d $t1, $sp, 8*33 // era csrwr $t1, 0x6 ld.d $t2, $sp, 8*32 // prmd csrwr $t2, 0x1 // Save kernel sp when exit kernel mode addi.d $t1, $sp, {trapframe_size} csrwr $t1, KSAVE_KSP RESTORE_REGS // restore sp ld.d $sp, $sp, 3*8 ertn // TLB Refill handler .equ LA_CSR_PGDL, 0x19 /* Page table base address when VA[47] = 0 */ .equ LA_CSR_PGDH, 0x1a /* Page table base address when VA[47] = 1 */ .equ LA_CSR_PGD, 0x1b /* Page table base */ .equ LA_CSR_TLBRENTRY, 0x88 /* TLB refill exception entry */ .equ LA_CSR_TLBRBADV, 0x89 /* TLB refill badvaddr */ .equ LA_CSR_TLBRERA, 0x8a /* TLB refill ERA */ .equ LA_CSR_TLBRSAVE, 0x8b /* KScratch for TLB refill exception */ .equ LA_CSR_TLBRELO0, 0x8c /* TLB refill entrylo0 */ .equ LA_CSR_TLBRELO1, 0x8d /* TLB refill entrylo1 */ .equ LA_CSR_TLBREHI, 0x8e /* TLB refill entryhi */ .section .text .balign 4096 .global handle_tlb_refill handle_tlb_refill: csrwr $t0, LA_CSR_TLBRSAVE csrrd $t0, LA_CSR_PGD lddir $t0, $t0, 3 lddir $t0, $t0, 1 ldpte $t0, 0 ldpte $t0, 1 tlbfill csrrd $t0, LA_CSR_TLBRSAVE ertn
jtzhpf/starry
1,965
modules/axhal/src/platform/x86_pc/ap_start.S
# Boot application processors into the protected mode. # Each non-boot CPU ("AP") is started up in response to a STARTUP # IPI from the boot CPU. Section B.4.2 of the Multi-Processor # Specification says that the AP will start in real mode with CS:IP # set to XY00:0000, where XY is an 8-bit value sent with the # STARTUP. Thus this code must start at a 4096-byte boundary. # # Because this code sets DS to zero, it must sit # at an address in the low 2^16 bytes. .equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} .equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} .equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} .equ stack_ptr, {start_page_paddr} + 0xff0 .equ entry_ptr, {start_page_paddr} + 0xff8 # 0x6000 .section .text .code16 .p2align 12 .global ap_start ap_start: cli wbinvd xor ax, ax mov ds, ax mov es, ax mov ss, ax mov fs, ax mov gs, ax # load the 64-bit GDT lgdt [pa_ap_gdt_desc] # switch to protected-mode mov eax, cr0 or eax, (1 << 0) mov cr0, eax # far jump to 32-bit code. 0x8 is code32 segment selector ljmp 0x8, offset pa_ap_start32 .code32 ap_start32: mov esp, [stack_ptr] mov eax, [entry_ptr] jmp eax .balign 8 # .type multiboot_header, STT_OBJECT .Lap_tmp_gdt_desc: .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit .long pa_ap_gdt # base .balign 16 .Lap_tmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Lap_tmp_gdt_end: # 0x7000 .p2align 12 .global ap_end ap_end:
jtzhpf/starry
4,307
modules/axhal/src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp 
.Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 510 # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508
jtzhpf/starry
298
modules/axtask/src/copy.S
.altmacro .macro COPY n ld t2, (\n)*8(a0) sd t2, (\n)*8(a1) .endm .section .text .globl __copy __copy: # __copy( # frame_address: *const TrapFrame, # kernel_base: *mut T # ) .set n, 0 .rept 33 COPY %n .set n, n + 1 .endr ret
jtzhpf/starry
306
modules/axtask/src/copy-la64.S
.altmacro .macro COPY n ld.d $t2, $a0, (\n)*8 st.d $t2, $a1, (\n)*8 .endm .section .text .globl __copy __copy: # __copy( # frame_address: *const TrapFrame, # kernel_base: *mut T # ) .set n, 0 .rept 38 COPY %n .set n, n + 1 .endr ret
jtzhpf/starry
2,868
crates/alter_trap/src/trap.S
.section .text // a0: [input] read addr; [output] value to be read // a1: [input] should be 0; [output] 0 if ok, scause if trapped .type __alter_trap_read_usize, %function __alter_trap_read_usize: #mv a1, zero ld a0, 0(a0) ret // a0: [input] write addr // a1: [input] should be 0; [output] 0 if ok, scause if trapped // a2: [input] value to be write .type __alter_trap_write_usize, %function __alter_trap_write_usize: #mv a1, zero sd a2, 0(a0) ret // a0: [input] read/write addr; [output] value to be read // a1: [input] should be 0; [output] 0 if ok, scause if trapped .type __alter_trap_read_write_usize, %function __alter_trap_read_write_usize: #mv a1, zero mv a2, a0 ld a0, 0(a2) sd a1, 0(a2) sd a0, 0(a2) ret // a0: [input] read addr; [output] value to be read // a1: [input] should be 0; [output] 0 if ok, scause if trapped .type __alter_trap_read_u8, %function __alter_trap_read_u8: #mv a1, zero lb a0, 0(a0) ret // a0: [input] write addr // a1: [input] should be 0; [output] 0 if ok, scause if trapped // a2: [input] value to be write .type __alter_trap_write_u8, %function __alter_trap_write_u8: #mv a1, zero sb a2, 0(a0) ret // a0: [input] start addr of slice; // a1: [input] should be 0; [output] 0 if ok, scause if trapped // a2: [input] end addr of slice; .type __alter_trap_check_slice_readable, %function __alter_trap_check_slice_readable: #mv a1, zero lb t0, 0(a0) # try read at start point of slice lui t1, 0x1000 # page size sub t0, zero, t1 # mask of page size, =0xFFFFFFFFFFFFF000 and a0, a0, t0 add a0, a0, t1 # a0 switch to next page .Lcheck_loop_read: bltu a2, a0, .Lcheck_end_read # check if a0 crossed endpoint a2 lb t0, 0(a0) add a0, a0, t1 # a0 switch to next page j .Lcheck_loop_read .Lcheck_end_read: ret // a0: [input] start addr of slice; // a1: [input] should be 0; [output] 0 if ok, scause if trapped // a2: [input] end addr of slice; .type __alter_trap_check_slice_writable, %function __alter_trap_check_slice_writable: #mv a1, zero sb a1, 0(a0) # try write at 
start point of slice # we must carefully write BYTE instand of DWORD, # cuz the other bytes may belong to other objects. lui t1, 0x1000 # page size sub t0, zero, t1 # mask of page size, =0xFFFFFFFFFFFFF000 and a0, a0, t0 add a0, a0, t1 # a0 switch to next page .Lcheck_loop_write: bltu a2, a0, .Lcheck_end_write # check if a0 crossed endpoint a2 sb a1, 0(a0) add a0, a0, t1 # a0 switch to next page j .Lcheck_loop_write .Lcheck_end_write: ret // if trapped, write scause to a1, // and return next intr of __alter_trap_(read/write)_at .type __alter_trap_entry, %function .align 2 __alter_trap_entry: csrw sepc, ra # ra -> __try_x_user_u8's return addr csrr a1, scause sret
juliawalowska/Regulator-PID-ogrzewanie
25,517
projekt/Core/Startup/startup_stm32f746zgtx.s
/** ****************************************************************************** * @file startup_stm32f746xx.s * @author MCD Application Team * @brief STM32F746xx Devices vector table for GCC based toolchain. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M7 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * Copyright (c) 2016 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ .syntax unified .cpu cortex-m7 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. 
* @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Copy the data segment initializers from flash to SRAM */ ldr r0, =_sdata ldr r1, =_edata ldr r2, =_sidata movs r3, #0 b LoopCopyDataInit CopyDataInit: ldr r4, [r2, r3] str r4, [r0, r3] adds r3, r3, #4 LoopCopyDataInit: adds r4, r0, r3 cmp r4, r1 bcc CopyDataInit /* Zero fill the bss segment. */ ldr r2, =_sbss ldr r4, =_ebss movs r3, #0 b LoopFillZerobss FillZerobss: str r3, [r2] adds r2, r2, #4 LoopFillZerobss: cmp r2, r4 bcc FillZerobss /* Call the clock system initialization function.*/ bl SystemInit /* Call static constructors */ bl __libc_init_array /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M7. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler /* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 
Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FMC_IRQHandler /* FMC */ .word SDMMC1_IRQHandler /* SDMMC1 */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word ETH_IRQHandler /* Ethernet */ .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ .word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out 
*/ .word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ .word OTG_HS_IRQHandler /* USB OTG HS */ .word DCMI_IRQHandler /* DCMI */ .word 0 /* Reserved */ .word RNG_IRQHandler /* Rng */ .word FPU_IRQHandler /* FPU */ .word UART7_IRQHandler /* UART7 */ .word UART8_IRQHandler /* UART8 */ .word SPI4_IRQHandler /* SPI4 */ .word SPI5_IRQHandler /* SPI5 */ .word SPI6_IRQHandler /* SPI6 */ .word SAI1_IRQHandler /* SAI1 */ .word LTDC_IRQHandler /* LTDC */ .word LTDC_ER_IRQHandler /* LTDC error */ .word DMA2D_IRQHandler /* DMA2D */ .word SAI2_IRQHandler /* SAI2 */ .word QUADSPI_IRQHandler /* QUADSPI */ .word LPTIM1_IRQHandler /* LPTIM1 */ .word CEC_IRQHandler /* HDMI_CEC */ .word I2C4_EV_IRQHandler /* I2C4 Event */ .word I2C4_ER_IRQHandler /* I2C4 Error */ .word SPDIF_RX_IRQHandler /* SPDIF_RX */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. 
* *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak WWDG_IRQHandler .thumb_set WWDG_IRQHandler,Default_Handler .weak PVD_IRQHandler .thumb_set PVD_IRQHandler,Default_Handler .weak TAMP_STAMP_IRQHandler .thumb_set TAMP_STAMP_IRQHandler,Default_Handler .weak RTC_WKUP_IRQHandler .thumb_set RTC_WKUP_IRQHandler,Default_Handler .weak FLASH_IRQHandler .thumb_set FLASH_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler .thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set DMA1_Stream6_IRQHandler,Default_Handler .weak ADC_IRQHandler 
.thumb_set ADC_IRQHandler,Default_Handler .weak CAN1_TX_IRQHandler .thumb_set CAN1_TX_IRQHandler,Default_Handler .weak CAN1_RX0_IRQHandler .thumb_set CAN1_RX0_IRQHandler,Default_Handler .weak CAN1_RX1_IRQHandler .thumb_set CAN1_RX1_IRQHandler,Default_Handler .weak CAN1_SCE_IRQHandler .thumb_set CAN1_SCE_IRQHandler,Default_Handler .weak EXTI9_5_IRQHandler .thumb_set EXTI9_5_IRQHandler,Default_Handler .weak TIM1_BRK_TIM9_IRQHandler .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler .weak TIM1_UP_TIM10_IRQHandler .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler .weak TIM1_TRG_COM_TIM11_IRQHandler .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI15_10_IRQHandler .thumb_set EXTI15_10_IRQHandler,Default_Handler .weak RTC_Alarm_IRQHandler .thumb_set RTC_Alarm_IRQHandler,Default_Handler .weak OTG_FS_WKUP_IRQHandler .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler .weak TIM8_BRK_TIM12_IRQHandler .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler .weak TIM8_UP_TIM13_IRQHandler .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler .weak TIM8_TRG_COM_TIM14_IRQHandler .thumb_set 
TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak FMC_IRQHandler .thumb_set FMC_IRQHandler,Default_Handler .weak SDMMC1_IRQHandler .thumb_set SDMMC1_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_DAC_IRQHandler .thumb_set TIM6_DAC_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak ETH_IRQHandler .thumb_set ETH_IRQHandler,Default_Handler .weak ETH_WKUP_IRQHandler .thumb_set ETH_WKUP_IRQHandler,Default_Handler .weak CAN2_TX_IRQHandler .thumb_set CAN2_TX_IRQHandler,Default_Handler .weak CAN2_RX0_IRQHandler .thumb_set CAN2_RX0_IRQHandler,Default_Handler .weak CAN2_RX1_IRQHandler .thumb_set CAN2_RX1_IRQHandler,Default_Handler .weak CAN2_SCE_IRQHandler .thumb_set CAN2_SCE_IRQHandler,Default_Handler .weak OTG_FS_IRQHandler .thumb_set OTG_FS_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set 
I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set I2C3_ER_IRQHandler,Default_Handler .weak OTG_HS_EP1_OUT_IRQHandler .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler .weak OTG_HS_EP1_IN_IRQHandler .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler .weak OTG_HS_WKUP_IRQHandler .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler .weak OTG_HS_IRQHandler .thumb_set OTG_HS_IRQHandler,Default_Handler .weak DCMI_IRQHandler .thumb_set DCMI_IRQHandler,Default_Handler .weak RNG_IRQHandler .thumb_set RNG_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler .weak UART7_IRQHandler .thumb_set UART7_IRQHandler,Default_Handler .weak UART8_IRQHandler .thumb_set UART8_IRQHandler,Default_Handler .weak SPI4_IRQHandler .thumb_set SPI4_IRQHandler,Default_Handler .weak SPI5_IRQHandler .thumb_set SPI5_IRQHandler,Default_Handler .weak SPI6_IRQHandler .thumb_set SPI6_IRQHandler,Default_Handler .weak SAI1_IRQHandler .thumb_set SAI1_IRQHandler,Default_Handler .weak LTDC_IRQHandler .thumb_set LTDC_IRQHandler,Default_Handler .weak LTDC_ER_IRQHandler .thumb_set LTDC_ER_IRQHandler,Default_Handler .weak DMA2D_IRQHandler .thumb_set DMA2D_IRQHandler,Default_Handler .weak SAI2_IRQHandler .thumb_set SAI2_IRQHandler,Default_Handler .weak QUADSPI_IRQHandler .thumb_set QUADSPI_IRQHandler,Default_Handler .weak LPTIM1_IRQHandler .thumb_set LPTIM1_IRQHandler,Default_Handler .weak CEC_IRQHandler .thumb_set CEC_IRQHandler,Default_Handler .weak I2C4_EV_IRQHandler .thumb_set I2C4_EV_IRQHandler,Default_Handler .weak I2C4_ER_IRQHandler .thumb_set I2C4_ER_IRQHandler,Default_Handler .weak SPDIF_RX_IRQHandler .thumb_set SPDIF_RX_IRQHandler,Default_Handler
jul-sh/not-an-oak-fork
1,167
oak_restricted_kernel_wrapper/src/asm/boot.s
/* * Copyright 2023 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .boot, "ax" .global _wrapper_entry .code64 _wrapper_entry: # At this point we expect to have a valid page table identity mapping (at least) the lowest 1G # of physical memory. # # Note: don't touch %rsi, as that contains the address of the zero page. # Set up the new stack. lea stack_start(%rip), %rsp # Push 8 bytes to fix stack alignment issue. Because we enter rust64_start with a jmp rather # than a call the function prologue means that the stack is no longer 16-byte aligned. push $0 jmp rust64_start
jul-sh/not-an-oak-fork
9,721
stage0_bin/src/asm/boot.s
.code16 .align 16 .section .text16, "ax" .global _start _start : # Enter long mode. This code is inspired by the approach shown at # https://wiki.osdev.org/Entering_Long_Mode_Directly cli xor %eax, %eax mov %eax, %cr3 # Set up descriptors mov $gdt_desc_offset, %si lgdtl %cs:(%si) mov $idt_desc_offset, %si lidtl %cs:(%si) # Enter protected mode, but don't enable paging. mov %cr0, %eax or $1, %eax mov %eax, %cr0 ljmpl $cs32, $_protected_mode_start .align 16 .code32 .global gp_handler gp_handler: pop %eax # ignore the error code for now pop %eax # pop the return address cmpw $0x320F, (%eax) # are we trying to execute RDMSR? jne 2f # if not, skip ahead add $2, %eax # increment it by 2 (size of the RDMSR instruction) push %eax # push it back on stack for iret xor %eax, %eax # zero out RAX xor %edx, %edx # zero out RDX iret # go back 2: # this wasn't because RDMSR int $8 # trigger a double fault and crash .global vc_handler # Really limited #VC handler that only knows how to fill in EBX in case of CPUID. # As CPUID can alter EAX, EBX, ECX and EDX we zero out the other three registers. vc_handler: pop %ebx # get the error code cmp $0x72, %ebx # is this about CPUID? jne 2f # if not, skip ahead and crash mov (%esp), %ebx # get the instruction pointer cmpw $0xa20f, (%ebx) # was this really a CPUID instruction? jne 2f # if not it might be injected by the hypervisor, skip ahead and crash cmp $0x0, %ecx # are we asked for a CPUID subleaf? jne 2f # if yes, skip ahead, as we don't support subleaves # Use the GHCB MSR protocol to request one page of CPUID information. The protocol itself is # described in Section 2.3.1 of SEV-ES Guest-Hypervisor Communication Block Standardization spec. 
mov %eax, %edx # EDX = EAX (move the CPUID function number to GHCBData[63:32]) mov $0x40000004, %eax # EAX = Request EBX (0b01 << 30) | CPUID Request (0x004) mov $0xC0010130, %ecx # ECX = 0xC001_0130 -- GHCB MSR wrmsr # MSR[ECX] = EDX:EAX rep vmmcall # VMGEXIT rdmsr # EDX:EAX = MSR[ECX] cmp $0x40000005, %eax # EAX should contain EBX data (0b01 << 30) | CPUID Response (0x005) jne 2f # if not, crash addl $2, (%esp) # move return address forward past the CPUID instruction xor %eax, %eax # EAX = 0 mov %edx, %ebx # EBX = EDX (that's where the cpuid value is) xor %ecx, %ecx # ECX = 0 xor %edx, %edx # EDX = 0 iret # go back 2: # this wasn't because CPUID or the response wasn't what we expected int $8 # trigger double fault and crash _protected_mode_start: # Switch to a flat 32-bit data segment, giving us access to all 4G of memory. mov $ds, %eax mov %eax, %ds mov %eax, %es # needed for destination of stosb and movsb mov %eax, %ss # Set up a basic stack, as we may get interrupts. mov $stack_start, %esp # Determine if we're running under SEV. # Keep track of whether encryption is enabled in %ebp. mov $0xc0010131, %ecx # SEV_STATUS MSR. See Section 15.34.10 in AMD64 Architecture Programmer's # Manual, Volume 2 for more details. rdmsr # EDX:EAX <- MSR[ECX] push %edx # Store the raw result for future use on the stack. push %eax and $0b111, %eax # eax &= 0b111; # Bit 0 - SEV enabled # Bit 1 - SEV-ES enabled # Bit 2 - SEV-SNP active mov %eax, %ebp # store the result in EBP for later use # See if we're under SEV-SNP, and if yes, pre-emptively PVALIDATE the first 640 KiB of memory, # as that's where we'll be storing many data structures. and $0b100, %eax # eax &= 0b100; -- SEV-SNP active test %eax, %eax # is eax zero? 
je 2f # if yes, no SNP, skip validation and jump ahead mov $0x0000, %ebx # ebx = 0x0000 -- start address xor %ecx, %ecx # ecx = 0 -- we're using 4K pages mov $0b1, %edx # edx = 1 -- set RMP VALIDATED bit 1: mov %ebx, %eax # eax = ebx (PVALIDATE will clobber EAX) pvalidate # set validated bit in RMP, but ignore results for now add $0x1000, %ebx # ebx += 0x1000 cmp $0xa0000, %ebx # have we covered the full 640 KiB? jl 1b # if no, go back 2: # Clear BSS: base address goes to EDI, value (0) goes to EAX, count goes into ECX. mov $bss_start, %edi mov $bss_size, %ecx xor %eax, %eax rep stosb mov $ap_bss_start, %edi mov $ap_bss_size, %ecx xor %eax, %eax rep stosb # now that BSS is set up, initialize the raw Rust variables pop %eax pop %edx mov %eax, (SEV_STATUS) # Initialize the SEV_STATUS static variable in Rust. mov %edx, (SEV_STATUS+4) # Copy DATA from the ROM image (stored just after TEXT) to the expected location. # Source address goes to ESI, destination goes to EDI, count goes to ECX. mov $text_end, %esi mov $data_start, %edi mov $data_size, %ecx rep movsb # Copy AP bootstrap code to the expected location, similar to DATA above. mov $0xFFFFF000, %esi mov $ap_text_start, %edi mov $ap_text_size, %ecx rep movsb # Set the first entry of PML4 to point to PDPT (0..512GiB). mov ${pdpt}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) mov %esi, ({pml4}) # set first half of PML4[0] # Set the first entry of PDPT to point to PD_0 (0..1GiB). mov ${pd_0}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) mov %esi, ({pdpt}) # set first half of PDPT[0] # Set the fourth entry of PDPT to point to PD_3 (3..4GiB). mov ${pdpt}, %eax mov ${pd_3}, %esi orl $3, %esi # esi |= 3 (PRESENT and WRITABLE) mov %esi, 24(%eax) # set first half of PDPT[3], each entry is 8 bytes # Set the first entry of PD_0 to point to and identity mapped huge page (0..2MiB). 
mov $0x83, %esi # esi = 0x0 | 131 (PRESENT and WRITABLE and HUGE_PAGE) mov %esi, ({pd_0}) # set first half of PD_0[0] # Set the last entry of PD_3 to point to an identity-mapped 2MiB huge page ((4GiB-2MiB)..4GiB). # This is where the firmware ROM image is mapped, so we don't make it writable. mov ${pd_3}, %eax mov $0xFFE00000, %esi # address of 4GiB-2MiB orl $0x81, %esi # esi |= 129 (PRESENT and HUGE_PAGE) mov %esi, 0xFF8(%eax) # set first half of PML4[511], each entry is 8 bytes # Clear EDI, since we will use it later as the encrypted bit location to pass # into the 64-bit Rust entry point and by default we assume no encryption. xor %edi, %edi # Check whether encryption is enabled. The SEV status is stored in %ebp. test %ebp, %ebp # is it zero? je no_encryption # if yes, jump to no_encryption # Memory encryption enabled: set encrypted bits in the page tables. # First, determine the location of the C-bit in the page tables. # Keep track of which bit is the encrypted bit in EDI. mov $0x8000001F, %eax # EAX = Fn8000_001F - Encrypted Memory Capabilities xor %ecx, %ecx # ECX = 0 - we're not interested in a subpage cpuid # EAX, EBX, ECX, EDX = CPUID(EAX, ECX) and $0b111111, %ebx # zero out all but EBX[5:0], which the C-bit location mov %ebx, %edi # save the full C-bit location value for later to pass into the Rust # entry point (RDI contains the first argument according to sysv ABI) sub $32, %ebx # let's assume the encrypted bit is > 32, as it simplifies logic below mov $1, %esi mov %ebx, %ecx shl %cl, %esi # construct the encrypted bit mask, store it in ESI # We set the encrypted bit for each of the page table entries that we previously created. # The encrypted bit is in the second half of each 8-byte entry, so we add an extra offset of 4 bytes. 
mov ${pml4}, %eax mov %esi, 4(%eax) # set second half of PML4[0] mov ${pdpt}, %eax mov %esi, 4(%eax) # set second half of PDPT[0] mov %esi, 28(%eax) # set second half of PDPT[3], each entry is 8 bytes mov ${pd_0}, %eax mov %esi, 4(%eax) # set second half of PD_0[0] mov ${pd_3}, %eax mov %esi, 0xFFC(%eax) # set second half of PD_3[511], each entry is 8 bytes no_encryption: # Load PML4 mov ${pml4}, %eax mov %eax, %cr3 # PAE mov $0b100000, %eax mov %eax, %cr4 # Read EFER, enable LME mov $0xC0000080, %ecx rdmsr or $0x00000100, %eax wrmsr # Protected mode + paging mov %cr0, %eax or $0x80000001, %eax mov %eax, %cr0 # Reload CS, enter long mode, jump to 64-bit code. ljmpl $cs, $_long_mode_start .align 16 .code64 _long_mode_start: # Clean up data segments. movw $ds, %ax movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movw %ax, %ss # Set up the stack. mov $stack_start, %esp push $0 # ...and jump to Rust code. jmp rust64_start
jul-sh/not-an-oak-fork
695
stage0_bin/src/asm/reset_vector.s
/* * Copyright 2022 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .reset_vector, "ax" .global reset_vector .code16 reset_vector: jmp _start
jul-sh/not-an-oak-fork
2,005
stage0_bin/src/asm/ap_boot.s
.code16 .section .ap_text, "ax" # Entry point for APs. This needs to be page-aligned. .align 4096 .global ap_start ap_start: # Let the BSP know we're alive. lock incl (LIVE_AP_COUNT) 1: hlt jmp 1b # Under SEV-ES, we need to use the AP Reset Hold and AP Jump Tables. We could munge all of it into # `ap_start` above, but it's simpler to keep it separate as if we ever run this code we know we're # under SEV-ES without risking any exceptions (and thus avoid the need for an IDT). .global sev_es_start sev_es_start: lock incl (LIVE_AP_COUNT) 1: xor %edx, %edx # EDX = 0x0 mov $0x006, %eax # EAX = 0x007 - AP Reset Hold mov $0xC0010130, %ecx # ECX = 0xC001_0130 -- GHCB MSR wrmsr # MSR[ECX] = EDX:EAX rep vmmcall # VMGEXIT rdmsr # EDX:EAX = MSR[ECX] # Check return value: GHCBData[63:12] must be non-zero, GHCBData[12:0] must be 0x007 mov %eax, %ebx # EBX = EAX and $0xFFF, %ebx # EBX |= 0xFFF (leave lowest 12 bits) cmp $0x007, %ebx # is the response AP Reset Hold Response? jne 1b # No. Go back to sleep. and $-0xFFF, %eax # EAX |= ~0xFFF (mask lowest 12 bits) add %edx, %eax # EAX += EDX test %eax, %eax # is GHDBData[63:12] zero? je 1b # Yes. Go back to sleep. # Determine where to jump from the AP Jump Table and off we go # First is IP, second is CS mov $AP_JUMP_TABLE, %sp # treat the jump table as stack iret # pop IP, pop CS, pop EFLAGS # if we're still here, something has gone wrong xor %edx, %edx # EDX = 0x0 mov $0x100, %eax # EAX = 0x100 - Termination Request mov $0xC0010130, %ecx # ECX = 0xC001_0130 -- GHCB MSR wrmsr # MSR[ECX] = EDX:EAX rep vmmcall # VMGEXIT 1: # If we're still alive, just go into a HLT loop. hlt jmp 1b
jul-sh/not-an-oak-fork
3,078
oak_restricted_kernel/src/boot/boot.s
/* * Copyright 2022 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .boot, "ax" .global _oak_start .code64 _oak_start: # At this point we expect to have a valid page table identity mapping (at least) the lowest 1G # of physical memory; that means that the first PML4 entry must point to a valid PDP, and the # first entry of that PDP must point to a valid PD. # Our goal is to map the first (physical) gigabyte to -2 GB in virtual address space; thus, we # need to make the last entry of the PML4 (covering the last 256T) point to a PDP, and the # second-to-last entry in that PDP point to the same PD as the PD in the lower half. # # We can reuse the existing data structures to achieve that goal. By pointing the last entry # of PML4 to the same PD as the first entry, and setting the second-to-last entry of that PD # to be the same as the first, we get our desired effect of mapping physical address 0x0 to # virtual address 0xFFFFFFFF80000000. As a side effect, this will map physical address 0x0 to # virtual address 0x0000007F80000000 (510*1G) as well, but that's fine. We'll be rewriting # the page tables soon after jumping to the kernel anyway. # # Note: don't touch %rsi, as that contains the address of the zero page. # Map the last entry of PML4 to the same location as the first. movq %cr3, %rbx # rbx = cr3 movq (%rbx), %rax # rax = *rbx movq %rax, 4088(%rbx) # rbx[511] = rax # Map the last entry of PDP to the same location as the first. 
# We're ignoring bit 51 (as that's commonly the encrypted bit). movabsq $0x0007FFFFFFFFF000, %rax # rax = $const andq (%rbx), %rax # rax = *rbx & rax (mask out all but the address) movq (%rax), %rdx # rdx = *rax movq %rdx, 4080(%rax) # rax[510] = rdx # Enable PGE (https://wiki.osdev.org/CPU_Registers_x86-64#CR4) movq %cr4, %rax orq $0b10000000, %rax movq %rax, %cr4 # Finally, trigger a full TLB flush by overwriting CR3, even if it is the same value. movq %rbx, %cr3 # Clear BSS: base address goes to RDI, value goes to AX, count goes into CX. mov $bss_start, %rdi mov $bss_size, %rcx xor %rax, %rax rep stosb mov $stack_start, %rsp # Push 8 bytes to fix stack alignment issue. Because we enter rust64_start with a jmp rather # than a call the function prologue means that the stack is no longer 16-byte aligned. push $0 jmp rust64_start
jul-sh/not-an-oak-fork
1,079
testing/sev_snp_hello_world_kernel/src/asm/boot.s
/* * Copyright 2022 The Project Oak Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .boot, "ax" .global _start .code64 _start: # Clear BSS: base address goes to RDI, value goes to AX, count goes into CX. mov $bss_start, %rdi mov $bss_size, %rcx xor %rax, %rax rep stosb mov $stack_start, %rsp # Push 8 bytes to fix stack alignment issue. Because we enter rust64_start # with a jmp rather than a call the function prologue means that the stack # is no longer 16-byte aligned. push $0 jmp rust64_start
junkicide/provekit
20,914
skyscraper/block-multiplier/src/aarch64/montgomery_square_interleaved_4.s
// GENERATED FILE, DO NOT EDIT! // in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3], // in("x4") a1[0], in("x5") a1[1], in("x6") a1[2], in("x7") a1[3], // in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3], // lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3], // lateout("x4") out1[0], lateout("x5") out1[1], lateout("x6") out1[2], lateout("x7") out1[3], // lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3], // lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("x20") _, lateout("x21") _, lateout("x22") _, lateout("x23") _, lateout("x24") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, // lateout("lr") _ mov x8, #4503599627370495 mul x9, x0, x0 dup.2d v4, x8 umulh x10, x0, x0 mov x11, #5075556780046548992 mul x12, x0, x1 dup.2d v5, x11 mov x11, #1 umulh x13, x0, x1 movk x11, #18032, lsl 48 adds x10, x12, x10 cinc x14, x13, hs dup.2d v6, x11 mul x11, x0, x2 shl.2d v7, v1, #14 shl.2d v8, v2, #26 umulh x15, x0, x2 shl.2d v9, v3, #38 adds x14, x11, x14 cinc x16, x15, hs ushr.2d v3, v3, #14 mul x17, x0, x3 shl.2d v10, v0, #2 usra.2d v7, v0, #50 umulh x0, x0, x3 usra.2d v8, v1, #38 adds x16, x17, x16 cinc x20, x0, hs usra.2d v9, v2, #26 adds x10, x12, x10 cinc x12, x13, hs and.16b v0, v10, v4 and.16b v1, v7, v4 mul x13, x1, x1 and.16b v2, v8, v4 umulh x21, x1, x1 and.16b v7, v9, v4 adds x12, x13, x12 cinc x13, x21, hs mov x21, #13605374474286268416 adds x12, x12, x14 cinc x13, x13, hs dup.2d v8, x21 mov x14, #6440147467139809280 mul x21, x1, x2 dup.2d v9, x14 umulh x14, x1, x2 mov x22, #3688448094816436224 
adds x13, x21, x13 cinc x23, x14, hs dup.2d v10, x22 mov x22, #9209861237972664320 adds x13, x13, x16 cinc x16, x23, hs dup.2d v11, x22 mul x22, x1, x3 mov x23, #12218265789056155648 umulh x1, x1, x3 dup.2d v12, x23 mov x23, #17739678932212383744 adds x16, x22, x16 cinc x24, x1, hs dup.2d v13, x23 adds x16, x16, x20 cinc x20, x24, hs mov x23, #2301339409586323456 adds x11, x11, x12 cinc x12, x15, hs dup.2d v14, x23 mov x15, #7822752552742551552 adds x12, x21, x12 cinc x14, x14, hs dup.2d v15, x15 adds x12, x12, x13 cinc x13, x14, hs mov x14, #5071053180419178496 mul x15, x2, x2 dup.2d v16, x14 mov x14, #16352570246982270976 umulh x21, x2, x2 dup.2d v17, x14 adds x13, x15, x13 cinc x14, x21, hs ucvtf.2d v0, v0 adds x13, x13, x16 cinc x14, x14, hs ucvtf.2d v1, v1 mul x15, x2, x3 ucvtf.2d v2, v2 ucvtf.2d v7, v7 umulh x2, x2, x3 ucvtf.2d v3, v3 adds x14, x15, x14 cinc x16, x2, hs mov.16b v18, v5 adds x14, x14, x20 cinc x16, x16, hs fmla.2d v18, v0, v0 fsub.2d v19, v6, v18 adds x12, x17, x12 cinc x0, x0, hs fmla.2d v19, v0, v0 adds x0, x22, x0 cinc x1, x1, hs add.2d v10, v10, v18 adds x0, x0, x13 cinc x1, x1, hs add.2d v8, v8, v19 mov.16b v18, v5 adds x1, x15, x1 cinc x2, x2, hs fmla.2d v18, v0, v1 adds x1, x1, x14 cinc x2, x2, hs fsub.2d v19, v6, v18 mul x13, x3, x3 fmla.2d v19, v0, v1 add.2d v18, v18, v18 umulh x3, x3, x3 add.2d v19, v19, v19 adds x2, x13, x2 cinc x3, x3, hs add.2d v12, v12, v18 adds x2, x2, x16 cinc x3, x3, hs add.2d v10, v10, v19 mov x13, #48718 mov.16b v18, v5 fmla.2d v18, v0, v2 movk x13, #4732, lsl 16 fsub.2d v19, v6, v18 movk x13, #45078, lsl 32 fmla.2d v19, v0, v2 movk x13, #39852, lsl 48 add.2d v18, v18, v18 add.2d v19, v19, v19 mov x14, #16676 add.2d v14, v14, v18 movk x14, #12692, lsl 16 add.2d v12, v12, v19 movk x14, #20986, lsl 32 mov.16b v18, v5 fmla.2d v18, v0, v7 movk x14, #2848, lsl 48 fsub.2d v19, v6, v18 mov x15, #51052 fmla.2d v19, v0, v7 movk x15, #24721, lsl 16 add.2d v18, v18, v18 add.2d v19, v19, v19 movk x15, #61092, lsl 32 
add.2d v16, v16, v18 movk x15, #45156, lsl 48 add.2d v14, v14, v19 mov x16, #3197 mov.16b v18, v5 fmla.2d v18, v0, v3 movk x16, #18936, lsl 16 fsub.2d v19, v6, v18 movk x16, #10922, lsl 32 fmla.2d v19, v0, v3 movk x16, #11014, lsl 48 add.2d v0, v18, v18 mul x17, x13, x9 add.2d v18, v19, v19 add.2d v0, v17, v0 umulh x13, x13, x9 add.2d v16, v16, v18 adds x12, x17, x12 cinc x13, x13, hs mov.16b v17, v5 mul x17, x14, x9 fmla.2d v17, v1, v1 fsub.2d v18, v6, v17 umulh x14, x14, x9 fmla.2d v18, v1, v1 adds x13, x17, x13 cinc x14, x14, hs add.2d v14, v14, v17 adds x0, x13, x0 cinc x13, x14, hs add.2d v12, v12, v18 mov.16b v17, v5 mul x14, x15, x9 fmla.2d v17, v1, v2 umulh x15, x15, x9 fsub.2d v18, v6, v17 adds x13, x14, x13 cinc x14, x15, hs fmla.2d v18, v1, v2 add.2d v17, v17, v17 adds x1, x13, x1 cinc x13, x14, hs add.2d v18, v18, v18 mul x14, x16, x9 add.2d v16, v16, v17 umulh x9, x16, x9 add.2d v14, v14, v18 adds x13, x14, x13 cinc x9, x9, hs mov.16b v17, v5 fmla.2d v17, v1, v7 adds x2, x13, x2 cinc x9, x9, hs fsub.2d v18, v6, v17 add x3, x3, x9 fmla.2d v18, v1, v7 mov x9, #56431 add.2d v17, v17, v17 add.2d v18, v18, v18 movk x9, #30457, lsl 16 add.2d v0, v0, v17 movk x9, #30012, lsl 32 add.2d v16, v16, v18 movk x9, #6382, lsl 48 mov.16b v17, v5 fmla.2d v17, v1, v3 mov x13, #59151 fsub.2d v18, v6, v17 movk x13, #41769, lsl 16 fmla.2d v18, v1, v3 movk x13, #32276, lsl 32 add.2d v1, v17, v17 add.2d v17, v18, v18 movk x13, #21677, lsl 48 add.2d v1, v15, v1 mov x14, #34015 add.2d v0, v0, v17 movk x14, #20342, lsl 16 mov.16b v15, v5 fmla.2d v15, v2, v2 movk x14, #13935, lsl 32 fsub.2d v17, v6, v15 movk x14, #11030, lsl 48 fmla.2d v17, v2, v2 mov x15, #13689 add.2d v0, v0, v15 movk x15, #8159, lsl 16 add.2d v15, v16, v17 mov.16b v16, v5 movk x15, #215, lsl 32 fmla.2d v16, v2, v7 movk x15, #4913, lsl 48 fsub.2d v17, v6, v16 mul x16, x9, x10 fmla.2d v17, v2, v7 add.2d v16, v16, v16 umulh x9, x9, x10 add.2d v17, v17, v17 adds x12, x16, x12 cinc x9, x9, hs add.2d v1, v1, v16 
mul x16, x13, x10 add.2d v0, v0, v17 mov.16b v16, v5 umulh x13, x13, x10 fmla.2d v16, v2, v3 adds x9, x16, x9 cinc x13, x13, hs fsub.2d v17, v6, v16 adds x0, x9, x0 cinc x9, x13, hs fmla.2d v17, v2, v3 add.2d v2, v16, v16 mul x13, x14, x10 add.2d v16, v17, v17 umulh x14, x14, x10 add.2d v2, v13, v2 adds x9, x13, x9 cinc x13, x14, hs add.2d v1, v1, v16 mov.16b v13, v5 adds x1, x9, x1 cinc x9, x13, hs fmla.2d v13, v7, v7 mul x13, x15, x10 fsub.2d v16, v6, v13 umulh x10, x15, x10 fmla.2d v16, v7, v7 adds x9, x13, x9 cinc x10, x10, hs add.2d v2, v2, v13 add.2d v1, v1, v16 adds x2, x9, x2 cinc x9, x10, hs mov.16b v13, v5 add x3, x3, x9 fmla.2d v13, v7, v3 mov x9, #61005 fsub.2d v16, v6, v13 fmla.2d v16, v7, v3 movk x9, #58262, lsl 16 add.2d v7, v13, v13 movk x9, #32851, lsl 32 add.2d v13, v16, v16 movk x9, #11582, lsl 48 add.2d v7, v11, v7 add.2d v2, v2, v13 mov x10, #37581 mov.16b v11, v5 movk x10, #43836, lsl 16 fmla.2d v11, v3, v3 movk x10, #36286, lsl 32 fsub.2d v13, v6, v11 fmla.2d v13, v3, v3 movk x10, #51783, lsl 48 add.2d v3, v9, v11 mov x13, #10899 add.2d v7, v7, v13 movk x13, #30709, lsl 16 usra.2d v10, v8, #52 movk x13, #61551, lsl 32 usra.2d v12, v10, #52 usra.2d v14, v12, #52 movk x13, #45784, lsl 48 usra.2d v15, v14, #52 mov x14, #36612 and.16b v8, v8, v4 movk x14, #63402, lsl 16 and.16b v9, v10, v4 and.16b v10, v12, v4 movk x14, #47623, lsl 32 and.16b v4, v14, v4 movk x14, #9430, lsl 48 ucvtf.2d v8, v8 mul x15, x9, x11 mov x16, #37864 movk x16, #1815, lsl 16 umulh x9, x9, x11 movk x16, #28960, lsl 32 adds x12, x15, x12 cinc x9, x9, hs movk x16, #17153, lsl 48 mul x15, x10, x11 dup.2d v11, x16 mov.16b v12, v5 umulh x10, x10, x11 fmla.2d v12, v8, v11 adds x9, x15, x9 cinc x10, x10, hs fsub.2d v13, v6, v12 adds x0, x9, x0 cinc x9, x10, hs fmla.2d v13, v8, v11 add.2d v0, v0, v12 mul x10, x13, x11 add.2d v11, v15, v13 umulh x13, x13, x11 mov x15, #46128 adds x9, x10, x9 cinc x10, x13, hs movk x15, #29964, lsl 16 adds x1, x9, x1 cinc x9, x10, hs movk x15, 
#7587, lsl 32 movk x15, #17161, lsl 48 mul x10, x14, x11 dup.2d v12, x15 umulh x11, x14, x11 mov.16b v13, v5 adds x9, x10, x9 cinc x10, x11, hs fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 adds x2, x9, x2 cinc x9, x10, hs fmla.2d v14, v8, v12 add x3, x3, x9 add.2d v1, v1, v13 mov x9, #65535 add.2d v0, v0, v14 mov x10, #52826 movk x9, #61439, lsl 16 movk x10, #57790, lsl 16 movk x9, #62867, lsl 32 movk x10, #55431, lsl 32 movk x9, #49889, lsl 48 movk x10, #17196, lsl 48 dup.2d v12, x10 mul x9, x9, x12 mov.16b v13, v5 mov x10, #1 fmla.2d v13, v8, v12 movk x10, #61440, lsl 16 fsub.2d v14, v6, v13 movk x10, #62867, lsl 32 fmla.2d v14, v8, v12 add.2d v2, v2, v13 movk x10, #17377, lsl 48 add.2d v1, v1, v14 mov x11, #28817 mov x13, #31276 movk x11, #31161, lsl 16 movk x13, #21262, lsl 16 movk x13, #2304, lsl 32 movk x11, #59464, lsl 32 movk x13, #17182, lsl 48 movk x11, #10291, lsl 48 dup.2d v12, x13 mov x13, #22621 mov.16b v13, v5 fmla.2d v13, v8, v12 movk x13, #33153, lsl 16 fsub.2d v14, v6, v13 movk x13, #17846, lsl 32 fmla.2d v14, v8, v12 movk x13, #47184, lsl 48 add.2d v7, v7, v13 add.2d v2, v2, v14 mov x14, #41001 mov x15, #28672 movk x14, #57649, lsl 16 movk x15, #24515, lsl 16 movk x14, #20082, lsl 32 movk x15, #54929, lsl 32 movk x15, #17064, lsl 48 movk x14, #12388, lsl 48 dup.2d v12, x15 mul x15, x10, x9 mov.16b v13, v5 umulh x10, x10, x9 fmla.2d v13, v8, v12 cmn x15, x12 cinc x10, x10, hs fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 mul x12, x11, x9 add.2d v3, v3, v13 umulh x11, x11, x9 add.2d v7, v7, v14 adds x10, x12, x10 cinc x11, x11, hs ucvtf.2d v8, v9 mov x12, #44768 adds x0, x10, x0 cinc x10, x11, hs movk x12, #51919, lsl 16 mul x11, x13, x9 movk x12, #6346, lsl 32 umulh x13, x13, x9 movk x12, #17133, lsl 48 dup.2d v9, x12 adds x10, x11, x10 cinc x11, x13, hs mov.16b v12, v5 adds x1, x10, x1 cinc x10, x11, hs fmla.2d v12, v8, v9 mul x11, x14, x9 fsub.2d v13, v6, v12 fmla.2d v13, v8, v9 umulh x9, x14, x9 add.2d v0, v0, v12 adds x10, x11, x10 cinc x9, x9, hs 
add.2d v9, v11, v13 adds x2, x10, x2 cinc x9, x9, hs mov x10, #47492 movk x10, #23630, lsl 16 add x3, x3, x9 movk x10, #49985, lsl 32 mul x9, x4, x4 movk x10, #17168, lsl 48 umulh x11, x4, x4 dup.2d v11, x10 mul x10, x4, x5 mov.16b v12, v5 fmla.2d v12, v8, v11 umulh x12, x4, x5 fsub.2d v13, v6, v12 adds x11, x10, x11 cinc x13, x12, hs fmla.2d v13, v8, v11 mul x14, x4, x6 add.2d v1, v1, v12 add.2d v0, v0, v13 umulh x15, x4, x6 mov x16, #57936 adds x13, x14, x13 cinc x17, x15, hs movk x16, #54828, lsl 16 mul x20, x4, x7 movk x16, #18292, lsl 32 movk x16, #17197, lsl 48 umulh x4, x4, x7 dup.2d v11, x16 adds x16, x20, x17 cinc x17, x4, hs mov.16b v12, v5 adds x10, x10, x11 cinc x11, x12, hs fmla.2d v12, v8, v11 fsub.2d v13, v6, v12 mul x12, x5, x5 fmla.2d v13, v8, v11 umulh x21, x5, x5 add.2d v2, v2, v12 adds x11, x12, x11 cinc x12, x21, hs add.2d v1, v1, v13 adds x11, x11, x13 cinc x12, x12, hs mov x13, #17708 movk x13, #43915, lsl 16 mul x21, x5, x6 movk x13, #64348, lsl 32 umulh x22, x5, x6 movk x13, #17188, lsl 48 adds x12, x21, x12 cinc x23, x22, hs dup.2d v11, x13 mov.16b v12, v5 adds x12, x12, x16 cinc x13, x23, hs fmla.2d v12, v8, v11 mul x16, x5, x7 fsub.2d v13, v6, v12 umulh x5, x5, x7 fmla.2d v13, v8, v11 add.2d v7, v7, v12 adds x13, x16, x13 cinc x23, x5, hs add.2d v2, v2, v13 adds x13, x13, x17 cinc x17, x23, hs mov x23, #29184 adds x11, x14, x11 cinc x14, x15, hs movk x23, #20789, lsl 16 movk x23, #19197, lsl 32 adds x14, x21, x14 cinc x15, x22, hs movk x23, #17083, lsl 48 adds x12, x14, x12 cinc x14, x15, hs dup.2d v11, x23 mul x15, x6, x6 mov.16b v12, v5 fmla.2d v12, v8, v11 umulh x21, x6, x6 fsub.2d v13, v6, v12 adds x14, x15, x14 cinc x15, x21, hs fmla.2d v13, v8, v11 adds x13, x14, x13 cinc x14, x15, hs add.2d v3, v3, v12 mul x15, x6, x7 add.2d v7, v7, v13 ucvtf.2d v8, v10 umulh x6, x6, x7 mov x21, #58856 adds x14, x15, x14 cinc x22, x6, hs movk x21, #14953, lsl 16 adds x14, x14, x17 cinc x17, x22, hs movk x21, #15155, lsl 32 movk x21, #17181, lsl 48 
adds x12, x20, x12 cinc x4, x4, hs dup.2d v10, x21 adds x4, x16, x4 cinc x5, x5, hs mov.16b v11, v5 adds x4, x4, x13 cinc x5, x5, hs fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 adds x5, x15, x5 cinc x6, x6, hs fmla.2d v12, v8, v10 adds x5, x5, x14 cinc x6, x6, hs add.2d v0, v0, v11 mul x13, x7, x7 add.2d v9, v9, v12 mov x14, #35392 umulh x7, x7, x7 movk x14, #12477, lsl 16 adds x6, x13, x6 cinc x7, x7, hs movk x14, #56780, lsl 32 adds x6, x6, x17 cinc x7, x7, hs movk x14, #17142, lsl 48 mov x13, #48718 dup.2d v10, x14 mov.16b v11, v5 movk x13, #4732, lsl 16 fmla.2d v11, v8, v10 movk x13, #45078, lsl 32 fsub.2d v12, v6, v11 movk x13, #39852, lsl 48 fmla.2d v12, v8, v10 add.2d v1, v1, v11 mov x14, #16676 add.2d v0, v0, v12 movk x14, #12692, lsl 16 mov x15, #9848 movk x14, #20986, lsl 32 movk x15, #54501, lsl 16 movk x15, #31540, lsl 32 movk x14, #2848, lsl 48 movk x15, #17170, lsl 48 mov x16, #51052 dup.2d v10, x15 movk x16, #24721, lsl 16 mov.16b v11, v5 fmla.2d v11, v8, v10 movk x16, #61092, lsl 32 fsub.2d v12, v6, v11 movk x16, #45156, lsl 48 fmla.2d v12, v8, v10 mov x15, #3197 add.2d v2, v2, v11 add.2d v1, v1, v12 movk x15, #18936, lsl 16 mov x17, #9584 movk x15, #10922, lsl 32 movk x17, #63883, lsl 16 movk x15, #11014, lsl 48 movk x17, #18253, lsl 32 mul x20, x13, x9 movk x17, #17190, lsl 48 dup.2d v10, x17 umulh x13, x13, x9 mov.16b v11, v5 adds x12, x20, x12 cinc x13, x13, hs fmla.2d v11, v8, v10 mul x17, x14, x9 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 umulh x14, x14, x9 add.2d v7, v7, v11 adds x13, x17, x13 cinc x14, x14, hs add.2d v2, v2, v12 adds x4, x13, x4 cinc x13, x14, hs mov x14, #51712 movk x14, #16093, lsl 16 mul x17, x16, x9 movk x14, #30633, lsl 32 umulh x16, x16, x9 movk x14, #17068, lsl 48 adds x13, x17, x13 cinc x16, x16, hs dup.2d v10, x14 mov.16b v11, v5 adds x5, x13, x5 cinc x13, x16, hs fmla.2d v11, v8, v10 mul x14, x15, x9 fsub.2d v12, v6, v11 umulh x9, x15, x9 fmla.2d v12, v8, v10 adds x13, x14, x13 cinc x9, x9, hs add.2d v3, v3, v11 
add.2d v7, v7, v12 adds x6, x13, x6 cinc x9, x9, hs ucvtf.2d v4, v4 add x7, x7, x9 mov x9, #34724 mov x13, #56431 movk x9, #40393, lsl 16 movk x9, #23752, lsl 32 movk x13, #30457, lsl 16 movk x9, #17184, lsl 48 movk x13, #30012, lsl 32 dup.2d v8, x9 movk x13, #6382, lsl 48 mov.16b v10, v5 fmla.2d v10, v4, v8 mov x9, #59151 fsub.2d v11, v6, v10 movk x9, #41769, lsl 16 fmla.2d v11, v4, v8 movk x9, #32276, lsl 32 add.2d v0, v0, v10 add.2d v8, v9, v11 movk x9, #21677, lsl 48 mov x14, #25532 mov x15, #34015 movk x14, #31025, lsl 16 movk x15, #20342, lsl 16 movk x14, #10002, lsl 32 movk x14, #17199, lsl 48 movk x15, #13935, lsl 32 dup.2d v9, x14 movk x15, #11030, lsl 48 mov.16b v10, v5 mov x14, #13689 fmla.2d v10, v4, v9 movk x14, #8159, lsl 16 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 movk x14, #215, lsl 32 add.2d v1, v1, v10 movk x14, #4913, lsl 48 add.2d v0, v0, v11 mul x16, x13, x10 mov x17, #18830 movk x17, #2465, lsl 16 umulh x13, x13, x10 movk x17, #36348, lsl 32 adds x12, x16, x12 cinc x13, x13, hs movk x17, #17194, lsl 48 mul x16, x9, x10 dup.2d v9, x17 mov.16b v10, v5 umulh x9, x9, x10 fmla.2d v10, v4, v9 adds x13, x16, x13 cinc x9, x9, hs fsub.2d v11, v6, v10 adds x4, x13, x4 cinc x9, x9, hs fmla.2d v11, v4, v9 add.2d v2, v2, v10 mul x13, x15, x10 add.2d v1, v1, v11 umulh x15, x15, x10 mov x16, #21566 adds x9, x13, x9 cinc x13, x15, hs movk x16, #43708, lsl 16 movk x16, #57685, lsl 32 adds x5, x9, x5 cinc x9, x13, hs movk x16, #17185, lsl 48 mul x13, x14, x10 dup.2d v9, x16 umulh x10, x14, x10 mov.16b v10, v5 adds x9, x13, x9 cinc x10, x10, hs fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 adds x6, x9, x6 cinc x9, x10, hs fmla.2d v11, v4, v9 add x7, x7, x9 add.2d v7, v7, v10 mov x9, #61005 add.2d v2, v2, v11 mov x10, #3072 movk x9, #58262, lsl 16 movk x10, #8058, lsl 16 movk x9, #32851, lsl 32 movk x10, #46097, lsl 32 movk x9, #11582, lsl 48 movk x10, #17047, lsl 48 dup.2d v9, x10 mov x10, #37581 mov.16b v10, v5 movk x10, #43836, lsl 16 fmla.2d v10, v4, v9 movk x10, 
#36286, lsl 32 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 movk x10, #51783, lsl 48 add.2d v3, v3, v10 mov x13, #10899 add.2d v4, v7, v11 movk x13, #30709, lsl 16 mov x14, #65535 movk x13, #61551, lsl 32 movk x14, #61439, lsl 16 movk x14, #62867, lsl 32 movk x13, #45784, lsl 48 movk x14, #1, lsl 48 mov x15, #36612 umov x16, v8.d[0] movk x15, #63402, lsl 16 umov x17, v8.d[1] mul x16, x16, x14 movk x15, #47623, lsl 32 mul x14, x17, x14 movk x15, #9430, lsl 48 and x16, x16, x8 mul x17, x9, x11 and x8, x14, x8 ins v7.d[0], x16 ins v7.d[1], x8 umulh x8, x9, x11 ucvtf.2d v7, v7 adds x9, x17, x12 cinc x8, x8, hs mov x12, #16 mul x14, x10, x11 movk x12, #22847, lsl 32 movk x12, #17151, lsl 48 umulh x10, x10, x11 dup.2d v9, x12 adds x8, x14, x8 cinc x10, x10, hs mov.16b v10, v5 adds x4, x8, x4 cinc x8, x10, hs fmla.2d v10, v7, v9 fsub.2d v11, v6, v10 mul x10, x13, x11 fmla.2d v11, v7, v9 umulh x12, x13, x11 add.2d v0, v0, v10 adds x8, x10, x8 cinc x10, x12, hs add.2d v8, v8, v11 adds x5, x8, x5 cinc x8, x10, hs mov x10, #20728 movk x10, #23588, lsl 16 mul x12, x15, x11 movk x10, #7790, lsl 32 umulh x11, x15, x11 movk x10, #17170, lsl 48 adds x8, x12, x8 cinc x11, x11, hs dup.2d v9, x10 mov.16b v10, v5 adds x6, x8, x6 cinc x8, x11, hs fmla.2d v10, v7, v9 add x7, x7, x8 fsub.2d v11, v6, v10 mov x8, #65535 fmla.2d v11, v7, v9 add.2d v1, v1, v10 movk x8, #61439, lsl 16 add.2d v0, v0, v11 movk x8, #62867, lsl 32 mov x10, #16000 movk x8, #49889, lsl 48 movk x10, #53891, lsl 16 movk x10, #5509, lsl 32 mul x8, x8, x9 movk x10, #17144, lsl 48 mov x11, #1 dup.2d v9, x10 movk x11, #61440, lsl 16 mov.16b v10, v5 movk x11, #62867, lsl 32 fmla.2d v10, v7, v9 fsub.2d v11, v6, v10 movk x11, #17377, lsl 48 fmla.2d v11, v7, v9 mov x10, #28817 add.2d v2, v2, v10 movk x10, #31161, lsl 16 add.2d v9, v1, v11 mov x12, #46800 movk x10, #59464, lsl 32 movk x12, #2568, lsl 16 movk x10, #10291, lsl 48 movk x12, #1335, lsl 32 mov x13, #22621 movk x12, #17188, lsl 48 dup.2d v1, x12 movk x13, #33153, lsl 
16 mov.16b v10, v5 movk x13, #17846, lsl 32 fmla.2d v10, v7, v1 movk x13, #47184, lsl 48 fsub.2d v11, v6, v10 fmla.2d v11, v7, v1 mov x12, #41001 add.2d v1, v4, v10 movk x12, #57649, lsl 16 add.2d v4, v2, v11 movk x12, #20082, lsl 32 mov x14, #39040 movk x14, #14704, lsl 16 movk x12, #12388, lsl 48 movk x14, #12839, lsl 32 mul x15, x11, x8 movk x14, #17096, lsl 48 umulh x11, x11, x8 dup.2d v2, x14 cmn x15, x9 cinc x11, x11, hs mov.16b v5, v5 fmla.2d v5, v7, v2 mul x9, x10, x8 fsub.2d v6, v6, v5 umulh x10, x10, x8 fmla.2d v6, v7, v2 adds x9, x9, x11 cinc x10, x10, hs add.2d v5, v3, v5 add.2d v6, v1, v6 adds x4, x9, x4 cinc x9, x10, hs ssra.2d v0, v8, #52 mul x10, x13, x8 ssra.2d v9, v0, #52 umulh x11, x13, x8 ssra.2d v4, v9, #52 ssra.2d v6, v4, #52 adds x9, x10, x9 cinc x10, x11, hs ssra.2d v5, v6, #52 adds x5, x9, x5 cinc x9, x10, hs ushr.2d v1, v9, #12 mul x10, x12, x8 ushr.2d v2, v4, #24 ushr.2d v3, v6, #36 umulh x8, x12, x8 sli.2d v0, v9, #52 adds x9, x10, x9 cinc x8, x8, hs sli.2d v1, v4, #40 adds x6, x9, x6 cinc x8, x8, hs sli.2d v2, v6, #28 sli.2d v3, v5, #16 add x7, x7, x8
junkicide/provekit
17,280
skyscraper/block-multiplier/src/aarch64/montgomery_interleaved_3.s
// GENERATED FILE, DO NOT EDIT! // in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3], // in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3], // in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3], // in("v4") bv[0], in("v5") bv[1], in("v6") bv[2], in("v7") bv[3], // lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3], // lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3], // lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _, lateout("v24") _, // lateout("lr") _ mov x8, #4503599627370495 dup.2d v8, x8 mul x9, x0, x4 mov x10, #5075556780046548992 dup.2d v9, x10 mov x10, #1 umulh x11, x0, x4 movk x10, #18032, lsl 48 dup.2d v10, x10 shl.2d v11, v1, #14 mul x10, x1, x4 shl.2d v12, v2, #26 shl.2d v13, v3, #38 ushr.2d v3, v3, #14 umulh x12, x1, x4 shl.2d v14, v0, #2 usra.2d v11, v0, #50 adds x10, x10, x11 cinc x11, x12, hs usra.2d v12, v1, #38 usra.2d v13, v2, #26 and.16b v0, v14, v8 mul x12, x2, x4 and.16b v1, v11, v8 and.16b v2, v12, v8 and.16b v11, v13, v8 umulh x13, x2, x4 shl.2d v12, v5, #14 shl.2d v13, v6, #26 shl.2d v14, v7, #38 adds x11, x12, x11 cinc x12, x13, hs ushr.2d v7, v7, #14 shl.2d v15, v4, #2 mul x13, x3, x4 usra.2d v12, v4, #50 usra.2d v13, v5, #38 usra.2d v14, v6, #26 umulh x4, x3, x4 and.16b v4, v15, v8 and.16b v5, v12, v8 and.16b v6, v13, v8 adds x12, x13, x12 cinc x4, x4, hs and.16b v12, v14, v8 mov x13, #13605374474286268416 
dup.2d v13, x13 mul x13, x0, x5 mov x14, #6440147467139809280 dup.2d v14, x14 umulh x14, x0, x5 mov x15, #3688448094816436224 dup.2d v15, x15 mov x15, #9209861237972664320 adds x10, x13, x10 cinc x13, x14, hs dup.2d v16, x15 mov x14, #12218265789056155648 dup.2d v17, x14 mul x14, x1, x5 mov x15, #17739678932212383744 dup.2d v18, x15 mov x15, #2301339409586323456 umulh x16, x1, x5 dup.2d v19, x15 mov x15, #7822752552742551552 adds x13, x14, x13 cinc x14, x16, hs dup.2d v20, x15 mov x15, #5071053180419178496 dup.2d v21, x15 adds x11, x13, x11 cinc x13, x14, hs mov x14, #16352570246982270976 dup.2d v22, x14 ucvtf.2d v0, v0 mul x14, x2, x5 ucvtf.2d v1, v1 ucvtf.2d v2, v2 ucvtf.2d v11, v11 umulh x15, x2, x5 ucvtf.2d v3, v3 ucvtf.2d v4, v4 adds x13, x14, x13 cinc x14, x15, hs ucvtf.2d v5, v5 ucvtf.2d v6, v6 ucvtf.2d v12, v12 adds x12, x13, x12 cinc x13, x14, hs ucvtf.2d v7, v7 mov.16b v23, v9 fmla.2d v23, v0, v4 mul x14, x3, x5 fsub.2d v24, v10, v23 fmla.2d v24, v0, v4 add.2d v15, v15, v23 umulh x5, x3, x5 add.2d v13, v13, v24 mov.16b v23, v9 adds x13, x14, x13 cinc x5, x5, hs fmla.2d v23, v0, v5 fsub.2d v24, v10, v23 fmla.2d v24, v0, v5 adds x4, x13, x4 cinc x5, x5, hs add.2d v17, v17, v23 add.2d v15, v15, v24 mov.16b v23, v9 mul x13, x0, x6 fmla.2d v23, v0, v6 fsub.2d v24, v10, v23 fmla.2d v24, v0, v6 umulh x14, x0, x6 add.2d v19, v19, v23 add.2d v17, v17, v24 adds x11, x13, x11 cinc x13, x14, hs mov.16b v23, v9 fmla.2d v23, v0, v12 fsub.2d v24, v10, v23 mul x14, x1, x6 fmla.2d v24, v0, v12 add.2d v21, v21, v23 add.2d v19, v19, v24 umulh x15, x1, x6 mov.16b v23, v9 fmla.2d v23, v0, v7 fsub.2d v24, v10, v23 adds x13, x14, x13 cinc x14, x15, hs fmla.2d v24, v0, v7 add.2d v0, v22, v23 adds x12, x13, x12 cinc x13, x14, hs add.2d v21, v21, v24 mov.16b v22, v9 fmla.2d v22, v1, v4 mul x14, x2, x6 fsub.2d v23, v10, v22 fmla.2d v23, v1, v4 add.2d v17, v17, v22 umulh x15, x2, x6 add.2d v15, v15, v23 mov.16b v22, v9 fmla.2d v22, v1, v5 adds x13, x14, x13 cinc x14, x15, hs fsub.2d 
v23, v10, v22 fmla.2d v23, v1, v5 adds x4, x13, x4 cinc x13, x14, hs add.2d v19, v19, v22 add.2d v17, v17, v23 mov.16b v22, v9 mul x14, x3, x6 fmla.2d v22, v1, v6 fsub.2d v23, v10, v22 fmla.2d v23, v1, v6 umulh x6, x3, x6 add.2d v21, v21, v22 add.2d v19, v19, v23 mov.16b v22, v9 adds x13, x14, x13 cinc x6, x6, hs fmla.2d v22, v1, v12 fsub.2d v23, v10, v22 fmla.2d v23, v1, v12 adds x5, x13, x5 cinc x6, x6, hs add.2d v0, v0, v22 add.2d v21, v21, v23 mul x13, x0, x7 mov.16b v22, v9 fmla.2d v22, v1, v7 fsub.2d v23, v10, v22 umulh x0, x0, x7 fmla.2d v23, v1, v7 add.2d v1, v20, v22 add.2d v0, v0, v23 adds x12, x13, x12 cinc x0, x0, hs mov.16b v20, v9 fmla.2d v20, v2, v4 fsub.2d v22, v10, v20 mul x13, x1, x7 fmla.2d v22, v2, v4 add.2d v19, v19, v20 umulh x1, x1, x7 add.2d v17, v17, v22 mov.16b v20, v9 fmla.2d v20, v2, v5 adds x0, x13, x0 cinc x1, x1, hs fsub.2d v22, v10, v20 fmla.2d v22, v2, v5 add.2d v20, v21, v20 adds x0, x0, x4 cinc x1, x1, hs add.2d v19, v19, v22 mov.16b v21, v9 fmla.2d v21, v2, v6 mul x4, x2, x7 fsub.2d v22, v10, v21 fmla.2d v22, v2, v6 umulh x2, x2, x7 add.2d v0, v0, v21 add.2d v20, v20, v22 mov.16b v21, v9 adds x1, x4, x1 cinc x2, x2, hs fmla.2d v21, v2, v12 fsub.2d v22, v10, v21 fmla.2d v22, v2, v12 adds x1, x1, x5 cinc x2, x2, hs add.2d v1, v1, v21 add.2d v0, v0, v22 mov.16b v21, v9 mul x4, x3, x7 fmla.2d v21, v2, v7 fsub.2d v22, v10, v21 umulh x3, x3, x7 fmla.2d v22, v2, v7 add.2d v2, v18, v21 add.2d v1, v1, v22 adds x2, x4, x2 cinc x3, x3, hs mov.16b v18, v9 fmla.2d v18, v11, v4 fsub.2d v21, v10, v18 adds x2, x2, x6 cinc x3, x3, hs fmla.2d v21, v11, v4 add.2d v18, v20, v18 add.2d v19, v19, v21 mov x4, #48718 mov.16b v20, v9 fmla.2d v20, v11, v5 movk x4, #4732, lsl 16 fsub.2d v21, v10, v20 fmla.2d v21, v11, v5 add.2d v0, v0, v20 movk x4, #45078, lsl 32 add.2d v18, v18, v21 mov.16b v20, v9 fmla.2d v20, v11, v6 movk x4, #39852, lsl 48 fsub.2d v21, v10, v20 fmla.2d v21, v11, v6 add.2d v1, v1, v20 mov x5, #16676 add.2d v0, v0, v21 mov.16b v20, v9 
movk x5, #12692, lsl 16 fmla.2d v20, v11, v12 fsub.2d v21, v10, v20 fmla.2d v21, v11, v12 movk x5, #20986, lsl 32 add.2d v2, v2, v20 add.2d v1, v1, v21 mov.16b v20, v9 movk x5, #2848, lsl 48 fmla.2d v20, v11, v7 fsub.2d v21, v10, v20 fmla.2d v21, v11, v7 mov x6, #51052 add.2d v11, v16, v20 add.2d v2, v2, v21 movk x6, #24721, lsl 16 mov.16b v16, v9 fmla.2d v16, v3, v4 fsub.2d v20, v10, v16 movk x6, #61092, lsl 32 fmla.2d v20, v3, v4 add.2d v0, v0, v16 add.2d v4, v18, v20 movk x6, #45156, lsl 48 mov.16b v16, v9 fmla.2d v16, v3, v5 fsub.2d v18, v10, v16 mov x7, #3197 fmla.2d v18, v3, v5 add.2d v1, v1, v16 movk x7, #18936, lsl 16 add.2d v0, v0, v18 mov.16b v5, v9 fmla.2d v5, v3, v6 movk x7, #10922, lsl 32 fsub.2d v16, v10, v5 fmla.2d v16, v3, v6 add.2d v2, v2, v5 movk x7, #11014, lsl 48 add.2d v1, v1, v16 mov.16b v5, v9 fmla.2d v5, v3, v12 mul x13, x4, x9 fsub.2d v6, v10, v5 fmla.2d v6, v3, v12 umulh x4, x4, x9 add.2d v5, v11, v5 add.2d v2, v2, v6 mov.16b v6, v9 adds x12, x13, x12 cinc x4, x4, hs fmla.2d v6, v3, v7 fsub.2d v11, v10, v6 fmla.2d v11, v3, v7 mul x13, x5, x9 add.2d v3, v14, v6 add.2d v5, v5, v11 usra.2d v15, v13, #52 umulh x5, x5, x9 usra.2d v17, v15, #52 usra.2d v19, v17, #52 usra.2d v4, v19, #52 adds x4, x13, x4 cinc x5, x5, hs and.16b v6, v13, v8 and.16b v7, v15, v8 adds x0, x4, x0 cinc x4, x5, hs and.16b v11, v17, v8 and.16b v8, v19, v8 ucvtf.2d v6, v6 mul x5, x6, x9 mov x13, #37864 movk x13, #1815, lsl 16 movk x13, #28960, lsl 32 umulh x6, x6, x9 movk x13, #17153, lsl 48 dup.2d v12, x13 mov.16b v13, v9 adds x4, x5, x4 cinc x5, x6, hs fmla.2d v13, v6, v12 fsub.2d v14, v10, v13 adds x1, x4, x1 cinc x4, x5, hs fmla.2d v14, v6, v12 add.2d v0, v0, v13 add.2d v4, v4, v14 mul x5, x7, x9 mov x6, #46128 movk x6, #29964, lsl 16 movk x6, #7587, lsl 32 umulh x7, x7, x9 movk x6, #17161, lsl 48 dup.2d v12, x6 mov.16b v13, v9 adds x4, x5, x4 cinc x5, x7, hs fmla.2d v13, v6, v12 fsub.2d v14, v10, v13 adds x2, x4, x2 cinc x4, x5, hs fmla.2d v14, v6, v12 add.2d v1, v1, 
v13 add.2d v0, v0, v14 add x3, x3, x4 mov x4, #52826 movk x4, #57790, lsl 16 movk x4, #55431, lsl 32 mov x5, #56431 movk x4, #17196, lsl 48 dup.2d v12, x4 mov.16b v13, v9 movk x5, #30457, lsl 16 fmla.2d v13, v6, v12 fsub.2d v14, v10, v13 movk x5, #30012, lsl 32 fmla.2d v14, v6, v12 add.2d v2, v2, v13 add.2d v1, v1, v14 movk x5, #6382, lsl 48 mov x4, #31276 movk x4, #21262, lsl 16 movk x4, #2304, lsl 32 mov x6, #59151 movk x4, #17182, lsl 48 dup.2d v12, x4 mov.16b v13, v9 movk x6, #41769, lsl 16 fmla.2d v13, v6, v12 fsub.2d v14, v10, v13 movk x6, #32276, lsl 32 fmla.2d v14, v6, v12 add.2d v5, v5, v13 add.2d v2, v2, v14 movk x6, #21677, lsl 48 mov x4, #28672 movk x4, #24515, lsl 16 movk x4, #54929, lsl 32 mov x7, #34015 movk x4, #17064, lsl 48 dup.2d v12, x4 mov.16b v13, v9 movk x7, #20342, lsl 16 fmla.2d v13, v6, v12 fsub.2d v14, v10, v13 movk x7, #13935, lsl 32 fmla.2d v14, v6, v12 add.2d v3, v3, v13 add.2d v5, v5, v14 movk x7, #11030, lsl 48 ucvtf.2d v6, v7 mov x4, #44768 movk x4, #51919, lsl 16 mov x9, #13689 movk x4, #6346, lsl 32 movk x4, #17133, lsl 48 dup.2d v7, x4 movk x9, #8159, lsl 16 mov.16b v12, v9 fmla.2d v12, v6, v7 movk x9, #215, lsl 32 fsub.2d v13, v10, v12 fmla.2d v13, v6, v7 add.2d v0, v0, v12 movk x9, #4913, lsl 48 add.2d v4, v4, v13 mov x4, #47492 movk x4, #23630, lsl 16 mul x13, x5, x10 movk x4, #49985, lsl 32 movk x4, #17168, lsl 48 dup.2d v7, x4 umulh x4, x5, x10 mov.16b v12, v9 fmla.2d v12, v6, v7 adds x5, x13, x12 cinc x4, x4, hs fsub.2d v13, v10, v12 fmla.2d v13, v6, v7 add.2d v1, v1, v12 mul x12, x6, x10 add.2d v0, v0, v13 mov x13, #57936 movk x13, #54828, lsl 16 umulh x6, x6, x10 movk x13, #18292, lsl 32 movk x13, #17197, lsl 48 dup.2d v7, x13 adds x4, x12, x4 cinc x6, x6, hs mov.16b v12, v9 fmla.2d v12, v6, v7 adds x0, x4, x0 cinc x4, x6, hs fsub.2d v13, v10, v12 fmla.2d v13, v6, v7 add.2d v2, v2, v12 mul x6, x7, x10 add.2d v1, v1, v13 mov x12, #17708 movk x12, #43915, lsl 16 umulh x7, x7, x10 movk x12, #64348, lsl 32 movk x12, #17188, 
lsl 48 dup.2d v7, x12 adds x4, x6, x4 cinc x6, x7, hs mov.16b v12, v9 fmla.2d v12, v6, v7 fsub.2d v13, v10, v12 adds x1, x4, x1 cinc x4, x6, hs fmla.2d v13, v6, v7 add.2d v5, v5, v12 mul x6, x9, x10 add.2d v2, v2, v13 mov x7, #29184 movk x7, #20789, lsl 16 umulh x9, x9, x10 movk x7, #19197, lsl 32 movk x7, #17083, lsl 48 dup.2d v7, x7 adds x4, x6, x4 cinc x6, x9, hs mov.16b v12, v9 fmla.2d v12, v6, v7 fsub.2d v13, v10, v12 adds x2, x4, x2 cinc x4, x6, hs fmla.2d v13, v6, v7 add.2d v3, v3, v12 add x3, x3, x4 add.2d v5, v5, v13 ucvtf.2d v6, v11 mov x4, #58856 mov x6, #61005 movk x4, #14953, lsl 16 movk x4, #15155, lsl 32 movk x4, #17181, lsl 48 movk x6, #58262, lsl 16 dup.2d v7, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 movk x6, #32851, lsl 32 fsub.2d v12, v10, v11 fmla.2d v12, v6, v7 movk x6, #11582, lsl 48 add.2d v0, v0, v11 add.2d v4, v4, v12 mov x4, #35392 mov x7, #37581 movk x4, #12477, lsl 16 movk x4, #56780, lsl 32 movk x4, #17142, lsl 48 movk x7, #43836, lsl 16 dup.2d v7, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 movk x7, #36286, lsl 32 fsub.2d v12, v10, v11 fmla.2d v12, v6, v7 movk x7, #51783, lsl 48 add.2d v1, v1, v11 add.2d v0, v0, v12 mov x4, #9848 mov x9, #10899 movk x4, #54501, lsl 16 movk x4, #31540, lsl 32 movk x4, #17170, lsl 48 movk x9, #30709, lsl 16 dup.2d v7, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 movk x9, #61551, lsl 32 fsub.2d v12, v10, v11 fmla.2d v12, v6, v7 movk x9, #45784, lsl 48 add.2d v2, v2, v11 add.2d v1, v1, v12 mov x4, #9584 mov x10, #36612 movk x4, #63883, lsl 16 movk x4, #18253, lsl 32 movk x4, #17190, lsl 48 movk x10, #63402, lsl 16 dup.2d v7, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 movk x10, #47623, lsl 32 fsub.2d v12, v10, v11 fmla.2d v12, v6, v7 movk x10, #9430, lsl 48 add.2d v5, v5, v11 add.2d v2, v2, v12 mov x4, #51712 mul x12, x6, x11 movk x4, #16093, lsl 16 movk x4, #30633, lsl 32 movk x4, #17068, lsl 48 umulh x6, x6, x11 dup.2d v7, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 adds x4, x12, x5 cinc x5, x6, hs fsub.2d v12, v10, v11 
fmla.2d v12, v6, v7 mul x6, x7, x11 add.2d v3, v3, v11 add.2d v5, v5, v12 ucvtf.2d v6, v8 umulh x7, x7, x11 mov x12, #34724 movk x12, #40393, lsl 16 movk x12, #23752, lsl 32 adds x5, x6, x5 cinc x6, x7, hs movk x12, #17184, lsl 48 dup.2d v7, x12 mov.16b v8, v9 adds x0, x5, x0 cinc x5, x6, hs fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 mul x6, x9, x11 fmla.2d v11, v6, v7 add.2d v0, v0, v8 add.2d v4, v4, v11 umulh x7, x9, x11 mov x9, #25532 movk x9, #31025, lsl 16 movk x9, #10002, lsl 32 adds x5, x6, x5 cinc x6, x7, hs movk x9, #17199, lsl 48 dup.2d v7, x9 mov.16b v8, v9 adds x1, x5, x1 cinc x5, x6, hs fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 mul x6, x10, x11 fmla.2d v11, v6, v7 add.2d v1, v1, v8 add.2d v0, v0, v11 umulh x7, x10, x11 mov x9, #18830 movk x9, #2465, lsl 16 movk x9, #36348, lsl 32 adds x5, x6, x5 cinc x6, x7, hs movk x9, #17194, lsl 48 dup.2d v7, x9 mov.16b v8, v9 adds x2, x5, x2 cinc x5, x6, hs fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 add x3, x3, x5 add.2d v2, v2, v8 add.2d v1, v1, v11 mov x5, #65535 mov x6, #21566 movk x6, #43708, lsl 16 movk x6, #57685, lsl 32 movk x5, #61439, lsl 16 movk x6, #17185, lsl 48 dup.2d v7, x6 mov.16b v8, v9 movk x5, #62867, lsl 32 fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x5, #49889, lsl 48 add.2d v5, v5, v8 add.2d v2, v2, v11 mul x5, x5, x4 mov x6, #3072 movk x6, #8058, lsl 16 movk x6, #46097, lsl 32 mov x7, #1 movk x6, #17047, lsl 48 dup.2d v7, x6 mov.16b v8, v9 movk x7, #61440, lsl 16 fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x7, #62867, lsl 32 add.2d v3, v3, v8 add.2d v5, v5, v11 movk x7, #17377, lsl 48 mov x6, #65535 movk x6, #61439, lsl 16 movk x6, #62867, lsl 32 mov x9, #28817 movk x6, #1, lsl 48 umov x10, v4.d[0] umov x11, v4.d[1] movk x9, #31161, lsl 16 mul x10, x10, x6 mul x6, x11, x6 and x10, x10, x8 movk x9, #59464, lsl 32 and x6, x6, x8 ins v6.d[0], x10 ins v6.d[1], x6 movk x9, #10291, lsl 48 ucvtf.2d v6, v6 mov x6, #16 movk x6, #22847, lsl 32 mov 
x8, #22621 movk x6, #17151, lsl 48 dup.2d v7, x6 mov.16b v8, v9 movk x8, #33153, lsl 16 fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x8, #17846, lsl 32 add.2d v0, v0, v8 add.2d v4, v4, v11 movk x8, #47184, lsl 48 mov x6, #20728 movk x6, #23588, lsl 16 movk x6, #7790, lsl 32 mov x10, #41001 movk x6, #17170, lsl 48 dup.2d v7, x6 mov.16b v8, v9 movk x10, #57649, lsl 16 fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x10, #20082, lsl 32 add.2d v1, v1, v8 add.2d v0, v0, v11 movk x10, #12388, lsl 48 mov x6, #16000 movk x6, #53891, lsl 16 movk x6, #5509, lsl 32 mul x11, x7, x5 movk x6, #17144, lsl 48 dup.2d v7, x6 mov.16b v8, v9 umulh x6, x7, x5 fmla.2d v8, v6, v7 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 cmn x11, x4 cinc x6, x6, hs add.2d v2, v2, v8 add.2d v7, v1, v11 mul x4, x9, x5 mov x7, #46800 movk x7, #2568, lsl 16 movk x7, #1335, lsl 32 umulh x9, x9, x5 movk x7, #17188, lsl 48 dup.2d v1, x7 mov.16b v8, v9 adds x4, x4, x6 cinc x6, x9, hs fmla.2d v8, v6, v1 fsub.2d v11, v10, v8 fmla.2d v11, v6, v1 adds x0, x4, x0 cinc x4, x6, hs add.2d v1, v5, v8 add.2d v5, v2, v11 mul x6, x8, x5 mov x7, #39040 movk x7, #14704, lsl 16 movk x7, #12839, lsl 32 umulh x8, x8, x5 movk x7, #17096, lsl 48 dup.2d v2, x7 mov.16b v8, v9 adds x4, x6, x4 cinc x6, x8, hs fmla.2d v8, v6, v2 fsub.2d v9, v10, v8 fmla.2d v9, v6, v2 adds x1, x4, x1 cinc x4, x6, hs add.2d v6, v3, v8 add.2d v8, v1, v9 mul x6, x10, x5 ssra.2d v0, v4, #52 ssra.2d v7, v0, #52 ssra.2d v5, v7, #52 umulh x5, x10, x5 ssra.2d v8, v5, #52 ssra.2d v6, v8, #52 ushr.2d v1, v7, #12 adds x4, x6, x4 cinc x5, x5, hs ushr.2d v2, v5, #24 ushr.2d v3, v8, #36 sli.2d v0, v7, #52 adds x2, x4, x2 cinc x4, x5, hs sli.2d v1, v5, #40 sli.2d v2, v8, #28 sli.2d v3, v6, #16 add x3, x3, x4
junkicide/provekit
15,672
skyscraper/block-multiplier/src/aarch64/montgomery_square_interleaved_3.s
// GENERATED FILE, DO NOT EDIT! // in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3], // in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3], // lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3], // lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3], // lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, // lateout("lr") _ mov x4, #4503599627370495 dup.2d v4, x4 mul x5, x0, x0 mov x6, #5075556780046548992 dup.2d v5, x6 mov x6, #1 umulh x7, x0, x0 movk x6, #18032, lsl 48 dup.2d v6, x6 mul x6, x0, x1 shl.2d v7, v1, #14 shl.2d v8, v2, #26 shl.2d v9, v3, #38 umulh x8, x0, x1 ushr.2d v3, v3, #14 shl.2d v10, v0, #2 usra.2d v7, v0, #50 adds x7, x6, x7 cinc x9, x8, hs usra.2d v8, v1, #38 usra.2d v9, v2, #26 mul x10, x0, x2 and.16b v0, v10, v4 and.16b v1, v7, v4 and.16b v2, v8, v4 umulh x11, x0, x2 and.16b v7, v9, v4 mov x12, #13605374474286268416 adds x9, x10, x9 cinc x13, x11, hs dup.2d v8, x12 mov x12, #6440147467139809280 dup.2d v9, x12 mul x12, x0, x3 mov x14, #3688448094816436224 dup.2d v10, x14 mov x14, #9209861237972664320 umulh x0, x0, x3 dup.2d v11, x14 mov x14, #12218265789056155648 adds x13, x12, x13 cinc x15, x0, hs dup.2d v12, x14 mov x14, #17739678932212383744 dup.2d v13, x14 adds x6, x6, x7 cinc x7, x8, hs mov x8, #2301339409586323456 dup.2d v14, x8 mov x8, #7822752552742551552 mul x14, x1, x1 dup.2d v15, x8 mov x8, #5071053180419178496 umulh x16, x1, x1 dup.2d v16, x8 mov x8, 
#16352570246982270976 dup.2d v17, x8 adds x7, x14, x7 cinc x8, x16, hs ucvtf.2d v0, v0 ucvtf.2d v1, v1 adds x7, x7, x9 cinc x8, x8, hs ucvtf.2d v2, v2 ucvtf.2d v7, v7 ucvtf.2d v3, v3 mul x9, x1, x2 mov.16b v18, v5 fmla.2d v18, v0, v0 fsub.2d v19, v6, v18 umulh x14, x1, x2 fmla.2d v19, v0, v0 add.2d v10, v10, v18 adds x8, x9, x8 cinc x16, x14, hs add.2d v8, v8, v19 mov.16b v18, v5 fmla.2d v18, v0, v1 adds x8, x8, x13 cinc x13, x16, hs fsub.2d v19, v6, v18 fmla.2d v19, v0, v1 add.2d v18, v18, v18 mul x16, x1, x3 add.2d v19, v19, v19 add.2d v12, v12, v18 umulh x1, x1, x3 add.2d v10, v10, v19 mov.16b v18, v5 fmla.2d v18, v0, v2 adds x13, x16, x13 cinc x17, x1, hs fsub.2d v19, v6, v18 fmla.2d v19, v0, v2 adds x13, x13, x15 cinc x15, x17, hs add.2d v18, v18, v18 add.2d v19, v19, v19 add.2d v14, v14, v18 adds x7, x10, x7 cinc x10, x11, hs add.2d v12, v12, v19 mov.16b v18, v5 fmla.2d v18, v0, v7 adds x9, x9, x10 cinc x10, x14, hs fsub.2d v19, v6, v18 fmla.2d v19, v0, v7 adds x8, x9, x8 cinc x9, x10, hs add.2d v18, v18, v18 add.2d v19, v19, v19 add.2d v16, v16, v18 mul x10, x2, x2 add.2d v14, v14, v19 mov.16b v18, v5 fmla.2d v18, v0, v3 umulh x11, x2, x2 fsub.2d v19, v6, v18 fmla.2d v19, v0, v3 adds x9, x10, x9 cinc x10, x11, hs add.2d v0, v18, v18 add.2d v18, v19, v19 add.2d v0, v17, v0 adds x9, x9, x13 cinc x10, x10, hs add.2d v16, v16, v18 mov.16b v17, v5 mul x11, x2, x3 fmla.2d v17, v1, v1 fsub.2d v18, v6, v17 fmla.2d v18, v1, v1 umulh x2, x2, x3 add.2d v14, v14, v17 add.2d v12, v12, v18 mov.16b v17, v5 adds x10, x11, x10 cinc x13, x2, hs fmla.2d v17, v1, v2 fsub.2d v18, v6, v17 adds x10, x10, x15 cinc x13, x13, hs fmla.2d v18, v1, v2 add.2d v17, v17, v17 add.2d v18, v18, v18 adds x8, x12, x8 cinc x0, x0, hs add.2d v16, v16, v17 add.2d v14, v14, v18 adds x0, x16, x0 cinc x1, x1, hs mov.16b v17, v5 fmla.2d v17, v1, v7 fsub.2d v18, v6, v17 adds x0, x0, x9 cinc x1, x1, hs fmla.2d v18, v1, v7 add.2d v17, v17, v17 add.2d v18, v18, v18 adds x1, x11, x1 cinc x2, x2, hs add.2d 
v0, v0, v17 add.2d v16, v16, v18 adds x1, x1, x10 cinc x2, x2, hs mov.16b v17, v5 fmla.2d v17, v1, v3 fsub.2d v18, v6, v17 mul x9, x3, x3 fmla.2d v18, v1, v3 add.2d v1, v17, v17 add.2d v17, v18, v18 umulh x3, x3, x3 add.2d v1, v15, v1 add.2d v0, v0, v17 adds x2, x9, x2 cinc x3, x3, hs mov.16b v15, v5 fmla.2d v15, v2, v2 fsub.2d v17, v6, v15 adds x2, x2, x13 cinc x3, x3, hs fmla.2d v17, v2, v2 add.2d v0, v0, v15 mov x9, #48718 add.2d v15, v16, v17 mov.16b v16, v5 fmla.2d v16, v2, v7 movk x9, #4732, lsl 16 fsub.2d v17, v6, v16 fmla.2d v17, v2, v7 add.2d v16, v16, v16 movk x9, #45078, lsl 32 add.2d v17, v17, v17 add.2d v1, v1, v16 movk x9, #39852, lsl 48 add.2d v0, v0, v17 mov.16b v16, v5 fmla.2d v16, v2, v3 mov x10, #16676 fsub.2d v17, v6, v16 fmla.2d v17, v2, v3 add.2d v2, v16, v16 movk x10, #12692, lsl 16 add.2d v16, v17, v17 add.2d v2, v13, v2 movk x10, #20986, lsl 32 add.2d v1, v1, v16 mov.16b v13, v5 fmla.2d v13, v7, v7 movk x10, #2848, lsl 48 fsub.2d v16, v6, v13 fmla.2d v16, v7, v7 mov x11, #51052 add.2d v2, v2, v13 add.2d v1, v1, v16 mov.16b v13, v5 movk x11, #24721, lsl 16 fmla.2d v13, v7, v3 fsub.2d v16, v6, v13 fmla.2d v16, v7, v3 movk x11, #61092, lsl 32 add.2d v7, v13, v13 add.2d v13, v16, v16 movk x11, #45156, lsl 48 add.2d v7, v11, v7 add.2d v2, v2, v13 mov.16b v11, v5 mov x12, #3197 fmla.2d v11, v3, v3 fsub.2d v13, v6, v11 fmla.2d v13, v3, v3 movk x12, #18936, lsl 16 add.2d v3, v9, v11 add.2d v7, v7, v13 movk x12, #10922, lsl 32 usra.2d v10, v8, #52 usra.2d v12, v10, #52 usra.2d v14, v12, #52 movk x12, #11014, lsl 48 usra.2d v15, v14, #52 and.16b v8, v8, v4 mul x13, x9, x5 and.16b v9, v10, v4 and.16b v10, v12, v4 and.16b v4, v14, v4 umulh x9, x9, x5 ucvtf.2d v8, v8 mov x14, #37864 movk x14, #1815, lsl 16 adds x8, x13, x8 cinc x9, x9, hs movk x14, #28960, lsl 32 movk x14, #17153, lsl 48 mul x13, x10, x5 dup.2d v11, x14 mov.16b v12, v5 fmla.2d v12, v8, v11 umulh x10, x10, x5 fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 add.2d v0, v0, v12 adds x9, x13, x9 
cinc x10, x10, hs add.2d v11, v15, v13 mov x13, #46128 adds x0, x9, x0 cinc x9, x10, hs movk x13, #29964, lsl 16 movk x13, #7587, lsl 32 movk x13, #17161, lsl 48 mul x10, x11, x5 dup.2d v12, x13 mov.16b v13, v5 umulh x11, x11, x5 fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 adds x9, x10, x9 cinc x10, x11, hs add.2d v1, v1, v13 add.2d v0, v0, v14 mov x11, #52826 adds x1, x9, x1 cinc x9, x10, hs movk x11, #57790, lsl 16 movk x11, #55431, lsl 32 mul x10, x12, x5 movk x11, #17196, lsl 48 dup.2d v12, x11 mov.16b v13, v5 umulh x5, x12, x5 fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 adds x9, x10, x9 cinc x5, x5, hs fmla.2d v14, v8, v12 add.2d v2, v2, v13 add.2d v1, v1, v14 adds x2, x9, x2 cinc x5, x5, hs mov x9, #31276 movk x9, #21262, lsl 16 movk x9, #2304, lsl 32 add x3, x3, x5 movk x9, #17182, lsl 48 dup.2d v12, x9 mov x5, #56431 mov.16b v13, v5 fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 movk x5, #30457, lsl 16 fmla.2d v14, v8, v12 add.2d v7, v7, v13 add.2d v2, v2, v14 movk x5, #30012, lsl 32 mov x9, #28672 movk x9, #24515, lsl 16 movk x5, #6382, lsl 48 movk x9, #54929, lsl 32 movk x9, #17064, lsl 48 dup.2d v12, x9 mov x9, #59151 mov.16b v13, v5 fmla.2d v13, v8, v12 movk x9, #41769, lsl 16 fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 add.2d v3, v3, v13 movk x9, #32276, lsl 32 add.2d v7, v7, v14 ucvtf.2d v8, v9 mov x10, #44768 movk x9, #21677, lsl 48 movk x10, #51919, lsl 16 movk x10, #6346, lsl 32 mov x11, #34015 movk x10, #17133, lsl 48 dup.2d v9, x10 mov.16b v12, v5 movk x11, #20342, lsl 16 fmla.2d v12, v8, v9 fsub.2d v13, v6, v12 fmla.2d v13, v8, v9 movk x11, #13935, lsl 32 add.2d v0, v0, v12 add.2d v9, v11, v13 movk x11, #11030, lsl 48 mov x10, #47492 movk x10, #23630, lsl 16 movk x10, #49985, lsl 32 mov x12, #13689 movk x10, #17168, lsl 48 dup.2d v11, x10 movk x12, #8159, lsl 16 mov.16b v12, v5 fmla.2d v12, v8, v11 fsub.2d v13, v6, v12 movk x12, #215, lsl 32 fmla.2d v13, v8, v11 add.2d v1, v1, v12 add.2d v0, v0, v13 movk x12, #4913, lsl 48 mov x10, 
#57936 movk x10, #54828, lsl 16 mul x13, x5, x6 movk x10, #18292, lsl 32 movk x10, #17197, lsl 48 dup.2d v11, x10 umulh x5, x5, x6 mov.16b v12, v5 fmla.2d v12, v8, v11 fsub.2d v13, v6, v12 adds x8, x13, x8 cinc x5, x5, hs fmla.2d v13, v8, v11 add.2d v2, v2, v12 mul x10, x9, x6 add.2d v1, v1, v13 mov x13, #17708 movk x13, #43915, lsl 16 umulh x9, x9, x6 movk x13, #64348, lsl 32 movk x13, #17188, lsl 48 adds x5, x10, x5 cinc x9, x9, hs dup.2d v11, x13 mov.16b v12, v5 fmla.2d v12, v8, v11 adds x0, x5, x0 cinc x5, x9, hs fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 add.2d v7, v7, v12 mul x9, x11, x6 add.2d v2, v2, v13 mov x10, #29184 umulh x11, x11, x6 movk x10, #20789, lsl 16 movk x10, #19197, lsl 32 movk x10, #17083, lsl 48 adds x5, x9, x5 cinc x9, x11, hs dup.2d v11, x10 mov.16b v12, v5 fmla.2d v12, v8, v11 adds x1, x5, x1 cinc x5, x9, hs fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 mul x9, x12, x6 add.2d v3, v3, v12 add.2d v7, v7, v13 ucvtf.2d v8, v10 umulh x6, x12, x6 mov x10, #58856 movk x10, #14953, lsl 16 adds x5, x9, x5 cinc x6, x6, hs movk x10, #15155, lsl 32 movk x10, #17181, lsl 48 dup.2d v10, x10 adds x2, x5, x2 cinc x5, x6, hs mov.16b v11, v5 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 add x3, x3, x5 fmla.2d v12, v8, v10 add.2d v0, v0, v11 mov x5, #61005 add.2d v9, v9, v12 mov x6, #35392 movk x6, #12477, lsl 16 movk x5, #58262, lsl 16 movk x6, #56780, lsl 32 movk x6, #17142, lsl 48 movk x5, #32851, lsl 32 dup.2d v10, x6 mov.16b v11, v5 fmla.2d v11, v8, v10 movk x5, #11582, lsl 48 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 add.2d v1, v1, v11 mov x6, #37581 add.2d v0, v0, v12 mov x9, #9848 movk x6, #43836, lsl 16 movk x9, #54501, lsl 16 movk x9, #31540, lsl 32 movk x9, #17170, lsl 48 movk x6, #36286, lsl 32 dup.2d v10, x9 mov.16b v11, v5 fmla.2d v11, v8, v10 movk x6, #51783, lsl 48 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 mov x9, #10899 add.2d v2, v2, v11 add.2d v1, v1, v12 mov x10, #9584 movk x9, #30709, lsl 16 movk x10, #63883, lsl 16 movk x10, #18253, lsl 32 
movk x9, #61551, lsl 32 movk x10, #17190, lsl 48 dup.2d v10, x10 mov.16b v11, v5 movk x9, #45784, lsl 48 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 mov x10, #36612 add.2d v7, v7, v11 add.2d v2, v2, v12 movk x10, #63402, lsl 16 mov x11, #51712 movk x11, #16093, lsl 16 movk x11, #30633, lsl 32 movk x10, #47623, lsl 32 movk x11, #17068, lsl 48 dup.2d v10, x11 mov.16b v11, v5 movk x10, #9430, lsl 48 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 mul x11, x5, x7 fmla.2d v12, v8, v10 add.2d v3, v3, v11 add.2d v7, v7, v12 umulh x5, x5, x7 ucvtf.2d v4, v4 mov x12, #34724 adds x8, x11, x8 cinc x5, x5, hs movk x12, #40393, lsl 16 movk x12, #23752, lsl 32 movk x12, #17184, lsl 48 mul x11, x6, x7 dup.2d v8, x12 mov.16b v10, v5 fmla.2d v10, v4, v8 umulh x6, x6, x7 fsub.2d v11, v6, v10 fmla.2d v11, v4, v8 adds x5, x11, x5 cinc x6, x6, hs add.2d v0, v0, v10 add.2d v8, v9, v11 mov x11, #25532 adds x0, x5, x0 cinc x5, x6, hs movk x11, #31025, lsl 16 movk x11, #10002, lsl 32 movk x11, #17199, lsl 48 mul x6, x9, x7 dup.2d v9, x11 mov.16b v10, v5 umulh x9, x9, x7 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 adds x5, x6, x5 cinc x6, x9, hs add.2d v1, v1, v10 add.2d v0, v0, v11 adds x1, x5, x1 cinc x5, x6, hs mov x6, #18830 movk x6, #2465, lsl 16 movk x6, #36348, lsl 32 mul x9, x10, x7 movk x6, #17194, lsl 48 dup.2d v9, x6 mov.16b v10, v5 umulh x6, x10, x7 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 adds x5, x9, x5 cinc x6, x6, hs fmla.2d v11, v4, v9 add.2d v2, v2, v10 add.2d v1, v1, v11 adds x2, x5, x2 cinc x5, x6, hs mov x6, #21566 movk x6, #43708, lsl 16 movk x6, #57685, lsl 32 add x3, x3, x5 movk x6, #17185, lsl 48 dup.2d v9, x6 mov x5, #65535 mov.16b v10, v5 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 movk x5, #61439, lsl 16 fmla.2d v11, v4, v9 add.2d v7, v7, v10 movk x5, #62867, lsl 32 add.2d v2, v2, v11 mov x6, #3072 movk x6, #8058, lsl 16 movk x5, #49889, lsl 48 movk x6, #46097, lsl 32 movk x6, #17047, lsl 48 dup.2d v9, x6 mul x5, x5, x8 mov.16b v10, v5 
fmla.2d v10, v4, v9 mov x6, #1 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 add.2d v3, v3, v10 movk x6, #61440, lsl 16 add.2d v4, v7, v11 mov x7, #65535 movk x6, #62867, lsl 32 movk x7, #61439, lsl 16 movk x7, #62867, lsl 32 movk x7, #1, lsl 48 movk x6, #17377, lsl 48 umov x9, v8.d[0] umov x10, v8.d[1] mul x9, x9, x7 mov x11, #28817 mul x7, x10, x7 and x9, x9, x4 movk x11, #31161, lsl 16 and x4, x7, x4 ins v7.d[0], x9 ins v7.d[1], x4 ucvtf.2d v7, v7 movk x11, #59464, lsl 32 mov x4, #16 movk x4, #22847, lsl 32 movk x4, #17151, lsl 48 movk x11, #10291, lsl 48 dup.2d v9, x4 mov.16b v10, v5 mov x4, #22621 fmla.2d v10, v7, v9 fsub.2d v11, v6, v10 fmla.2d v11, v7, v9 movk x4, #33153, lsl 16 add.2d v0, v0, v10 add.2d v8, v8, v11 movk x4, #17846, lsl 32 mov x7, #20728 movk x7, #23588, lsl 16 movk x7, #7790, lsl 32 movk x4, #47184, lsl 48 movk x7, #17170, lsl 48 dup.2d v9, x7 mov.16b v10, v5 mov x7, #41001 fmla.2d v10, v7, v9 fsub.2d v11, v6, v10 movk x7, #57649, lsl 16 fmla.2d v11, v7, v9 add.2d v1, v1, v10 add.2d v0, v0, v11 movk x7, #20082, lsl 32 mov x9, #16000 movk x9, #53891, lsl 16 movk x9, #5509, lsl 32 movk x7, #12388, lsl 48 movk x9, #17144, lsl 48 dup.2d v9, x9 mul x9, x6, x5 mov.16b v10, v5 fmla.2d v10, v7, v9 fsub.2d v11, v6, v10 umulh x6, x6, x5 fmla.2d v11, v7, v9 add.2d v2, v2, v10 cmn x9, x8 cinc x6, x6, hs add.2d v9, v1, v11 mov x8, #46800 movk x8, #2568, lsl 16 mul x9, x11, x5 movk x8, #1335, lsl 32 movk x8, #17188, lsl 48 dup.2d v1, x8 umulh x8, x11, x5 mov.16b v10, v5 fmla.2d v10, v7, v1 adds x6, x9, x6 cinc x8, x8, hs fsub.2d v11, v6, v10 fmla.2d v11, v7, v1 add.2d v1, v4, v10 adds x0, x6, x0 cinc x6, x8, hs add.2d v4, v2, v11 mov x8, #39040 movk x8, #14704, lsl 16 mul x9, x4, x5 movk x8, #12839, lsl 32 movk x8, #17096, lsl 48 umulh x4, x4, x5 dup.2d v2, x8 mov.16b v5, v5 fmla.2d v5, v7, v2 adds x6, x9, x6 cinc x4, x4, hs fsub.2d v6, v6, v5 fmla.2d v6, v7, v2 adds x1, x6, x1 cinc x4, x4, hs add.2d v5, v3, v5 add.2d v6, v1, v6 ssra.2d v0, v8, #52 mul x6, 
x7, x5 ssra.2d v9, v0, #52 ssra.2d v4, v9, #52 ssra.2d v6, v4, #52 umulh x5, x7, x5 ssra.2d v5, v6, #52 ushr.2d v1, v9, #12 adds x4, x6, x4 cinc x5, x5, hs ushr.2d v2, v4, #24 ushr.2d v3, v6, #36 sli.2d v0, v9, #52 adds x2, x4, x2 cinc x4, x5, hs sli.2d v1, v4, #40 sli.2d v2, v6, #28 sli.2d v3, v5, #16 add x3, x3, x4
junkicide/provekit
22,743
skyscraper/block-multiplier/src/aarch64/montgomery_interleaved_4.s
// GENERATED FILE, DO NOT EDIT! // in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3], // in("x4") b[0], in("x5") b[1], in("x6") b[2], in("x7") b[3], // in("x8") a1[0], in("x9") a1[1], in("x10") a1[2], in("x11") a1[3], // in("x12") b1[0], in("x13") b1[1], in("x14") b1[2], in("x15") b1[3], // in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3], // in("v4") bv[0], in("v5") bv[1], in("v6") bv[2], in("v7") bv[3], // lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3], // lateout("x4") out1[0], lateout("x5") out1[1], lateout("x6") out1[2], lateout("x7") out1[3], // lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3], // lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("x20") _, lateout("x21") _, lateout("x22") _, lateout("x23") _, lateout("x24") _, lateout("x25") _, lateout("x26") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, lateout("v20") _, lateout("v21") _, lateout("v22") _, lateout("v23") _, lateout("v24") _, // lateout("lr") _ mov x16, #4503599627370495 mul x17, x0, x4 dup.2d v8, x16 umulh x20, x0, x4 mov x21, #5075556780046548992 dup.2d v9, x21 mul x21, x1, x4 mov x22, #1 umulh x23, x1, x4 movk x22, #18032, lsl 48 adds x20, x21, x20 cinc x21, x23, hs dup.2d v10, x22 shl.2d v11, v1, #14 mul x22, x2, x4 shl.2d v12, v2, #26 umulh x23, x2, x4 shl.2d v13, v3, #38 ushr.2d v3, v3, #14 adds x21, x22, x21 cinc x22, x23, hs shl.2d v14, v0, #2 mul x23, x3, x4 usra.2d v11, v0, #50 umulh x4, x3, x4 usra.2d v12, v1, #38 usra.2d v13, v2, #26 adds x22, x23, x22 cinc x4, x4, hs and.16b v0, v14, v8 mul x23, x0, x5 
and.16b v1, v11, v8 umulh x24, x0, x5 and.16b v2, v12, v8 and.16b v11, v13, v8 adds x20, x23, x20 cinc x23, x24, hs shl.2d v12, v5, #14 mul x24, x1, x5 shl.2d v13, v6, #26 shl.2d v14, v7, #38 umulh x25, x1, x5 ushr.2d v7, v7, #14 adds x23, x24, x23 cinc x24, x25, hs shl.2d v15, v4, #2 adds x21, x23, x21 cinc x23, x24, hs usra.2d v12, v4, #50 usra.2d v13, v5, #38 mul x24, x2, x5 usra.2d v14, v6, #26 umulh x25, x2, x5 and.16b v4, v15, v8 adds x23, x24, x23 cinc x24, x25, hs and.16b v5, v12, v8 and.16b v6, v13, v8 adds x22, x23, x22 cinc x23, x24, hs and.16b v12, v14, v8 mul x24, x3, x5 mov x25, #13605374474286268416 dup.2d v13, x25 umulh x5, x3, x5 mov x25, #6440147467139809280 adds x23, x24, x23 cinc x5, x5, hs dup.2d v14, x25 adds x4, x23, x4 cinc x5, x5, hs mov x23, #3688448094816436224 dup.2d v15, x23 mul x23, x0, x6 mov x24, #9209861237972664320 umulh x25, x0, x6 dup.2d v16, x24 adds x21, x23, x21 cinc x23, x25, hs mov x24, #12218265789056155648 dup.2d v17, x24 mul x24, x1, x6 mov x25, #17739678932212383744 umulh x26, x1, x6 dup.2d v18, x25 mov x25, #2301339409586323456 adds x23, x24, x23 cinc x24, x26, hs dup.2d v19, x25 adds x22, x23, x22 cinc x23, x24, hs mov x24, #7822752552742551552 mul x25, x2, x6 dup.2d v20, x24 mov x24, #5071053180419178496 umulh x26, x2, x6 dup.2d v21, x24 adds x23, x25, x23 cinc x24, x26, hs mov x25, #16352570246982270976 adds x4, x23, x4 cinc x23, x24, hs dup.2d v22, x25 ucvtf.2d v0, v0 mul x24, x3, x6 ucvtf.2d v1, v1 umulh x6, x3, x6 ucvtf.2d v2, v2 ucvtf.2d v11, v11 adds x23, x24, x23 cinc x6, x6, hs ucvtf.2d v3, v3 adds x5, x23, x5 cinc x6, x6, hs ucvtf.2d v4, v4 mul x23, x0, x7 ucvtf.2d v5, v5 ucvtf.2d v6, v6 umulh x0, x0, x7 ucvtf.2d v12, v12 adds x22, x23, x22 cinc x0, x0, hs ucvtf.2d v7, v7 mov.16b v23, v9 mul x23, x1, x7 fmla.2d v23, v0, v4 umulh x1, x1, x7 fsub.2d v24, v10, v23 adds x0, x23, x0 cinc x1, x1, hs fmla.2d v24, v0, v4 add.2d v15, v15, v23 adds x0, x0, x4 cinc x1, x1, hs add.2d v13, v13, v24 mul x4, x2, x7 mov.16b 
v23, v9 umulh x2, x2, x7 fmla.2d v23, v0, v5 fsub.2d v24, v10, v23 adds x1, x4, x1 cinc x2, x2, hs fmla.2d v24, v0, v5 adds x1, x1, x5 cinc x2, x2, hs add.2d v17, v17, v23 add.2d v15, v15, v24 mul x4, x3, x7 mov.16b v23, v9 umulh x3, x3, x7 fmla.2d v23, v0, v6 adds x2, x4, x2 cinc x3, x3, hs fsub.2d v24, v10, v23 fmla.2d v24, v0, v6 adds x2, x2, x6 cinc x3, x3, hs add.2d v19, v19, v23 mov x4, #48718 add.2d v17, v17, v24 movk x4, #4732, lsl 16 mov.16b v23, v9 fmla.2d v23, v0, v12 movk x4, #45078, lsl 32 fsub.2d v24, v10, v23 movk x4, #39852, lsl 48 fmla.2d v24, v0, v12 add.2d v21, v21, v23 mov x5, #16676 add.2d v19, v19, v24 movk x5, #12692, lsl 16 mov.16b v23, v9 movk x5, #20986, lsl 32 fmla.2d v23, v0, v7 fsub.2d v24, v10, v23 movk x5, #2848, lsl 48 fmla.2d v24, v0, v7 mov x6, #51052 add.2d v0, v22, v23 movk x6, #24721, lsl 16 add.2d v21, v21, v24 mov.16b v22, v9 movk x6, #61092, lsl 32 fmla.2d v22, v1, v4 movk x6, #45156, lsl 48 fsub.2d v23, v10, v22 fmla.2d v23, v1, v4 mov x7, #3197 add.2d v17, v17, v22 movk x7, #18936, lsl 16 add.2d v15, v15, v23 movk x7, #10922, lsl 32 mov.16b v22, v9 fmla.2d v22, v1, v5 movk x7, #11014, lsl 48 fsub.2d v23, v10, v22 mul x23, x4, x17 fmla.2d v23, v1, v5 umulh x4, x4, x17 add.2d v19, v19, v22 add.2d v17, v17, v23 adds x22, x23, x22 cinc x4, x4, hs mov.16b v22, v9 mul x23, x5, x17 fmla.2d v22, v1, v6 fsub.2d v23, v10, v22 umulh x5, x5, x17 fmla.2d v23, v1, v6 adds x4, x23, x4 cinc x5, x5, hs add.2d v21, v21, v22 adds x0, x4, x0 cinc x4, x5, hs add.2d v19, v19, v23 mov.16b v22, v9 mul x5, x6, x17 fmla.2d v22, v1, v12 umulh x6, x6, x17 fsub.2d v23, v10, v22 fmla.2d v23, v1, v12 adds x4, x5, x4 cinc x5, x6, hs add.2d v0, v0, v22 adds x1, x4, x1 cinc x4, x5, hs add.2d v21, v21, v23 mul x5, x7, x17 mov.16b v22, v9 fmla.2d v22, v1, v7 umulh x6, x7, x17 fsub.2d v23, v10, v22 adds x4, x5, x4 cinc x5, x6, hs fmla.2d v23, v1, v7 adds x2, x4, x2 cinc x4, x5, hs add.2d v1, v20, v22 add.2d v0, v0, v23 add x3, x3, x4 mov.16b v20, v9 mov x4, 
#56431 fmla.2d v20, v2, v4 fsub.2d v22, v10, v20 movk x4, #30457, lsl 16 fmla.2d v22, v2, v4 movk x4, #30012, lsl 32 add.2d v19, v19, v20 movk x4, #6382, lsl 48 add.2d v17, v17, v22 mov.16b v20, v9 mov x5, #59151 fmla.2d v20, v2, v5 movk x5, #41769, lsl 16 fsub.2d v22, v10, v20 movk x5, #32276, lsl 32 fmla.2d v22, v2, v5 add.2d v20, v21, v20 movk x5, #21677, lsl 48 add.2d v19, v19, v22 mov x6, #34015 mov.16b v21, v9 fmla.2d v21, v2, v6 movk x6, #20342, lsl 16 fsub.2d v22, v10, v21 movk x6, #13935, lsl 32 fmla.2d v22, v2, v6 movk x6, #11030, lsl 48 add.2d v0, v0, v21 add.2d v20, v20, v22 mov x7, #13689 mov.16b v21, v9 movk x7, #8159, lsl 16 fmla.2d v21, v2, v12 movk x7, #215, lsl 32 fsub.2d v22, v10, v21 fmla.2d v22, v2, v12 movk x7, #4913, lsl 48 add.2d v1, v1, v21 mul x17, x4, x20 add.2d v0, v0, v22 mov.16b v21, v9 umulh x4, x4, x20 fmla.2d v21, v2, v7 adds x17, x17, x22 cinc x4, x4, hs fsub.2d v22, v10, v21 mul x22, x5, x20 fmla.2d v22, v2, v7 add.2d v2, v18, v21 umulh x5, x5, x20 add.2d v1, v1, v22 adds x4, x22, x4 cinc x5, x5, hs mov.16b v18, v9 adds x0, x4, x0 cinc x4, x5, hs fmla.2d v18, v11, v4 fsub.2d v21, v10, v18 mul x5, x6, x20 fmla.2d v21, v11, v4 umulh x6, x6, x20 add.2d v18, v20, v18 add.2d v19, v19, v21 adds x4, x5, x4 cinc x5, x6, hs mov.16b v20, v9 adds x1, x4, x1 cinc x4, x5, hs fmla.2d v20, v11, v5 mul x5, x7, x20 fsub.2d v21, v10, v20 fmla.2d v21, v11, v5 umulh x6, x7, x20 add.2d v0, v0, v20 adds x4, x5, x4 cinc x5, x6, hs add.2d v18, v18, v21 mov.16b v20, v9 adds x2, x4, x2 cinc x4, x5, hs fmla.2d v20, v11, v6 add x3, x3, x4 fsub.2d v21, v10, v20 mov x4, #61005 fmla.2d v21, v11, v6 add.2d v1, v1, v20 movk x4, #58262, lsl 16 add.2d v0, v0, v21 movk x4, #32851, lsl 32 mov.16b v20, v9 movk x4, #11582, lsl 48 fmla.2d v20, v11, v12 fsub.2d v21, v10, v20 mov x5, #37581 fmla.2d v21, v11, v12 movk x5, #43836, lsl 16 add.2d v2, v2, v20 add.2d v1, v1, v21 movk x5, #36286, lsl 32 mov.16b v20, v9 movk x5, #51783, lsl 48 fmla.2d v20, v11, v7 mov x6, #10899 
fsub.2d v21, v10, v20 fmla.2d v21, v11, v7 movk x6, #30709, lsl 16 add.2d v11, v16, v20 movk x6, #61551, lsl 32 add.2d v2, v2, v21 movk x6, #45784, lsl 48 mov.16b v16, v9 fmla.2d v16, v3, v4 mov x7, #36612 fsub.2d v20, v10, v16 movk x7, #63402, lsl 16 fmla.2d v20, v3, v4 add.2d v0, v0, v16 movk x7, #47623, lsl 32 add.2d v4, v18, v20 movk x7, #9430, lsl 48 mov.16b v16, v9 mul x20, x4, x21 fmla.2d v16, v3, v5 fsub.2d v18, v10, v16 umulh x4, x4, x21 fmla.2d v18, v3, v5 adds x17, x20, x17 cinc x4, x4, hs add.2d v1, v1, v16 mul x20, x5, x21 add.2d v0, v0, v18 mov.16b v5, v9 umulh x5, x5, x21 fmla.2d v5, v3, v6 adds x4, x20, x4 cinc x5, x5, hs fsub.2d v16, v10, v5 fmla.2d v16, v3, v6 adds x0, x4, x0 cinc x4, x5, hs add.2d v2, v2, v5 mul x5, x6, x21 add.2d v1, v1, v16 umulh x6, x6, x21 mov.16b v5, v9 fmla.2d v5, v3, v12 adds x4, x5, x4 cinc x5, x6, hs fsub.2d v6, v10, v5 adds x1, x4, x1 cinc x4, x5, hs fmla.2d v6, v3, v12 mul x5, x7, x21 add.2d v5, v11, v5 add.2d v2, v2, v6 umulh x6, x7, x21 mov.16b v6, v9 adds x4, x5, x4 cinc x5, x6, hs fmla.2d v6, v3, v7 fsub.2d v11, v10, v6 adds x2, x4, x2 cinc x4, x5, hs fmla.2d v11, v3, v7 add x3, x3, x4 add.2d v3, v14, v6 mov x4, #65535 add.2d v5, v5, v11 usra.2d v15, v13, #52 movk x4, #61439, lsl 16 usra.2d v17, v15, #52 movk x4, #62867, lsl 32 usra.2d v19, v17, #52 usra.2d v4, v19, #52 movk x4, #49889, lsl 48 and.16b v6, v13, v8 mul x4, x4, x17 and.16b v7, v15, v8 mov x5, #1 and.16b v11, v17, v8 and.16b v8, v19, v8 movk x5, #61440, lsl 16 ucvtf.2d v6, v6 movk x5, #62867, lsl 32 mov x6, #37864 movk x5, #17377, lsl 48 movk x6, #1815, lsl 16 movk x6, #28960, lsl 32 mov x7, #28817 movk x6, #17153, lsl 48 movk x7, #31161, lsl 16 dup.2d v12, x6 mov.16b v13, v9 movk x7, #59464, lsl 32 fmla.2d v13, v6, v12 movk x7, #10291, lsl 48 fsub.2d v14, v10, v13 mov x6, #22621 fmla.2d v14, v6, v12 add.2d v0, v0, v13 movk x6, #33153, lsl 16 add.2d v4, v4, v14 movk x6, #17846, lsl 32 mov x20, #46128 movk x6, #47184, lsl 48 movk x20, #29964, lsl 16 
movk x20, #7587, lsl 32 mov x21, #41001 movk x20, #17161, lsl 48 movk x21, #57649, lsl 16 dup.2d v12, x20 mov.16b v13, v9 movk x21, #20082, lsl 32 fmla.2d v13, v6, v12 movk x21, #12388, lsl 48 fsub.2d v14, v10, v13 mul x20, x5, x4 fmla.2d v14, v6, v12 add.2d v1, v1, v13 umulh x5, x5, x4 add.2d v0, v0, v14 cmn x20, x17 cinc x5, x5, hs mov x17, #52826 mul x20, x7, x4 movk x17, #57790, lsl 16 movk x17, #55431, lsl 32 umulh x7, x7, x4 movk x17, #17196, lsl 48 adds x5, x20, x5 cinc x7, x7, hs dup.2d v12, x17 mov.16b v13, v9 adds x0, x5, x0 cinc x5, x7, hs fmla.2d v13, v6, v12 mul x7, x6, x4 fsub.2d v14, v10, v13 umulh x6, x6, x4 fmla.2d v14, v6, v12 add.2d v2, v2, v13 adds x5, x7, x5 cinc x6, x6, hs add.2d v1, v1, v14 adds x1, x5, x1 cinc x5, x6, hs mov x6, #31276 mul x7, x21, x4 movk x6, #21262, lsl 16 movk x6, #2304, lsl 32 umulh x4, x21, x4 movk x6, #17182, lsl 48 adds x5, x7, x5 cinc x4, x4, hs dup.2d v12, x6 mov.16b v13, v9 adds x2, x5, x2 cinc x4, x4, hs fmla.2d v13, v6, v12 add x3, x3, x4 fsub.2d v14, v10, v13 mul x4, x8, x12 fmla.2d v14, v6, v12 add.2d v5, v5, v13 umulh x5, x8, x12 add.2d v2, v2, v14 mul x6, x9, x12 mov x7, #28672 movk x7, #24515, lsl 16 umulh x17, x9, x12 movk x7, #54929, lsl 32 adds x5, x6, x5 cinc x6, x17, hs movk x7, #17064, lsl 48 mul x17, x10, x12 dup.2d v12, x7 mov.16b v13, v9 umulh x7, x10, x12 fmla.2d v13, v6, v12 adds x6, x17, x6 cinc x7, x7, hs fsub.2d v14, v10, v13 mul x17, x11, x12 fmla.2d v14, v6, v12 add.2d v3, v3, v13 umulh x12, x11, x12 add.2d v5, v5, v14 adds x7, x17, x7 cinc x12, x12, hs ucvtf.2d v6, v7 mov x17, #44768 mul x20, x8, x13 movk x17, #51919, lsl 16 umulh x21, x8, x13 movk x17, #6346, lsl 32 adds x5, x20, x5 cinc x20, x21, hs movk x17, #17133, lsl 48 dup.2d v7, x17 mul x17, x9, x13 mov.16b v12, v9 umulh x21, x9, x13 fmla.2d v12, v6, v7 adds x17, x17, x20 cinc x20, x21, hs fsub.2d v13, v10, v12 fmla.2d v13, v6, v7 adds x6, x17, x6 cinc x17, x20, hs add.2d v0, v0, v12 mul x20, x10, x13 add.2d v4, v4, v13 mov x21, 
#47492 umulh x22, x10, x13 movk x21, #23630, lsl 16 adds x17, x20, x17 cinc x20, x22, hs movk x21, #49985, lsl 32 adds x7, x17, x7 cinc x17, x20, hs movk x21, #17168, lsl 48 dup.2d v7, x21 mul x20, x11, x13 mov.16b v12, v9 umulh x13, x11, x13 fmla.2d v12, v6, v7 adds x17, x20, x17 cinc x13, x13, hs fsub.2d v13, v10, v12 fmla.2d v13, v6, v7 adds x12, x17, x12 cinc x13, x13, hs add.2d v1, v1, v12 mul x17, x8, x14 add.2d v0, v0, v13 mov x20, #57936 umulh x21, x8, x14 movk x20, #54828, lsl 16 adds x6, x17, x6 cinc x17, x21, hs movk x20, #18292, lsl 32 mul x21, x9, x14 movk x20, #17197, lsl 48 dup.2d v7, x20 umulh x20, x9, x14 mov.16b v12, v9 adds x17, x21, x17 cinc x20, x20, hs fmla.2d v12, v6, v7 adds x7, x17, x7 cinc x17, x20, hs fsub.2d v13, v10, v12 fmla.2d v13, v6, v7 mul x20, x10, x14 add.2d v2, v2, v12 umulh x21, x10, x14 add.2d v1, v1, v13 mov x22, #17708 adds x17, x20, x17 cinc x20, x21, hs movk x22, #43915, lsl 16 adds x12, x17, x12 cinc x17, x20, hs movk x22, #64348, lsl 32 mul x20, x11, x14 movk x22, #17188, lsl 48 dup.2d v7, x22 umulh x14, x11, x14 mov.16b v12, v9 adds x17, x20, x17 cinc x14, x14, hs fmla.2d v12, v6, v7 fsub.2d v13, v10, v12 adds x13, x17, x13 cinc x14, x14, hs fmla.2d v13, v6, v7 mul x17, x8, x15 add.2d v5, v5, v12 umulh x8, x8, x15 add.2d v2, v2, v13 mov x20, #29184 adds x7, x17, x7 cinc x8, x8, hs movk x20, #20789, lsl 16 mul x17, x9, x15 movk x20, #19197, lsl 32 umulh x9, x9, x15 movk x20, #17083, lsl 48 dup.2d v7, x20 adds x8, x17, x8 cinc x9, x9, hs mov.16b v12, v9 adds x8, x8, x12 cinc x9, x9, hs fmla.2d v12, v6, v7 fsub.2d v13, v10, v12 mul x12, x10, x15 fmla.2d v13, v6, v7 umulh x10, x10, x15 add.2d v3, v3, v12 adds x9, x12, x9 cinc x10, x10, hs add.2d v5, v5, v13 ucvtf.2d v6, v11 adds x9, x9, x13 cinc x10, x10, hs mov x12, #58856 mul x13, x11, x15 movk x12, #14953, lsl 16 umulh x11, x11, x15 movk x12, #15155, lsl 32 movk x12, #17181, lsl 48 adds x10, x13, x10 cinc x11, x11, hs dup.2d v7, x12 adds x10, x10, x14 cinc x11, x11, hs 
mov.16b v11, v9 fmla.2d v11, v6, v7 mov x12, #48718 fsub.2d v12, v10, v11 movk x12, #4732, lsl 16 fmla.2d v12, v6, v7 movk x12, #45078, lsl 32 add.2d v0, v0, v11 add.2d v4, v4, v12 movk x12, #39852, lsl 48 mov x13, #35392 mov x14, #16676 movk x13, #12477, lsl 16 movk x14, #12692, lsl 16 movk x13, #56780, lsl 32 movk x13, #17142, lsl 48 movk x14, #20986, lsl 32 dup.2d v7, x13 movk x14, #2848, lsl 48 mov.16b v11, v9 fmla.2d v11, v6, v7 mov x13, #51052 fsub.2d v12, v10, v11 movk x13, #24721, lsl 16 fmla.2d v12, v6, v7 movk x13, #61092, lsl 32 add.2d v1, v1, v11 add.2d v0, v0, v12 movk x13, #45156, lsl 48 mov x15, #9848 mov x17, #3197 movk x15, #54501, lsl 16 movk x17, #18936, lsl 16 movk x15, #31540, lsl 32 movk x15, #17170, lsl 48 movk x17, #10922, lsl 32 dup.2d v7, x15 movk x17, #11014, lsl 48 mov.16b v11, v9 fmla.2d v11, v6, v7 mul x15, x12, x4 fsub.2d v12, v10, v11 umulh x12, x12, x4 fmla.2d v12, v6, v7 adds x7, x15, x7 cinc x12, x12, hs add.2d v2, v2, v11 add.2d v1, v1, v12 mul x15, x14, x4 mov x20, #9584 umulh x14, x14, x4 movk x20, #63883, lsl 16 movk x20, #18253, lsl 32 adds x12, x15, x12 cinc x14, x14, hs movk x20, #17190, lsl 48 adds x8, x12, x8 cinc x12, x14, hs dup.2d v7, x20 mul x14, x13, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 umulh x13, x13, x4 fsub.2d v12, v10, v11 adds x12, x14, x12 cinc x13, x13, hs fmla.2d v12, v6, v7 adds x9, x12, x9 cinc x12, x13, hs add.2d v5, v5, v11 add.2d v2, v2, v12 mul x13, x17, x4 mov x14, #51712 umulh x4, x17, x4 movk x14, #16093, lsl 16 movk x14, #30633, lsl 32 adds x12, x13, x12 cinc x4, x4, hs movk x14, #17068, lsl 48 adds x10, x12, x10 cinc x4, x4, hs dup.2d v7, x14 add x4, x11, x4 mov.16b v11, v9 fmla.2d v11, v6, v7 mov x11, #56431 fsub.2d v12, v10, v11 movk x11, #30457, lsl 16 fmla.2d v12, v6, v7 movk x11, #30012, lsl 32 add.2d v3, v3, v11 add.2d v5, v5, v12 movk x11, #6382, lsl 48 ucvtf.2d v6, v8 mov x12, #59151 mov x13, #34724 movk x13, #40393, lsl 16 movk x12, #41769, lsl 16 movk x13, #23752, lsl 32 movk x12, 
#32276, lsl 32 movk x13, #17184, lsl 48 movk x12, #21677, lsl 48 dup.2d v7, x13 mov.16b v8, v9 mov x13, #34015 fmla.2d v8, v6, v7 movk x13, #20342, lsl 16 fsub.2d v11, v10, v8 movk x13, #13935, lsl 32 fmla.2d v11, v6, v7 add.2d v0, v0, v8 movk x13, #11030, lsl 48 add.2d v4, v4, v11 mov x14, #13689 mov x15, #25532 movk x15, #31025, lsl 16 movk x14, #8159, lsl 16 movk x15, #10002, lsl 32 movk x14, #215, lsl 32 movk x15, #17199, lsl 48 movk x14, #4913, lsl 48 dup.2d v7, x15 mov.16b v8, v9 mul x15, x11, x5 fmla.2d v8, v6, v7 umulh x11, x11, x5 fsub.2d v11, v10, v8 adds x7, x15, x7 cinc x11, x11, hs fmla.2d v11, v6, v7 add.2d v1, v1, v8 mul x15, x12, x5 add.2d v0, v0, v11 umulh x12, x12, x5 mov x17, #18830 movk x17, #2465, lsl 16 adds x11, x15, x11 cinc x12, x12, hs movk x17, #36348, lsl 32 adds x8, x11, x8 cinc x11, x12, hs movk x17, #17194, lsl 48 mul x12, x13, x5 dup.2d v7, x17 mov.16b v8, v9 umulh x13, x13, x5 fmla.2d v8, v6, v7 adds x11, x12, x11 cinc x12, x13, hs fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 adds x9, x11, x9 cinc x11, x12, hs add.2d v2, v2, v8 mul x12, x14, x5 add.2d v1, v1, v11 umulh x5, x14, x5 mov x13, #21566 movk x13, #43708, lsl 16 adds x11, x12, x11 cinc x5, x5, hs movk x13, #57685, lsl 32 adds x10, x11, x10 cinc x5, x5, hs movk x13, #17185, lsl 48 add x4, x4, x5 dup.2d v7, x13 mov.16b v8, v9 mov x5, #61005 fmla.2d v8, v6, v7 movk x5, #58262, lsl 16 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x5, #32851, lsl 32 add.2d v5, v5, v8 movk x5, #11582, lsl 48 add.2d v2, v2, v11 mov x11, #37581 mov x12, #3072 movk x12, #8058, lsl 16 movk x11, #43836, lsl 16 movk x12, #46097, lsl 32 movk x11, #36286, lsl 32 movk x12, #17047, lsl 48 movk x11, #51783, lsl 48 dup.2d v7, x12 mov.16b v8, v9 mov x12, #10899 fmla.2d v8, v6, v7 movk x12, #30709, lsl 16 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x12, #61551, lsl 32 add.2d v3, v3, v8 movk x12, #45784, lsl 48 add.2d v5, v5, v11 mov x13, #36612 mov x14, #65535 movk x14, #61439, lsl 16 movk x13, #63402, lsl 16 
movk x14, #62867, lsl 32 movk x13, #47623, lsl 32 movk x14, #1, lsl 48 movk x13, #9430, lsl 48 umov x15, v4.d[0] umov x17, v4.d[1] mul x20, x5, x6 mul x15, x15, x14 umulh x5, x5, x6 mul x14, x17, x14 and x15, x15, x16 adds x7, x20, x7 cinc x5, x5, hs and x14, x14, x16 mul x16, x11, x6 ins v6.d[0], x15 ins v6.d[1], x14 umulh x11, x11, x6 ucvtf.2d v6, v6 mov x14, #16 adds x5, x16, x5 cinc x11, x11, hs movk x14, #22847, lsl 32 adds x5, x5, x8 cinc x8, x11, hs movk x14, #17151, lsl 48 mul x11, x12, x6 dup.2d v7, x14 mov.16b v8, v9 umulh x12, x12, x6 fmla.2d v8, v6, v7 adds x8, x11, x8 cinc x11, x12, hs fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 adds x8, x8, x9 cinc x9, x11, hs add.2d v0, v0, v8 mul x11, x13, x6 add.2d v4, v4, v11 umulh x6, x13, x6 mov x12, #20728 movk x12, #23588, lsl 16 adds x9, x11, x9 cinc x6, x6, hs movk x12, #7790, lsl 32 adds x9, x9, x10 cinc x6, x6, hs movk x12, #17170, lsl 48 dup.2d v7, x12 add x10, x4, x6 mov.16b v8, v9 mov x4, #65535 fmla.2d v8, v6, v7 movk x4, #61439, lsl 16 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x4, #62867, lsl 32 add.2d v1, v1, v8 movk x4, #49889, lsl 48 add.2d v0, v0, v11 mul x6, x4, x7 mov x4, #16000 movk x4, #53891, lsl 16 mov x11, #1 movk x4, #5509, lsl 32 movk x11, #61440, lsl 16 movk x4, #17144, lsl 48 dup.2d v7, x4 movk x11, #62867, lsl 32 mov.16b v8, v9 movk x11, #17377, lsl 48 fmla.2d v8, v6, v7 mov x4, #28817 fsub.2d v11, v10, v8 fmla.2d v11, v6, v7 movk x4, #31161, lsl 16 add.2d v2, v2, v8 movk x4, #59464, lsl 32 add.2d v7, v1, v11 movk x4, #10291, lsl 48 mov x12, #46800 movk x12, #2568, lsl 16 mov x13, #22621 movk x12, #1335, lsl 32 movk x13, #33153, lsl 16 movk x12, #17188, lsl 48 dup.2d v1, x12 movk x13, #17846, lsl 32 mov.16b v8, v9 movk x13, #47184, lsl 48 fmla.2d v8, v6, v1 mov x12, #41001 fsub.2d v11, v10, v8 fmla.2d v11, v6, v1 movk x12, #57649, lsl 16 add.2d v1, v5, v8 movk x12, #20082, lsl 32 add.2d v5, v2, v11 movk x12, #12388, lsl 48 mov x14, #39040 movk x14, #14704, lsl 16 mul x15, x11, x6 movk 
x14, #12839, lsl 32 umulh x11, x11, x6 movk x14, #17096, lsl 48 dup.2d v2, x14 cmn x15, x7 cinc x11, x11, hs mov.16b v8, v9 mul x7, x4, x6 fmla.2d v8, v6, v2 umulh x4, x4, x6 fsub.2d v9, v10, v8 fmla.2d v9, v6, v2 adds x7, x7, x11 cinc x11, x4, hs add.2d v6, v3, v8 adds x4, x7, x5 cinc x5, x11, hs add.2d v8, v1, v9 mul x7, x13, x6 ssra.2d v0, v4, #52 ssra.2d v7, v0, #52 umulh x11, x13, x6 ssra.2d v5, v7, #52 adds x5, x7, x5 cinc x7, x11, hs ssra.2d v8, v5, #52 ssra.2d v6, v8, #52 adds x5, x5, x8 cinc x7, x7, hs ushr.2d v1, v7, #12 mul x8, x12, x6 ushr.2d v2, v5, #24 umulh x6, x12, x6 ushr.2d v3, v8, #36 sli.2d v0, v7, #52 adds x7, x8, x7 cinc x8, x6, hs sli.2d v1, v5, #40 adds x6, x7, x9 cinc x7, x8, hs sli.2d v2, v8, #28 sli.2d v3, v6, #16 add x7, x10, x7
junkicide/provekit
15,320
skyscraper/block-multiplier/src/aarch64/montgomery_square_log_interleaved_3.s
// GENERATED FILE, DO NOT EDIT! // in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3], // in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3], // lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3], // lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3], // lateout("x4") _, lateout("x5") _, lateout("x6") _, lateout("x7") _, lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, // lateout("lr") _ mov x4, #4503599627370495 dup.2d v4, x4 mul x5, x0, x0 mov x6, #5075556780046548992 dup.2d v5, x6 mov x6, #1 umulh x7, x0, x0 movk x6, #18032, lsl 48 dup.2d v6, x6 shl.2d v7, v1, #14 mul x6, x0, x1 shl.2d v8, v2, #26 shl.2d v9, v3, #38 ushr.2d v3, v3, #14 umulh x8, x0, x1 shl.2d v10, v0, #2 usra.2d v7, v0, #50 usra.2d v8, v1, #38 adds x7, x6, x7 cinc x9, x8, hs usra.2d v9, v2, #26 and.16b v0, v10, v4 and.16b v1, v7, v4 mul x10, x0, x2 and.16b v2, v8, v4 and.16b v7, v9, v4 mov x11, #13605374474286268416 umulh x12, x0, x2 dup.2d v8, x11 mov x11, #6440147467139809280 dup.2d v9, x11 adds x9, x10, x9 cinc x11, x12, hs mov x13, #3688448094816436224 dup.2d v10, x13 mul x13, x0, x3 mov x14, #9209861237972664320 dup.2d v11, x14 mov x14, #12218265789056155648 umulh x0, x0, x3 dup.2d v12, x14 mov x14, #17739678932212383744 dup.2d v13, x14 adds x11, x13, x11 cinc x14, x0, hs mov x15, #2301339409586323456 dup.2d v14, x15 mov x15, #7822752552742551552 adds x6, x6, x7 cinc x7, x8, hs dup.2d v15, x15 mov x8, #5071053180419178496 dup.2d v16, x8 mul x8, x1, x1 mov x15, #16352570246982270976 dup.2d v17, 
x15 ucvtf.2d v0, v0 umulh x15, x1, x1 ucvtf.2d v1, v1 ucvtf.2d v2, v2 ucvtf.2d v7, v7 adds x7, x8, x7 cinc x8, x15, hs ucvtf.2d v3, v3 mov.16b v18, v5 fmla.2d v18, v0, v0 adds x7, x7, x9 cinc x8, x8, hs fsub.2d v19, v6, v18 fmla.2d v19, v0, v0 mul x9, x1, x2 add.2d v10, v10, v18 add.2d v8, v8, v19 mov.16b v18, v5 umulh x15, x1, x2 fmla.2d v18, v0, v1 fsub.2d v19, v6, v18 fmla.2d v19, v0, v1 adds x8, x9, x8 cinc x16, x15, hs add.2d v18, v18, v18 add.2d v19, v19, v19 add.2d v12, v12, v18 adds x8, x8, x11 cinc x11, x16, hs add.2d v10, v10, v19 mov.16b v18, v5 fmla.2d v18, v0, v2 mul x16, x1, x3 fsub.2d v19, v6, v18 fmla.2d v19, v0, v2 add.2d v18, v18, v18 umulh x1, x1, x3 add.2d v19, v19, v19 add.2d v14, v14, v18 add.2d v12, v12, v19 adds x11, x16, x11 cinc x17, x1, hs mov.16b v18, v5 fmla.2d v18, v0, v7 fsub.2d v19, v6, v18 adds x11, x11, x14 cinc x14, x17, hs fmla.2d v19, v0, v7 add.2d v18, v18, v18 add.2d v19, v19, v19 adds x7, x10, x7 cinc x10, x12, hs add.2d v16, v16, v18 add.2d v14, v14, v19 adds x9, x9, x10 cinc x10, x15, hs mov.16b v18, v5 fmla.2d v18, v0, v3 fsub.2d v19, v6, v18 adds x8, x9, x8 cinc x9, x10, hs fmla.2d v19, v0, v3 add.2d v0, v18, v18 add.2d v18, v19, v19 mul x10, x2, x2 add.2d v0, v17, v0 add.2d v16, v16, v18 mov.16b v17, v5 umulh x12, x2, x2 fmla.2d v17, v1, v1 fsub.2d v18, v6, v17 fmla.2d v18, v1, v1 adds x9, x10, x9 cinc x10, x12, hs add.2d v14, v14, v17 add.2d v12, v12, v18 mov.16b v17, v5 adds x9, x9, x11 cinc x10, x10, hs fmla.2d v17, v1, v2 fsub.2d v18, v6, v17 fmla.2d v18, v1, v2 mul x11, x2, x3 add.2d v17, v17, v17 add.2d v18, v18, v18 add.2d v16, v16, v17 umulh x2, x2, x3 add.2d v14, v14, v18 mov.16b v17, v5 adds x10, x11, x10 cinc x12, x2, hs fmla.2d v17, v1, v7 fsub.2d v18, v6, v17 fmla.2d v18, v1, v7 adds x10, x10, x14 cinc x12, x12, hs add.2d v17, v17, v17 add.2d v18, v18, v18 add.2d v0, v0, v17 adds x8, x13, x8 cinc x0, x0, hs add.2d v16, v16, v18 mov.16b v17, v5 fmla.2d v17, v1, v3 adds x0, x16, x0 cinc x1, x1, hs fsub.2d v18, 
v6, v17 fmla.2d v18, v1, v3 add.2d v1, v17, v17 adds x0, x0, x9 cinc x1, x1, hs add.2d v17, v18, v18 add.2d v1, v15, v1 add.2d v0, v0, v17 adds x1, x11, x1 cinc x2, x2, hs mov.16b v15, v5 fmla.2d v15, v2, v2 fsub.2d v17, v6, v15 adds x1, x1, x10 cinc x2, x2, hs fmla.2d v17, v2, v2 add.2d v0, v0, v15 add.2d v15, v16, v17 mul x9, x3, x3 mov.16b v16, v5 fmla.2d v16, v2, v7 fsub.2d v17, v6, v16 umulh x3, x3, x3 fmla.2d v17, v2, v7 add.2d v16, v16, v16 adds x2, x9, x2 cinc x3, x3, hs add.2d v17, v17, v17 add.2d v1, v1, v16 add.2d v0, v0, v17 adds x2, x2, x12 cinc x3, x3, hs mov.16b v16, v5 fmla.2d v16, v2, v3 fsub.2d v17, v6, v16 mov x9, #56431 fmla.2d v17, v2, v3 add.2d v2, v16, v16 add.2d v16, v17, v17 movk x9, #30457, lsl 16 add.2d v2, v13, v2 add.2d v1, v1, v16 mov.16b v13, v5 movk x9, #30012, lsl 32 fmla.2d v13, v7, v7 fsub.2d v16, v6, v13 fmla.2d v16, v7, v7 movk x9, #6382, lsl 48 add.2d v2, v2, v13 add.2d v1, v1, v16 mov.16b v13, v5 mov x10, #59151 fmla.2d v13, v7, v3 fsub.2d v16, v6, v13 fmla.2d v16, v7, v3 movk x10, #41769, lsl 16 add.2d v7, v13, v13 add.2d v13, v16, v16 movk x10, #32276, lsl 32 add.2d v7, v11, v7 add.2d v2, v2, v13 mov.16b v11, v5 movk x10, #21677, lsl 48 fmla.2d v11, v3, v3 fsub.2d v13, v6, v11 fmla.2d v13, v3, v3 mov x11, #34015 add.2d v3, v9, v11 add.2d v7, v7, v13 usra.2d v10, v8, #52 movk x11, #20342, lsl 16 usra.2d v12, v10, #52 usra.2d v14, v12, #52 usra.2d v15, v14, #52 movk x11, #13935, lsl 32 and.16b v8, v8, v4 and.16b v9, v10, v4 and.16b v10, v12, v4 movk x11, #11030, lsl 48 and.16b v4, v14, v4 ucvtf.2d v8, v8 mov x12, #37864 mov x13, #13689 movk x12, #1815, lsl 16 movk x12, #28960, lsl 32 movk x12, #17153, lsl 48 movk x13, #8159, lsl 16 dup.2d v11, x12 mov.16b v12, v5 fmla.2d v12, v8, v11 movk x13, #215, lsl 32 fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 movk x13, #4913, lsl 48 add.2d v0, v0, v12 add.2d v11, v15, v13 mov x12, #46128 mul x14, x9, x5 movk x12, #29964, lsl 16 movk x12, #7587, lsl 32 movk x12, #17161, lsl 48 umulh x15, 
x9, x5 dup.2d v12, x12 mov.16b v13, v5 fmla.2d v13, v8, v12 adds x7, x14, x7 cinc x12, x15, hs fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 add.2d v1, v1, v13 mul x14, x10, x5 add.2d v0, v0, v14 mov x15, #52826 movk x15, #57790, lsl 16 umulh x16, x10, x5 movk x15, #55431, lsl 32 movk x15, #17196, lsl 48 dup.2d v12, x15 adds x12, x14, x12 cinc x14, x16, hs mov.16b v13, v5 fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 adds x8, x12, x8 cinc x12, x14, hs fmla.2d v14, v8, v12 add.2d v2, v2, v13 mul x14, x11, x5 add.2d v1, v1, v14 mov x15, #31276 movk x15, #21262, lsl 16 umulh x16, x11, x5 movk x15, #2304, lsl 32 movk x15, #17182, lsl 48 dup.2d v12, x15 adds x12, x14, x12 cinc x14, x16, hs mov.16b v13, v5 fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 adds x0, x12, x0 cinc x12, x14, hs fmla.2d v14, v8, v12 add.2d v7, v7, v13 add.2d v2, v2, v14 mul x14, x13, x5 mov x15, #28672 movk x15, #24515, lsl 16 movk x15, #54929, lsl 32 umulh x5, x13, x5 movk x15, #17064, lsl 48 dup.2d v12, x15 mov.16b v13, v5 adds x12, x14, x12 cinc x5, x5, hs fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 adds x1, x12, x1 cinc x5, x5, hs add.2d v3, v3, v13 add.2d v7, v7, v14 ucvtf.2d v8, v9 adds x2, x2, x5 cinc x3, x3, hs mov x5, #44768 movk x5, #51919, lsl 16 mul x12, x9, x6 movk x5, #6346, lsl 32 movk x5, #17133, lsl 48 dup.2d v9, x5 umulh x5, x9, x6 mov.16b v12, v5 fmla.2d v12, v8, v9 fsub.2d v13, v6, v12 adds x8, x12, x8 cinc x5, x5, hs fmla.2d v13, v8, v9 add.2d v0, v0, v12 add.2d v9, v11, v13 mul x9, x10, x6 mov x12, #47492 movk x12, #23630, lsl 16 movk x12, #49985, lsl 32 umulh x10, x10, x6 movk x12, #17168, lsl 48 dup.2d v11, x12 mov.16b v12, v5 adds x5, x9, x5 cinc x9, x10, hs fmla.2d v12, v8, v11 fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 adds x0, x5, x0 cinc x5, x9, hs add.2d v1, v1, v12 add.2d v0, v0, v13 mov x9, #57936 mul x10, x11, x6 movk x9, #54828, lsl 16 movk x9, #18292, lsl 32 umulh x11, x11, x6 movk x9, #17197, lsl 48 dup.2d v11, x9 mov.16b v12, v5 adds x5, x10, x5 cinc x9, 
x11, hs fmla.2d v12, v8, v11 fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 adds x1, x5, x1 cinc x5, x9, hs add.2d v2, v2, v12 add.2d v1, v1, v13 mov x9, #17708 mul x10, x13, x6 movk x9, #43915, lsl 16 movk x9, #64348, lsl 32 movk x9, #17188, lsl 48 umulh x6, x13, x6 dup.2d v11, x9 mov.16b v12, v5 fmla.2d v12, v8, v11 adds x5, x10, x5 cinc x6, x6, hs fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 add.2d v7, v7, v12 adds x2, x5, x2 cinc x5, x6, hs add.2d v2, v2, v13 mov x6, #29184 movk x6, #20789, lsl 16 add x3, x3, x5 movk x6, #19197, lsl 32 movk x6, #17083, lsl 48 dup.2d v11, x6 mov x5, #61005 mov.16b v12, v5 fmla.2d v12, v8, v11 movk x5, #58262, lsl 16 fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 add.2d v3, v3, v12 movk x5, #32851, lsl 32 add.2d v7, v7, v13 ucvtf.2d v8, v10 mov x6, #58856 movk x5, #11582, lsl 48 movk x6, #14953, lsl 16 movk x6, #15155, lsl 32 movk x6, #17181, lsl 48 mov x9, #37581 dup.2d v10, x6 mov.16b v11, v5 fmla.2d v11, v8, v10 movk x9, #43836, lsl 16 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 add.2d v0, v0, v11 movk x9, #36286, lsl 32 add.2d v9, v9, v12 mov x6, #35392 movk x6, #12477, lsl 16 movk x9, #51783, lsl 48 movk x6, #56780, lsl 32 movk x6, #17142, lsl 48 dup.2d v10, x6 mov x6, #10899 mov.16b v11, v5 fmla.2d v11, v8, v10 movk x6, #30709, lsl 16 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 add.2d v1, v1, v11 movk x6, #61551, lsl 32 add.2d v0, v0, v12 mov x10, #9848 movk x10, #54501, lsl 16 movk x6, #45784, lsl 48 movk x10, #31540, lsl 32 movk x10, #17170, lsl 48 dup.2d v10, x10 mov x10, #36612 mov.16b v11, v5 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 movk x10, #63402, lsl 16 fmla.2d v12, v8, v10 add.2d v2, v2, v11 add.2d v1, v1, v12 movk x10, #47623, lsl 32 mov x11, #9584 movk x11, #63883, lsl 16 movk x11, #18253, lsl 32 movk x10, #9430, lsl 48 movk x11, #17190, lsl 48 dup.2d v10, x11 mov.16b v11, v5 mul x11, x5, x7 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 umulh x5, x5, x7 add.2d v7, v7, v11 add.2d v2, v2, v12 adds x8, x11, x8 
cinc x5, x5, hs mov x11, #51712 movk x11, #16093, lsl 16 movk x11, #30633, lsl 32 mul x12, x9, x7 movk x11, #17068, lsl 48 dup.2d v10, x11 mov.16b v11, v5 umulh x9, x9, x7 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 adds x5, x12, x5 cinc x9, x9, hs add.2d v3, v3, v11 add.2d v7, v7, v12 ucvtf.2d v4, v4 adds x0, x5, x0 cinc x5, x9, hs mov x9, #34724 movk x9, #40393, lsl 16 movk x9, #23752, lsl 32 mul x11, x6, x7 movk x9, #17184, lsl 48 dup.2d v8, x9 mov.16b v10, v5 umulh x6, x6, x7 fmla.2d v10, v4, v8 fsub.2d v11, v6, v10 fmla.2d v11, v4, v8 adds x5, x11, x5 cinc x6, x6, hs add.2d v0, v0, v10 add.2d v8, v9, v11 adds x1, x5, x1 cinc x5, x6, hs mov x6, #25532 movk x6, #31025, lsl 16 movk x6, #10002, lsl 32 mul x9, x10, x7 movk x6, #17199, lsl 48 dup.2d v9, x6 mov.16b v10, v5 umulh x6, x10, x7 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 adds x5, x9, x5 cinc x6, x6, hs add.2d v1, v1, v10 add.2d v0, v0, v11 mov x7, #18830 adds x2, x5, x2 cinc x5, x6, hs movk x7, #2465, lsl 16 movk x7, #36348, lsl 32 movk x7, #17194, lsl 48 add x3, x3, x5 dup.2d v9, x7 mov.16b v10, v5 fmla.2d v10, v4, v9 mov x5, #65535 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 add.2d v2, v2, v10 movk x5, #61439, lsl 16 add.2d v1, v1, v11 mov x6, #21566 movk x6, #43708, lsl 16 movk x5, #62867, lsl 32 movk x6, #57685, lsl 32 movk x6, #17185, lsl 48 movk x5, #49889, lsl 48 dup.2d v9, x6 mov.16b v10, v5 fmla.2d v10, v4, v9 mul x5, x5, x8 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 add.2d v7, v7, v10 mov x6, #1 add.2d v2, v2, v11 mov x7, #3072 movk x7, #8058, lsl 16 movk x6, #61440, lsl 16 movk x7, #46097, lsl 32 movk x7, #17047, lsl 48 dup.2d v9, x7 movk x6, #62867, lsl 32 mov.16b v10, v5 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 movk x6, #17377, lsl 48 fmla.2d v11, v4, v9 add.2d v3, v3, v10 add.2d v4, v7, v11 mov x7, #28817 mov x9, #65535 movk x9, #61439, lsl 16 movk x9, #62867, lsl 32 movk x7, #31161, lsl 16 movk x9, #1, lsl 48 umov x10, v8.d[0] movk x7, #59464, lsl 32 umov 
x11, v8.d[1] mul x10, x10, x9 mul x9, x11, x9 movk x7, #10291, lsl 48 and x10, x10, x4 and x4, x9, x4 ins v7.d[0], x10 ins v7.d[1], x4 mov x4, #22621 ucvtf.2d v7, v7 mov x9, #16 movk x9, #22847, lsl 32 movk x4, #33153, lsl 16 movk x9, #17151, lsl 48 dup.2d v9, x9 mov.16b v10, v5 movk x4, #17846, lsl 32 fmla.2d v10, v7, v9 fsub.2d v11, v6, v10 fmla.2d v11, v7, v9 movk x4, #47184, lsl 48 add.2d v0, v0, v10 add.2d v8, v8, v11 mov x9, #20728 mov x10, #41001 movk x9, #23588, lsl 16 movk x9, #7790, lsl 32 movk x9, #17170, lsl 48 movk x10, #57649, lsl 16 dup.2d v9, x9 mov.16b v10, v5 fmla.2d v10, v7, v9 movk x10, #20082, lsl 32 fsub.2d v11, v6, v10 fmla.2d v11, v7, v9 movk x10, #12388, lsl 48 add.2d v1, v1, v10 add.2d v0, v0, v11 mov x9, #16000 mul x11, x6, x5 movk x9, #53891, lsl 16 movk x9, #5509, lsl 32 movk x9, #17144, lsl 48 umulh x6, x6, x5 dup.2d v9, x9 mov.16b v10, v5 fmla.2d v10, v7, v9 cmn x11, x8 cinc x6, x6, hs fsub.2d v11, v6, v10 fmla.2d v11, v7, v9 add.2d v2, v2, v10 mul x8, x7, x5 add.2d v9, v1, v11 mov x9, #46800 movk x9, #2568, lsl 16 umulh x7, x7, x5 movk x9, #1335, lsl 32 movk x9, #17188, lsl 48 dup.2d v1, x9 adds x6, x8, x6 cinc x7, x7, hs mov.16b v10, v5 fmla.2d v10, v7, v1 fsub.2d v11, v6, v10 adds x0, x6, x0 cinc x6, x7, hs fmla.2d v11, v7, v1 add.2d v1, v4, v10 mul x7, x4, x5 add.2d v4, v2, v11 mov x8, #39040 movk x8, #14704, lsl 16 umulh x4, x4, x5 movk x8, #12839, lsl 32 movk x8, #17096, lsl 48 dup.2d v2, x8 adds x6, x7, x6 cinc x4, x4, hs mov.16b v5, v5 fmla.2d v5, v7, v2 fsub.2d v6, v6, v5 adds x1, x6, x1 cinc x4, x4, hs fmla.2d v6, v7, v2 add.2d v5, v3, v5 add.2d v6, v1, v6 mul x6, x10, x5 ssra.2d v0, v8, #52 ssra.2d v9, v0, #52 ssra.2d v4, v9, #52 umulh x5, x10, x5 ssra.2d v6, v4, #52 ssra.2d v5, v6, #52 ushr.2d v1, v9, #12 adds x4, x6, x4 cinc x5, x5, hs ushr.2d v2, v4, #24 ushr.2d v3, v6, #36 sli.2d v0, v9, #52 adds x2, x4, x2 cinc x4, x5, hs sli.2d v1, v4, #40 sli.2d v2, v6, #28 sli.2d v3, v5, #16 add x3, x3, x4
junkicide/provekit
20,246
skyscraper/block-multiplier/src/aarch64/montgomery_square_log_interleaved_4.s
// GENERATED FILE, DO NOT EDIT! // in("x0") a[0], in("x1") a[1], in("x2") a[2], in("x3") a[3], // in("x4") a1[0], in("x5") a1[1], in("x6") a1[2], in("x7") a1[3], // in("v0") av[0], in("v1") av[1], in("v2") av[2], in("v3") av[3], // lateout("x0") out[0], lateout("x1") out[1], lateout("x2") out[2], lateout("x3") out[3], // lateout("x4") out1[0], lateout("x5") out1[1], lateout("x6") out1[2], lateout("x7") out1[3], // lateout("v0") outv[0], lateout("v1") outv[1], lateout("v2") outv[2], lateout("v3") outv[3], // lateout("x8") _, lateout("x9") _, lateout("x10") _, lateout("x11") _, lateout("x12") _, lateout("x13") _, lateout("x14") _, lateout("x15") _, lateout("x16") _, lateout("x17") _, lateout("x20") _, lateout("x21") _, lateout("x22") _, lateout("x23") _, lateout("x24") _, lateout("v4") _, lateout("v5") _, lateout("v6") _, lateout("v7") _, lateout("v8") _, lateout("v9") _, lateout("v10") _, lateout("v11") _, lateout("v12") _, lateout("v13") _, lateout("v14") _, lateout("v15") _, lateout("v16") _, lateout("v17") _, lateout("v18") _, lateout("v19") _, // lateout("lr") _ mov x8, #4503599627370495 mul x9, x0, x0 dup.2d v4, x8 umulh x10, x0, x0 mov x11, #5075556780046548992 dup.2d v5, x11 mul x11, x0, x1 mov x12, #1 umulh x13, x0, x1 movk x12, #18032, lsl 48 dup.2d v6, x12 adds x10, x11, x10 cinc x12, x13, hs shl.2d v7, v1, #14 mul x14, x0, x2 shl.2d v8, v2, #26 shl.2d v9, v3, #38 umulh x15, x0, x2 ushr.2d v3, v3, #14 adds x12, x14, x12 cinc x16, x15, hs shl.2d v10, v0, #2 mul x17, x0, x3 usra.2d v7, v0, #50 usra.2d v8, v1, #38 umulh x0, x0, x3 usra.2d v9, v2, #26 adds x16, x17, x16 cinc x20, x0, hs and.16b v0, v10, v4 and.16b v1, v7, v4 adds x10, x11, x10 cinc x11, x13, hs and.16b v2, v8, v4 mul x13, x1, x1 and.16b v7, v9, v4 mov x21, #13605374474286268416 umulh x22, x1, x1 dup.2d v8, x21 adds x11, x13, x11 cinc x13, x22, hs mov x21, #6440147467139809280 dup.2d v9, x21 adds x11, x11, x12 cinc x12, x13, hs mov x13, #3688448094816436224 mul x21, x1, x2 dup.2d v10, x13 umulh 
x13, x1, x2 mov x22, #9209861237972664320 dup.2d v11, x22 adds x12, x21, x12 cinc x22, x13, hs mov x23, #12218265789056155648 adds x12, x12, x16 cinc x16, x22, hs dup.2d v12, x23 mov x22, #17739678932212383744 mul x23, x1, x3 dup.2d v13, x22 umulh x1, x1, x3 mov x22, #2301339409586323456 dup.2d v14, x22 adds x16, x23, x16 cinc x22, x1, hs mov x24, #7822752552742551552 adds x16, x16, x20 cinc x20, x22, hs dup.2d v15, x24 mov x22, #5071053180419178496 adds x11, x14, x11 cinc x14, x15, hs dup.2d v16, x22 adds x14, x21, x14 cinc x13, x13, hs mov x15, #16352570246982270976 adds x12, x14, x12 cinc x13, x13, hs dup.2d v17, x15 ucvtf.2d v0, v0 mul x14, x2, x2 ucvtf.2d v1, v1 umulh x15, x2, x2 ucvtf.2d v2, v2 ucvtf.2d v7, v7 adds x13, x14, x13 cinc x14, x15, hs ucvtf.2d v3, v3 adds x13, x13, x16 cinc x14, x14, hs mov.16b v18, v5 fmla.2d v18, v0, v0 mul x15, x2, x3 fsub.2d v19, v6, v18 umulh x2, x2, x3 fmla.2d v19, v0, v0 adds x14, x15, x14 cinc x16, x2, hs add.2d v10, v10, v18 add.2d v8, v8, v19 adds x14, x14, x20 cinc x16, x16, hs mov.16b v18, v5 adds x12, x17, x12 cinc x0, x0, hs fmla.2d v18, v0, v1 fsub.2d v19, v6, v18 adds x0, x23, x0 cinc x1, x1, hs fmla.2d v19, v0, v1 adds x0, x0, x13 cinc x1, x1, hs add.2d v18, v18, v18 add.2d v19, v19, v19 adds x1, x15, x1 cinc x2, x2, hs add.2d v12, v12, v18 adds x1, x1, x14 cinc x2, x2, hs add.2d v10, v10, v19 mov.16b v18, v5 mul x13, x3, x3 fmla.2d v18, v0, v2 umulh x3, x3, x3 fsub.2d v19, v6, v18 adds x2, x13, x2 cinc x3, x3, hs fmla.2d v19, v0, v2 add.2d v18, v18, v18 adds x2, x2, x16 cinc x3, x3, hs add.2d v19, v19, v19 mov x13, #56431 add.2d v14, v14, v18 add.2d v12, v12, v19 movk x13, #30457, lsl 16 mov.16b v18, v5 movk x13, #30012, lsl 32 fmla.2d v18, v0, v7 fsub.2d v19, v6, v18 movk x13, #6382, lsl 48 fmla.2d v19, v0, v7 mov x14, #59151 add.2d v18, v18, v18 add.2d v19, v19, v19 movk x14, #41769, lsl 16 add.2d v16, v16, v18 movk x14, #32276, lsl 32 add.2d v14, v14, v19 movk x14, #21677, lsl 48 mov.16b v18, v5 fmla.2d v18, 
v0, v3 mov x15, #34015 fsub.2d v19, v6, v18 movk x15, #20342, lsl 16 fmla.2d v19, v0, v3 add.2d v0, v18, v18 movk x15, #13935, lsl 32 add.2d v18, v19, v19 movk x15, #11030, lsl 48 add.2d v0, v17, v0 add.2d v16, v16, v18 mov x16, #13689 mov.16b v17, v5 movk x16, #8159, lsl 16 fmla.2d v17, v1, v1 fsub.2d v18, v6, v17 movk x16, #215, lsl 32 fmla.2d v18, v1, v1 movk x16, #4913, lsl 48 add.2d v14, v14, v17 mul x17, x13, x9 add.2d v12, v12, v18 mov.16b v17, v5 umulh x20, x13, x9 fmla.2d v17, v1, v2 adds x11, x17, x11 cinc x17, x20, hs fsub.2d v18, v6, v17 fmla.2d v18, v1, v2 mul x20, x14, x9 add.2d v17, v17, v17 umulh x21, x14, x9 add.2d v18, v18, v18 add.2d v16, v16, v17 adds x17, x20, x17 cinc x20, x21, hs add.2d v14, v14, v18 adds x12, x17, x12 cinc x17, x20, hs mov.16b v17, v5 mul x20, x15, x9 fmla.2d v17, v1, v7 fsub.2d v18, v6, v17 umulh x21, x15, x9 fmla.2d v18, v1, v7 adds x17, x20, x17 cinc x20, x21, hs add.2d v17, v17, v17 add.2d v18, v18, v18 adds x0, x17, x0 cinc x17, x20, hs add.2d v0, v0, v17 mul x20, x16, x9 add.2d v16, v16, v18 mov.16b v17, v5 umulh x9, x16, x9 fmla.2d v17, v1, v3 adds x17, x20, x17 cinc x9, x9, hs fsub.2d v18, v6, v17 fmla.2d v18, v1, v3 adds x1, x17, x1 cinc x9, x9, hs add.2d v1, v17, v17 adds x2, x2, x9 cinc x3, x3, hs add.2d v17, v18, v18 mul x9, x13, x10 add.2d v1, v15, v1 add.2d v0, v0, v17 umulh x13, x13, x10 mov.16b v15, v5 adds x9, x9, x12 cinc x12, x13, hs fmla.2d v15, v2, v2 fsub.2d v17, v6, v15 mul x13, x14, x10 fmla.2d v17, v2, v2 umulh x14, x14, x10 add.2d v0, v0, v15 add.2d v15, v16, v17 adds x12, x13, x12 cinc x13, x14, hs mov.16b v16, v5 adds x0, x12, x0 cinc x12, x13, hs fmla.2d v16, v2, v7 fsub.2d v17, v6, v16 mul x13, x15, x10 fmla.2d v17, v2, v7 umulh x14, x15, x10 add.2d v16, v16, v16 adds x12, x13, x12 cinc x13, x14, hs add.2d v17, v17, v17 add.2d v1, v1, v16 adds x1, x12, x1 cinc x12, x13, hs add.2d v0, v0, v17 mul x13, x16, x10 mov.16b v16, v5 fmla.2d v16, v2, v3 umulh x10, x16, x10 fsub.2d v17, v6, v16 adds x12, 
x13, x12 cinc x10, x10, hs fmla.2d v17, v2, v3 add.2d v2, v16, v16 adds x2, x12, x2 cinc x10, x10, hs add.2d v16, v17, v17 add x3, x3, x10 add.2d v2, v13, v2 add.2d v1, v1, v16 mov x10, #61005 mov.16b v13, v5 movk x10, #58262, lsl 16 fmla.2d v13, v7, v7 movk x10, #32851, lsl 32 fsub.2d v16, v6, v13 fmla.2d v16, v7, v7 movk x10, #11582, lsl 48 add.2d v2, v2, v13 mov x12, #37581 add.2d v1, v1, v16 mov.16b v13, v5 movk x12, #43836, lsl 16 fmla.2d v13, v7, v3 movk x12, #36286, lsl 32 fsub.2d v16, v6, v13 fmla.2d v16, v7, v3 movk x12, #51783, lsl 48 add.2d v7, v13, v13 mov x13, #10899 add.2d v13, v16, v16 movk x13, #30709, lsl 16 add.2d v7, v11, v7 add.2d v2, v2, v13 movk x13, #61551, lsl 32 mov.16b v11, v5 movk x13, #45784, lsl 48 fmla.2d v11, v3, v3 fsub.2d v13, v6, v11 mov x14, #36612 fmla.2d v13, v3, v3 movk x14, #63402, lsl 16 add.2d v3, v9, v11 add.2d v7, v7, v13 movk x14, #47623, lsl 32 usra.2d v10, v8, #52 movk x14, #9430, lsl 48 usra.2d v12, v10, #52 usra.2d v14, v12, #52 mul x15, x10, x11 usra.2d v15, v14, #52 umulh x10, x10, x11 and.16b v8, v8, v4 adds x9, x15, x9 cinc x10, x10, hs and.16b v9, v10, v4 and.16b v10, v12, v4 mul x15, x12, x11 and.16b v4, v14, v4 umulh x12, x12, x11 ucvtf.2d v8, v8 mov x16, #37864 adds x10, x15, x10 cinc x12, x12, hs movk x16, #1815, lsl 16 adds x0, x10, x0 cinc x10, x12, hs movk x16, #28960, lsl 32 movk x16, #17153, lsl 48 mul x12, x13, x11 dup.2d v11, x16 umulh x13, x13, x11 mov.16b v12, v5 fmla.2d v12, v8, v11 adds x10, x12, x10 cinc x12, x13, hs fsub.2d v13, v6, v12 adds x1, x10, x1 cinc x10, x12, hs fmla.2d v13, v8, v11 mul x12, x14, x11 add.2d v0, v0, v12 add.2d v11, v15, v13 umulh x11, x14, x11 mov x13, #46128 adds x10, x12, x10 cinc x11, x11, hs movk x13, #29964, lsl 16 movk x13, #7587, lsl 32 adds x2, x10, x2 cinc x10, x11, hs movk x13, #17161, lsl 48 add x3, x3, x10 dup.2d v12, x13 mov.16b v13, v5 mov x10, #65535 fmla.2d v13, v8, v12 movk x10, #61439, lsl 16 fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 movk x10, #62867, 
lsl 32 add.2d v1, v1, v13 movk x10, #49889, lsl 48 add.2d v0, v0, v14 mul x10, x10, x9 mov x11, #52826 movk x11, #57790, lsl 16 mov x12, #1 movk x11, #55431, lsl 32 movk x12, #61440, lsl 16 movk x11, #17196, lsl 48 dup.2d v12, x11 movk x12, #62867, lsl 32 mov.16b v13, v5 movk x12, #17377, lsl 48 fmla.2d v13, v8, v12 fsub.2d v14, v6, v13 mov x11, #28817 fmla.2d v14, v8, v12 movk x11, #31161, lsl 16 add.2d v2, v2, v13 movk x11, #59464, lsl 32 add.2d v1, v1, v14 mov x13, #31276 movk x11, #10291, lsl 48 movk x13, #21262, lsl 16 mov x14, #22621 movk x13, #2304, lsl 32 movk x13, #17182, lsl 48 movk x14, #33153, lsl 16 dup.2d v12, x13 movk x14, #17846, lsl 32 mov.16b v13, v5 fmla.2d v13, v8, v12 movk x14, #47184, lsl 48 fsub.2d v14, v6, v13 mov x13, #41001 fmla.2d v14, v8, v12 add.2d v7, v7, v13 movk x13, #57649, lsl 16 add.2d v2, v2, v14 movk x13, #20082, lsl 32 mov x15, #28672 movk x13, #12388, lsl 48 movk x15, #24515, lsl 16 movk x15, #54929, lsl 32 mul x16, x12, x10 movk x15, #17064, lsl 48 umulh x12, x12, x10 dup.2d v12, x15 mov.16b v13, v5 cmn x16, x9 cinc x12, x12, hs fmla.2d v13, v8, v12 mul x9, x11, x10 fsub.2d v14, v6, v13 fmla.2d v14, v8, v12 umulh x11, x11, x10 add.2d v3, v3, v13 adds x9, x9, x12 cinc x11, x11, hs add.2d v7, v7, v14 ucvtf.2d v8, v9 adds x0, x9, x0 cinc x9, x11, hs mov x11, #44768 mul x12, x14, x10 movk x11, #51919, lsl 16 umulh x14, x14, x10 movk x11, #6346, lsl 32 movk x11, #17133, lsl 48 adds x9, x12, x9 cinc x12, x14, hs dup.2d v9, x11 adds x1, x9, x1 cinc x9, x12, hs mov.16b v12, v5 fmla.2d v12, v8, v9 mul x11, x13, x10 fsub.2d v13, v6, v12 umulh x10, x13, x10 fmla.2d v13, v8, v9 add.2d v0, v0, v12 adds x9, x11, x9 cinc x10, x10, hs add.2d v9, v11, v13 adds x2, x9, x2 cinc x9, x10, hs mov x10, #47492 movk x10, #23630, lsl 16 add x3, x3, x9 movk x10, #49985, lsl 32 mul x9, x4, x4 movk x10, #17168, lsl 48 umulh x11, x4, x4 dup.2d v11, x10 mov.16b v12, v5 mul x10, x4, x5 fmla.2d v12, v8, v11 umulh x12, x4, x5 fsub.2d v13, v6, v12 fmla.2d v13, 
v8, v11 adds x11, x10, x11 cinc x13, x12, hs add.2d v1, v1, v12 mul x14, x4, x6 add.2d v0, v0, v13 mov x15, #57936 umulh x16, x4, x6 movk x15, #54828, lsl 16 adds x13, x14, x13 cinc x17, x16, hs movk x15, #18292, lsl 32 mul x20, x4, x7 movk x15, #17197, lsl 48 dup.2d v11, x15 umulh x4, x4, x7 mov.16b v12, v5 adds x15, x20, x17 cinc x17, x4, hs fmla.2d v12, v8, v11 fsub.2d v13, v6, v12 adds x10, x10, x11 cinc x11, x12, hs fmla.2d v13, v8, v11 mul x12, x5, x5 add.2d v2, v2, v12 add.2d v1, v1, v13 umulh x21, x5, x5 mov x22, #17708 adds x11, x12, x11 cinc x12, x21, hs movk x22, #43915, lsl 16 movk x22, #64348, lsl 32 adds x11, x11, x13 cinc x12, x12, hs movk x22, #17188, lsl 48 mul x13, x5, x6 dup.2d v11, x22 umulh x21, x5, x6 mov.16b v12, v5 fmla.2d v12, v8, v11 adds x12, x13, x12 cinc x22, x21, hs fsub.2d v13, v6, v12 adds x12, x12, x15 cinc x15, x22, hs fmla.2d v13, v8, v11 add.2d v7, v7, v12 mul x22, x5, x7 add.2d v2, v2, v13 umulh x5, x5, x7 mov x23, #29184 movk x23, #20789, lsl 16 adds x15, x22, x15 cinc x24, x5, hs movk x23, #19197, lsl 32 adds x15, x15, x17 cinc x17, x24, hs movk x23, #17083, lsl 48 dup.2d v11, x23 adds x11, x14, x11 cinc x14, x16, hs mov.16b v12, v5 adds x13, x13, x14 cinc x14, x21, hs fmla.2d v12, v8, v11 adds x12, x13, x12 cinc x13, x14, hs fsub.2d v13, v6, v12 fmla.2d v13, v8, v11 mul x14, x6, x6 add.2d v3, v3, v12 umulh x16, x6, x6 add.2d v7, v7, v13 ucvtf.2d v8, v10 adds x13, x14, x13 cinc x14, x16, hs mov x16, #58856 adds x13, x13, x15 cinc x14, x14, hs movk x16, #14953, lsl 16 movk x16, #15155, lsl 32 mul x15, x6, x7 movk x16, #17181, lsl 48 umulh x6, x6, x7 dup.2d v10, x16 adds x14, x15, x14 cinc x16, x6, hs mov.16b v11, v5 fmla.2d v11, v8, v10 adds x14, x14, x17 cinc x16, x16, hs fsub.2d v12, v6, v11 adds x12, x20, x12 cinc x4, x4, hs fmla.2d v12, v8, v10 add.2d v0, v0, v11 adds x4, x22, x4 cinc x5, x5, hs add.2d v9, v9, v12 adds x4, x4, x13 cinc x5, x5, hs mov x13, #35392 movk x13, #12477, lsl 16 adds x5, x15, x5 cinc x6, x6, hs movk 
x13, #56780, lsl 32 adds x5, x5, x14 cinc x6, x6, hs movk x13, #17142, lsl 48 dup.2d v10, x13 mul x13, x7, x7 mov.16b v11, v5 umulh x7, x7, x7 fmla.2d v11, v8, v10 adds x6, x13, x6 cinc x7, x7, hs fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 adds x6, x6, x16 cinc x7, x7, hs add.2d v1, v1, v11 mov x13, #56431 add.2d v0, v0, v12 mov x14, #9848 movk x13, #30457, lsl 16 movk x14, #54501, lsl 16 movk x13, #30012, lsl 32 movk x14, #31540, lsl 32 movk x14, #17170, lsl 48 movk x13, #6382, lsl 48 dup.2d v10, x14 mov x14, #59151 mov.16b v11, v5 fmla.2d v11, v8, v10 movk x14, #41769, lsl 16 fsub.2d v12, v6, v11 movk x14, #32276, lsl 32 fmla.2d v12, v8, v10 movk x14, #21677, lsl 48 add.2d v2, v2, v11 add.2d v1, v1, v12 mov x15, #34015 mov x16, #9584 movk x15, #20342, lsl 16 movk x16, #63883, lsl 16 movk x16, #18253, lsl 32 movk x15, #13935, lsl 32 movk x16, #17190, lsl 48 movk x15, #11030, lsl 48 dup.2d v10, x16 mov.16b v11, v5 mov x16, #13689 fmla.2d v11, v8, v10 movk x16, #8159, lsl 16 fsub.2d v12, v6, v11 fmla.2d v12, v8, v10 movk x16, #215, lsl 32 add.2d v7, v7, v11 movk x16, #4913, lsl 48 add.2d v2, v2, v12 mul x17, x13, x9 mov x20, #51712 movk x20, #16093, lsl 16 umulh x21, x13, x9 movk x20, #30633, lsl 32 adds x11, x17, x11 cinc x17, x21, hs movk x20, #17068, lsl 48 dup.2d v10, x20 mul x20, x14, x9 mov.16b v11, v5 umulh x21, x14, x9 fmla.2d v11, v8, v10 fsub.2d v12, v6, v11 adds x17, x20, x17 cinc x20, x21, hs fmla.2d v12, v8, v10 adds x12, x17, x12 cinc x17, x20, hs add.2d v3, v3, v11 mul x20, x15, x9 add.2d v7, v7, v12 ucvtf.2d v4, v4 umulh x21, x15, x9 mov x22, #34724 adds x17, x20, x17 cinc x20, x21, hs movk x22, #40393, lsl 16 movk x22, #23752, lsl 32 adds x4, x17, x4 cinc x17, x20, hs movk x22, #17184, lsl 48 mul x20, x16, x9 dup.2d v8, x22 mov.16b v10, v5 umulh x9, x16, x9 fmla.2d v10, v4, v8 adds x17, x20, x17 cinc x9, x9, hs fsub.2d v11, v6, v10 fmla.2d v11, v4, v8 adds x5, x17, x5 cinc x9, x9, hs add.2d v0, v0, v10 adds x6, x6, x9 cinc x7, x7, hs add.2d v8, v9, 
v11 mul x9, x13, x10 mov x17, #25532 movk x17, #31025, lsl 16 umulh x13, x13, x10 movk x17, #10002, lsl 32 adds x9, x9, x12 cinc x12, x13, hs movk x17, #17199, lsl 48 dup.2d v9, x17 mul x13, x14, x10 mov.16b v10, v5 umulh x14, x14, x10 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 adds x12, x13, x12 cinc x13, x14, hs fmla.2d v11, v4, v9 adds x4, x12, x4 cinc x12, x13, hs add.2d v1, v1, v10 add.2d v0, v0, v11 mul x13, x15, x10 mov x14, #18830 umulh x15, x15, x10 movk x14, #2465, lsl 16 adds x12, x13, x12 cinc x13, x15, hs movk x14, #36348, lsl 32 movk x14, #17194, lsl 48 adds x5, x12, x5 cinc x12, x13, hs dup.2d v9, x14 mul x13, x16, x10 mov.16b v10, v5 fmla.2d v10, v4, v9 umulh x10, x16, x10 fsub.2d v11, v6, v10 adds x12, x13, x12 cinc x10, x10, hs fmla.2d v11, v4, v9 add.2d v2, v2, v10 adds x6, x12, x6 cinc x10, x10, hs add.2d v1, v1, v11 add x7, x7, x10 mov x10, #21566 movk x10, #43708, lsl 16 mov x12, #61005 movk x10, #57685, lsl 32 movk x12, #58262, lsl 16 movk x10, #17185, lsl 48 movk x12, #32851, lsl 32 dup.2d v9, x10 mov.16b v10, v5 movk x12, #11582, lsl 48 fmla.2d v10, v4, v9 mov x10, #37581 fsub.2d v11, v6, v10 fmla.2d v11, v4, v9 movk x10, #43836, lsl 16 add.2d v7, v7, v10 movk x10, #36286, lsl 32 add.2d v2, v2, v11 mov x13, #3072 movk x10, #51783, lsl 48 movk x13, #8058, lsl 16 mov x14, #10899 movk x13, #46097, lsl 32 movk x14, #30709, lsl 16 movk x13, #17047, lsl 48 dup.2d v9, x13 movk x14, #61551, lsl 32 mov.16b v10, v5 movk x14, #45784, lsl 48 fmla.2d v10, v4, v9 fsub.2d v11, v6, v10 mov x13, #36612 fmla.2d v11, v4, v9 movk x13, #63402, lsl 16 add.2d v3, v3, v10 add.2d v4, v7, v11 movk x13, #47623, lsl 32 mov x15, #65535 movk x13, #9430, lsl 48 movk x15, #61439, lsl 16 movk x15, #62867, lsl 32 mul x16, x12, x11 movk x15, #1, lsl 48 umulh x12, x12, x11 umov x17, v8.d[0] adds x9, x16, x9 cinc x12, x12, hs umov x16, v8.d[1] mul x17, x17, x15 mul x20, x10, x11 mul x15, x16, x15 umulh x10, x10, x11 and x16, x17, x8 and x8, x15, x8 adds x12, x20, x12 cinc x10, 
x10, hs ins v7.d[0], x16 ins v7.d[1], x8 adds x4, x12, x4 cinc x8, x10, hs ucvtf.2d v7, v7 mov x10, #16 mul x12, x14, x11 movk x10, #22847, lsl 32 umulh x14, x14, x11 movk x10, #17151, lsl 48 dup.2d v9, x10 adds x8, x12, x8 cinc x10, x14, hs mov.16b v10, v5 adds x5, x8, x5 cinc x8, x10, hs fmla.2d v10, v7, v9 mul x10, x13, x11 fsub.2d v11, v6, v10 fmla.2d v11, v7, v9 umulh x11, x13, x11 add.2d v0, v0, v10 adds x8, x10, x8 cinc x10, x11, hs add.2d v8, v8, v11 mov x11, #20728 adds x6, x8, x6 cinc x8, x10, hs movk x11, #23588, lsl 16 add x7, x7, x8 movk x11, #7790, lsl 32 movk x11, #17170, lsl 48 mov x8, #65535 dup.2d v9, x11 movk x8, #61439, lsl 16 mov.16b v10, v5 fmla.2d v10, v7, v9 movk x8, #62867, lsl 32 fsub.2d v11, v6, v10 movk x8, #49889, lsl 48 fmla.2d v11, v7, v9 mul x8, x8, x9 add.2d v1, v1, v10 add.2d v0, v0, v11 mov x10, #1 mov x11, #16000 movk x10, #61440, lsl 16 movk x11, #53891, lsl 16 movk x11, #5509, lsl 32 movk x10, #62867, lsl 32 movk x11, #17144, lsl 48 movk x10, #17377, lsl 48 dup.2d v9, x11 mov.16b v10, v5 mov x11, #28817 fmla.2d v10, v7, v9 movk x11, #31161, lsl 16 fsub.2d v11, v6, v10 movk x11, #59464, lsl 32 fmla.2d v11, v7, v9 add.2d v2, v2, v10 movk x11, #10291, lsl 48 add.2d v9, v1, v11 mov x12, #22621 mov x13, #46800 movk x13, #2568, lsl 16 movk x12, #33153, lsl 16 movk x13, #1335, lsl 32 movk x12, #17846, lsl 32 movk x13, #17188, lsl 48 dup.2d v1, x13 movk x12, #47184, lsl 48 mov.16b v10, v5 mov x13, #41001 fmla.2d v10, v7, v1 fsub.2d v11, v6, v10 movk x13, #57649, lsl 16 fmla.2d v11, v7, v1 movk x13, #20082, lsl 32 add.2d v1, v4, v10 movk x13, #12388, lsl 48 add.2d v4, v2, v11 mov x14, #39040 mul x15, x10, x8 movk x14, #14704, lsl 16 umulh x10, x10, x8 movk x14, #12839, lsl 32 movk x14, #17096, lsl 48 cmn x15, x9 cinc x10, x10, hs dup.2d v2, x14 mul x9, x11, x8 mov.16b v5, v5 fmla.2d v5, v7, v2 umulh x11, x11, x8 fsub.2d v6, v6, v5 adds x9, x9, x10 cinc x10, x11, hs fmla.2d v6, v7, v2 add.2d v5, v3, v5 adds x4, x9, x4 cinc x9, x10, hs 
add.2d v6, v1, v6 mul x10, x12, x8 ssra.2d v0, v8, #52 umulh x11, x12, x8 ssra.2d v9, v0, #52 ssra.2d v4, v9, #52 adds x9, x10, x9 cinc x10, x11, hs ssra.2d v6, v4, #52 adds x5, x9, x5 cinc x9, x10, hs ssra.2d v5, v6, #52 ushr.2d v1, v9, #12 mul x10, x13, x8 ushr.2d v2, v4, #24 umulh x8, x13, x8 ushr.2d v3, v6, #36 sli.2d v0, v9, #52 adds x9, x10, x9 cinc x8, x8, hs sli.2d v1, v4, #40 adds x6, x9, x6 cinc x8, x8, hs sli.2d v2, v6, #28 sli.2d v3, v5, #16 add x7, x7, x8
jzmoolman/xv6-rust
607
src/asm/entry.S
# qemu -kernel loads the kernel at 0x80000000 # and causes each hart (i.e. CPU) to jump there. # kernel.ld causes the following code to # be placed at 0x80000000. .section .text.init .globl _entry _entry: # set up a stack for C. # stack0 is declared in start.c, # with a 4096-byte stack per CPU. # sp = stack0 + (hartid * 4096) la sp, stack0 li a0, 1024*4 csrr a1, mhartid addi a1, a1, 1 mul a0, a0, a1 add sp, sp, a0 # jump to start() in start.c call _start spin: j spin
jzmoolman/xv6-rust
607
src/asm/entry.S
# qemu -kernel loads the kernel at 0x80000000 # and causes each hart (i.e. CPU) to jump there. # kernel.ld causes the following code to # be placed at 0x80000000. .section .text.init .globl _entry _entry: # set up a stack for C. # stack0 is declared in start.c, # with a 4096-byte stack per CPU. # sp = stack0 + (hartid * 4096) la sp, stack0 li a0, 1024*4 csrr a1, mhartid addi a1, a1, 1 mul a0, a0, a1 add sp, sp, a0 # jump to start() in start.c call _start spin: j spin
KaiWalter/wasm-api-gateway
194
api-go/bindings/gateway/api/api/guest/empty.s
// This file exists for testing this package without WebAssembly, // allowing empty function bodies with a //go:wasmimport directive. // See https://pkg.go.dev/cmd/compile for more information.
KaiWalter/wasm-api-gateway
194
api-go/bindings/gateway/api/api/host/empty.s
// This file exists for testing this package without WebAssembly, // allowing empty function bodies with a //go:wasmimport directive. // See https://pkg.go.dev/cmd/compile for more information.
KaiWalter/wasm-api-gateway
194
api-go/bindings/gateway/api/api/guest/empty.s
// This file exists for testing this package without WebAssembly, // allowing empty function bodies with a //go:wasmimport directive. // See https://pkg.go.dev/cmd/compile for more information.
KaiWalter/wasm-api-gateway
194
api-go/bindings/gateway/api/api/host/empty.s
// This file exists for testing this package without WebAssembly, // allowing empty function bodies with a //go:wasmimport directive. // See https://pkg.go.dev/cmd/compile for more information.
kappa8719/kernel2
665
src/arch/rvc/context_switch.S
.global context_switch context_switch: addi sp, sp, -13 * 4 sw ra, 0 * 4(sp) sw s0, 1 * 4(sp) sw s1, 2 * 4(sp) sw s2, 3 * 4(sp) sw s3, 4 * 4(sp) sw s4, 5 * 4(sp) sw s5, 6 * 4(sp) sw s6, 7 * 4(sp) sw s7, 8 * 4(sp) sw s8, 9 * 4(sp) sw s9, 10 * 4(sp) sw s10, 11 * 4(sp) sw s11, 12 * 4(sp) sw sp, (a0) lw sp, (a1) lw ra, 0 * 4(sp) lw s0, 1 * 4(sp) lw s1, 2 * 4(sp) lw s2, 3 * 4(sp) lw s3, 4 * 4(sp) lw s4, 5 * 4(sp) lw s5, 6 * 4(sp) lw s6, 7 * 4(sp) lw s7, 8 * 4(sp) lw s8, 9 * 4(sp) lw s9, 10 * 4(sp) lw s10, 11 * 4(sp) lw s11, 12 * 4(sp) addi sp, sp, 13 * 4 ret
karu-rress/CAU-OS-Security
465
1-Compile/hello.s
.file "hello.c" .text .section .rodata .LC0: .string "Hello, World!" .text .globl main .type main, @function main: .LFB0: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 leaq .LC0(%rip), %rax movq %rax, %rdi call puts@PLT movl $0, %eax popq %rbp .cfi_def_cfa 7, 8 ret .cfi_endproc .LFE0: .size main, .-main .ident "GCC: (GNU) 14.2.1 20240910" .section .note.GNU-stack,"",@progbits
karu-rress/CAU-OS-Security
422
1-Compile/hello_o1.s
.file "hello.c" .text .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello, World!" .text .globl main .type main, @function main: .LFB0: .cfi_startproc subq $8, %rsp .cfi_def_cfa_offset 16 leaq .LC0(%rip), %rdi call puts@PLT movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE0: .size main, .-main .ident "GCC: (GNU) 14.2.1 20240910" .section .note.GNU-stack,"",@progbits
karu-rress/CAU-OS-Security
468
1-Compile/hello_o2.s
.file "hello.c" .text .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello, World!" .section .text.startup,"ax",@progbits .p2align 4 .globl main .type main, @function main: .LFB0: .cfi_startproc subq $8, %rsp .cfi_def_cfa_offset 16 leaq .LC0(%rip), %rdi call puts@PLT xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE0: .size main, .-main .ident "GCC: (GNU) 14.2.1 20240910" .section .note.GNU-stack,"",@progbits
karu-rress/CAU-OS-Security
465
1-Compile/hello_o0.s
.file "hello.c" .text .section .rodata .LC0: .string "Hello, World!" .text .globl main .type main, @function main: .LFB0: .cfi_startproc pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 leaq .LC0(%rip), %rax movq %rax, %rdi call puts@PLT movl $0, %eax popq %rbp .cfi_def_cfa 7, 8 ret .cfi_endproc .LFE0: .size main, .-main .ident "GCC: (GNU) 14.2.1 20240910" .section .note.GNU-stack,"",@progbits
Katya-Incorporated/Virtualization
2,102
guest/vmbase_example/idmap.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .set .L_TT_TYPE_BLOCK, 0x1 .set .L_TT_TYPE_PAGE, 0x3 .set .L_TT_TYPE_TABLE, 0x3 /* Access flag. */ .set .L_TT_AF, 0x1 << 10 /* Not global. */ .set .L_TT_NG, 0x1 << 11 .set .L_TT_RO, 0x2 << 6 .set .L_TT_XN, 0x3 << 53 .set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE) .set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable .set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN .set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN .set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG .set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO .section ".rodata.idmap", "a", %progbits .global idmap .align 12 idmap: /* level 1 */ .quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings .quad 0x0 // 1 GiB unmapped .quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM .fill 509, 8, 0x0 // 509 GiB of remaining VA space 0: /* level 2 */ #if defined(VMBASE_EXAMPLE_IS_BIOS) .quad 0 // 2 MiB not mapped (DT) .quad .L_BLOCK_MEM_XIP | 0x80200000 // 2 MiB of DRAM containing image .quad .L_BLOCK_MEM | 0x80400000 // 2 MiB of writable DRAM .fill 509, 8, 0x0 #elif defined(VMBASE_EXAMPLE_IS_KERNEL) .quad .L_BLOCK_MEM_XIP | 0x80000000 // 2 MiB of DRAM containing image .quad .L_BLOCK_MEM | 0x80200000 // 2 MiB of writable DRAM .fill 510, 8, 0x0 #else #error "Unexpected 
vmbase_example mode: failed to generate idmap" #endif
Katya-Incorporated/Virtualization
976
guest/vmbase_example/image.ld.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ MEMORY { #if defined(VMBASE_EXAMPLE_IS_BIOS) image : ORIGIN = 0x80200000, LENGTH = 2M writable_data : ORIGIN = 0x80400000, LENGTH = 2M #elif defined(VMBASE_EXAMPLE_IS_KERNEL) image : ORIGIN = 0x80000000, LENGTH = 2M writable_data : ORIGIN = 0x80200000, LENGTH = 2M #else #error "Unexpected vmbase_example mode: failed to generate image layout" #endif }
Katya-Incorporated/Virtualization
1,745
guest/pvmfw/idmap.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .set .L_TT_TYPE_BLOCK, 0x1 .set .L_TT_TYPE_PAGE, 0x3 .set .L_TT_TYPE_TABLE, 0x3 /* Access flag. */ .set .L_TT_AF, 0x1 << 10 /* Not global. */ .set .L_TT_NG, 0x1 << 11 .set .L_TT_RO, 0x2 << 6 .set .L_TT_XN, 0x3 << 53 .set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE) .set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable .set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN .set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN .set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG .set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO .section ".rodata.idmap", "a", %progbits .global idmap .align 12 idmap: /* level 1 */ .quad .L_BLOCK_DEV | 0x0 // 1 GB of device mappings .quad .L_TT_TYPE_TABLE + 0f // Unmapped device memory, and pVM firmware .fill 510, 8, 0x0 // 510 GB of remaining VA space /* level 2 */ 0: .fill 510, 8, 0x0 .quad .L_BLOCK_MEM_XIP | 0x7fc00000 // pVM firmware image .quad .L_BLOCK_MEM | 0x7fe00000 // Writable memory for stack, heap &c.
Katya-Incorporated/Virtualization
2,161
guest/rialto/idmap.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Initial TTBR0 idmap activated before first memory write. // Remains active until a new page table is created by early Rust. // .set .SZ_1K, 1024 .set .SZ_4K, 4 * .SZ_1K .set .SZ_1M, 1024 * .SZ_1K .set .SZ_2M, 2 * .SZ_1M .set .SZ_1G, 1024 * .SZ_1M .set .PAGE_SIZE, .SZ_4K .set .ORIGIN_ADDR, 2 * .SZ_1G .set .TEXT_ADDR, .ORIGIN_ADDR + (0 * .SZ_2M) .set .DATA_ADDR, .ORIGIN_ADDR + (1 * .SZ_2M) .set .L_TT_TYPE_BLOCK, 0x1 .set .L_TT_TYPE_PAGE, 0x3 .set .L_TT_TYPE_TABLE, 0x3 .set .L_TT_AF, 0x1 << 10 // Access flag .set .L_TT_NG, 0x1 << 11 // Not global .set .L_TT_RO, 0x2 << 6 .set .L_TT_XN, 0x3 << 53 .set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE) .set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable .set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN .set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN .set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG .set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO .section ".rodata.idmap", "a", %progbits .global idmap .balign .PAGE_SIZE idmap: /* level 1 */ .quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings .quad 0x0 // 1 GiB unmapped .quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM .balign .PAGE_SIZE, 0 // unmapped /* level 2 */ 0: .quad .L_BLOCK_MEM_XIP | .TEXT_ADDR // 2 MiB of DRAM 
containing image .quad .L_BLOCK_MEM | .DATA_ADDR // 2 MiB of writable DRAM .balign .PAGE_SIZE, 0 // unmapped
Katya-Incorporated/Virtualization
5,154
libs/libvmbase/entry.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common.h> .set .L_MAIR_DEV_nGnRE, 0x04 .set .L_MAIR_MEM_WBWA, 0xff .set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8) /* 4 KiB granule size for TTBR0_EL1. */ .set .L_TCR_TG0_4KB, 0x0 << 14 /* 4 KiB granule size for TTBR1_EL1. */ .set .L_TCR_TG1_4KB, 0x2 << 30 /* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */ .set .L_TCR_EPD1, 0x1 << 23 /* Translation table walks for TTBR0_EL1 are inner sharable. */ .set .L_TCR_SH_INNER, 0x3 << 12 /* * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate * cacheable. */ .set .L_TCR_RGN_OWB, 0x1 << 10 /* * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate * cacheable. */ .set .L_TCR_RGN_IWB, 0x1 << 8 /* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */ .set .L_TCR_T0SZ_512, 64 - 39 .set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB .set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512 /* Stage 1 instruction access cacheability is unaffected. */ .set .L_SCTLR_ELx_I, 0x1 << 12 /* SP alignment fault if SP is not aligned to a 16 byte boundary. */ .set .L_SCTLR_ELx_SA, 0x1 << 3 /* Stage 1 data access cacheability is unaffected. */ .set .L_SCTLR_ELx_C, 0x1 << 2 /* EL0 and EL1 stage 1 MMU enabled. 
*/ .set .L_SCTLR_ELx_M, 0x1 << 0 /* Privileged Access Never is unchanged on taking an exception to EL1. */ .set .L_SCTLR_EL1_SPAN, 0x1 << 23 /* All writable memory regions are treated as XN. */ .set .L_SCTLR_EL1_WXN, 0x1 << 19 /* SETEND instruction disabled at EL0 in aarch32 mode. */ .set .L_SCTLR_EL1_SED, 0x1 << 8 /* Various IT instructions are disabled at EL0 in aarch32 mode. */ .set .L_SCTLR_EL1_ITD, 0x1 << 7 .set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29) .set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED .set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN /** * This is a generic entry point for an image. It carries out the operations required to prepare the * loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above, * prepares the stack, enables floating point, and sets up the exception vector. It preserves x0-x3 * for the Rust entry point, as these may contain boot parameters. */ .section .init.entry, "ax" .global entry entry: /* Load and apply the memory management configuration, ready to enable MMU and caches. */ adr x30, vector_table_panic msr vbar_el1, x30 /* * Our load address is set by the host so validate it before proceeding. */ adr x30, entry mov_i x29, entry cmp x29, x30 b.eq 1f reset_or_hang 1: adrp x30, idmap msr ttbr0_el1, x30 mov_i x30, .Lmairval msr mair_el1, x30 mov_i x30, .Ltcrval /* Copy the supported PA range into TCR_EL1.IPS. */ mrs x29, id_aa64mmfr0_el1 bfi x30, x29, #32, #4 msr tcr_el1, x30 mov_i x30, .Lsctlrval /* * Ensure everything before this point has completed, then invalidate any potentially stale * local TLB entries before they start being used. */ isb tlbi vmalle1 ic iallu dsb nsh isb /* * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed. 
*/ msr sctlr_el1, x30 isb /* Disable trapping floating point access in EL1. */ mrs x30, cpacr_el1 orr x30, x30, #(0x3 << 20) msr cpacr_el1, x30 isb /* Zero out the bss section. */ adr_l x29, bss_begin adr_l x30, bss_end 0: cmp x29, x30 b.hs 1f stp xzr, xzr, [x29], #16 b 0b 1: /* Copy the data section. */ adr_l x28, data_begin adr_l x29, data_end adr_l x30, data_lma 2: cmp x28, x29 b.ge 3f ldp q0, q1, [x30], #32 stp q0, q1, [x28], #32 b 2b 3: /* Prepare the exception handler stack (SP_EL1). */ adr_l x30, init_eh_stack_pointer msr spsel, #1 mov sp, x30 /* Prepare the main thread stack (SP_EL0). */ adr_l x30, init_stack_pointer msr spsel, #0 mov sp, x30 /* Set up exception vector. */ adr x30, vector_table_el1 msr vbar_el1, x30 /* * Set up Bionic-compatible thread-local storage. * * Note that TPIDR_EL0 can't be configured from rust_entry because the * compiler will dereference it during function entry to access * __stack_chk_guard and Rust doesn't support LLVM's * __attribute__((no_stack_protector)). */ adr_l x30, __bionic_tls msr tpidr_el0, x30 /* Call into Rust code. */ bl rust_entry /* Loop forever waiting for interrupts. */ 4: wfi b 4b
Katya-Incorporated/Virtualization
4,679
libs/libvmbase/exceptions.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Saves the volatile registers onto the stack. This currently takes 14 * instructions, so it can be used in exception handlers with 18 instructions * left. * * On return, x0 and x1 are initialised to elr_el2 and spsr_el2 respectively, * which can be used as the first and second arguments of a subsequent call. */ .macro save_volatile_to_stack /* Reserve stack space and save registers x0-x18, x29 & x30. */ stp x0, x1, [sp, #-(8 * 24)]! stp x2, x3, [sp, #8 * 2] stp x4, x5, [sp, #8 * 4] stp x6, x7, [sp, #8 * 6] stp x8, x9, [sp, #8 * 8] stp x10, x11, [sp, #8 * 10] stp x12, x13, [sp, #8 * 12] stp x14, x15, [sp, #8 * 14] stp x16, x17, [sp, #8 * 16] str x18, [sp, #8 * 18] stp x29, x30, [sp, #8 * 20] /* * Save elr_el1 & spsr_el1. This such that we can take nested exception * and still be able to unwind. */ mrs x0, elr_el1 mrs x1, spsr_el1 stp x0, x1, [sp, #8 * 22] .endm /** * Restores the volatile registers from the stack. This currently takes 14 * instructions, so it can be used in exception handlers while still leaving 18 * instructions left; if paired with save_volatile_to_stack, there are 4 * instructions to spare. */ .macro restore_volatile_from_stack /* Restore registers x2-x18, x29 & x30. 
*/ ldp x2, x3, [sp, #8 * 2] ldp x4, x5, [sp, #8 * 4] ldp x6, x7, [sp, #8 * 6] ldp x8, x9, [sp, #8 * 8] ldp x10, x11, [sp, #8 * 10] ldp x12, x13, [sp, #8 * 12] ldp x14, x15, [sp, #8 * 14] ldp x16, x17, [sp, #8 * 16] ldr x18, [sp, #8 * 18] ldp x29, x30, [sp, #8 * 20] /* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */ ldp x0, x1, [sp, #8 * 22] msr elr_el1, x0 msr spsr_el1, x1 /* Restore x0 & x1, and release stack space. */ ldp x0, x1, [sp], #8 * 24 .endm /** * This is a generic handler for exceptions taken at the current EL while using * SP0. It behaves similarly to the SPx case by first switching to SPx, doing * the work, then switching back to SP0 before returning. * * Switching to SPx and calling the Rust handler takes 16 instructions. To * restore and return we need an additional 16 instructions, so we can implement * the whole handler within the allotted 32 instructions. */ .macro current_exception_sp0 handler:req msr spsel, #1 save_volatile_to_stack bl \handler restore_volatile_from_stack msr spsel, #0 eret .endm /** * This is a generic handler for exceptions taken at the current EL while using * SPx. It saves volatile registers, calls the Rust handler, restores volatile * registers, then returns. * * This also works for exceptions taken from EL0, if we don't care about * non-volatile registers. * * Saving state and jumping to the Rust handler takes 15 instructions, and * restoring and returning also takes 15 instructions, so we can fit the whole * handler in 30 instructions, under the limit of 32. 
*/ .macro current_exception_spx handler:req save_volatile_to_stack bl \handler restore_volatile_from_stack eret .endm .section .text.vector_table_el1, "ax" .global vector_table_el1 .balign 0x800 vector_table_el1: sync_cur_sp0: current_exception_sp0 sync_exception_current .balign 0x80 irq_cur_sp0: current_exception_sp0 irq_current .balign 0x80 fiq_cur_sp0: current_exception_sp0 fiq_current .balign 0x80 serr_cur_sp0: current_exception_sp0 serr_current .balign 0x80 sync_cur_spx: current_exception_spx sync_exception_current .balign 0x80 irq_cur_spx: current_exception_spx irq_current .balign 0x80 fiq_cur_spx: current_exception_spx fiq_current .balign 0x80 serr_cur_spx: current_exception_spx serr_current .balign 0x80 sync_lower_64: current_exception_spx sync_lower .balign 0x80 irq_lower_64: current_exception_spx irq_lower .balign 0x80 fiq_lower_64: current_exception_spx fiq_lower .balign 0x80 serr_lower_64: current_exception_spx serr_lower .balign 0x80 sync_lower_32: current_exception_spx sync_lower .balign 0x80 irq_lower_32: current_exception_spx irq_lower .balign 0x80 fiq_lower_32: current_exception_spx fiq_lower .balign 0x80 serr_lower_32: current_exception_spx serr_lower
Katya-Incorporated/Virtualization
1,788
libs/libvmbase/exceptions_panic.S
/* * Copyright 2022 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common.h> /** * The following table is intended to trap any fault resulting from the very * first memory accesses. They assume that PSCI v0.2 is available and provides * the PSCI_SYSTEM_RESET call in an attempt to gracefully exit but otherwise * results in the core busy-looping. */ .section .text.vector_table_panic, "ax" .global vector_table_panic .balign 0x800 vector_table_panic: sync_cur_sp0_panic: reset_or_hang .balign 0x80 irq_cur_sp0_panic: reset_or_hang .balign 0x80 fiq_cur_sp0_panic: reset_or_hang .balign 0x80 serr_cur_sp0_panic: reset_or_hang .balign 0x80 sync_cur_spx_panic: reset_or_hang .balign 0x80 irq_cur_spx_panic: reset_or_hang .balign 0x80 fiq_cur_spx_panic: reset_or_hang .balign 0x80 serr_cur_spx_panic: reset_or_hang .balign 0x80 sync_lower_64_panic: reset_or_hang .balign 0x80 irq_lower_64_panic: reset_or_hang .balign 0x80 fiq_lower_64_panic: reset_or_hang .balign 0x80 serr_lower_64_panic: reset_or_hang .balign 0x80 sync_lower_32_panic: reset_or_hang .balign 0x80 irq_lower_32_panic: reset_or_hang .balign 0x80 fiq_lower_32_panic: reset_or_hang .balign 0x80 serr_lower_32_panic: reset_or_hang
kayleegeorge/sp1-fork
8,449
zkvm/entrypoint/src/memset.s
// This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30: // // src/string/memset.c // // This was compiled into assembly with: // // clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops // // and labels manually updated to not conflict. // // musl as a whole is licensed under the following standard MIT license: // // ---------------------------------------------------------------------- // Copyright © 2005-2020 Rich Felker, et al. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // ---------------------------------------------------------------------- // // Authors/contributors include: // // A. Wilcox // Ada Worcester // Alex Dowad // Alex Suykov // Alexander Monakov // Andre McCurdy // Andrew Kelley // Anthony G. 
Basile // Aric Belsito // Arvid Picciani // Bartosz Brachaczek // Benjamin Peterson // Bobby Bingham // Boris Brezillon // Brent Cook // Chris Spiegel // Clément Vasseur // Daniel Micay // Daniel Sabogal // Daurnimator // David Carlier // David Edelsohn // Denys Vlasenko // Dmitry Ivanov // Dmitry V. Levin // Drew DeVault // Emil Renner Berthing // Fangrui Song // Felix Fietkau // Felix Janda // Gianluca Anzolin // Hauke Mehrtens // He X // Hiltjo Posthuma // Isaac Dunham // Jaydeep Patil // Jens Gustedt // Jeremy Huntwork // Jo-Philipp Wich // Joakim Sindholt // John Spencer // Julien Ramseier // Justin Cormack // Kaarle Ritvanen // Khem Raj // Kylie McClain // Leah Neukirchen // Luca Barbato // Luka Perkov // M Farkas-Dyck (Strake) // Mahesh Bodapati // Markus Wichmann // Masanori Ogino // Michael Clark // Michael Forney // Mikhail Kremnyov // Natanael Copa // Nicholas J. Kain // orc // Pascal Cuoq // Patrick Oppenlander // Petr Hosek // Petr Skocik // Pierre Carrier // Reini Urban // Rich Felker // Richard Pennington // Ryan Fairfax // Samuel Holland // Segev Finer // Shiz // sin // Solar Designer // Stefan Kristiansson // Stefan O'Rear // Szabolcs Nagy // Timo Teräs // Trutz Behn // Valentin Ochs // Will Dietz // William Haddon // William Pitcock // // Portions of this software are derived from third-party works licensed // under terms compatible with the above MIT license: // // The TRE regular expression implementation (src/regex/reg* and // src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed // under a 2-clause BSD license (license text in the source files). The // included version has been heavily modified by Rich Felker in 2012, in // the interests of size, simplicity, and namespace cleanliness. // // Much of the math library code (src/math/* and src/complex/*) is // Copyright © 1993,2004 Sun Microsystems or // Copyright © 2003-2011 David Schultz or // Copyright © 2003-2009 Steven G. Kargl or // Copyright © 2003-2009 Bruce D. 
Evans or // Copyright © 2008 Stephen L. Moshier or // Copyright © 2017-2018 Arm Limited // and labelled as such in comments in the individual source files. All // have been licensed under extremely permissive terms. // // The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 // The Android Open Source Project and is licensed under a two-clause BSD // license. It was taken from Bionic libc, used on Android. // // The AArch64 memcpy and memset code (src/string/aarch64/*) are // Copyright © 1999-2019, Arm Limited. // // The implementation of DES for crypt (src/crypt/crypt_des.c) is // Copyright © 1994 David Burren. It is licensed under a BSD license. // // The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was // originally written by Solar Designer and placed into the public // domain. The code also comes with a fallback permissive license for use // in jurisdictions that may not recognize the public domain. // // The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 // Valentin Ochs and is licensed under an MIT-style license. // // The x86_64 port was written by Nicholas J. Kain and is licensed under // the standard MIT terms. // // The mips and microblaze ports were originally written by Richard // Pennington for use in the ellcc project. The original code was adapted // by Rich Felker for build system and code conventions during upstream // integration. It is licensed under the standard MIT terms. // // The mips64 port was contributed by Imagination Technologies and is // licensed under the standard MIT terms. // // The powerpc port was also originally written by Richard Pennington, // and later supplemented and integrated by John Spencer. It is licensed // under the standard MIT terms. 
// // All other files which have no copyright comments are original works // produced specifically for use as part of this library, written either // by Rich Felker, the main author of the library, or by one or more // contibutors listed above. Details on authorship of individual files // can be found in the git version control history of the project. The // omission of copyright and license comments in each file is in the // interest of source tree size. // // In addition, permission is hereby granted for all public header files // (include/* and arch/* /bits/* ) and crt files intended to be linked into // applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit // the copyright notice and permission notice otherwise required by the // license, and to use these files without any requirement of // attribution. These files include substantial contributions from: // // Bobby Bingham // John Spencer // Nicholas J. Kain // Rich Felker // Richard Pennington // Stefan Kristiansson // Szabolcs Nagy // // all of whom have explicitly granted such permission. // // This file previously contained text expressing a belief that most of // the files covered by the above exception were sufficiently trivial not // to be subject to copyright, resulting in confusion over whether it // negated the permissions granted in the license. In the spirit of // permissive licensing, and of not having licensing issues being an // obstacle to adoption, that text has been removed. 
.text .attribute 4, 16 .attribute 5, "rv32im" .file "musl_memset.c" .globl memset .p2align 2 .type memset,@function memset: beqz a2, .LBB0_9memset sb a1, 0(a0) add a3, a2, a0 li a4, 3 sb a1, -1(a3) bltu a2, a4, .LBB0_9memset sb a1, 1(a0) sb a1, 2(a0) sb a1, -2(a3) li a4, 7 sb a1, -3(a3) bltu a2, a4, .LBB0_9memset sb a1, 3(a0) li a5, 9 sb a1, -4(a3) bltu a2, a5, .LBB0_9memset neg a3, a0 andi a4, a3, 3 add a3, a0, a4 sub a2, a2, a4 andi a2, a2, -4 andi a1, a1, 255 lui a4, 4112 addi a4, a4, 257 mul a1, a1, a4 sw a1, 0(a3) add a4, a3, a2 sw a1, -4(a4) bltu a2, a5, .LBB0_9memset sw a1, 4(a3) sw a1, 8(a3) sw a1, -12(a4) li a5, 25 sw a1, -8(a4) bltu a2, a5, .LBB0_9memset sw a1, 12(a3) sw a1, 16(a3) sw a1, 20(a3) sw a1, 24(a3) sw a1, -28(a4) sw a1, -24(a4) sw a1, -20(a4) andi a5, a3, 4 ori a5, a5, 24 sub a2, a2, a5 li a6, 32 sw a1, -16(a4) bltu a2, a6, .LBB0_9memset add a3, a3, a5 li a4, 31 .LBB0_8memset: sw a1, 0(a3) sw a1, 4(a3) sw a1, 8(a3) sw a1, 12(a3) sw a1, 16(a3) sw a1, 20(a3) sw a1, 24(a3) sw a1, 28(a3) addi a2, a2, -32 addi a3, a3, 32 bltu a4, a2, .LBB0_8memset .LBB0_9memset: ret .Lfunc_end0memset: .size memset, .Lfunc_end0memset-memset .ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157" .section ".note.GNU-stack","",@progbits .addrsig
kayleegeorge/sp1-fork
11,854
zkvm/entrypoint/src/memcpy.s
// This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30: // // src/string/memcpy.c // // This was compiled into assembly with: // // clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops // // and labels manually updated to not conflict. // // musl as a whole is licensed under the following standard MIT license: // // ---------------------------------------------------------------------- // Copyright © 2005-2020 Rich Felker, et al. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // ---------------------------------------------------------------------- // // Authors/contributors include: // // A. Wilcox // Ada Worcester // Alex Dowad // Alex Suykov // Alexander Monakov // Andre McCurdy // Andrew Kelley // Anthony G. 
Basile // Aric Belsito // Arvid Picciani // Bartosz Brachaczek // Benjamin Peterson // Bobby Bingham // Boris Brezillon // Brent Cook // Chris Spiegel // Clément Vasseur // Daniel Micay // Daniel Sabogal // Daurnimator // David Carlier // David Edelsohn // Denys Vlasenko // Dmitry Ivanov // Dmitry V. Levin // Drew DeVault // Emil Renner Berthing // Fangrui Song // Felix Fietkau // Felix Janda // Gianluca Anzolin // Hauke Mehrtens // He X // Hiltjo Posthuma // Isaac Dunham // Jaydeep Patil // Jens Gustedt // Jeremy Huntwork // Jo-Philipp Wich // Joakim Sindholt // John Spencer // Julien Ramseier // Justin Cormack // Kaarle Ritvanen // Khem Raj // Kylie McClain // Leah Neukirchen // Luca Barbato // Luka Perkov // M Farkas-Dyck (Strake) // Mahesh Bodapati // Markus Wichmann // Masanori Ogino // Michael Clark // Michael Forney // Mikhail Kremnyov // Natanael Copa // Nicholas J. Kain // orc // Pascal Cuoq // Patrick Oppenlander // Petr Hosek // Petr Skocik // Pierre Carrier // Reini Urban // Rich Felker // Richard Pennington // Ryan Fairfax // Samuel Holland // Segev Finer // Shiz // sin // Solar Designer // Stefan Kristiansson // Stefan O'Rear // Szabolcs Nagy // Timo Teräs // Trutz Behn // Valentin Ochs // Will Dietz // William Haddon // William Pitcock // // Portions of this software are derived from third-party works licensed // under terms compatible with the above MIT license: // // The TRE regular expression implementation (src/regex/reg* and // src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed // under a 2-clause BSD license (license text in the source files). The // included version has been heavily modified by Rich Felker in 2012, in // the interests of size, simplicity, and namespace cleanliness. // // Much of the math library code (src/math/* and src/complex/*) is // Copyright © 1993,2004 Sun Microsystems or // Copyright © 2003-2011 David Schultz or // Copyright © 2003-2009 Steven G. Kargl or // Copyright © 2003-2009 Bruce D. 
Evans or // Copyright © 2008 Stephen L. Moshier or // Copyright © 2017-2018 Arm Limited // and labelled as such in comments in the individual source files. All // have been licensed under extremely permissive terms. // // The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008 // The Android Open Source Project and is licensed under a two-clause BSD // license. It was taken from Bionic libc, used on Android. // // The AArch64 memcpy and memset code (src/string/aarch64/*) are // Copyright © 1999-2019, Arm Limited. // // The implementation of DES for crypt (src/crypt/crypt_des.c) is // Copyright © 1994 David Burren. It is licensed under a BSD license. // // The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was // originally written by Solar Designer and placed into the public // domain. The code also comes with a fallback permissive license for use // in jurisdictions that may not recognize the public domain. // // The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011 // Valentin Ochs and is licensed under an MIT-style license. // // The x86_64 port was written by Nicholas J. Kain and is licensed under // the standard MIT terms. // // The mips and microblaze ports were originally written by Richard // Pennington for use in the ellcc project. The original code was adapted // by Rich Felker for build system and code conventions during upstream // integration. It is licensed under the standard MIT terms. // // The mips64 port was contributed by Imagination Technologies and is // licensed under the standard MIT terms. // // The powerpc port was also originally written by Richard Pennington, // and later supplemented and integrated by John Spencer. It is licensed // under the standard MIT terms. 
// // All other files which have no copyright comments are original works // produced specifically for use as part of this library, written either // by Rich Felker, the main author of the library, or by one or more // contibutors listed above. Details on authorship of individual files // can be found in the git version control history of the project. The // omission of copyright and license comments in each file is in the // interest of source tree size. // // In addition, permission is hereby granted for all public header files // (include/* and arch/* /bits/* ) and crt files intended to be linked into // applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit // the copyright notice and permission notice otherwise required by the // license, and to use these files without any requirement of // attribution. These files include substantial contributions from: // // Bobby Bingham // John Spencer // Nicholas J. Kain // Rich Felker // Richard Pennington // Stefan Kristiansson // Szabolcs Nagy // // all of whom have explicitly granted such permission. // // This file previously contained text expressing a belief that most of // the files covered by the above exception were sufficiently trivial not // to be subject to copyright, resulting in confusion over whether it // negated the permissions granted in the license. In the spirit of // permissive licensing, and of not having licensing issues being an // obstacle to adoption, that text has been removed. 
.text .attribute 4, 16 .attribute 5, "rv32im" .file "musl_memcpy.c" .globl memcpy .p2align 2 .type memcpy,@function memcpy: andi a3, a1, 3 seqz a3, a3 seqz a4, a2 or a3, a3, a4 bnez a3, .LBBmemcpy0_11 addi a5, a1, 1 mv a6, a0 .LBBmemcpy0_2: lb a7, 0(a1) addi a4, a1, 1 addi a3, a6, 1 sb a7, 0(a6) addi a2, a2, -1 andi a1, a5, 3 snez a1, a1 snez a6, a2 and a7, a1, a6 addi a5, a5, 1 mv a1, a4 mv a6, a3 bnez a7, .LBBmemcpy0_2 andi a1, a3, 3 beqz a1, .LBBmemcpy0_12 .LBBmemcpy0_4: li a5, 32 bltu a2, a5, .LBBmemcpy0_26 li a5, 3 beq a1, a5, .LBBmemcpy0_19 li a5, 2 beq a1, a5, .LBBmemcpy0_22 li a5, 1 bne a1, a5, .LBBmemcpy0_26 lw a5, 0(a4) sb a5, 0(a3) srli a1, a5, 8 sb a1, 1(a3) srli a6, a5, 16 addi a1, a3, 3 sb a6, 2(a3) addi a2, a2, -3 addi a3, a4, 16 li a4, 16 .LBBmemcpy0_9: lw a6, -12(a3) srli a5, a5, 24 slli a7, a6, 8 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 24 slli a6, t0, 8 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 24 slli t0, a7, 8 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 24 slli a7, a5, 8 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_9 addi a4, a3, -13 j .LBBmemcpy0_25 .LBBmemcpy0_11: mv a3, a0 mv a4, a1 andi a1, a3, 3 bnez a1, .LBBmemcpy0_4 .LBBmemcpy0_12: li a1, 16 bltu a2, a1, .LBBmemcpy0_15 li a1, 15 .LBBmemcpy0_14: lw a5, 0(a4) lw a6, 4(a4) lw a7, 8(a4) lw t0, 12(a4) sw a5, 0(a3) sw a6, 4(a3) sw a7, 8(a3) sw t0, 12(a3) addi a4, a4, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a1, a2, .LBBmemcpy0_14 .LBBmemcpy0_15: andi a1, a2, 8 beqz a1, .LBBmemcpy0_17 lw a1, 0(a4) lw a5, 4(a4) sw a1, 0(a3) sw a5, 4(a3) addi a3, a3, 8 addi a4, a4, 8 .LBBmemcpy0_17: andi a1, a2, 4 beqz a1, .LBBmemcpy0_30 lw a1, 0(a4) sw a1, 0(a3) addi a3, a3, 4 addi a4, a4, 4 j .LBBmemcpy0_30 .LBBmemcpy0_19: lw a5, 0(a4) addi a1, a3, 1 sb a5, 0(a3) addi a2, a2, -1 addi a3, a4, 16 li a4, 18 .LBBmemcpy0_20: lw a6, -12(a3) srli a5, a5, 8 slli a7, a6, 24 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 8 
slli a6, t0, 24 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 8 slli t0, a7, 24 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 8 slli a7, a5, 24 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_20 addi a4, a3, -15 j .LBBmemcpy0_25 .LBBmemcpy0_22: lw a5, 0(a4) sb a5, 0(a3) srli a6, a5, 8 addi a1, a3, 2 sb a6, 1(a3) addi a2, a2, -2 addi a3, a4, 16 li a4, 17 .LBBmemcpy0_23: lw a6, -12(a3) srli a5, a5, 16 slli a7, a6, 16 lw t0, -8(a3) or a5, a7, a5 sw a5, 0(a1) srli a5, a6, 16 slli a6, t0, 16 lw a7, -4(a3) or a5, a6, a5 sw a5, 4(a1) srli a6, t0, 16 slli t0, a7, 16 lw a5, 0(a3) or a6, t0, a6 sw a6, 8(a1) srli a6, a7, 16 slli a7, a5, 16 or a6, a7, a6 sw a6, 12(a1) addi a1, a1, 16 addi a2, a2, -16 addi a3, a3, 16 bltu a4, a2, .LBBmemcpy0_23 addi a4, a3, -14 .LBBmemcpy0_25: mv a3, a1 .LBBmemcpy0_26: andi a1, a2, 16 bnez a1, .LBBmemcpy0_35 andi a1, a2, 8 bnez a1, .LBBmemcpy0_36 .LBBmemcpy0_28: andi a1, a2, 4 beqz a1, .LBBmemcpy0_30 .LBBmemcpy0_29: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) addi a4, a4, 4 addi a5, a3, 4 sb a1, 3(a3) mv a3, a5 .LBBmemcpy0_30: andi a1, a2, 2 bnez a1, .LBBmemcpy0_33 andi a1, a2, 1 bnez a1, .LBBmemcpy0_34 .LBBmemcpy0_32: ret .LBBmemcpy0_33: lb a1, 0(a4) lb a5, 1(a4) sb a1, 0(a3) addi a4, a4, 2 addi a1, a3, 2 sb a5, 1(a3) mv a3, a1 andi a1, a2, 1 beqz a1, .LBBmemcpy0_32 .LBBmemcpy0_34: lb a1, 0(a4) sb a1, 0(a3) ret .LBBmemcpy0_35: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) lb a5, 4(a4) lb a6, 5(a4) sb a1, 3(a3) lb a1, 6(a4) sb a5, 4(a3) sb a6, 5(a3) lb a5, 7(a4) sb a1, 6(a3) lb a1, 8(a4) lb a6, 9(a4) sb a5, 7(a3) lb a5, 10(a4) sb a1, 8(a3) sb a6, 9(a3) lb a1, 11(a4) sb a5, 10(a3) lb a5, 12(a4) lb a6, 13(a4) sb a1, 11(a3) lb a1, 14(a4) sb a5, 12(a3) sb a6, 13(a3) lb a5, 15(a4) sb a1, 14(a3) addi a4, a4, 16 addi a1, a3, 16 sb a5, 15(a3) mv a3, a1 andi a1, a2, 8 beqz a1, .LBBmemcpy0_28 
.LBBmemcpy0_36: lb a1, 0(a4) lb a5, 1(a4) lb a6, 2(a4) sb a1, 0(a3) sb a5, 1(a3) lb a1, 3(a4) sb a6, 2(a3) lb a5, 4(a4) lb a6, 5(a4) sb a1, 3(a3) lb a1, 6(a4) sb a5, 4(a3) sb a6, 5(a3) lb a5, 7(a4) sb a1, 6(a3) addi a4, a4, 8 addi a1, a3, 8 sb a5, 7(a3) mv a3, a1 andi a1, a2, 4 bnez a1, .LBBmemcpy0_29 j .LBBmemcpy0_30 .Lfuncmemcpy_end0: .size memcpy, .Lfuncmemcpy_end0-memcpy .ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157" .section ".note.GNU-stack","",@progbits .addrsig
kemkemG0/green_thread_rs
1,038
asm/context.S
#ifdef __APPLE__ // In case of Mac, you need the underscore as the prefix of function name #define SET_CONTEXT _set_context #define SWITCH_CONTEXT _switch_context #else #define SET_CONTEXT set_context #define SWITCH_CONTEXT switch_context #endif .global SET_CONTEXT .global SWITCH_CONTEXT SET_CONTEXT: // save callee-saved register stp d8, d9, [x0] stp d10, d11, [x0, #16] stp d12, d13, [x0, #16 * 2] stp d14, d15, [x0, #16 * 3] stp x19, x20, [x0, #16 * 4] stp x21, x22, [x0, #16 * 5] stp x23, x24, [x0, #16 * 6] stp x25, x26, [x0, #16 * 7] stp x27, x28, [x0, #16 * 8] mov x1, sp stp x30, x1, [x0, #16 * 9] mov x0, 0 ret SWITCH_CONTEXT: // restore callee-saved registers ldp d8, d9, [x0] ldp d10, d11, [x0, #16] ldp d12, d13, [x0, #16 * 2] ldp d14, d15, [x0, #16 * 3] ldp x19, x20, [x0, #16 * 4] ldp x21, x22, [x0, #16 * 5] ldp x23, x24, [x0, #16 * 6] ldp x25, x26, [x0, #16 * 7] ldp x27, x28, [x0, #16 * 8] ldp x30, x2, [x0, #16 * 9] mov sp, x2 mov x0, 1 ret
Kensaa/Turing-Complete-RISC-V
292
src/init.S
.section .text .globl _start _start: .option push .option norelax la gp, __global_pointer$ .option pop la sp, _stack_end # init stack pointer add s0, sp, zero # initialize frame pointer to sp call _rust_start hang: j hang # loop forever
Kensaa/Turing-Complete-RISC-V
1,222
tests/rem.S
.section .text .global _start _start: ### ------- REM (signed remainder) --------- li x5, 10 li x6, 3 rem x7, x5, x6 # x7 = 1 li x20, 1 bne x7, x20, error li x5, -10 li x6, 3 rem x7, x5, x6 # x7 = -1 li x20, -1 bne x7, x20, error li x5, 10 li x6, -3 rem x7, x5, x6 # x7 = 1 li x20, 1 bne x7, x20, error li x5, -10 li x6, -3 rem x7, x5, x6 # x7 = -1 li x20, -1 bne x7, x20, error li x5, 1 li x6, 0 rem x7, x5, x6 # rem by 0 → returns dividend (rs1) li x20, 1 bne x7, x20, error li x5, -2147483648 li x6, -1 rem x7, x5, x6 # remainder = 0 li x20, 0 bne x7, x20, error ### ------- REMU (unsigned remainder) --------- li x5, 10 li x6, 3 remu x7, x5, x6 # x7 = 1 li x20, 1 bne x7, x20, error li x5, -10 # 0xFFFFFFF6 li x6, 3 remu x7, x5, x6 # 4294967286 % 3 = 0 li x20, 0 bne x7, x20, error li x5, 1 li x6, 0 remu x7, x5, x6 # rem by 0 → rs1 li x20, 1 bne x7, x20, error ### ------- All tests passed --------- halt: j halt ### ------- Error handler --------- error: ebreak
Kensaa/Turing-Complete-RISC-V
2,277
tests/mult.S
.section .text .global _start _start: ### ------- MUL (signed × signed → low 32) --------- li x5, -8 # x5 li x6, 8 # x6 mul x7, x5, x6 # x7 = -64 li x20, -64 bne x7, x20, error li x5, -2147483648 # x5 INT_MIN li x6, -1 # x6 mul x8, x5, x6 # x8 = INT_MIN (overflow wraps around) li x20, -2147483648 bne x8, x20, error li x5, 2147483647 # x5 INT_MAX li x6, 2 # x6 mul x9, x5, x6 # x9 = 4294967294 → -2 (wrapped) li x20, -2 bne x9, x20, error li x5, 0 # x5 li x6, 123456 # x6 mul x10, x5, x6 # x10 = 0 li x20, 0 bne x10, x20, error ### ------- MULH (signed × signed → high 32) --------- li x5, -8 # x5 li x6, 8 # x6 mulh x11, x5, x6 # x11 = -1 (high bits of -64) li x20, -1 bne x11, x20, error li x5, 2147483647 # x5 INT_MAX li x6, 2147483647 # x6 INT_MAX mulh x12, x5, x6 # x12 = 0x3FFFFFFF li x20, 0x3FFFFFFF bne x12, x20, error li x5, -2147483648 # x5 INT_MIN li x6, 2 # x6 mulh x13, x5, x6 # x13 = -1 li x20, -1 bne x13, x20, error ### ------- MULHSU (signed × unsigned → high 32) --------- li x5, -8 # x5 li x6, 8 # x6 mulhsu x14, x5, x6 # x14 = -1 li x20, -1 bne x14, x20, error li x5, -1 # x5 li x6, 0xFFFFFFFF # x6 max unsigned mulhsu x15, x5, x6 # x15 = 0xFFFFFFFF li x20, 0xFFFFFFFF bne x15, x20, error li x5, 0 # x5 li x6, 12345 # x6 mulhsu x16, x5, x6 # x16 = 0 li x20, 0 bne x16, x20, error ### ------- MULHU (unsigned × unsigned → high 32) --------- li x5, 0xFFFFFFFF # x5 max unsigned li x6, 0xFFFFFFFF # x6 mulhu x17, x5, x6 # x17 = 0xFFFFFFFE li x20, 0xFFFFFFFE bne x17, x20, error li x5, 1 # x5 li x6, 0xFFFFFFFF # x6 mulhu x18, x5, x6 # x18 = 0x200000000 li x20, 0 bne x18, x20, error li x5, 123456 # x5 li x6, 0 # x6 mulhu x19, x5, x6 # x19 = 0 li x20, 0 bne x19, x20, error # Halt (success) halt: j halt # Error handler: trigger ebreak error: ebreak
Kensaa/Turing-Complete-RISC-V
1,219
tests/div.S
.section .text .global _start _start: ### ------- DIV (signed) --------- li x5, 10 li x6, 2 div x7, x5, x6 # x7 = 5 li x20, 5 bne x7, x20, error li x5, -10 li x6, 2 div x7, x5, x6 # x7 = -5 li x20, -5 bne x7, x20, error li x5, 10 li x6, -2 div x7, x5, x6 # x7 = -5 li x20, -5 bne x7, x20, error li x5, -10 li x6, -2 div x7, x5, x6 # x7 = 5 li x20, 5 bne x7, x20, error li x5, 1 li x6, 0 div x7, x5, x6 # div by 0 → -1 li x20, -1 bne x7, x20, error li x5, -2147483648 # INT_MIN li x6, -1 div x7, x5, x6 # overflow → INT_MIN li x20, -2147483648 bne x7, x20, error ### ------- DIVU (unsigned) --------- li x5, 10 li x6, 2 divu x7, x5, x6 # x7 = 5 li x20, 5 bne x7, x20, error li x5, -10 # 0xFFFFFFF6 li x6, 2 divu x7, x5, x6 # large unsigned value li x20, 0x7FFFFFFB # 4294967286 / 2 = 2147483643 bne x7, x20, error li x5, 1 li x6, 0 divu x7, x5, x6 # div by 0 → 0xFFFFFFFF li x20, -1 bne x7, x20, error halt: j halt ### ------- Error handler --------- error: ebreak
kern-crates/axhal_split
1,741
linker.lds.S
OUTPUT_ARCH(%ARCH%) BASE_ADDRESS = %KERNEL_BASE%; ENTRY(_start) SECTIONS { . = BASE_ADDRESS; _skernel = .; .text : ALIGN(4K) { _stext = .; *(.text.boot) *(.text .text.*) . = ALIGN(4K); _etext = .; } .rodata : ALIGN(4K) { _srodata = .; *(.rodata .rodata.*) *(.srodata .srodata.*) *(.sdata2 .sdata2.*) . = ALIGN(4K); _erodata = .; } .data : ALIGN(4K) { _sdata = .; *(.data.boot_page_table) . = ALIGN(4K); *(.data .data.*) *(.sdata .sdata.*) *(.got .got.*) } .tdata : ALIGN(0x10) { _stdata = .; *(.tdata .tdata.*) _etdata = .; } .tbss : ALIGN(0x10) { _stbss = .; *(.tbss .tbss.*) *(.tcommon) _etbss = .; } . = ALIGN(4K); _percpu_start = .; _percpu_end = _percpu_start + SIZEOF(.percpu); .percpu 0x0 : AT(_percpu_start) { _percpu_load_start = .; *(.percpu .percpu.*) _percpu_load_end = .; . = _percpu_load_start + ALIGN(64) * %SMP%; } . = _percpu_end; . = ALIGN(4K); _edata = .; .bss : ALIGN(4K) { boot_stack = .; *(.bss.stack) . = ALIGN(4K); boot_stack_top = .; _sbss = .; *(.bss .bss.*) *(.sbss .sbss.*) *(COMMON) . = ALIGN(4K); _ebss = .; } _ekernel = .; /DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) } } SECTIONS { linkme_IRQ : { *(linkme_IRQ) } linkm2_IRQ : { *(linkm2_IRQ) } linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) } linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) } } INSERT AFTER .tbss;
kern-crates/axhal_split
1,672
src/arch/riscv/trap.S
.macro SAVE_REGS, from_user addi sp, sp, -{trapframe_size} PUSH_GENERAL_REGS csrr t0, sepc csrr t1, sstatus csrrw t2, sscratch, zero // save sscratch (sp) and zero it STR t0, sp, 31 // tf.sepc STR t1, sp, 32 // tf.sstatus STR t2, sp, 1 // tf.regs.sp .if \from_user == 1 LDR t0, sp, 3 // load supervisor tp STR gp, sp, 2 // save user gp and tp STR tp, sp, 3 mv tp, t0 .endif .endm .macro RESTORE_REGS, from_user .if \from_user == 1 LDR gp, sp, 2 // load user gp and tp LDR t0, sp, 3 STR tp, sp, 3 // save supervisor tp mv tp, t0 addi t0, sp, {trapframe_size} // put supervisor sp to scratch csrw sscratch, t0 .endif LDR t0, sp, 31 LDR t1, sp, 32 csrw sepc, t0 csrw sstatus, t1 POP_GENERAL_REGS LDR sp, sp, 1 // load sp from tf.regs.sp .endm .section .text .balign 4 .global trap_vector_base trap_vector_base: // sscratch == 0: trap from S mode // sscratch != 0: trap from U mode csrrw sp, sscratch, sp // switch sscratch and sp bnez sp, .Ltrap_entry_u csrr sp, sscratch // put supervisor sp back j .Ltrap_entry_s .Ltrap_entry_s: SAVE_REGS 0 mv a0, sp li a1, 0 call riscv_trap_handler RESTORE_REGS 0 sret .Ltrap_entry_u: SAVE_REGS 1 mv a0, sp li a1, 1 call riscv_trap_handler RESTORE_REGS 1 sret
kern-crates/axhal_split
2,415
src/arch/aarch64/trap.S
.macro SAVE_REGS sub sp, sp, 34 * 8 stp x0, x1, [sp] stp x2, x3, [sp, 2 * 8] stp x4, x5, [sp, 4 * 8] stp x6, x7, [sp, 6 * 8] stp x8, x9, [sp, 8 * 8] stp x10, x11, [sp, 10 * 8] stp x12, x13, [sp, 12 * 8] stp x14, x15, [sp, 14 * 8] stp x16, x17, [sp, 16 * 8] stp x18, x19, [sp, 18 * 8] stp x20, x21, [sp, 20 * 8] stp x22, x23, [sp, 22 * 8] stp x24, x25, [sp, 24 * 8] stp x26, x27, [sp, 26 * 8] stp x28, x29, [sp, 28 * 8] mrs x9, sp_el0 mrs x10, elr_el1 mrs x11, spsr_el1 stp x30, x9, [sp, 30 * 8] stp x10, x11, [sp, 32 * 8] .endm .macro RESTORE_REGS ldp x10, x11, [sp, 32 * 8] ldp x30, x9, [sp, 30 * 8] msr sp_el0, x9 msr elr_el1, x10 msr spsr_el1, x11 ldp x28, x29, [sp, 28 * 8] ldp x26, x27, [sp, 26 * 8] ldp x24, x25, [sp, 24 * 8] ldp x22, x23, [sp, 22 * 8] ldp x20, x21, [sp, 20 * 8] ldp x18, x19, [sp, 18 * 8] ldp x16, x17, [sp, 16 * 8] ldp x14, x15, [sp, 14 * 8] ldp x12, x13, [sp, 12 * 8] ldp x10, x11, [sp, 10 * 8] ldp x8, x9, [sp, 8 * 8] ldp x6, x7, [sp, 6 * 8] ldp x4, x5, [sp, 4 * 8] ldp x2, x3, [sp, 2 * 8] ldp x0, x1, [sp] add sp, sp, 34 * 8 .endm .macro INVALID_EXCP, kind, source .p2align 7 SAVE_REGS mov x0, sp mov x1, \kind mov x2, \source bl invalid_exception b .Lexception_return .endm .macro HANDLE_SYNC .p2align 7 SAVE_REGS mov x0, sp bl handle_sync_exception b .Lexception_return .endm .macro HANDLE_IRQ .p2align 7 SAVE_REGS mov x0, sp bl handle_irq_exception b .Lexception_return .endm .section .text .p2align 11 .global exception_vector_base exception_vector_base: // current EL, with SP_EL0 INVALID_EXCP 0 0 INVALID_EXCP 1 0 INVALID_EXCP 2 0 INVALID_EXCP 3 0 // current EL, with SP_ELx HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 1 INVALID_EXCP 3 1 // lower EL, aarch64 HANDLE_SYNC HANDLE_IRQ INVALID_EXCP 2 2 INVALID_EXCP 3 2 // lower EL, aarch32 INVALID_EXCP 0 3 INVALID_EXCP 1 3 INVALID_EXCP 2 3 INVALID_EXCP 3 3 .Lexception_return: RESTORE_REGS eret
kern-crates/axhal_split
1,505
src/arch/x86_64/trap.S
.equ NUM_INT, 256 .altmacro .macro DEF_HANDLER, i .Ltrap_handler_\i: .if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17 # error code pushed by CPU push \i # interrupt vector jmp .Ltrap_common .else push 0 # fill in error code in TrapFrame push \i # interrupt vector jmp .Ltrap_common .endif .endm .macro DEF_TABLE_ENTRY, i .quad .Ltrap_handler_\i .endm .section .text .code64 _trap_handlers: .set i, 0 .rept NUM_INT DEF_HANDLER %i .set i, i + 1 .endr .Ltrap_common: test byte ptr [rsp + 3 * 8], 3 # swap GS if it comes from user space jz 1f swapgs 1: push r15 push r14 push r13 push r12 push r11 push r10 push r9 push r8 push rdi push rsi push rbp push rbx push rdx push rcx push rax mov rdi, rsp call x86_trap_handler pop rax pop rcx pop rdx pop rbx pop rbp pop rsi pop rdi pop r8 pop r9 pop r10 pop r11 pop r12 pop r13 pop r14 pop r15 test byte ptr [rsp + 3 * 8], 3 # swap GS back if return to user space jz 2f swapgs 2: add rsp, 16 # pop vector, error_code iretq .section .rodata .global trap_handler_table trap_handler_table: .set i, 0 .rept NUM_INT DEF_TABLE_ENTRY %i .set i, i + 1 .endr
kern-crates/axhal_split
1,965
src/platform/x86_pc/ap_start.S
# Boot application processors into the protected mode. # Each non-boot CPU ("AP") is started up in response to a STARTUP # IPI from the boot CPU. Section B.4.2 of the Multi-Processor # Specification says that the AP will start in real mode with CS:IP # set to XY00:0000, where XY is an 8-bit value sent with the # STARTUP. Thus this code must start at a 4096-byte boundary. # # Because this code sets DS to zero, it must sit # at an address in the low 2^16 bytes. .equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} .equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} .equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} .equ stack_ptr, {start_page_paddr} + 0xff0 .equ entry_ptr, {start_page_paddr} + 0xff8 # 0x6000 .section .text .code16 .p2align 12 .global ap_start ap_start: cli wbinvd xor ax, ax mov ds, ax mov es, ax mov ss, ax mov fs, ax mov gs, ax # load the 64-bit GDT lgdt [pa_ap_gdt_desc] # switch to protected-mode mov eax, cr0 or eax, (1 << 0) mov cr0, eax # far jump to 32-bit code. 0x8 is code32 segment selector ljmp 0x8, offset pa_ap_start32 .code32 ap_start32: mov esp, [stack_ptr] mov eax, [entry_ptr] jmp eax .balign 8 # .type multiboot_header, STT_OBJECT .Lap_tmp_gdt_desc: .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit .long pa_ap_gdt # base .balign 16 .Lap_tmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Lap_tmp_gdt_end: # 0x7000 .p2align 12 .global ap_end ap_end:
kern-crates/axhal_split
4,307
src/platform/x86_pc/multiboot.S
# Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html .section .text.boot .code32 .global _start _start: mov edi, eax # arg1: magic: 0x2BADB002 mov esi, ebx # arg2: multiboot info jmp bsp_entry32 .balign 4 .type multiboot_header, STT_OBJECT multiboot_header: .int {mb_hdr_magic} # magic: 0x1BADB002 .int {mb_hdr_flags} # flags .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum .int multiboot_header - {offset} # header_addr .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr .int _start - {offset} # entry_addr # Common code in 32-bit, prepare states to enter 64-bit. .macro ENTRY32_COMMON # set data segment selectors mov ax, 0x18 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax # set PAE, PGE bit in CR4 mov eax, {cr4} mov cr4, eax # load the temporary page table lea eax, [.Ltmp_pml4 - {offset}] mov cr3, eax # set LME, NXE bit in IA32_EFER mov ecx, {efer_msr} mov edx, 0 mov eax, {efer} wrmsr # set protected mode, write protect, paging bit in CR0 mov eax, {cr0} mov cr0, eax .endm # Common code in 64-bit .macro ENTRY64_COMMON # clear segment selectors xor ax, ax mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax .endm .code32 bsp_entry32: lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT ENTRY32_COMMON ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment .code32 .global ap_entry32 ap_entry32: ENTRY32_COMMON ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment .code64 bsp_entry64: ENTRY64_COMMON # set RSP to boot stack movabs rsp, offset {boot_stack} add rsp, {boot_stack_size} # call rust_entry(magic, mbi) movabs rax, offset {entry} call rax jmp .Lhlt .code64 ap_entry64: ENTRY64_COMMON # set RSP to high address (already set in ap_start.S) mov rax, {offset} add rsp, rax # call rust_entry_secondary(magic) mov rdi, {mb_magic} movabs rax, offset {entry_secondary} call rax jmp .Lhlt .Lhlt: hlt jmp 
.Lhlt .section .rodata .balign 8 .Ltmp_gdt_desc: .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit .long .Ltmp_gdt - {offset} # base .section .data .balign 16 .Ltmp_gdt: .quad 0x0000000000000000 # 0x00: null .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) .Ltmp_gdt_end: .balign 4096 .Ltmp_pml4: # 0x0000_0000 ~ 0xffff_ffff .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) .zero 8 * 510 # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) # FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) .Ltmp_pdpt_low: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508 .Ltmp_pdpt_high: .quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0) .quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000) .quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000) .quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000) .zero 8 * 508
kern-crates/zCore
140
src/platform/aarch64/space.s
.section .data .align 12 sdata: .space 0x8000 // 32K .section .bss.stack .align 12 boot_stack: .space 0x8000 // 32K boot_stack_top:
Kevin1212918/koe-os
1,936
src/interrupt/handler.S
.extern exception_handler .macro EXN_ENTRY int_vec isr_\int_vec: sub rsp, 0x8 push \int_vec jmp _do_exception_handler .section .data .quad isr_\int_vec .section .text .endm .macro EXN_ERRNO_ENTRY int_vec isr_\int_vec: push \int_vec jmp _do_exception_handler .section .data .quad isr_\int_vec .section .text .endm .macro ISR_PADDING cnt .section .data .skip 8 * \cnt .section .text .endm _do_exception_handler: push rax push rdi push rsi push rdx push rcx push r8 push r9 push r10 push r11 sub rsp, 0x8 mov rdi, [rsp+0x50] lea rsi, [rsp+0x58] call exception_handler add rsp, 0x8 pop r11 pop r10 pop r9 pop r8 pop rcx pop rdx pop rsi pop rdi pop rax add rsp, 0x10 iretq .macro IRQ_ENTRY int_vec isr_\int_vec: sub rsp, 0x8 push \int_vec jmp _do_irq_handler .section .data .quad isr_\int_vec .section .text .endm _do_irq_handler: push rax push rdi push rsi push rdx push rcx push r8 push r9 push r10 push r11 sub rsp, 0x8 mov rdi, [rsp+0x50] lea rsi, [rsp+0x58] call irq_handler add rsp, 0x8 pop r11 pop r10 pop r9 pop r8 pop rcx pop rdx pop rsi pop rdi pop rax add rsp, 0x10 iretq // Initializing a table holding all isr entrys .globl ISR_TABLE .section .data .align 8 ISR_TABLE: .section .text EXN_ENTRY 0 EXN_ENTRY 1 EXN_ENTRY 2 EXN_ENTRY 3 EXN_ENTRY 4 EXN_ENTRY 5 EXN_ENTRY 6 EXN_ENTRY 7 EXN_ERRNO_ENTRY 8 EXN_ENTRY 9 EXN_ERRNO_ENTRY 10 EXN_ERRNO_ENTRY 11 EXN_ERRNO_ENTRY 12 EXN_ERRNO_ENTRY 13 EXN_ERRNO_ENTRY 14 ISR_PADDING 1 // exn 15 is reserved EXN_ENTRY 16 EXN_ERRNO_ENTRY 17 EXN_ENTRY 18 EXN_ENTRY 19 EXN_ENTRY 20 EXN_ERRNO_ENTRY 21 ISR_PADDING 10 // padding from 22..32 IRQ_ENTRY 32 IRQ_ENTRY 33 IRQ_ENTRY 34 IRQ_ENTRY 35 IRQ_ENTRY 36 IRQ_ENTRY 37 IRQ_ENTRY 38 IRQ_ENTRY 39 IRQ_ENTRY 40 IRQ_ENTRY 41 IRQ_ENTRY 42 IRQ_ENTRY 43 IRQ_ENTRY 44 IRQ_ENTRY 45 IRQ_ENTRY 46 IRQ_ENTRY 47
Kevin1212918/koe-os
4,414
src/boot/boot.S
.code32 .set _KERNEL_OFFSET_VMA, 0xFFFFFFFF80000000 .section .bootstrap.data, "a", @progbits .align 64 gdt: .quad 0 .set gdt_code, . - gdt .quad (1<<43) | (1<<44) | (1<<47) | (1<<53) /* Code Segment */ gdt_ptr: .word . - gdt - 1 .quad gdt .section .bootstrap.bss, "aw", @nobits .align 4096 pg_ml4_table: .skip 4096 pg_dir_ptr_table: .skip 4096 pg_dir_table: .skip 4096 .section .bss .align 4096 stack_bottom: .skip 16384 # 16 KiB stack_top: .section .bootstrap.text, "ax" .global _start _start: /* Check if loaded by multiboot2 compliant bootloader */ cmp eax, 0x36d76289 jne _start_no_multiboot2_err /* Load stack address into registers */ .set stack_top_lma, stack_top - _KERNEL_OFFSET_VMA lea esp, stack_top_lma _check_cpuid: /* Check if CPUID is supported by attempting to flip the ID bit (bit 21) in the FLAGS register. If we can flip it, CPUID is available. Copy FLAGS in to EAX via stack */ pushfd pop eax /* Copy to ECX as well for comparing later on */ mov ecx, eax /* Flip the ID bit */ xor eax, 1 << 21 /* Copy EAX to FLAGS via the stack */ push eax popfd /* Copy FLAGS back to EAX (with the flipped bit if CPUID is supported) */ pushfd pop eax /* Restore FLAGS from the old version stored in ECX (i.e. flipping the ID bit back if it was ever flipped).*/ push ecx popfd /* Compare EAX and ECX. If they are equal then that means the bit wasn't flipped, and CPUID isn't supported. 
*/ xor eax, ecx jz _start_no_cpuid_err _check_long_mode: /* preserve ebx*/ push ebx /* test if extended processor info in available */ mov eax, 0x80000000 /* implicit argument for cpuid */ cpuid /* get highest supported argument */ cmp eax, 0x80000001 /* it needs to be at least 0x80000001 */ jb _start_no_long_mode_err /* extended info to test if long mode is available */ mov eax, 0x80000001 /* for extended processor info */ cpuid /* various feature bits in ecx and edx */ test edx, 1 << 29 /* if the LM-bit is set in the D-register */ jz _start_no_long_mode_err /* it's not set, there is no long mode */ pop ebx _enable_paging: /* Set first entry of pml4 */ lea eax, pg_dir_ptr_table or eax, 0b11 mov [pg_ml4_table], eax /* Set last entry of pml4 */ lea eax, pg_dir_ptr_table or eax, 0b11 mov [offset pg_ml4_table + 511 * 8], eax /* Set first entry of page directory ptr table*/ lea eax, pg_dir_table or eax, 0b11 mov [pg_dir_ptr_table], eax /* Set 511th entry of page directory ptr table*/ lea eax, pg_dir_table or eax, 0b11 mov [offset pg_dir_ptr_table + 510 * 8], eax /* Set first entry of page directory table */ mov eax, 0 or eax, 0b10000011 mov [pg_dir_table], eax /* Set second entry of page directory table */ mov eax, 0x200000 or eax, 0b10000011 mov [pg_dir_table + 8], eax /* Set third entry of page directory table */ mov eax, 0x400000 or eax, 0b10000011 mov [pg_dir_table + 16], eax /* Set fourth entry of page directory table */ mov eax, 0x600000 or eax, 0b10000011 mov [pg_dir_table + 24], eax /* Set up CR3 */ lea eax, pg_ml4_table mov cr3, eax /* Set up PAE */ mov eax, cr4 or eax, 0b100000 mov cr4, eax /* Enable long mode */ mov ecx, 0xC0000080 rdmsr or eax, 1 << 8 wrmsr /* Enable paging */ mov eax, cr0 or eax, 1 << 31 mov cr0, eax /* Set up GDT */ lgdt [gdt_ptr] /* Call rust kernel entry point */ ljmp offset gdt_code, offset _start_long_mode _start_no_long_mode_err: mov al, '2' jmp _start_err _start_no_cpuid_err: mov al, '1' jmp _start_err _start_no_multiboot2_err: mov 
al, '0' jmp _start_err _start_err: mov dword ptr [0xb8000], 0x4f524f45 mov dword ptr [0xb8004], 0x4f3a4f52 mov dword ptr [0xb8008], 0x4f204f20 mov byte ptr [0xb800a], al hlt .code64 .extern kmain _start_long_mode: lea rsp, stack_top mov ax, 0 mov ss, ax mov ds, ax mov es, ax mov fs, ax mov gs, ax /* Move Multiboot2 boot information pointer to function arg 1 */ mov edi, ebx jmp kmain
kevincal1226/483-lecture-code
84
neonate/bad.neonate.s
section .text global start_here start_here: mov rax, hahahhaha ret
kevincal1226/483-lecture-code
79
neonate/2025.neonate.s
section .text global start_here start_here: mov rax, 2025 ret
kevincal1226/483-lecture-code
78
neonate/483.s
section .text global start_here start_here: mov rax, 483 ret
kevincal1226/483-lecture-code
77
neonate/483.neonate.s
section .text global start_here start_here: mov rax, 483 ret
kevincal1226/483-lecture-code
410
neonate/assembly_code.s
;;; We are defining "code" section .text ;;; global means we are exporting the symbol start_here, making it available for whatever program we link with global start_here ;;; This is a label, which gives a name to the memory location start_here: ;;; move the value 10 into the register rax. ;;; this is dictated by the *calling convention* mov rax, 483 ;;; Returns to the calling function. ret
kevincal1226/483-lecture-code
229
adder/function/examples/ex1.add1.s
;; Here's the parsed abstract syntax tree: Add1(Number(5)) ;; The result of the interpreter with input 483 is 6 ;; Result of the compiler: section .text global start_here start_here: mov rax, 5 add rax, 1 ret
kevincal1226/483-lecture-code
229
adder/function/examples/ex0.add1.s
;; Here's the parsed abstract syntax tree: Number(483) ;; The result of the interpreter with input 483 is 483 ;; Result of the compiler: section .text global start_here start_here: mov rax, 483 add rax, 0 ret
kevincal1226/483-lecture-code
250
adder/function/examples/ex2.add1.s
;; Here's the parsed abstract syntax tree: Add1(Sub1(Add1(Sub1(Number(17))))) ;; The result of the interpreter with input 483 is 17 ;; Result of the compiler: section .text global start_here start_here: mov rax, 17 add rax, 0 ret
kevincal1226/483-lecture-code
250
adder/function/examples/ex4.add1.s
;; Here's the parsed abstract syntax tree: Add1(Sub1(Add1(Add1(Variable)))) ;; The result of the interpreter with input 483 is 485 ;; Result of the compiler: section .text global start_here start_here: mov rax, rdi add rax, 2 ret
kevincal1226/483-lecture-code
634
scope_checker/examples/ex8.adder.s
;; Here's the parsed abstract syntax tree: Program { parameter: "x", body: Let("a", Number(10), Let("c", Let("b", Add1(Variable("a")), Let("d", Add1(Variable("b")), Add1(Variable("b")))), Add1(Variable("c")))) } ;; scope checking succeeded ;; The result of the interpreter with input 483 is 13 ;; Result of the compiler: section .text global start_here start_here: mov [rsp - 8*1], rdi mov rax, 10 mov [rsp - 8*2], rax mov rax, [rsp - 8*2] add rax, 1 mov [rsp - 8*3], rax mov rax, [rsp - 8*3] add rax, 1 mov [rsp - 8*4], rax mov rax, [rsp - 8*3] add rax, 1 mov [rsp - 8*3], rax mov rax, [rsp - 8*3] add rax, 1 ret
kevincal1226/483-lecture-code
267
scope_checker/examples/ex0.add1.s
;; Here's the parsed abstract syntax tree: Program { parameter: "x", body: Number(483) } ;; scope checking succeeded ;; The result of the interpreter with input 483 is 483 ;; Result of the compiler: section .text global start_here start_here: ret
kevincal1226/483-lecture-code
496
scope_checker/examples/ex7.adder.s
;; Here's the parsed abstract syntax tree: Program { parameter: "x", body: Sub1(Let("z", Sub1(Let("a", Sub1(Variable("x")), Variable("a"))), Variable("z"))) } ;; scope checking succeeded ;; The result of the interpreter with input 483 is 480 ;; Result of the compiler: section .text global start_here start_here: mov [rsp - 8*1], rdi mov rax, [rsp - 8*1] sub rax, 1 mov [rsp - 8*2], rax mov rax, [rsp - 8*2] sub rax, 1 mov [rsp - 8*2], rax mov rax, [rsp - 8*2] sub rax, 1 ret
kevincal1226/483-lecture-code
513
scope_checker/examples/ex6.adder.s
;; Here's the parsed abstract syntax tree: Program { parameter: "x", body: Let("y", Let("z", Sub1(Sub1(Variable("x"))), Add1(Variable("z"))), Sub1(Variable("y"))) } ;; scope checking succeeded ;; The result of the interpreter with input 483 is 481 ;; Result of the compiler: section .text global start_here start_here: mov [rsp - 8*1], rdi mov rax, [rsp - 8*1] sub rax, 1 sub rax, 1 mov [rsp - 8*2], rax mov rax, [rsp - 8*2] add rax, 1 mov [rsp - 8*2], rax mov rax, [rsp - 8*2] sub rax, 1 ret
kevincal1226/483-lecture-code
462
scope_checker/examples/ex5.adder.s
;; Here's the parsed abstract syntax tree: Program { parameter: "x", body: Let("y", Let("x", Add1(Variable("x")), Variable("x")), Variable("x")) } ;; scope checking succeeded ;; The result of the interpreter with input 483 is 483 ;; Result of the compiler: section .text global start_here start_here: mov [rsp - 8*1], rdi mov rax, [rsp - 8*1] add rax, 1 mov [rsp - 8*2], rax mov rax, [rsp - 8*2] mov [rsp - 8*2], rax mov rax, [rsp - 8*1] ret
Kevmaninc/openpilot
23,255
body/board/startup_stm32f413xx.s
/** ****************************************************************************** * @file startup_stm32f413xx.s * @author MCD Application Team * @brief STM32F413xx Devices vector table for GCC based toolchains. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M4 processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2017 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ .syntax unified .cpu cortex-m4 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss /* stack used for SystemInit_ExtMemCtl; always internal RAM used */ /** * @brief This is the code that gets called when the processor first * starts execution following a reset event. Only the absolutely * necessary set is performed, after which the application * supplied main() routine is called. 
* @param None * @retval : None */ .section .text.Reset_Handler .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ bl __initialize_hardware_early /* Copy the data segment initializers from flash to SRAM */ movs r1, #0 b LoopCopyDataInit CopyDataInit: ldr r3, =_sidata ldr r3, [r3, r1] str r3, [r0, r1] adds r1, r1, #4 LoopCopyDataInit: ldr r0, =_sdata ldr r3, =_edata adds r2, r0, r1 cmp r2, r3 bcc CopyDataInit ldr r2, =_sbss b LoopFillZerobss /* Zero fill the bss segment. */ FillZerobss: movs r3, #0 str r3, [r2], #4 LoopFillZerobss: ldr r3, = _ebss cmp r2, r3 bcc FillZerobss /* Call the clock system intitialization function.*/ /* bl SystemInit */ /* Call static constructors */ /* bl __libc_init_array */ /* Call the application's entry point.*/ bl main bx lr .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * @param None * @retval None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M3. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* *******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack .word Reset_Handler .word NMI_Handler .word HardFault_Handler .word MemManage_Handler .word BusFault_Handler .word UsageFault_Handler .word 0 .word 0 .word 0 .word 0 .word SVC_Handler .word DebugMon_Handler .word 0 .word PendSV_Handler .word SysTick_Handler /* External Interrupts */ .word WWDG_IRQHandler /* Window WatchDog */ .word PVD_IRQHandler /* PVD through EXTI Line detection */ .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ .word FLASH_IRQHandler /* FLASH */ .word RCC_IRQHandler /* RCC */ .word EXTI0_IRQHandler /* EXTI Line0 */ .word EXTI1_IRQHandler /* EXTI Line1 */ .word EXTI2_IRQHandler /* EXTI Line2 */ .word EXTI3_IRQHandler /* EXTI Line3 */ .word EXTI4_IRQHandler /* EXTI Line4 */ .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ .word CAN1_TX_IRQHandler /* CAN1 TX */ .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ .word CAN1_SCE_IRQHandler /* CAN1 SCE */ .word EXTI9_5_IRQHandler /* External Line[9:5]s */ .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ .word TIM2_IRQHandler /* TIM2 */ .word TIM3_IRQHandler /* TIM3 */ .word TIM4_IRQHandler /* TIM4 */ .word I2C1_EV_IRQHandler /* I2C1 
Event */ .word I2C1_ER_IRQHandler /* I2C1 Error */ .word I2C2_EV_IRQHandler /* I2C2 Event */ .word I2C2_ER_IRQHandler /* I2C2 Error */ .word SPI1_IRQHandler /* SPI1 */ .word SPI2_IRQHandler /* SPI2 */ .word USART1_IRQHandler /* USART1 */ .word USART2_IRQHandler /* USART2 */ .word USART3_IRQHandler /* USART3 */ .word EXTI15_10_IRQHandler /* External Line[15:10]s */ .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ .word FSMC_IRQHandler /* FSMC */ .word SDIO_IRQHandler /* SDIO */ .word TIM5_IRQHandler /* TIM5 */ .word SPI3_IRQHandler /* SPI3 */ .word UART4_IRQHandler /* UART4 */ .word UART5_IRQHandler /* UART5 */ .word TIM6_DAC_IRQHandler /* TIM6, DAC1 and DAC2 */ .word TIM7_IRQHandler /* TIM7 */ .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ .word DFSDM1_FLT0_IRQHandler /* DFSDM1 Filter0 */ .word DFSDM1_FLT1_IRQHandler /* DFSDM1 Filter1 */ .word CAN2_TX_IRQHandler /* CAN2 TX */ .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ .word CAN2_SCE_IRQHandler /* CAN2 SCE */ .word OTG_FS_IRQHandler /* USB OTG FS */ .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ .word USART6_IRQHandler /* USART6 */ .word I2C3_EV_IRQHandler /* I2C3 event */ .word I2C3_ER_IRQHandler /* I2C3 error */ .word CAN3_TX_IRQHandler /* CAN3 TX */ .word CAN3_RX0_IRQHandler /* CAN3 RX0 */ 
.word CAN3_RX1_IRQHandler /* CAN3 RX1 */ .word CAN3_SCE_IRQHandler /* CAN3 SCE */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word RNG_IRQHandler /* RNG */ .word FPU_IRQHandler /* FPU */ .word UART7_IRQHandler /* UART7 */ .word UART8_IRQHandler /* UART8 */ .word SPI4_IRQHandler /* SPI4 */ .word SPI5_IRQHandler /* SPI5 */ .word 0 /* Reserved */ .word SAI1_IRQHandler /* SAI1 */ .word UART9_IRQHandler /* UART9 */ .word UART10_IRQHandler /* UART10 */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word QUADSPI_IRQHandler /* QuadSPI */ .word 0 /* Reserved */ .word 0 /* Reserved */ .word FMPI2C1_EV_IRQHandler /* FMPI2C1 Event */ .word FMPI2C1_ER_IRQHandler /* FMPI2C1 Error */ .word LPTIM1_IRQHandler /* LPTIM1 */ .word DFSDM2_FLT0_IRQHandler /* DFSDM2 Filter0 */ .word DFSDM2_FLT1_IRQHandler /* DFSDM2 Filter1 */ .word DFSDM2_FLT2_IRQHandler /* DFSDM2 Filter2 */ .word DFSDM2_FLT3_IRQHandler /* DFSDM2 Filter3 */ /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. 
* *******************************************************************************/

/*******************************************************************************
 * Default weak interrupt handler aliases.
 *
 * Every exception and peripheral IRQ handler below is declared weak and
 * aliased to Default_Handler (defined earlier in this startup file).  A user
 * application overrides any of them simply by defining a function with the
 * same name; unhandled interrupts fall through to Default_Handler.
 *
 * NOTE(review): the peripheral set (CAN3, DFSDM2, UART9/10, FMPI2C1, QUADSPI)
 * matches an STM32F4 device such as the STM32F413/423 — confirm against the
 * device reference manual vector table.
 *
 * The helper macro below emits, for each handler name:
 *     .weak      <name>
 *     .thumb_set <name>,Default_Handler
 *******************************************************************************/

    .macro  def_default_handler name:req
    .weak       \name
    .thumb_set  \name,Default_Handler
    .endm

/* Cortex-M core exceptions */
    def_default_handler NMI_Handler
    def_default_handler HardFault_Handler
    def_default_handler MemManage_Handler
    def_default_handler BusFault_Handler
    def_default_handler UsageFault_Handler
    def_default_handler SVC_Handler
    def_default_handler DebugMon_Handler
    def_default_handler PendSV_Handler
    def_default_handler SysTick_Handler

/* Peripheral interrupts, in vector-table order */
    def_default_handler WWDG_IRQHandler
    def_default_handler PVD_IRQHandler
    def_default_handler TAMP_STAMP_IRQHandler
    def_default_handler RTC_WKUP_IRQHandler
    def_default_handler FLASH_IRQHandler
    def_default_handler RCC_IRQHandler
    def_default_handler EXTI0_IRQHandler
    def_default_handler EXTI1_IRQHandler
    def_default_handler EXTI2_IRQHandler
    def_default_handler EXTI3_IRQHandler
    def_default_handler EXTI4_IRQHandler
    def_default_handler DMA1_Stream0_IRQHandler
    def_default_handler DMA1_Stream1_IRQHandler
    def_default_handler DMA1_Stream2_IRQHandler
    def_default_handler DMA1_Stream3_IRQHandler
    def_default_handler DMA1_Stream4_IRQHandler
    def_default_handler DMA1_Stream5_IRQHandler
    def_default_handler DMA1_Stream6_IRQHandler
    def_default_handler ADC_IRQHandler
    def_default_handler CAN1_TX_IRQHandler
    def_default_handler CAN1_RX0_IRQHandler
    def_default_handler CAN1_RX1_IRQHandler
    def_default_handler CAN1_SCE_IRQHandler
    def_default_handler EXTI9_5_IRQHandler
    def_default_handler TIM1_BRK_TIM9_IRQHandler
    def_default_handler TIM1_UP_TIM10_IRQHandler
    def_default_handler TIM1_TRG_COM_TIM11_IRQHandler
    def_default_handler TIM1_CC_IRQHandler
    def_default_handler TIM2_IRQHandler
    def_default_handler TIM3_IRQHandler
    def_default_handler TIM4_IRQHandler
    def_default_handler I2C1_EV_IRQHandler
    def_default_handler I2C1_ER_IRQHandler
    def_default_handler I2C2_EV_IRQHandler
    def_default_handler I2C2_ER_IRQHandler
    def_default_handler SPI1_IRQHandler
    def_default_handler SPI2_IRQHandler
    def_default_handler USART1_IRQHandler
    def_default_handler USART2_IRQHandler
    def_default_handler USART3_IRQHandler
    def_default_handler EXTI15_10_IRQHandler
    def_default_handler RTC_Alarm_IRQHandler
    def_default_handler OTG_FS_WKUP_IRQHandler
    def_default_handler TIM8_BRK_TIM12_IRQHandler
    def_default_handler TIM8_UP_TIM13_IRQHandler
    def_default_handler TIM8_TRG_COM_TIM14_IRQHandler
    def_default_handler TIM8_CC_IRQHandler
    def_default_handler DMA1_Stream7_IRQHandler
    def_default_handler FSMC_IRQHandler
    def_default_handler SDIO_IRQHandler
    def_default_handler TIM5_IRQHandler
    def_default_handler SPI3_IRQHandler
    def_default_handler UART4_IRQHandler
    def_default_handler UART5_IRQHandler
    def_default_handler TIM6_DAC_IRQHandler
    def_default_handler TIM7_IRQHandler
    def_default_handler DMA2_Stream0_IRQHandler
    def_default_handler DMA2_Stream1_IRQHandler
    def_default_handler DMA2_Stream2_IRQHandler
    def_default_handler DMA2_Stream3_IRQHandler
    def_default_handler DMA2_Stream4_IRQHandler
    def_default_handler DFSDM1_FLT0_IRQHandler
    def_default_handler DFSDM1_FLT1_IRQHandler
    def_default_handler CAN2_TX_IRQHandler
    def_default_handler CAN2_RX0_IRQHandler
    def_default_handler CAN2_RX1_IRQHandler
    def_default_handler CAN2_SCE_IRQHandler
    def_default_handler OTG_FS_IRQHandler
    def_default_handler DMA2_Stream5_IRQHandler
    def_default_handler DMA2_Stream6_IRQHandler
    def_default_handler DMA2_Stream7_IRQHandler
    def_default_handler USART6_IRQHandler
    def_default_handler I2C3_EV_IRQHandler
    def_default_handler I2C3_ER_IRQHandler
    def_default_handler CAN3_TX_IRQHandler
    def_default_handler CAN3_RX0_IRQHandler
    def_default_handler CAN3_RX1_IRQHandler
    def_default_handler CAN3_SCE_IRQHandler
    def_default_handler RNG_IRQHandler
    def_default_handler FPU_IRQHandler
    def_default_handler UART7_IRQHandler
    def_default_handler UART8_IRQHandler
    def_default_handler SPI4_IRQHandler
    def_default_handler SPI5_IRQHandler
    def_default_handler SAI1_IRQHandler
    def_default_handler UART9_IRQHandler
    def_default_handler UART10_IRQHandler
    def_default_handler QUADSPI_IRQHandler
    def_default_handler FMPI2C1_EV_IRQHandler
    def_default_handler FMPI2C1_ER_IRQHandler
    def_default_handler LPTIM1_IRQHandler
    def_default_handler DFSDM2_FLT0_IRQHandler
    def_default_handler DFSDM2_FLT1_IRQHandler
    def_default_handler DFSDM2_FLT2_IRQHandler
    def_default_handler DFSDM2_FLT3_IRQHandler

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
klausz65/rust
11,809
library/std/src/sys/pal/sgx/abi/entry.S
/* x86-64 SGX enclave entry/exit code (AT&T syntax, GAS).                        */
/* This file implements the ENCLU[EENTER] entrypoint, the usercall exit path,    */
/* and small accessors for TCS-local storage addressed through %gs.              */
/* NOTE(review): original source was collapsed onto a few long lines; only line  */
/* layout and comments have been changed here — every instruction/directive is   */
/* identical to the original.                                                    */

/* This symbol is used at runtime to figure out the virtual address that the */
/* enclave is loaded at. */
.section absolute
.global IMAGE_BASE
IMAGE_BASE:

.section ".note.x86_64-fortanix-unknown-sgx", "", @note
    .align 4
    .long 1f - 0f              /* name length (not including padding) */
    .long 3f - 2f              /* desc length (not including padding) */
    .long 1                    /* type = NT_VERSION */
0:  .asciz "toolchain-version" /* name */
1:  .align 4
2:  .long 1                    /* desc - toolchain version number, 32-bit LE */
3:  .align 4

.section .rodata
/* The XSAVE area needs to be a large chunk of readable memory, but since we are */
/* going to restore everything to its initial state (XSTATE_BV=0), only certain */
/* parts need to have a defined value. In particular: */
/* */
/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */
/*   RFBM[2] is set, regardless of the value of XSTATE_BV */
/* * XSAVE header */
.align 64
.Lxsave_clear:
.org .+24
.Lxsave_mxcsr:
    .short 0x1fbf

/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */

/* The following symbols point at read-only data that will be filled in by the */
/* post-linker. */

/* When using this macro, don't forget to adjust the linker version script! */
.macro globvar name:req size:req
    .global \name
    .protected \name
    .align \size
    .size \name , \size
    \name : .org .+\size
.endm
    /* The base address (relative to enclave start) of the heap area */
    globvar HEAP_BASE 8
    /* The heap size in bytes */
    globvar HEAP_SIZE 8
    /* Value of the RELA entry in the dynamic table */
    globvar RELA 8
    /* Value of the RELACOUNT entry in the dynamic table */
    globvar RELACOUNT 8
    /* The enclave size in bytes */
    globvar ENCLAVE_SIZE 8
    /* The base address (relative to enclave start) of the enclave configuration area */
    globvar CFGDATA_BASE 8
    /* Non-zero if debugging is enabled, zero otherwise */
    globvar DEBUG 1
    /* The base address (relative to enclave start) of the enclave text section */
    globvar TEXT_BASE 8
    /* The size in bytes of enclave text section */
    globvar TEXT_SIZE 8
    /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */
    globvar EH_FRM_HDR_OFFSET 8
    /* The size in bytes of enclave .eh_frame_hdr section */
    globvar EH_FRM_HDR_LEN 8
    /* The base address (relative to enclave start) of the enclave .eh_frame section */
    globvar EH_FRM_OFFSET 8
    /* The size in bytes of enclave .eh_frame section */
    globvar EH_FRM_LEN 8

/* XSAVE header lives 512 bytes after the start of the area (legacy region). */
.org .Lxsave_clear+512
.Lxsave_header:
    .int 0, 0 /* XSTATE_BV */
    .int 0, 0 /* XCOMP_BV */
    .org .+48 /* reserved bits */

.data
/* Sticky abort flag: once set, any re-entry into the enclave panics. */
.Laborted:
    .byte 0

/* TCS local storage section */
/* Byte offsets into the %gs-based per-TCS storage block. */
.equ tcsls_tos,                 0x00 /* initialized by loader to *offset* from image base to TOS */
.equ tcsls_flags,               0x08 /* initialized by loader */
.equ tcsls_flag_secondary,      0    /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */
.equ tcsls_flag_init_once,      1    /* initialized by loader to 0 */
/* 14 unused bits */
.equ tcsls_user_fcw,            0x0a
.equ tcsls_user_mxcsr,          0x0c
.equ tcsls_last_rsp,            0x10 /* initialized by loader to 0 */
.equ tcsls_panic_last_rsp,      0x18 /* initialized by loader to 0 */
.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */
.equ tcsls_user_rsp,            0x28
.equ tcsls_user_retip,          0x30
.equ tcsls_user_rbp,            0x38
.equ tcsls_user_r12,            0x40
.equ tcsls_user_r13,            0x48
.equ tcsls_user_r14,            0x50
.equ tcsls_user_r15,            0x58
.equ tcsls_tls_ptr,             0x60
.equ tcsls_tcs_addr,            0x68

/* Load the "secondary TCS" flag as a 0/1 bool into the named register. */
.macro load_tcsls_flag_secondary_bool reg:req comments:vararg
    .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */
        .abort
    .endif
    mov $(1<<tcsls_flag_secondary),%e\reg
    and %gs:tcsls_flags,%\reg
.endm

/* We place the ELF entry point in a separate section so it can be removed by elf2sgxs */
.section .text_no_sgx, "ax"
.Lelf_entry_error_msg:
    .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n"
.Lelf_entry_error_msg_end:

/* Fallback entry point used if the binary is executed directly as an ELF:   */
/* write an explanatory message to stderr in a loop, then exit(1).           */
.global elf_entry
.type elf_entry,function
elf_entry:
/* print error message */
    movq $2,%rdi      /* write to stderr (fd 2) */
    lea .Lelf_entry_error_msg(%rip),%rsi
    movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx
.Lelf_entry_call:
    movq $1,%rax      /* write() syscall */
    syscall
    test %rax,%rax
    jle .Lelf_exit    /* exit on error */
    add %rax,%rsi
    sub %rax,%rdx     /* all chars written? */
    jnz .Lelf_entry_call
.Lelf_exit:
    movq $60,%rax     /* exit() syscall */
    movq $1,%rdi      /* exit code 1 */
    syscall
    ud2               /* should not be reached */
/* end elf_entry */

/* This code needs to be called *after* the enclave stack has been setup. */
/* There are 3 places where this needs to happen, so this is put in a macro. */
.macro entry_sanitize_final
/* Sanitize rflags received from user */
/*   - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/*   - AC flag: AEX on misaligned memory accesses leaks side channel info */
    pushfq
    andq $~0x40400, (%rsp)
    popfq
/* check for abort */
    bt $0,.Laborted(%rip)
    jc .Lreentry_panic
.endm

.text
/* ENCLU[EENTER] target: saves untrusted state, resets CPU state, then either */
/* resumes a pending usercall or (re)enters the Rust entry points.            */
.global sgx_entry
.type sgx_entry,function
sgx_entry:
/* save user registers */
    mov %rcx,%gs:tcsls_user_retip
    mov %rsp,%gs:tcsls_user_rsp
    mov %rbp,%gs:tcsls_user_rbp
    mov %r12,%gs:tcsls_user_r12
    mov %r13,%gs:tcsls_user_r13
    mov %r14,%gs:tcsls_user_r14
    mov %r15,%gs:tcsls_user_r15
    mov %rbx,%gs:tcsls_tcs_addr
    stmxcsr %gs:tcsls_user_mxcsr
    fnstcw %gs:tcsls_user_fcw
/* check for debug buffer pointer */
    testb $0xff,DEBUG(%rip)
    jz .Lskip_debug_init
    mov %r10,%gs:tcsls_debug_panic_buf_ptr
.Lskip_debug_init:
/* reset cpu state */
/* RDX (a parameter from userspace) is parked in R10 across the xrstor,      */
/* because xrstor consumes EDX:EAX as the requested-feature bitmap (set to   */
/* all-ones so every state component is restored to its initial state).      */
    mov %rdx, %r10
    mov $-1, %rax
    mov $-1, %rdx
    xrstor .Lxsave_clear(%rip)
    lfence             /* NOTE(review): presumably a speculation barrier after state reset — confirm against the mitigation it was added for */
    mov %r10, %rdx

/* check if returning from usercall */
    mov %gs:tcsls_last_rsp,%r11
    test %r11,%r11
    jnz .Lusercall_ret
/* setup stack */
    mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */
                           /* here. This is fixed below under "adjust stack". */
/* check for thread init */
    bts $tcsls_flag_init_once,%gs:tcsls_flags
    jc .Lskip_init
/* adjust stack */
/* First entry on this TCS: tcsls_tos holds an *offset*; rebase it onto the  */
/* enclave's actual load address. */
    lea IMAGE_BASE(%rip),%rax
    add %rax,%rsp
    mov %rsp,%gs:tcsls_tos
    entry_sanitize_final
/* call tcs_init */
/* store caller-saved registers in callee-saved registers */
    mov %rdi,%rbx
    mov %rsi,%r12
    mov %rdx,%r13
    mov %r8,%r14
    mov %r9,%r15
    load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */
    call tcs_init
/* reload caller-saved registers */
    mov %rbx,%rdi
    mov %r12,%rsi
    mov %r13,%rdx
    mov %r14,%r8
    mov %r15,%r9
    jmp .Lafter_init
.Lskip_init:
    entry_sanitize_final
.Lafter_init:
/* call into main entry point */
    load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */
    call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */
    mov %rax,%rsi /* RSI = return value */
    /* NOP: mov %rdx,%rdx */ /* RDX = return value */
    xor %rdi,%rdi /* RDI = normal exit */

.Lexit:
/* clear general purpose register state */
    /* RAX overwritten by ENCLU */
    /* RBX set later */
    /* RCX overwritten by ENCLU */
    /* RDX contains return value */
    /* RSP set later */
    /* RBP set later */
    /* RDI contains exit mode */
    /* RSI contains return value */
    xor %r8,%r8
    xor %r9,%r9
    xor %r10,%r10
    xor %r11,%r11
    /* R12 ~ R15 set by sgx_exit */
.Lsgx_exit:
/* clear extended register state */
    mov %rdx, %rcx /* save RDX */
    mov $-1, %rax
    mov %rax, %rdx
    xrstor .Lxsave_clear(%rip)
    mov %rcx, %rdx /* restore RDX */
/* clear flags */
    pushq $0
    popfq
/* restore user registers */
    mov %gs:tcsls_user_r12,%r12
    mov %gs:tcsls_user_r13,%r13
    mov %gs:tcsls_user_r14,%r14
    mov %gs:tcsls_user_r15,%r15
    mov %gs:tcsls_user_retip,%rbx
    mov %gs:tcsls_user_rsp,%rsp
    mov %gs:tcsls_user_rbp,%rbp
    fldcw %gs:tcsls_user_fcw
    ldmxcsr %gs:tcsls_user_mxcsr
/* exit enclave */
    mov $0x4,%eax /* EEXIT */
    enclu
/* end sgx_entry */

.Lreentry_panic:
    orq $8,%rsp    /* NOTE(review): appears to force a known stack misalignment/marker before aborting — confirm intent with abort_reentry's contract */
    jmp abort_reentry

/* This *MUST* be called with 6 parameters, otherwise register information */
/* might leak! */
.global usercall
usercall:
    test %rcx,%rcx            /* check `abort` function argument */
    jnz .Lusercall_abort      /* abort is set, jump to abort code (unlikely forward conditional) */
    jmp .Lusercall_save_state /* non-aborting usercall */
.Lusercall_abort:
/* set aborted bit */
    movb $1,.Laborted(%rip)
/* save registers in DEBUG mode, so that debugger can reconstruct the stack */
    testb $0xff,DEBUG(%rip)
    jz .Lusercall_noreturn
.Lusercall_save_state:
/* save callee-saved state */
    push %r15
    push %r14
    push %r13
    push %r12
    push %rbp
    push %rbx
    sub $8, %rsp
    fstcw 4(%rsp)
    stmxcsr (%rsp)
    movq %rsp,%gs:tcsls_last_rsp
.Lusercall_noreturn:
/* clear general purpose register state */
    /* RAX overwritten by ENCLU */
    /* RBX set by sgx_exit */
    /* RCX overwritten by ENCLU */
    /* RDX contains parameter */
    /* RSP set by sgx_exit */
    /* RBP set by sgx_exit */
    /* RDI contains parameter */
    /* RSI contains parameter */
    /* R8 contains parameter */
    /* R9 contains parameter */
    xor %r10,%r10
    xor %r11,%r11
    /* R12 ~ R15 set by sgx_exit */
    /* extended registers/flags cleared by sgx_exit */
/* exit */
    jmp .Lsgx_exit
.Lusercall_ret:
    movq $0,%gs:tcsls_last_rsp
/* restore callee-saved state, cf. "save" above */
    mov %r11,%rsp
/* MCDT mitigation requires an lfence after ldmxcsr _before_ any of the affected */
/* vector instructions is used. We omit the lfence here as one is required before */
/* the jmp instruction anyway. */
    ldmxcsr (%rsp)
    fldcw 4(%rsp)
    add $8, %rsp
    entry_sanitize_final
    pop %rbx
    pop %rbp
    pop %r12
    pop %r13
    pop %r14
    pop %r15
/* return */
    mov %rsi,%rax /* RAX = return value */
    /* NOP: mov %rdx,%rdx */ /* RDX = return value */
    pop %r11
    lfence
    jmp *%r11

/* The following functions need to be defined externally:
```
// Called by entry code on re-entry after exit
extern "C" fn abort_reentry() -> !;

// Called once when a TCS is first entered
extern "C" fn tcs_init(secondary: bool);

// Standard TCS entrypoint
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64);
```
*/

/* The four accessors below all return via `pop; lfence; jmp *%r11` rather    */
/* than `ret` — NOTE(review): presumably a speculative-execution mitigation;  */
/* confirm before changing the return sequence.                               */

.global get_tcs_addr
get_tcs_addr:
    mov %gs:tcsls_tcs_addr,%rax
    pop %r11
    lfence
    jmp *%r11

.global get_tls_ptr
get_tls_ptr:
    mov %gs:tcsls_tls_ptr,%rax
    pop %r11
    lfence
    jmp *%r11

.global set_tls_ptr
set_tls_ptr:
    mov %rdi,%gs:tcsls_tls_ptr
    pop %r11
    lfence
    jmp *%r11

.global take_debug_panic_buf_ptr
take_debug_panic_buf_ptr:
    xor %rax,%rax
    xchg %gs:tcsls_debug_panic_buf_ptr,%rax
    pop %r11
    lfence
    jmp *%r11
klausz65/rust
79
tests/ui/asm/named-asm-labels.s
// NOTE(review): compiler-test fixture (tests/ui/asm/named-asm-labels.s) —
// the exact labels, comments, and `;` statement separator below appear
// intentional; do not "clean up" without checking the test's expectations.
lab1: nop // do more things
lab2: nop // does bar
// a: b
lab3: nop; lab4: nop