Dataset columns: repo_id (string, length 5-115) · size (int64, 590-5.01M) · file_path (string, length 4-212) · content (string, length 590-5.01M)
liva/minimal-linux · 2,662 · arch/x86/lib/getuser.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * __get_user functions. * * (C) Copyright 1998 Linus Torvalds * (C) Copyright 2005 Andi Kleen * (C) Copyright 2008 Glauber Costa * * These functions have a non-standard call interface * to make them more efficient, especially as they * return an error value in addition to the "real" * return value. */ /* * __get_user_X * * Inputs: %[r|e]ax contains the address. * * Outputs: %[r|e]ax is error code (0 or -EFAULT) * %[r|e]dx contains zero-extended value * %ecx contains the high half for 32-bit __get_user_8 * * * These functions should not modify any other registers, * as they get called from within inline assembly. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> .text ENTRY(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 1: movzbl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret ENDPROC(__get_user_1) EXPORT_SYMBOL(__get_user_1) ENTRY(__get_user_2) add $1,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 2: movzwl -1(%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret ENDPROC(__get_user_2) EXPORT_SYMBOL(__get_user_2) ENTRY(__get_user_4) add $3,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 3: movl -3(%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret ENDPROC(__get_user_4) EXPORT_SYMBOL(__get_user_4) ENTRY(__get_user_8) #ifdef CONFIG_X86_64 add $7,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user ASM_STAC 4: movq -7(%_ASM_AX),%rdx xor %eax,%eax ASM_CLAC ret #else add $7,%_ASM_AX jc bad_get_user_8 mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 ASM_STAC 4: movl -7(%_ASM_AX),%edx 5: movl -3(%_ASM_AX),%ecx xor %eax,%eax ASM_CLAC ret #endif ENDPROC(__get_user_8) EXPORT_SYMBOL(__get_user_8) bad_get_user: xor %edx,%edx mov $(-EFAULT),%_ASM_AX ASM_CLAC ret END(bad_get_user) #ifdef CONFIG_X86_32 bad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX ASM_CLAC ret END(bad_get_user_8) #endif _ASM_EXTABLE(1b,bad_get_user) _ASM_EXTABLE(2b,bad_get_user) _ASM_EXTABLE(3b,bad_get_user) #ifdef CONFIG_X86_64 _ASM_EXTABLE(4b,bad_get_user) #else _ASM_EXTABLE(4b,bad_get_user_8) _ASM_EXTABLE(5b,bad_get_user_8) #endif
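The comment block in getuser.S describes a non-standard calling convention: the user address arrives in %rax, the error code comes back in %rax, and the zero-extended value in %rdx. Before dereferencing, each multi-byte variant adds (size - 1) to the address and bails out to bad_get_user on carry or when the last byte reaches the task's addr_limit. Below is a minimal user-space C sketch of that bounds check only; it is illustrative, not kernel code, and the addr_limit value in main() is a made-up placeholder for the real per-task limit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch, not kernel code: the range check done by
 * __get_user_{1,2,4,8} before touching user memory. */
bool get_user_range_ok(uint64_t addr, uint64_t size, uint64_t addr_limit)
{
	uint64_t last = addr + (size - 1);

	if (last < addr)		/* carry, i.e. the address wrapped: "jc bad_get_user" */
		return false;
	return last < addr_limit;	/* "cmp TASK_addr_limit(...), %_ASM_AX; jae bad_get_user" */
}

int main(void)
{
	const uint64_t limit = 0x7ffffffff000ULL;	/* assumed placeholder for addr_limit */

	printf("%d\n", get_user_range_ok(0x1000, 4, limit));	/* 1: in range */
	printf("%d\n", get_user_range_ok(limit - 2, 8, limit));	/* 0: runs past the limit */
	return 0;
}

The _ASM_EXTABLE entries handle the separate case where the address passes this check but is not actually mapped: the resulting fault is fixed up by jumping to bad_get_user.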
liva/minimal-linux · 10,392 · arch/x86/lib/checksum_32.S
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Pentium Pro/II routines: * Alexander Kjeldaas <astor@guardian.no> * Finn Arne Gangstad <finnag@guardian.no> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception * handling. * Andi Kleen, add zeroing on error * converted to pure assembler * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/asm.h> #include <asm/export.h> #include <asm/nospec-branch.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) */ .text #ifndef CONFIG_X86_USE_PPRO_CHECKSUM /* * Experiments with Ethernet and SLIP connections show that buff * is aligned on either a 2-byte or 4-byte boundary. We get at * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ ENTRY(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: unsigned char *buff testl $3, %esi # Check alignment. jz 2f # Jump if alignment is ok. testl $1, %esi # Check alignment. jz 10f # Jump if alignment is boundary of 2 bytes. # buf is odd dec %ecx jl 8f movzbl (%esi), %ebx adcl %ebx, %eax roll $8, %eax inc %esi testl $2, %esi jz 2f 10: subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. 
jmp 4f 1: movw (%esi), %bx addl $2, %esi addw %bx, %ax adcl $0, %eax 2: movl %ecx, %edx shrl $5, %ecx jz 2f testl %esi, %esi 1: movl (%esi), %ebx adcl %ebx, %eax movl 4(%esi), %ebx adcl %ebx, %eax movl 8(%esi), %ebx adcl %ebx, %eax movl 12(%esi), %ebx adcl %ebx, %eax movl 16(%esi), %ebx adcl %ebx, %eax movl 20(%esi), %ebx adcl %ebx, %eax movl 24(%esi), %ebx adcl %ebx, %eax movl 28(%esi), %ebx adcl %ebx, %eax lea 32(%esi), %esi dec %ecx jne 1b adcl $0, %eax 2: movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF 3: adcl (%esi), %eax lea 4(%esi), %esi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f movw (%esi),%cx leal 2(%esi),%esi je 6f shll $16,%ecx 5: movb (%esi),%cl 6: addl %ecx,%eax adcl $0, %eax 7: testb $1, 12(%esp) jz 8f roll $8, %eax 8: popl %ebx popl %esi ret ENDPROC(csum_partial) #else /* Version for PentiumII/PPro */ ENTRY(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: const unsigned char *buf testl $3, %esi jnz 25f 10: movl %ecx, %edx movl %ecx, %ebx andl $0x7c, %ebx shrl $7, %ecx addl %ebx,%esi shrl $2, %ebx negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi JMP_NOSPEC %ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax lea 2(%esi), %esi adcl $0, %eax jmp 10b 25: testl $1, %esi jz 30f # buf is odd dec %ecx jl 90f movzbl (%esi), %ebx addl %ebx, %eax adcl $0, %eax roll $8, %eax inc %esi testl $2, %esi jz 10b 30: subl $2, %ecx ja 20b je 32f addl $2, %ecx jz 80f movzbl (%esi),%ebx # csumming 1 byte, 2-aligned addl %ebx, %eax adcl $0, %eax jmp 80f 32: addw (%esi), %ax # csumming 2 bytes, 2-aligned adcl $0, %eax jmp 80f 40: addl -128(%esi), %eax adcl -124(%esi), %eax adcl -120(%esi), %eax adcl -116(%esi), %eax adcl -112(%esi), %eax adcl -108(%esi), %eax adcl -104(%esi), %eax adcl -100(%esi), %eax adcl -96(%esi), %eax adcl -92(%esi), %eax adcl -88(%esi), %eax adcl -84(%esi), %eax adcl -80(%esi), %eax adcl -76(%esi), %eax adcl -72(%esi), %eax adcl -68(%esi), %eax adcl -64(%esi), %eax adcl -60(%esi), %eax adcl -56(%esi), %eax adcl -52(%esi), %eax adcl -48(%esi), %eax adcl -44(%esi), %eax adcl -40(%esi), %eax adcl -36(%esi), %eax adcl -32(%esi), %eax adcl -28(%esi), %eax adcl -24(%esi), %eax adcl -20(%esi), %eax adcl -16(%esi), %eax adcl -12(%esi), %eax adcl -8(%esi), %eax adcl -4(%esi), %eax 45: lea 128(%esi), %esi adcl $0, %eax dec %ecx jge 40b movl %edx, %ecx 50: andl $3, %ecx jz 80f # Handle the last 1-3 bytes without jumping notl %ecx # 1->2, 2->1, 3->0, higher bits are masked movl $0xffffff,%ebx # by the shll and shrl instructions shll $3,%ecx shrl %cl,%ebx andl -128(%esi),%ebx # esi is 4-aligned so should be ok addl %ebx,%eax adcl $0,%eax 80: testb $1, 12(%esp) jz 90f roll $8, %eax 90: popl %ebx popl %esi ret ENDPROC(csum_partial) #endif EXPORT_SYMBOL(csum_partial) /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr) */ /* * Copy from ds while checksumming, otherwise like csum_partial * * The macros SRC and DST specify the type of access for the instruction. * thus we can call a custom exception handler for all access types. * * FIXME: could someone double-check whether I haven't mixed up some SRC and * DST definitions? It's damn hard to trigger all cases. I hope I got * them all but there's no guarantee. */ #define SRC(y...) \ 9999: y; \ _ASM_EXTABLE(9999b, 6001f) #define DST(y...) 
\ 9999: y; \ _ASM_EXTABLE(9999b, 6002f) #ifndef CONFIG_X86_USE_PPRO_CHECKSUM #define ARGBASE 16 #define FP 12 ENTRY(csum_partial_copy_generic) subl $4,%esp pushl %edi pushl %esi pushl %ebx movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+4(%esp),%esi # src movl ARGBASE+8(%esp),%edi # dst testl $2, %edi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f SRC(1: movw (%esi), %bx ) addl $2, %esi DST( movw %bx, (%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax 2: movl %ecx, FP(%esp) shrl $5, %ecx jz 2f testl %esi, %esi SRC(1: movl (%esi), %ebx ) SRC( movl 4(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, (%edi) ) adcl %edx, %eax DST( movl %edx, 4(%edi) ) SRC( movl 8(%esi), %ebx ) SRC( movl 12(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, 8(%edi) ) adcl %edx, %eax DST( movl %edx, 12(%edi) ) SRC( movl 16(%esi), %ebx ) SRC( movl 20(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, 16(%edi) ) adcl %edx, %eax DST( movl %edx, 20(%edi) ) SRC( movl 24(%esi), %ebx ) SRC( movl 28(%esi), %edx ) adcl %ebx, %eax DST( movl %ebx, 24(%edi) ) adcl %edx, %eax DST( movl %edx, 28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi dec %ecx jne 1b adcl $0, %eax 2: movl FP(%esp), %edx movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF SRC(3: movl (%esi), %ebx ) adcl %ebx, %eax DST( movl %ebx, (%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f SRC( movw (%esi), %cx ) leal 2(%esi), %esi DST( movw %cx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx SRC(5: movb (%esi), %cl ) DST( movb %cl, (%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: 5000: # Exception handler: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr movl $-EFAULT, (%ebx) # zero the complete destination - computing the rest # is too much work movl ARGBASE+8(%esp), %edi # dst movl ARGBASE+12(%esp), %ecx # len xorl %eax,%eax rep ; stosb jmp 5000b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr movl $-EFAULT,(%ebx) jmp 5000b .previous popl %ebx popl %esi popl %edi popl %ecx # equivalent to addl $4,%esp ret ENDPROC(csum_partial_copy_generic) #else /* Version for PentiumII/PPro */ #define ROUND1(x) \ SRC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ DST(movl %ebx, x(%edi) ) ; #define ROUND(x) \ SRC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ DST(movl %ebx, x(%edi) ) ; #define ARGBASE 12 ENTRY(csum_partial_copy_generic) pushl %ebx pushl %edi pushl %esi movl ARGBASE+4(%esp),%esi #src movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+12(%esp),%ecx #len movl ARGBASE+16(%esp),%eax #sum # movl %ecx, %edx movl %ecx, %ebx movl %esi, %edx shrl $6, %ecx andl $0x3c, %ebx negl %ebx subl %ebx, %esi subl %ebx, %edi lea -1(%esi),%edx andl $-32,%edx lea 3f(%ebx,%ebx), %ebx testl %esi, %esi JMP_NOSPEC %ebx 1: addl $64,%esi addl $64,%edi SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4) 3: adcl $0,%eax addl $64, %edx dec %ecx jge 1b 4: movl ARGBASE+12(%esp),%edx #len andl $3, %edx jz 7f cmpl $2, %edx jb 5f SRC( movw (%esi), %dx ) leal 2(%esi), %esi DST( movw %dx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: SRC( movb (%esi), %dl ) DST( movb %dl, (%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # 
src_err_ptr movl $-EFAULT, (%ebx) # zero the complete destination (computing the rest is too much work) movl ARGBASE+8(%esp),%edi # dst movl ARGBASE+12(%esp),%ecx # len xorl %eax,%eax rep; stosb jmp 7b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr movl $-EFAULT, (%ebx) jmp 7b .previous popl %esi popl %edi popl %ebx ret ENDPROC(csum_partial_copy_generic) #undef ROUND #undef ROUND1 #endif EXPORT_SYMBOL(csum_partial_copy_generic)
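csum_partial and csum_partial_copy_generic both compute the 32-bit one's-complement running sum that callers later fold into the 16-bit Internet checksum; most of the assembly above is alignment handling and loop unrolling. A plain C sketch of what is being computed follows (illustrative only; the odd-address byte rotation and the copy-with-checksum variant are left out).

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch, not the kernel routine: 32-bit one's-complement
 * sum over a buffer, with end-around carry, as in RFC 1071. */
uint32_t csum_partial_sketch(const uint8_t *buf, size_t len, uint32_t sum)
{
	uint64_t acc = sum;

	while (len >= 2) {		/* add 16-bit words, little-endian load order */
		acc += (uint32_t)buf[0] | ((uint32_t)buf[1] << 8);
		buf += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		acc += buf[0];

	while (acc >> 32)		/* fold carries back in, like the "adcl $0, %eax" steps */
		acc = (acc & 0xffffffffu) + (acc >> 32);
	return (uint32_t)acc;
}

/* Callers later fold the 32-bit sum down to the final 16-bit checksum: */
uint16_t csum_fold_sketch(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}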
liva/minimal-linux · 2,036 · arch/x86/lib/putuser.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * __put_user functions. * * (C) Copyright 2005 Linus Torvalds * (C) Copyright 2005 Andi Kleen * (C) Copyright 2008 Glauber Costa * * These functions have a non-standard call interface * to make them more efficient, especially as they * return an error value in addition to the "real" * return value. */ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/errno.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> /* * __put_user_X * * Inputs: %eax[:%edx] contains the data * %ecx contains the address * * Outputs: %eax is error code (0 or -EFAULT) * * These functions should not modify any other registers, * as they get called from within inline assembly. */ #define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX #define EXIT ASM_CLAC ; \ ret .text ENTRY(__put_user_1) ENTER cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user ASM_STAC 1: movb %al,(%_ASM_CX) xor %eax,%eax EXIT ENDPROC(__put_user_1) EXPORT_SYMBOL(__put_user_1) ENTRY(__put_user_2) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user ASM_STAC 2: movw %ax,(%_ASM_CX) xor %eax,%eax EXIT ENDPROC(__put_user_2) EXPORT_SYMBOL(__put_user_2) ENTRY(__put_user_4) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user ASM_STAC 3: movl %eax,(%_ASM_CX) xor %eax,%eax EXIT ENDPROC(__put_user_4) EXPORT_SYMBOL(__put_user_4) ENTRY(__put_user_8) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user ASM_STAC 4: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 5: movl %edx,4(%_ASM_CX) #endif xor %eax,%eax EXIT ENDPROC(__put_user_8) EXPORT_SYMBOL(__put_user_8) bad_put_user: movl $-EFAULT,%eax EXIT END(bad_put_user) _ASM_EXTABLE(1b,bad_put_user) _ASM_EXTABLE(2b,bad_put_user) _ASM_EXTABLE(3b,bad_put_user) _ASM_EXTABLE(4b,bad_put_user) #ifdef CONFIG_X86_32 _ASM_EXTABLE(5b,bad_put_user) #endif
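putuser.S mirrors getuser.S but checks the range the other way around: instead of advancing the address and watching for carry, __put_user_{2,4,8} lower addr_limit by (size - 1) and compare the start address against that. A one-function C sketch of the check, assuming (as the kernel can) that addr_limit is far larger than 8:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch, not kernel code: the bounds check in __put_user_{1,2,4,8}.
 * __put_user_1 compares the address directly; the larger variants first lower
 * the limit by size - 1:
 *   "mov TASK_addr_limit(...), %_ASM_BX; sub $(size-1), %_ASM_BX;
 *    cmp %_ASM_BX, %_ASM_CX; jae bad_put_user" */
bool put_user_range_ok(uint64_t addr, uint64_t size, uint64_t addr_limit)
{
	return addr < addr_limit - (size - 1);
}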
liva/minimal-linux · 2,553 · arch/x86/lib/atomic64_386_32.S
/* * atomic64_t for 386/486 * * Copyright © 2010 Luca Barbieri * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> #include <asm/alternative-asm.h> /* if you want SMP support, implement these with real spinlocks */ .macro LOCK reg pushfl cli .endm .macro UNLOCK reg popfl .endm #define BEGIN(op) \ .macro endp; \ ENDPROC(atomic64_##op##_386); \ .purgem endp; \ .endm; \ ENTRY(atomic64_##op##_386); \ LOCK v; #define ENDP endp #define RET \ UNLOCK v; \ ret #define RET_ENDP \ RET; \ ENDP #define v %ecx BEGIN(read) movl (v), %eax movl 4(v), %edx RET_ENDP #undef v #define v %esi BEGIN(set) movl %ebx, (v) movl %ecx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(xchg) movl (v), %eax movl 4(v), %edx movl %ebx, (v) movl %ecx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(add) addl %eax, (v) adcl %edx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(add_return) addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(sub) subl %eax, (v) sbbl %edx, 4(v) RET_ENDP #undef v #define v %ecx BEGIN(sub_return) negl %edx negl %eax sbbl $0, %edx addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(inc) addl $1, (v) adcl $0, 4(v) RET_ENDP #undef v #define v %esi BEGIN(inc_return) movl (v), %eax movl 4(v), %edx addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(dec) subl $1, (v) sbbl $0, 4(v) RET_ENDP #undef v #define v %esi BEGIN(dec_return) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP #undef v #define v %esi BEGIN(add_unless) addl %eax, %ecx adcl %edx, %edi addl (v), %eax adcl 4(v), %edx cmpl %eax, %ecx je 3f 1: movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: RET 3: cmpl %edx, %edi jne 1b xorl %eax, %eax jmp 2b ENDP #undef v #define v %esi BEGIN(inc_not_zero) movl (v), %eax movl 4(v), %edx testl %eax, %eax je 3f 1: addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: RET 3: testl %edx, %edx jne 1b jmp 2b ENDP #undef v #define v %esi BEGIN(dec_if_positive) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx js 1f movl %eax, (v) movl %edx, 4(v) 1: RET_ENDP #undef v
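These atomic64 helpers are only used on 386/486-class CPUs without cmpxchg8b; as the LOCK/UNLOCK macros show, "atomic" here just means interrupts are disabled around an ordinary two-word operation, which is sufficient on a uniprocessor. A C sketch of the pattern follows; local_irq_save()/local_irq_restore() are hypothetical no-op stand-ins for the pushfl/cli ... popfl pair, not the real kernel primitives.

#include <stdint.h>

/* Hypothetical no-op stand-ins for the kernel's interrupt-disable pair. */
static unsigned long local_irq_save(void)  { return 0; }		/* "pushfl; cli" */
static void local_irq_restore(unsigned long flags) { (void)flags; }	/* "popfl" */

/* Illustrative sketch, not kernel code: atomic64_add_return on a UP 386. */
long long atomic64_add_return_sketch(long long delta, long long *v)
{
	unsigned long flags = local_irq_save();
	long long result = *v + delta;	/* done as addl/adcl on the two 32-bit halves */

	*v = result;
	local_irq_restore(flags);
	return result;
}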
liva/minimal-linux · 6,329 · arch/x86/lib/memcpy_64.S
/* Copyright 2002 Andi Kleen */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/export.h> /* * We build a jump to memcpy_orig by default which gets NOPped out on * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. */ .weak memcpy /* * memcpy - Copy a memory block. * * Input: * rdi destination * rsi source * rdx count * * Output: * rax original destination */ ENTRY(__memcpy) ENTRY(memcpy) ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS movq %rdi, %rax movq %rdx, %rcx shrq $3, %rcx andl $7, %edx rep movsq movl %edx, %ecx rep movsb ret ENDPROC(memcpy) ENDPROC(__memcpy) EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(__memcpy) /* * memcpy_erms() - enhanced fast string memcpy. This is faster and * simpler than memcpy. Use memcpy_erms when possible. */ ENTRY(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb ret ENDPROC(memcpy_erms) ENTRY(memcpy_orig) movq %rdi, %rax cmpq $0x20, %rdx jb .Lhandle_tail /* * We check whether memory false dependence could occur, * then jump to corresponding copy mode. */ cmp %dil, %sil jl .Lcopy_backward subq $0x20, %rdx .Lcopy_forward_loop: subq $0x20, %rdx /* * Move in blocks of 4x8 bytes: */ movq 0*8(%rsi), %r8 movq 1*8(%rsi), %r9 movq 2*8(%rsi), %r10 movq 3*8(%rsi), %r11 leaq 4*8(%rsi), %rsi movq %r8, 0*8(%rdi) movq %r9, 1*8(%rdi) movq %r10, 2*8(%rdi) movq %r11, 3*8(%rdi) leaq 4*8(%rdi), %rdi jae .Lcopy_forward_loop addl $0x20, %edx jmp .Lhandle_tail .Lcopy_backward: /* * Calculate copy position to tail. */ addq %rdx, %rsi addq %rdx, %rdi subq $0x20, %rdx /* * At most 3 ALU operations in one cycle, * so append NOPS in the same 16 bytes trunk. */ .p2align 4 .Lcopy_backward_loop: subq $0x20, %rdx movq -1*8(%rsi), %r8 movq -2*8(%rsi), %r9 movq -3*8(%rsi), %r10 movq -4*8(%rsi), %r11 leaq -4*8(%rsi), %rsi movq %r8, -1*8(%rdi) movq %r9, -2*8(%rdi) movq %r10, -3*8(%rdi) movq %r11, -4*8(%rdi) leaq -4*8(%rdi), %rdi jae .Lcopy_backward_loop /* * Calculate copy position to head. */ addl $0x20, %edx subq %rdx, %rsi subq %rdx, %rdi .Lhandle_tail: cmpl $16, %edx jb .Lless_16bytes /* * Move data from 16 bytes to 31 bytes. */ movq 0*8(%rsi), %r8 movq 1*8(%rsi), %r9 movq -2*8(%rsi, %rdx), %r10 movq -1*8(%rsi, %rdx), %r11 movq %r8, 0*8(%rdi) movq %r9, 1*8(%rdi) movq %r10, -2*8(%rdi, %rdx) movq %r11, -1*8(%rdi, %rdx) retq .p2align 4 .Lless_16bytes: cmpl $8, %edx jb .Lless_8bytes /* * Move data from 8 bytes to 15 bytes. */ movq 0*8(%rsi), %r8 movq -1*8(%rsi, %rdx), %r9 movq %r8, 0*8(%rdi) movq %r9, -1*8(%rdi, %rdx) retq .p2align 4 .Lless_8bytes: cmpl $4, %edx jb .Lless_3bytes /* * Move data from 4 bytes to 7 bytes. */ movl (%rsi), %ecx movl -4(%rsi, %rdx), %r8d movl %ecx, (%rdi) movl %r8d, -4(%rdi, %rdx) retq .p2align 4 .Lless_3bytes: subl $1, %edx jb .Lend /* * Move data from 1 bytes to 3 bytes. */ movzbl (%rsi), %ecx jz .Lstore_1byte movzbq 1(%rsi), %r8 movzbq (%rsi, %rdx), %r9 movb %r8b, 1(%rdi) movb %r9b, (%rdi, %rdx) .Lstore_1byte: movb %cl, (%rdi) .Lend: retq ENDPROC(memcpy_orig) #ifndef CONFIG_UML /* * memcpy_mcsafe_unrolled - memory copy with machine check exception handling * Note that we only catch machine checks when reading the source addresses. * Writes to target are posted and don't generate machine checks. */ ENTRY(memcpy_mcsafe_unrolled) cmpl $8, %edx /* Less than 8 bytes? 
Go to byte copy loop */ jb .L_no_whole_words /* Check for bad alignment of source */ testl $7, %esi /* Already aligned */ jz .L_8byte_aligned /* Copy one byte at a time until source is 8-byte aligned */ movl %esi, %ecx andl $7, %ecx subl $8, %ecx negl %ecx subl %ecx, %edx .L_copy_leading_bytes: movb (%rsi), %al movb %al, (%rdi) incq %rsi incq %rdi decl %ecx jnz .L_copy_leading_bytes .L_8byte_aligned: /* Figure out how many whole cache lines (64-bytes) to copy */ movl %edx, %ecx andl $63, %edx shrl $6, %ecx jz .L_no_whole_cache_lines /* Loop copying whole cache lines */ .L_cache_w0: movq (%rsi), %r8 .L_cache_w1: movq 1*8(%rsi), %r9 .L_cache_w2: movq 2*8(%rsi), %r10 .L_cache_w3: movq 3*8(%rsi), %r11 movq %r8, (%rdi) movq %r9, 1*8(%rdi) movq %r10, 2*8(%rdi) movq %r11, 3*8(%rdi) .L_cache_w4: movq 4*8(%rsi), %r8 .L_cache_w5: movq 5*8(%rsi), %r9 .L_cache_w6: movq 6*8(%rsi), %r10 .L_cache_w7: movq 7*8(%rsi), %r11 movq %r8, 4*8(%rdi) movq %r9, 5*8(%rdi) movq %r10, 6*8(%rdi) movq %r11, 7*8(%rdi) leaq 64(%rsi), %rsi leaq 64(%rdi), %rdi decl %ecx jnz .L_cache_w0 /* Are there any trailing 8-byte words? */ .L_no_whole_cache_lines: movl %edx, %ecx andl $7, %edx shrl $3, %ecx jz .L_no_whole_words /* Copy trailing words */ .L_copy_trailing_words: movq (%rsi), %r8 mov %r8, (%rdi) leaq 8(%rsi), %rsi leaq 8(%rdi), %rdi decl %ecx jnz .L_copy_trailing_words /* Any trailing bytes? */ .L_no_whole_words: andl %edx, %edx jz .L_done_memcpy_trap /* Copy trailing bytes */ movl %edx, %ecx .L_copy_trailing_bytes: movb (%rsi), %al movb %al, (%rdi) incq %rsi incq %rdi decl %ecx jnz .L_copy_trailing_bytes /* Copy successful. Return zero */ .L_done_memcpy_trap: xorq %rax, %rax ret ENDPROC(memcpy_mcsafe_unrolled) EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) .section .fixup, "ax" /* Return -EFAULT for any failure */ .L_memcpy_mcsafe_fail: mov $-EFAULT, %rax ret .previous _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail) #endif
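memcpy_64.S patches itself at boot: CPUs with REP_GOOD NOP out the jump to memcpy_orig and use the rep-movsq body, and ERMS CPUs replace it with a jump to memcpy_erms (a bare rep movsb). The rep-movsq body splits the count into quadwords plus a 0-7 byte tail; here is a C sketch of that split, with plain loops standing in for the string instructions and 8-byte-aligned pointers assumed for the quadword part.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative sketch, not the kernel routine: the REP_GOOD memcpy path. */
void *memcpy_repgood_sketch(void *dst, const void *src, size_t n)
{
	uint64_t *d8 = dst;
	const uint64_t *s8 = src;
	size_t quads = n >> 3;		/* "shrq $3, %rcx" */
	size_t tail  = n & 7;		/* "andl $7, %edx" */

	while (quads--)			/* "rep movsq" */
		*d8++ = *s8++;
	memcpy(d8, s8, tail);		/* "rep movsb" for the remainder */
	return dst;			/* memcpy returns the original destination (%rax) */
}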
liva/minimal-linux · 1,837 · arch/x86/lib/copy_page_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/export.h> /* * Some CPUs run faster using the string copy instructions (sane microcode). * It is also a lot simpler. Use this when possible. But, don't use streaming * copy unless the CPU indicates X86_FEATURE_REP_GOOD. Could vary the * prefetch distance based on SMP/UP. */ ALIGN ENTRY(copy_page) ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD movl $4096/8, %ecx rep movsq ret ENDPROC(copy_page) EXPORT_SYMBOL(copy_page) ENTRY(copy_page_regs) subq $2*8, %rsp movq %rbx, (%rsp) movq %r12, 1*8(%rsp) movl $(4096/64)-5, %ecx .p2align 4 .Loop64: dec %rcx movq 0x8*0(%rsi), %rax movq 0x8*1(%rsi), %rbx movq 0x8*2(%rsi), %rdx movq 0x8*3(%rsi), %r8 movq 0x8*4(%rsi), %r9 movq 0x8*5(%rsi), %r10 movq 0x8*6(%rsi), %r11 movq 0x8*7(%rsi), %r12 prefetcht0 5*64(%rsi) movq %rax, 0x8*0(%rdi) movq %rbx, 0x8*1(%rdi) movq %rdx, 0x8*2(%rdi) movq %r8, 0x8*3(%rdi) movq %r9, 0x8*4(%rdi) movq %r10, 0x8*5(%rdi) movq %r11, 0x8*6(%rdi) movq %r12, 0x8*7(%rdi) leaq 64 (%rsi), %rsi leaq 64 (%rdi), %rdi jnz .Loop64 movl $5, %ecx .p2align 4 .Loop2: decl %ecx movq 0x8*0(%rsi), %rax movq 0x8*1(%rsi), %rbx movq 0x8*2(%rsi), %rdx movq 0x8*3(%rsi), %r8 movq 0x8*4(%rsi), %r9 movq 0x8*5(%rsi), %r10 movq 0x8*6(%rsi), %r11 movq 0x8*7(%rsi), %r12 movq %rax, 0x8*0(%rdi) movq %rbx, 0x8*1(%rdi) movq %rdx, 0x8*2(%rdi) movq %r8, 0x8*3(%rdi) movq %r9, 0x8*4(%rdi) movq %r10, 0x8*5(%rdi) movq %r11, 0x8*6(%rdi) movq %r12, 0x8*7(%rdi) leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi jnz .Loop2 movq (%rsp), %rbx movq 1*8(%rsp), %r12 addq $2*8, %rsp ret ENDPROC(copy_page_regs)
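copy_page_regs moves a 4 KiB page in 64-byte chunks, eight quadwords per iteration, prefetching five cache lines ahead; the last five iterations run without the prefetch, presumably so the hints stay within the source page. The C sketch below shows the loop shape only (the prefetch cut-off at the end of the page is glossed over, and __builtin_prefetch is a GCC/Clang stand-in for prefetcht0).

#include <stdint.h>

#define PAGE_SIZE 4096

/* Illustrative sketch, not kernel code: unrolled page copy with prefetch. */
void copy_page_sketch(void *to, const void *from)
{
	uint64_t *d = to;
	const uint64_t *s = from;

	for (int i = 0; i < PAGE_SIZE / 64; i++) {
		__builtin_prefetch(s + 5 * 8);	/* "prefetcht0 5*64(%rsi)" */
		for (int j = 0; j < 8; j++)	/* eight movq loads + eight movq stores */
			d[j] = s[j];
		d += 8;
		s += 8;
	}
}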
liva/minimal-linux · 1,165 · arch/x86/lib/cmpxchg16b_emu.S
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * */ #include <linux/linkage.h> #include <asm/percpu.h> .text /* * Inputs: * %rsi : memory location to compare * %rax : low 64 bits of old value * %rdx : high 64 bits of old value * %rbx : low 64 bits of new value * %rcx : high 64 bits of new value * %al : Operation successful */ ENTRY(this_cpu_cmpxchg16b_emu) # # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not # via the ZF. Caller will access %al to get result. # # Note that this is only useful for a cpuops operation. Meaning that we # do *not* have a fully atomic operation but just an operation that is # *atomic* on a single cpu (as provided by the this_cpu_xx class of # macros). # pushfq cli cmpq PER_CPU_VAR((%rsi)), %rax jne .Lnot_same cmpq PER_CPU_VAR(8(%rsi)), %rdx jne .Lnot_same movq %rbx, PER_CPU_VAR((%rsi)) movq %rcx, PER_CPU_VAR(8(%rsi)) popfq mov $1, %al ret .Lnot_same: popfq xor %al,%al ret ENDPROC(this_cpu_cmpxchg16b_emu)
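As its comment says, this_cpu_cmpxchg16b_emu is not a fully atomic 128-bit cmpxchg: it only has to be atomic with respect to the local CPU, so disabling interrupts around an ordinary compare-and-store is enough. A C sketch of the operation follows, with the same kind of hypothetical local_irq_save()/local_irq_restore() stand-ins for the pushfq/cli ... popfq pair as in the atomic64 sketch above.

#include <stdbool.h>
#include <stdint.h>

static unsigned long local_irq_save(void)  { return 0; }		/* "pushfq; cli" */
static void local_irq_restore(unsigned long flags) { (void)flags; }	/* "popfq" */

struct u128 { uint64_t lo, hi; };

/* Illustrative sketch, not kernel code: per-CPU 128-bit compare-and-swap. */
bool this_cpu_cmpxchg16b_sketch(struct u128 *mem, struct u128 old, struct u128 new)
{
	bool ok = false;
	unsigned long flags = local_irq_save();

	if (mem->lo == old.lo && mem->hi == old.hi) {	/* "cmpq ...; jne .Lnot_same" */
		mem->lo = new.lo;			/* "movq %rbx, (%rsi)"  */
		mem->hi = new.hi;			/* "movq %rcx, 8(%rsi)" */
		ok = true;				/* success reported in %al */
	}
	local_irq_restore(flags);
	return ok;
}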
liva/minimal-linux · 4,376 · arch/x86/mm/mem_encrypt_boot.S
/* * AMD Memory Encryption Support * * Copyright (C) 2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> .text .code64 ENTRY(sme_encrypt_execute) /* * Entry parameters: * RDI - virtual address for the encrypted mapping * RSI - virtual address for the decrypted mapping * RDX - length to encrypt * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) * - intermediate copy buffer (PMD_PAGE_SIZE) * R8 - physcial address of the pagetables to use for encryption */ push %rbp movq %rsp, %rbp /* RBP now has original stack pointer */ /* Set up a one page stack in the non-encrypted memory area */ movq %rcx, %rax /* Workarea stack page */ leaq PAGE_SIZE(%rax), %rsp /* Set new stack pointer */ addq $PAGE_SIZE, %rax /* Workarea encryption routine */ push %r12 movq %rdi, %r10 /* Encrypted area */ movq %rsi, %r11 /* Decrypted area */ movq %rdx, %r12 /* Area length */ /* Copy encryption routine into the workarea */ movq %rax, %rdi /* Workarea encryption routine */ leaq __enc_copy(%rip), %rsi /* Encryption routine */ movq $(.L__enc_copy_end - __enc_copy), %rcx /* Encryption routine length */ rep movsb /* Setup registers for call */ movq %r10, %rdi /* Encrypted area */ movq %r11, %rsi /* Decrypted area */ movq %r8, %rdx /* Pagetables used for encryption */ movq %r12, %rcx /* Area length */ movq %rax, %r8 /* Workarea encryption routine */ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ call *%rax /* Call the encryption routine */ pop %r12 movq %rbp, %rsp /* Restore original stack pointer */ pop %rbp ret ENDPROC(sme_encrypt_execute) ENTRY(__enc_copy) /* * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since * the kernel will be encrypted during the process. So this * routine is defined here and then copied to an area outside * of the kernel where it will remain and run decrypted * during execution. * * On entry the registers must be: * RDI - virtual address for the encrypted mapping * RSI - virtual address for the decrypted mapping * RDX - address of the pagetables to use for encryption * RCX - length of area * R8 - intermediate copy buffer * * RAX - points to this routine * * The area will be encrypted by copying from the non-encrypted * memory space to an intermediate buffer and then copying from the * intermediate buffer back to the encrypted memory space. The physical * addresses of the two mappings are the same which results in the area * being encrypted "in place". 
*/ /* Enable the new page tables */ mov %rdx, %cr3 /* Flush any global TLBs */ mov %cr4, %rdx andq $~X86_CR4_PGE, %rdx mov %rdx, %cr4 orq $X86_CR4_PGE, %rdx mov %rdx, %cr4 push %r15 push %r12 movq %rcx, %r9 /* Save area length */ movq %rdi, %r10 /* Save encrypted area address */ movq %rsi, %r11 /* Save decrypted area address */ /* Set the PAT register PA5 entry to write-protect */ movl $MSR_IA32_CR_PAT, %ecx rdmsr mov %rdx, %r15 /* Save original PAT value */ andl $0xffff00ff, %edx /* Clear PA5 */ orl $0x00000500, %edx /* Set PA5 to WP */ wrmsr wbinvd /* Invalidate any cache entries */ /* Copy/encrypt up to 2MB at a time */ movq $PMD_PAGE_SIZE, %r12 1: cmpq %r12, %r9 jnb 2f movq %r9, %r12 2: movq %r11, %rsi /* Source - decrypted area */ movq %r8, %rdi /* Dest - intermediate copy buffer */ movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ movq %r10, %rdi /* Dest - encrypted area */ movq %r12, %rcx rep movsb addq %r12, %r11 addq %r12, %r10 subq %r12, %r9 /* Kernel length decrement */ jnz 1b /* Kernel length not zero? */ /* Restore PAT register */ movl $MSR_IA32_CR_PAT, %ecx rdmsr mov %r15, %rdx /* Restore original PAT value */ wrmsr pop %r12 pop %r15 ret .L__enc_copy_end: ENDPROC(__enc_copy)
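__enc_copy encrypts memory in place by exploiting two virtual mappings of the same physical area, one marked encrypted and one decrypted: data is read through the decrypted mapping into an intermediate buffer and written back through the encrypted mapping, in PMD-sized (2 MiB) pieces. The C sketch below shows only the chunking loop; the mapping setup, page tables, and PAT/WP handling done by the assembly are assumed to be in place.

#include <stddef.h>
#include <string.h>

#define PMD_PAGE_SIZE (2UL * 1024 * 1024)

/* Illustrative sketch, not kernel code: bounce-buffer loop of __enc_copy. */
void enc_copy_sketch(unsigned char *encrypted, const unsigned char *decrypted,
		     size_t len, unsigned char *bounce /* PMD_PAGE_SIZE bytes */)
{
	while (len) {
		size_t chunk = len < PMD_PAGE_SIZE ? len : PMD_PAGE_SIZE;

		memcpy(bounce, decrypted, chunk);	/* read via the decrypted mapping */
		memcpy(encrypted, bounce, chunk);	/* write via the encrypted mapping */
		decrypted += chunk;
		encrypted += chunk;
		len -= chunk;
	}
}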
liva/minimal-linux · 1,605 · arch/x86/entry/thunk_64.S
/* * Save registers before calling assembly functions. This avoids * disturbance of register allocation in some inline assembly constructs. * Copyright 2001,2002 by Andi Kleen, SuSE Labs. * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc. * Subject to the GNU public license, v.2. No warranty of any kind. */ #include <linux/linkage.h> #include "calling.h" #include <asm/asm.h> #include <asm/export.h> /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ .macro THUNK name, func, put_ret_addr_in_rdi=0 .globl \name .type \name, @function \name: pushq %rbp movq %rsp, %rbp pushq %rdi pushq %rsi pushq %rdx pushq %rcx pushq %rax pushq %r8 pushq %r9 pushq %r10 pushq %r11 .if \put_ret_addr_in_rdi /* 8(%rbp) is return addr on stack */ movq 8(%rbp), %rdi .endif call \func jmp .L_restore _ASM_NOKPROBE(\name) .endm #ifdef CONFIG_TRACE_IRQFLAGS THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1 THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1 #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC THUNK lockdep_sys_exit_thunk,lockdep_sys_exit #endif #ifdef CONFIG_PREEMPT THUNK ___preempt_schedule, preempt_schedule THUNK ___preempt_schedule_notrace, preempt_schedule_notrace EXPORT_SYMBOL(___preempt_schedule) EXPORT_SYMBOL(___preempt_schedule_notrace) #endif #if defined(CONFIG_TRACE_IRQFLAGS) \ || defined(CONFIG_DEBUG_LOCK_ALLOC) \ || defined(CONFIG_PREEMPT) .L_restore: popq %r11 popq %r10 popq %r9 popq %r8 popq %rax popq %rcx popq %rdx popq %rsi popq %rdi popq %rbp ret _ASM_NOKPROBE(.L_restore) #endif
liva/minimal-linux · 50,457 · arch/x86/entry/entry_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/x86_64/entry.S * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * * entry.S contains the system-call and fault low-level handling routines. * * Some of this is documented in Documentation/x86/entry_64.txt * * A note on terminology: * - iret frame: Architecture defined interrupt frame from SS to RIP * at the top of the kernel process stack. * * Some macro usage: * - ENTRY/END: Define functions in the symbol table. * - TRACE_IRQ_*: Trace hardirq state for lock debugging. * - idtentry: Define exception entry points. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/msr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/hw_irq.h> #include <asm/page_types.h> #include <asm/irqflags.h> #include <asm/paravirt.h> #include <asm/percpu.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/pgtable_types.h> #include <asm/export.h> #include <asm/frame.h> #include <asm/nospec-branch.h> #include <linux/err.h> #include "calling.h" .code64 .section .entry.text, "ax" #ifdef CONFIG_PARAVIRT ENTRY(native_usergs_sysret64) UNWIND_HINT_EMPTY swapgs sysretq END(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ .macro TRACE_IRQS_FLAGS flags:req #ifdef CONFIG_TRACE_IRQFLAGS bt $9, \flags /* interrupts off? */ jnc 1f TRACE_IRQS_ON 1: #endif .endm .macro TRACE_IRQS_IRETQ TRACE_IRQS_FLAGS EFLAGS(%rsp) .endm /* * When dynamic function tracer is enabled it will add a breakpoint * to all locations that it is about to modify, sync CPUs, update * all the code, sync CPUs, then remove the breakpoints. In this time * if lockdep is enabled, it might jump back into the debug handler * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF). * * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to * make sure the stack pointer does not get reset back to the top * of the debug stack, and instead just reuses the current stack. */ #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) .macro TRACE_IRQS_OFF_DEBUG call debug_stack_set_zero TRACE_IRQS_OFF call debug_stack_reset .endm .macro TRACE_IRQS_ON_DEBUG call debug_stack_set_zero TRACE_IRQS_ON call debug_stack_reset .endm .macro TRACE_IRQS_IRETQ_DEBUG bt $9, EFLAGS(%rsp) /* interrupts off? */ jnc 1f TRACE_IRQS_ON_DEBUG 1: .endm #else # define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF # define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ #endif /* * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers. * * This is the only entry point used for 64-bit system calls. The * hardware interface is reasonably well designed and the register to * argument mapping Linux uses fits well with the registers that are * available when SYSCALL is used. * * SYSCALL instructions can be found inlined in libc implementations as * well as some other programs and libraries. There are also a handful * of SYSCALL instructions in the vDSO used, for example, as a * clock_gettimeofday fallback. * * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, * then loads new ss, cs, and rip from previously programmed MSRs. * rflags gets masked by a value from another MSR (so CLD and CLAC * are not needed). SYSCALL does not save anything on the stack * and does not change rsp. 
* * Registers on entry: * rax system call number * rcx return address * r11 saved rflags (note: r11 is callee-clobbered register in C ABI) * rdi arg0 * rsi arg1 * rdx arg2 * r10 arg3 (needs to be moved to rcx to conform to C ABI) * r8 arg4 * r9 arg5 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI) * * Only called from user space. * * When user can change pt_regs->foo always force IRET. That is because * it deals with uncanonical addresses better. SYSRET has trouble * with them due to bugs in both AMD and Intel CPUs. */ .pushsection .entry_trampoline, "ax" /* * The code in here gets remapped into cpu_entry_area's trampoline. This means * that the assembler and linker have the wrong idea as to where this code * lives (and, in fact, it's mapped more than once, so it's not even at a * fixed address). So we can't reference any symbols outside the entry * trampoline and expect it to work. * * Instead, we carefully abuse %rip-relative addressing. * _entry_trampoline(%rip) refers to the start of the remapped) entry * trampoline. We can thus find cpu_entry_area with this macro: */ #define CPU_ENTRY_AREA \ _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) /* The top word of the SYSENTER stack is hot and is usable as scratch space. */ #define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \ SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA ENTRY(entry_SYSCALL_64_trampoline) UNWIND_HINT_EMPTY swapgs /* Stash the user RSP. */ movq %rsp, RSP_SCRATCH /* Note: using %rsp as a scratch reg. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp /* Load the top of the task stack into RSP */ movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp /* Start building the simulated IRET frame. */ pushq $__USER_DS /* pt_regs->ss */ pushq RSP_SCRATCH /* pt_regs->sp */ pushq %r11 /* pt_regs->flags */ pushq $__USER_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ /* * x86 lacks a near absolute jump, and we can't jump to the real * entry text with a relative jump. We could push the target * address and then use retq, but this destroys the pipeline on * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead, * spill RDI and restore it in a second-stage trampoline. */ pushq %rdi movq $entry_SYSCALL_64_stage2, %rdi JMP_NOSPEC %rdi END(entry_SYSCALL_64_trampoline) .popsection ENTRY(entry_SYSCALL_64_stage2) UNWIND_HINT_EMPTY popq %rdi jmp entry_SYSCALL_64_after_hwframe END(entry_SYSCALL_64_stage2) ENTRY(entry_SYSCALL_64) UNWIND_HINT_EMPTY /* * Interrupts are off on entry. * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * it is too small to ever cause noticeable irq latency. */ swapgs /* * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it * is not required to switch CR3. 
*/ movq %rsp, PER_CPU_VAR(rsp_scratch) movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ pushq %r11 /* pt_regs->flags */ pushq $__USER_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ UNWIND_HINT_REGS extra=0 TRACE_IRQS_OFF /* * If we need to do entry work or if we guess we'll need to do * exit work, go straight to the slow path. */ movq PER_CPU_VAR(current_task), %r11 testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) jnz entry_SYSCALL64_slow_path entry_SYSCALL_64_fastpath: /* * Easy case: enable interrupts and issue the syscall. If the syscall * needs pt_regs, we'll call a stub that disables interrupts again * and jumps to the slow path. */ TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) #if __SYSCALL_MASK == ~0 cmpq $__NR_syscall_max, %rax #else andl $__SYSCALL_MASK, %eax cmpl $__NR_syscall_max, %eax #endif ja 1f /* return -ENOSYS (already in pt_regs->ax) */ movq %r10, %rcx /* * This call instruction is handled specially in stub_ptregs_64. * It might end up jumping to the slow path. If it jumps, RAX * and all argument registers are clobbered. */ #ifdef CONFIG_RETPOLINE movq sys_call_table(, %rax, 8), %rax call __x86_indirect_thunk_rax #else call *sys_call_table(, %rax, 8) #endif .Lentry_SYSCALL_64_after_fastpath_call: movq %rax, RAX(%rsp) 1: /* * If we get here, then we know that pt_regs is clean for SYSRET64. * If we see that no exit work is required (which we are required * to check with IRQs off), then we can go straight to SYSRET64. */ DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF movq PER_CPU_VAR(current_task), %r11 testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) jnz 1f LOCKDEP_SYS_EXIT TRACE_IRQS_ON /* user mode is traced as IRQs on */ movq RIP(%rsp), %rcx movq EFLAGS(%rsp), %r11 addq $6*8, %rsp /* skip extra regs -- they were preserved */ UNWIND_HINT_EMPTY jmp .Lpop_c_regs_except_rcx_r11_and_sysret 1: /* * The fast path looked good when we started, but something changed * along the way and we need to switch to the slow path. Calling * raise(3) will trigger this, for example. IRQs are off. */ TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_ANY) SAVE_EXTRA_REGS movq %rsp, %rdi call syscall_return_slowpath /* returns with IRQs disabled */ jmp return_from_SYSCALL_64 entry_SYSCALL64_slow_path: /* IRQs are off. */ SAVE_EXTRA_REGS movq %rsp, %rdi call do_syscall_64 /* returns with IRQs disabled */ return_from_SYSCALL_64: TRACE_IRQS_IRETQ /* we're about to change IF */ /* * Try to use SYSRET instead of IRET if we're returning to * a completely clean 64-bit userspace context. If we're not, * go to the slow exit path. */ movq RCX(%rsp), %rcx movq RIP(%rsp), %r11 cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ jne swapgs_restore_regs_and_return_to_usermode /* * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP * in kernel space. This essentially lets the user take over * the kernel, since userspace controls RSP. * * If width of "canonical tail" ever becomes variable, this will need * to be updated to remain correct on both old and new CPUs. 
* * Change top bits to match most significant bit (47th or 56th bit * depending on paging mode) in the address. */ shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx /* If this changed %rcx, it was not canonical */ cmpq %rcx, %r11 jne swapgs_restore_regs_and_return_to_usermode cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ jne swapgs_restore_regs_and_return_to_usermode movq R11(%rsp), %r11 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ jne swapgs_restore_regs_and_return_to_usermode /* * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot * restore RF properly. If the slowpath sets it for whatever reason, we * need to restore it correctly. * * SYSRET can restore TF, but unlike IRET, restoring TF results in a * trap from userspace immediately after SYSRET. This would cause an * infinite loop whenever #DB happens with register state that satisfies * the opportunistic SYSRET conditions. For example, single-stepping * this user code: * * movq $stuck_here, %rcx * pushfq * popq %r11 * stuck_here: * * would never get past 'stuck_here'. */ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 jnz swapgs_restore_regs_and_return_to_usermode /* nothing to check for RSP */ cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ jne swapgs_restore_regs_and_return_to_usermode /* * We win! This label is here just for ease of understanding * perf profiles. Nothing jumps here. */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ UNWIND_HINT_EMPTY POP_EXTRA_REGS .Lpop_c_regs_except_rcx_r11_and_sysret: popq %rsi /* skip r11 */ popq %r10 popq %r9 popq %r8 popq %rax popq %rsi /* skip rcx */ popq %rdx popq %rsi /* * Now all regs are restored except RSP and RDI. * Save old stack pointer and switch to trampoline stack. */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ /* * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi popq %rdi popq %rsp USERGS_SYSRET64 END(entry_SYSCALL_64) ENTRY(stub_ptregs_64) /* * Syscalls marked as needing ptregs land here. * If we are on the fast path, we need to save the extra regs, * which we achieve by trying again on the slow path. If we are on * the slow path, the extra regs are already saved. * * RAX stores a pointer to the C function implementing the syscall. * IRQs are on. 
*/ cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp) jne 1f /* * Called from fast path -- disable IRQs again, pop return address * and jump to slow path */ DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF popq %rax UNWIND_HINT_REGS extra=0 jmp entry_SYSCALL64_slow_path 1: JMP_NOSPEC %rax /* Called from C */ END(stub_ptregs_64) .macro ptregs_stub func ENTRY(ptregs_\func) UNWIND_HINT_FUNC leaq \func(%rip), %rax jmp stub_ptregs_64 END(ptregs_\func) .endm /* Instantiate ptregs_stub for each ptregs-using syscall */ #define __SYSCALL_64_QUAL_(sym) #define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym #define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym) #include <asm/syscalls_64.h> /* * %rdi: prev task * %rsi: next task */ ENTRY(__switch_to_asm) UNWIND_HINT_FUNC /* * Save callee-saved registers * This must match the order in inactive_task_frame */ pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 /* switch stack */ movq %rsp, TASK_threadsp(%rdi) movq TASK_threadsp(%rsi), %rsp #ifdef CONFIG_CC_STACKPROTECTOR movq TASK_stack_canary(%rsi), %rbx movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset #endif #ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated * with userspace addresses. On CPUs where those concerns * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW #endif /* restore callee-saved registers */ popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp jmp __switch_to END(__switch_to_asm) /* * A newly forked process directly context switches into this address. * * rax: prev task we switched from * rbx: kernel thread func (NULL for user thread) * r12: kernel thread arg */ ENTRY(ret_from_fork) UNWIND_HINT_EMPTY movq %rax, %rdi call schedule_tail /* rdi: 'prev' task parameter */ testq %rbx, %rbx /* from kernel_thread? */ jnz 1f /* kernel threads are uncommon */ 2: UNWIND_HINT_REGS movq %rsp, %rdi call syscall_return_slowpath /* returns with IRQs disabled */ TRACE_IRQS_ON /* user mode is traced as IRQS on */ jmp swapgs_restore_regs_and_return_to_usermode 1: /* kernel thread */ movq %r12, %rdi CALL_NOSPEC %rbx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() * syscall. */ movq $0, RAX(%rsp) jmp 2b END(ret_from_fork) /* * Build the entry stubs with some assembler magic. * We pack 1 stub into every 8-byte block. */ .align 8 ENTRY(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) UNWIND_HINT_IRET_REGS pushq $(~vector+0x80) /* Note: always in signed byte range */ jmp common_interrupt .align 8 vector=vector+1 .endr END(irq_entries_start) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY pushq %rax SAVE_FLAGS(CLBR_RAX) testl $X86_EFLAGS_IF, %eax jz .Lokay_\@ ud2 .Lokay_\@: popq %rax #endif .endm /* * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers * flags and puts old RSP into old_rsp, and leaves all other GPRs alone. * Requires kernel GSBASE. * * The invariant is that, if irq_count != -1, then the IRQ stack is in use. 
*/ .macro ENTER_IRQ_STACK regs=1 old_rsp DEBUG_ENTRY_ASSERT_IRQS_OFF movq %rsp, \old_rsp .if \regs UNWIND_HINT_REGS base=\old_rsp .endif incl PER_CPU_VAR(irq_count) jnz .Lirq_stack_push_old_rsp_\@ /* * Right now, if we just incremented irq_count to zero, we've * claimed the IRQ stack but we haven't switched to it yet. * * If anything is added that can interrupt us here without using IST, * it must be *extremely* careful to limit its stack usage. This * could include kprobes and a hypothetical future IST-less #DB * handler. * * The OOPS unwinder relies on the word at the top of the IRQ * stack linking back to the previous RSP for the entire time we're * on the IRQ stack. For this to work reliably, we need to write * it before we actually move ourselves to the IRQ stack. */ movq \old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8) movq PER_CPU_VAR(irq_stack_ptr), %rsp #ifdef CONFIG_DEBUG_ENTRY /* * If the first movq above becomes wrong due to IRQ stack layout * changes, the only way we'll notice is if we try to unwind right * here. Assert that we set up the stack right to catch this type * of bug quickly. */ cmpq -8(%rsp), \old_rsp je .Lirq_stack_okay\@ ud2 .Lirq_stack_okay\@: #endif .Lirq_stack_push_old_rsp_\@: pushq \old_rsp .if \regs UNWIND_HINT_REGS indirect=1 .endif .endm /* * Undoes ENTER_IRQ_STACK. */ .macro LEAVE_IRQ_STACK regs=1 DEBUG_ENTRY_ASSERT_IRQS_OFF /* We need to be off the IRQ stack before decrementing irq_count. */ popq %rsp .if \regs UNWIND_HINT_REGS .endif /* * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming * the irq stack but we're not on it. */ decl PER_CPU_VAR(irq_count) .endm /* * Interrupt entry/exit. * * Interrupt entry points save only callee clobbered registers in fast path. * * Entry runs with interrupts off. */ /* 0(%rsp): ~(interrupt number) */ .macro interrupt func cld testb $3, CS-ORIG_RAX(%rsp) jz 1f SWAPGS call switch_to_thread_stack 1: ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS SAVE_EXTRA_REGS ENCODE_FRAME_POINTER testb $3, CS(%rsp) jz 1f /* * IRQ from user mode. * * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode * (which can take locks). Since TRACE_IRQS_OFF idempotent, * the simplest way to handle it is to just call it twice if * we enter from user mode. There's no reason to optimize this since * TRACE_IRQS_OFF is a no-op if lockdep is off. */ TRACE_IRQS_OFF CALL_enter_from_user_mode 1: ENTER_IRQ_STACK old_rsp=%rdi /* We entered an interrupt context - irqs are off: */ TRACE_IRQS_OFF call \func /* rdi points to pt_regs */ .endm /* * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_interrupt. */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: ASM_CLAC addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ interrupt do_IRQ /* 0(%rsp): old RSP */ ret_from_intr: DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF LEAVE_IRQ_STACK testb $3, CS(%rsp) jz retint_kernel /* Interrupt came from user space */ GLOBAL(retint_user) mov %rsp,%rdi call prepare_exit_to_usermode TRACE_IRQS_IRETQ GLOBAL(swapgs_restore_regs_and_return_to_usermode) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. */ testb $3, CS(%rsp) jnz 1f ud2 1: #endif POP_EXTRA_REGS popq %r11 popq %r10 popq %r9 popq %r8 popq %rax popq %rcx popq %rdx popq %rsi /* * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. * Save old stack pointer and switch to trampoline stack. 
*/ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp /* Copy the IRET frame to the trampoline stack. */ pushq 6*8(%rdi) /* SS */ pushq 5*8(%rdi) /* RSP */ pushq 4*8(%rdi) /* EFLAGS */ pushq 3*8(%rdi) /* CS */ pushq 2*8(%rdi) /* RIP */ /* Push user RDI on the trampoline stack. */ pushq (%rdi) /* * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi /* Restore RDI. */ popq %rdi SWAPGS INTERRUPT_RETURN /* Returning to kernel space */ retint_kernel: #ifdef CONFIG_PREEMPT /* Interrupts are off */ /* Check if we need preemption */ bt $9, EFLAGS(%rsp) /* were interrupts off? */ jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) jnz 1f call preempt_schedule_irq jmp 0b 1: #endif /* * The iretq could re-enable interrupts: */ TRACE_IRQS_IRETQ GLOBAL(restore_regs_and_return_to_kernel) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates kernel mode. */ testb $3, CS(%rsp) jz 1f ud2 1: #endif POP_EXTRA_REGS POP_C_REGS addq $8, %rsp /* skip regs->orig_ax */ INTERRUPT_RETURN ENTRY(native_iret) UNWIND_HINT_IRET_REGS /* * Are we returning to a stack segment from the LDT? Note: in * 64-bit mode SS:RSP on the exception stack is always valid. */ #ifdef CONFIG_X86_ESPFIX64 testb $4, (SS-RIP)(%rsp) jnz native_irq_return_ldt #endif .global native_irq_return_iret native_irq_return_iret: /* * This may fault. Non-paranoid faults on return to userspace are * handled by fixup_bad_iret. These include #SS, #GP, and #NP. * Double-faults due to espfix64 are handled in do_double_fault. * Other faults here are fatal. */ iretq #ifdef CONFIG_X86_ESPFIX64 native_irq_return_ldt: /* * We are running with user GSBASE. All GPRs contain their user * values. We have a percpu ESPFIX stack that is eight slots * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom * of the ESPFIX stack. * * We clobber RAX and RDI in this code. We stash RDI on the * normal stack and RAX on the ESPFIX stack. * * The ESPFIX stack layout we set up looks like this: * * --- top of ESPFIX stack --- * SS * RSP * RFLAGS * CS * RIP <-- RSP points here when we're done * RAX <-- espfix_waddr points here * --- bottom of ESPFIX stack --- */ pushq %rdi /* Stash user RDI */ SWAPGS /* to kernel GS */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ movq (1*8)(%rsp), %rax /* user RIP */ movq %rax, (1*8)(%rdi) movq (2*8)(%rsp), %rax /* user CS */ movq %rax, (2*8)(%rdi) movq (3*8)(%rsp), %rax /* user RFLAGS */ movq %rax, (3*8)(%rdi) movq (5*8)(%rsp), %rax /* user SS */ movq %rax, (5*8)(%rdi) movq (4*8)(%rsp), %rax /* user RSP */ movq %rax, (4*8)(%rdi) /* Now RAX == RSP. */ andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ /* * espfix_stack[31:16] == 0. The page tables are set up such that * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of * espfix_waddr for any X. That is, there are 65536 RO aliases of * the same page. Set up RSP so that RSP[31:16] contains the * respective 16 bits of the /userspace/ RSP and RSP nonetheless * still points to an RO alias of the ESPFIX stack. */ orq PER_CPU_VAR(espfix_stack), %rax SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi SWAPGS /* to user GS */ popq %rdi /* Restore user RDI */ movq %rax, %rsp UNWIND_HINT_IRET_REGS offset=8 /* * At this point, we cannot write to the stack any more, but we can * still read. 
*/ popq %rax /* Restore user RAX */ /* * RSP now points to an ordinary IRET frame, except that the page * is read-only and RSP[31:16] are preloaded with the userspace * values. We can now IRET back to userspace. */ jmp native_irq_return_iret #endif END(common_interrupt) /* * APIC interrupts. */ .macro apicinterrupt3 num sym do_sym ENTRY(\sym) UNWIND_HINT_IRET_REGS ASM_CLAC pushq $~(\num) .Lcommon_\sym: interrupt \do_sym jmp ret_from_intr END(\sym) .endm /* Make sure APIC interrupt handlers end up in the irqentry section: */ #define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax" #define POP_SECTION_IRQENTRY .popsection .macro apicinterrupt num sym do_sym PUSH_SECTION_IRQENTRY apicinterrupt3 \num \sym \do_sym POP_SECTION_IRQENTRY .endm #ifdef CONFIG_SMP apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt #endif #ifdef CONFIG_X86_UV apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt #endif apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi #ifdef CONFIG_HAVE_KVM apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi #endif #ifdef CONFIG_X86_MCE_THRESHOLD apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt #endif #ifdef CONFIG_X86_MCE_AMD apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt #endif #ifdef CONFIG_X86_THERMAL_VECTOR apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt #endif #ifdef CONFIG_SMP apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt #endif apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt #ifdef CONFIG_IRQ_WORK apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt #endif /* * Exception entry points. */ #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) /* * Switch to the thread stack. This is called with the IRET frame and * orig_ax on the stack. (That is, RDI..R12 are not on the stack and * space has not been allocated for them.) */ ENTRY(switch_to_thread_stack) UNWIND_HINT_FUNC pushq %rdi /* Need to switch before accessing the thread stack. 
*/ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi movq %rsp, %rdi movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI pushq 7*8(%rdi) /* regs->ss */ pushq 6*8(%rdi) /* regs->rsp */ pushq 5*8(%rdi) /* regs->eflags */ pushq 4*8(%rdi) /* regs->cs */ pushq 3*8(%rdi) /* regs->ip */ pushq 2*8(%rdi) /* regs->orig_ax */ pushq 8(%rdi) /* return address */ UNWIND_HINT_FUNC movq (%rdi), %rdi ret END(switch_to_thread_stack) .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ENTRY(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 /* Sanity check */ .if \shift_ist != -1 && \paranoid == 0 .error "using shift_ist requires paranoid=1" .endif ASM_CLAC .if \has_error_code == 0 pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif ALLOC_PT_GPREGS_ON_STACK .if \paranoid < 2 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ jnz .Lfrom_usermode_switch_stack_\@ .endif .if \paranoid call paranoid_entry .else call error_entry .endif UNWIND_HINT_REGS /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ .if \paranoid .if \shift_ist != -1 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ .else TRACE_IRQS_OFF .endif .endif movq %rsp, %rdi /* pt_regs pointer */ .if \has_error_code movq ORIG_RAX(%rsp), %rsi /* get error code */ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ .else xorl %esi, %esi /* no error code */ .endif .if \shift_ist != -1 subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) .endif call \do_sym .if \shift_ist != -1 addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) .endif /* these procedures expect "no swapgs" flag in ebx */ .if \paranoid jmp paranoid_exit .else jmp error_exit .endif .if \paranoid < 2 /* * Entry from userspace. Switch stacks and treat it * as a normal entry. This means that paranoid handlers * run in real process context if user_mode(regs). 
*/ .Lfrom_usermode_switch_stack_\@: call error_entry movq %rsp, %rdi /* pt_regs pointer */ .if \has_error_code movq ORIG_RAX(%rsp), %rsi /* get error code */ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ .else xorl %esi, %esi /* no error code */ .endif call \do_sym jmp error_exit /* %ebx: no swapgs flag */ .endif END(\sym) .endm idtentry divide_error do_divide_error has_error_code=0 idtentry overflow do_overflow has_error_code=0 idtentry bounds do_bounds has_error_code=0 idtentry invalid_op do_invalid_op has_error_code=0 idtentry device_not_available do_device_not_available has_error_code=0 idtentry double_fault do_double_fault has_error_code=1 paranoid=2 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 idtentry invalid_TSS do_invalid_TSS has_error_code=1 idtentry segment_not_present do_segment_not_present has_error_code=1 idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0 idtentry coprocessor_error do_coprocessor_error has_error_code=0 idtentry alignment_check do_alignment_check has_error_code=1 idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 /* * Reload gs selector with exception handling * edi: new selector */ ENTRY(native_load_gs_index) FRAME_BEGIN pushfq DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) TRACE_IRQS_OFF SWAPGS .Lgs_change: movl %edi, %gs 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE SWAPGS TRACE_IRQS_FLAGS (%rsp) popfq FRAME_END ret ENDPROC(native_load_gs_index) EXPORT_SYMBOL(native_load_gs_index) _ASM_EXTABLE(.Lgs_change, bad_gs) .section .fixup, "ax" /* running with kernelgs */ bad_gs: SWAPGS /* switch back to user gs */ .macro ZAP_GS /* This can't be a string because the preprocessor needs to see it. */ movl $__USER_DS, %eax movl %eax, %gs .endm ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG xorl %eax, %eax movl %eax, %gs jmp 2b .previous /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp mov %rsp, %rbp ENTER_IRQ_STACK regs=0 old_rsp=%r11 call __do_softirq LEAVE_IRQ_STACK regs=0 leaveq ret ENDPROC(do_softirq_own_stack) #ifdef CONFIG_XEN idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 /* * A note on the "critical region" in our callback handler. * We want to avoid stacking callback handlers due to events occurring * during handling of the last event. To do this, we keep events disabled * until we've done all processing. HOWEVER, we must enable events before * popping the stack frame (can't be done atomically) and so it would still * be possible to get enough handler activations to overflow the stack. * Although unlikely, bugs of that kind are hard to track down, so we'd * like to avoid the possibility. * So, on entry to the handler we detect whether we interrupted an * existing activation in its critical region -- if so, we pop the current * activation and restart the handler using the previous one. */ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ /* * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will * see the correct pointer to the pt_regs */ UNWIND_HINT_FUNC movq %rdi, %rsp /* we don't return, adjust the stack frame */ UNWIND_HINT_REGS ENTER_IRQ_STACK old_rsp=%r10 call xen_evtchn_do_upcall LEAVE_IRQ_STACK #ifndef CONFIG_PREEMPT call xen_maybe_preempt_hcall #endif jmp error_exit END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. * We get here for two reasons: * 1. Fault while reloading DS, ES, FS or GS * 2. 
Fault while executing IRET * Category 1 we do not need to fix up as Xen has already reloaded all segment * registers that could be reloaded and zeroed the others. * Category 2 we fix up by killing the current process. We cannot use the * normal Linux return path in this case because if we use the IRET hypercall * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by comparing each saved segment register * with its current contents: any discrepancy means we in category 1. */ ENTRY(xen_failsafe_callback) UNWIND_HINT_EMPTY movl %ds, %ecx cmpw %cx, 0x10(%rsp) jne 1f movl %es, %ecx cmpw %cx, 0x18(%rsp) jne 1f movl %fs, %ecx cmpw %cx, 0x20(%rsp) jne 1f movl %gs, %ecx cmpw %cx, 0x28(%rsp) jne 1f /* All segments match their saved values => Category 2 (Bad IRET). */ movq (%rsp), %rcx movq 8(%rsp), %r11 addq $0x30, %rsp pushq $0 /* RIP */ UNWIND_HINT_IRET_REGS offset=8 jmp general_protection 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ movq (%rsp), %rcx movq 8(%rsp), %r11 addq $0x30, %rsp UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS SAVE_EXTRA_REGS ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ xen_hvm_callback_vector xen_evtchn_do_upcall #endif /* CONFIG_XEN */ #if IS_ENABLED(CONFIG_HYPERV) apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ hyperv_callback_vector hyperv_vector_handler #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN idtentry xennmi do_nmi has_error_code=0 idtentry xendebug do_debug has_error_code=0 idtentry xenint3 do_int3 has_error_code=0 #endif idtentry general_protection do_general_protection has_error_code=1 idtentry page_fault do_page_fault has_error_code=1 #ifdef CONFIG_KVM_GUEST idtentry async_page_fault do_async_page_fault has_error_code=1 #endif #ifdef CONFIG_X86_MCE idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* * Save all registers in pt_regs, and switch gs if needed. * Use slow, but surefire "are we in kernel?" check. * Return: ebx=0: need swapgs on exit, ebx=1: otherwise */ ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld SAVE_C_REGS 8 SAVE_EXTRA_REGS 8 ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx rdmsr testl %edx, %edx js 1f /* negative -> in kernel */ SWAPGS xorl %ebx, %ebx 1: SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 ret END(paranoid_entry) /* * "Paranoid" exit path from exception stack. This is invoked * only on return from non-NMI IST interrupts that came * from kernel space. * * We may be returning to very strange contexts (e.g. very early * in syscall entry), so checking for preemption here would * be complicated. Fortunately, we there's no good reason * to try to handle preemption here. * * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ ENTRY(paranoid_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG testl %ebx, %ebx /* swapgs needed? */ jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG .Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel END(paranoid_exit) /* * Save all registers in pt_regs, and switch gs if needed. 
* Return: EBX=0: came from user mode; EBX=1: otherwise */ ENTRY(error_entry) UNWIND_HINT_FUNC cld SAVE_C_REGS 8 SAVE_EXTRA_REGS 8 ENCODE_FRAME_POINTER 8 xorl %ebx, %ebx testb $3, CS+8(%rsp) jz .Lerror_kernelspace /* * We entered from user mode or we're pretending to have entered * from user mode due to an IRET fault. */ SWAPGS /* We have user CR3. Change to kernel CR3. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax .Lerror_entry_from_usermode_after_swapgs: /* Put us onto the real thread stack. */ popq %r12 /* save return addr in %12 */ movq %rsp, %rdi /* arg0 = pt_regs pointer */ call sync_regs movq %rax, %rsp /* switch stack */ ENCODE_FRAME_POINTER pushq %r12 /* * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode * (which can take locks). */ TRACE_IRQS_OFF CALL_enter_from_user_mode ret .Lerror_entry_done: TRACE_IRQS_OFF ret /* * There are two places in the kernel that can potentially fault with * usergs. Handle them here. B stepping K8s sometimes report a * truncated RIP for IRET exceptions returning to compat mode. Check * for these here too. */ .Lerror_kernelspace: incl %ebx leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) je .Lerror_bad_iret movl %ecx, %eax /* zero extend */ cmpq %rax, RIP+8(%rsp) je .Lbstep_iret cmpq $.Lgs_change, RIP+8(%rsp) jne .Lerror_entry_done /* * hack: .Lgs_change can fail with user gsbase. If this happens, fix up * gsbase and proceed. We'll fix up the exception and land in * .Lgs_change's error handler with kernel gsbase. */ SWAPGS SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done .Lbstep_iret: /* Fix truncated RIP */ movq %rcx, RIP+8(%rsp) /* fall through */ .Lerror_bad_iret: /* * We came from an IRET to user mode, so we have user * gsbase and CR3. Switch to kernel gsbase and CR3: */ SWAPGS SWITCH_TO_KERNEL_CR3 scratch_reg=%rax /* * Pretend that the exception came from user mode: set up pt_regs * as if we faulted immediately after IRET and clear EBX so that * error_exit knows that we will be returning to user mode. */ mov %rsp, %rdi call fixup_bad_iret mov %rax, %rsp decl %ebx jmp .Lerror_entry_from_usermode_after_swapgs END(error_entry) /* * On entry, EBX is a "return to kernel mode" flag: * 1: already in kernel mode, don't need SWAPGS * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode */ ENTRY(error_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF testl %ebx, %ebx jnz retint_kernel jmp retint_user END(error_exit) /* * Runs on exception stack. Xen PV does not go through this path at all, * so we can use real assembly here. * * Registers: * %r14: Used to save/restore the CR3 of the interrupted context * when PAGE_TABLE_ISOLATION is in use. Do not clobber. */ ENTRY(nmi) UNWIND_HINT_IRET_REGS /* * We allow breakpoints in NMIs. If a breakpoint occurs, then * the iretq it performs will take us out of NMI context. * This means that we can have nested NMIs where the next * NMI is using the top of the stack of the previous NMI. We * can't let it execute because the nested NMI will corrupt the * stack of the previous NMI. NMI handlers are not re-entrant * anyway. * * To handle this case we do the following: * Check the a special location on the stack that contains * a variable that is set when NMIs are executing. * The interrupted task's stack is also checked to see if it * is an NMI stack. 
* If the variable is not set and the stack is not the NMI * stack then: * o Set the special variable on the stack * o Copy the interrupt frame into an "outermost" location on the * stack * o Copy the interrupt frame into an "iret" location on the stack * o Continue processing the NMI * If the variable is set or the previous stack is the NMI stack: * o Modify the "iret" location to jump to the repeat_nmi * o return back to the first NMI * * Now on exit of the first NMI, we first clear the stack variable * The NMI stack will tell any nested NMIs at that point that it is * nested. Then we pop the stack normally with iret, and if there was * a nested NMI that updated the copy interrupt stack frame, a * jump will be made to the repeat_nmi code that will handle the second * NMI. * * However, espfix prevents us from directly returning to userspace * with a single IRET instruction. Similarly, IRET to user mode * can fault. We therefore handle NMIs from user space like * other IST entries. */ ASM_CLAC /* Use %rdx as our temp variable throughout */ pushq %rdx testb $3, CS-RIP+8(%rsp) jz .Lnmi_from_kernel /* * NMI from user mode. We need to run on the thread stack, but we * can't go through the normal entry paths: NMIs are masked, and * we don't want to enable interrupts, because then we'll end * up in an awkward situation in which IRQs are on but NMIs * are off. * * We also must not push anything to the stack before switching * stacks lest we corrupt the "NMI executing" variable. */ swapgs cld SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 pushq 5*8(%rdx) /* pt_regs->ss */ pushq 4*8(%rdx) /* pt_regs->rsp */ pushq 3*8(%rdx) /* pt_regs->flags */ pushq 2*8(%rdx) /* pt_regs->cs */ pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq (%rdx) /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq %rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp */ pushq %r12 /* pt_regs->r12 */ pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ UNWIND_HINT_REGS ENCODE_FRAME_POINTER /* * At this point we no longer need to worry about stack damage * due to nesting -- we're on the normal thread stack and we're * done with the NMI stack. */ movq %rsp, %rdi movq $-1, %rsi call do_nmi /* * Return back to user mode. We must *not* do the normal exit * work, because we don't want to enable interrupts. */ jmp swapgs_restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* * Here's what our stack frame will look like: * +---------------------------------------------------------+ * | original SS | * | original Return RSP | * | original RFLAGS | * | original CS | * | original RIP | * +---------------------------------------------------------+ * | temp storage for rdx | * +---------------------------------------------------------+ * | "NMI executing" variable | * +---------------------------------------------------------+ * | iret SS } Copied from "outermost" frame | * | iret Return RSP } on each loop iteration; overwritten | * | iret RFLAGS } by a nested NMI to force another | * | iret CS } iteration if needed. 
| * | iret RIP } | * +---------------------------------------------------------+ * | outermost SS } initialized in first_nmi; | * | outermost Return RSP } will not be changed before | * | outermost RFLAGS } NMI processing is done. | * | outermost CS } Copied to "iret" frame on each | * | outermost RIP } iteration. | * +---------------------------------------------------------+ * | pt_regs | * +---------------------------------------------------------+ * * The "original" frame is used by hardware. Before re-enabling * NMIs, we need to be done with it, and we need to leave enough * space for the asm code here. * * We return by executing IRET while RSP points to the "iret" frame. * That will either return for real or it will loop back into NMI * processing. * * The "outermost" frame is copied to the "iret" frame on each * iteration of the loop, so each iteration starts with the "iret" * frame pointing to the final return target. */ /* * Determine whether we're a nested NMI. * * If we interrupted kernel code between repeat_nmi and * end_repeat_nmi, then we are a nested NMI. We must not * modify the "iret" frame because it's being written by * the outer NMI. That's okay; the outer NMI handler is * about to about to call do_nmi anyway, so we can just * resume the outer NMI. */ movq $repeat_nmi, %rdx cmpq 8(%rsp), %rdx ja 1f movq $end_repeat_nmi, %rdx cmpq 8(%rsp), %rdx ja nested_nmi_out 1: /* * Now check "NMI executing". If it's set, then we're nested. * This will not detect if we interrupted an outer NMI just * before IRET. */ cmpl $1, -8(%rsp) je nested_nmi /* * Now test if the previous stack was an NMI stack. This covers * the case where we interrupt an outer NMI after it clears * "NMI executing" but before IRET. We need to be careful, though: * there is one case in which RSP could point to the NMI stack * despite there being no NMI active: naughty userspace controls * RSP at the very beginning of the SYSCALL targets. We can * pull a fast one on naughty userspace, though: we program * SYSCALL to mask DF, so userspace cannot cause DF to be set * if it controls the kernel's RSP. We set DF before we clear * "NMI executing". */ lea 6*8(%rsp), %rdx /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ cmpq %rdx, 4*8(%rsp) /* If the stack pointer is above the NMI stack, this is a normal NMI */ ja first_nmi subq $EXCEPTION_STKSZ, %rdx cmpq %rdx, 4*8(%rsp) /* If it is below the NMI stack, it is a normal NMI */ jb first_nmi /* Ah, it is within the NMI stack. */ testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) jz first_nmi /* RSP was user controlled. */ /* This is a nested NMI. */ nested_nmi: /* * Modify the "iret" frame to point to repeat_nmi, forcing another * iteration of NMI handling. */ subq $8, %rsp leaq -10*8(%rsp), %rdx pushq $__KERNEL_DS pushq %rdx pushfq pushq $__KERNEL_CS pushq $repeat_nmi /* Put stack back */ addq $(6*8), %rsp nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ iretq first_nmi: /* Restore rdx. */ movq (%rsp), %rdx /* Make room for "NMI executing". */ pushq $0 /* Leave room for the "iret" frame */ subq $(5*8), %rsp /* Copy the "original" frame to the "outermost" frame */ .rept 5 pushq 11*8(%rsp) .endr UNWIND_HINT_IRET_REGS /* Everything up to here is safe from nested NMIs */ #ifdef CONFIG_DEBUG_ENTRY /* * For ease of testing, unmask NMIs right away. Disabled by * default because IRET is very expensive. 
*/ pushq $0 /* SS */ pushq %rsp /* RSP (minus 8 because of the previous push) */ addq $8, (%rsp) /* Fix up RSP */ pushfq /* RFLAGS */ pushq $__KERNEL_CS /* CS */ pushq $1f /* RIP */ iretq /* continues at repeat_nmi below */ UNWIND_HINT_IRET_REGS 1: #endif repeat_nmi: /* * If there was a nested NMI, the first NMI's iret will return * here. But NMIs are still enabled and we can take another * nested NMI. The nested NMI checks the interrupted RIP to see * if it is between repeat_nmi and end_repeat_nmi, and if so * it will just return, as we are about to repeat an NMI anyway. * This makes it safe to copy to the stack frame that a nested * NMI will update. * * RSP is pointing to "outermost RIP". gsbase is unknown, but, if * we're repeating an NMI, gsbase has the same value that it had on * the first iteration. paranoid_entry will load the kernel * gsbase if needed before we call do_nmi. "NMI executing" * is zero. */ movq $1, 10*8(%rsp) /* Set "NMI executing". */ /* * Copy the "outermost" frame to the "iret" frame. NMIs that nest * here must not modify the "iret" frame while we're writing to * it or it will end up containing garbage. */ addq $(10*8), %rsp .rept 5 pushq -6*8(%rsp) .endr subq $(5*8), %rsp end_repeat_nmi: /* * Everything below this point can be preempted by a nested NMI. * If this happens, then the inner NMI will change the "iret" * frame to point back to repeat_nmi. */ pushq $-1 /* ORIG_RAX: no syscall to restart */ ALLOC_PT_GPREGS_ON_STACK /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit * as we should not be calling schedule in NMI context. * Even with normal interrupts enabled. An NMI should not be * setting NEED_RESCHED or anything that normal interrupts and * exceptions might do. */ call paranoid_entry UNWIND_HINT_REGS /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp, %rdi movq $-1, %rsi call do_nmi RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: POP_EXTRA_REGS POP_C_REGS /* * Skip orig_ax and the "outermost" frame to point RSP at the "iret" * at the "iret" frame. */ addq $6*8, %rsp /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from * the SYSCALL entry and exit paths. * * We arguably should just inspect RIP instead, but I (Andy) wrote * this code when I had the misapprehension that Xen PV supported * NMIs, and Xen PV would break that approach. */ std movq $0, 5*8(%rsp) /* clear "NMI executing" */ /* * iretq reads the "iret" frame and exits the NMI stack in a * single instruction. We are returning to kernel mode, so this * cannot result in a fault. Similarly, we don't need to worry * about espfix64 on the way back to kernel mode. */ iretq END(nmi) ENTRY(ignore_sysret) UNWIND_HINT_EMPTY mov $-ENOSYS, %eax sysret END(ignore_sysret) ENTRY(rewind_stack_do_exit) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE call do_exit END(rewind_stack_do_exit)
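Editor's note: the nested-NMI detection in entry_64.S above combines several checks (the interrupted RIP range, the "NMI executing" stack variable, and whether the previous stack was the NMI stack). Below is a minimal C sketch of the stack-based part of that decision; nmi_is_nested, prev_rsp, nmi_stack_top and EXCEPTION_STKSZ_GUESS are illustrative names only, and the RIP-range and EFLAGS.DF checks performed by the real code are omitted.

/* Hypothetical sketch, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

#define EXCEPTION_STKSZ_GUESS 4096      /* stand-in for the real EXCEPTION_STKSZ */

static bool nmi_is_nested(bool nmi_executing, uintptr_t prev_rsp,
                          uintptr_t nmi_stack_top)
{
        /* Case 1: the "NMI executing" variable on the stack is already set. */
        if (nmi_executing)
                return true;

        /* Case 2: the interrupted RSP lies within the NMI stack, which covers
         * the window after the variable is cleared but before the outer IRET
         * executes. */
        return prev_rsp <= nmi_stack_top &&
               prev_rsp >= nmi_stack_top - EXCEPTION_STKSZ_GUESS;
}

If either condition holds, the handler only rewrites the "iret" frame to point at repeat_nmi and returns, letting the outermost NMI run the handler again instead of corrupting its stack.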
liva/minimal-linux
12,289
arch/x86/entry/entry_64_compat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Compatibility mode system call entry point for x86-64. * * Copyright 2000-2002 Andi Kleen, SuSE Labs. */ #include "calling.h" #include <asm/asm-offsets.h> #include <asm/current.h> #include <asm/errno.h> #include <asm/ia32_unistd.h> #include <asm/thread_info.h> #include <asm/segment.h> #include <asm/irqflags.h> #include <asm/asm.h> #include <asm/smap.h> #include <linux/linkage.h> #include <linux/err.h> .section .entry.text, "ax" /* * 32-bit SYSENTER entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * on 64-bit kernels running on Intel CPUs. * * The SYSENTER instruction, in principle, should *only* occur in the * vDSO. In practice, a small number of Android devices were shipped * with a copy of Bionic that inlined a SYSENTER instruction. This * never happened in any of Google's Bionic versions -- it only happened * in a narrow range of Intel-provided versions. * * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs. * IF and VM in RFLAGS are cleared (IOW: interrupts are off). * SYSENTER does not save anything on the stack, * and does not save old RIP (!!!), RSP, or RFLAGS. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp user stack * 0(%ebp) arg6 */ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ SWAPGS /* We are about to clobber %rsp anyway, clobbering here is OK */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* * User tracing code (ptrace or signal handlers) might assume that * the saved RAX contains a 32-bit number when we're invoking a 32-bit * syscall. Just in case the high bits are nonzero, zero-extend * the syscall number. (This could almost certainly be deleted * with no ill effects.) */ movl %eax, %eax /* Construct struct pt_regs on stack */ pushq $__USER32_DS /* pt_regs->ss */ pushq %rbp /* pt_regs->sp (stashed in bp) */ /* * Push flags. This is nasty. First, interrupts are currently * off, but we need pt_regs->flags to have IF set. Second, even * if TF was set when SYSENTER started, it's clear by now. We fix * that later using TIF_SINGLESTEP. */ pushfq /* pt_regs->flags (except IF = 0) */ orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq $0 /* pt_regs->ip = 0 (placeholder) */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ pushq $0 /* pt_regs->r9 = 0 */ pushq $0 /* pt_regs->r10 = 0 */ pushq $0 /* pt_regs->r11 = 0 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ pushq $0 /* pt_regs->r12 = 0 */ pushq $0 /* pt_regs->r13 = 0 */ pushq $0 /* pt_regs->r14 = 0 */ pushq $0 /* pt_regs->r15 = 0 */ cld /* * SYSENTER doesn't filter flags, so we need to clear NT and AC * ourselves. To save a few cycles, we can check whether * either was set instead of doing an unconditional popfq. * This needs to happen before enabling interrupts so that * we don't get preempted with NT set. * * If TF is set, we will single-step all the way to here -- do_debug * will ignore all the traps. (Yes, this is slow, but so is * single-stepping in general. This allows us to avoid having * a more complicated code to handle the case where a user program * forces us to single-step through the SYSENTER entry code.) 
* * NB.: .Lsysenter_fix_flags is a label with the code under it moved * out-of-line as an optimization: NT is unlikely to be set in the * majority of the cases and instead of polluting the I$ unnecessarily, * we're keeping that code behind a branch which will predict as * not-taken and therefore its instructions won't be fetched. */ testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp) jnz .Lsysenter_fix_flags .Lsysenter_flags_fixed: /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV jmp sysret32_from_system_call .Lsysenter_fix_flags: pushq $X86_EFLAGS_FIXED popfq jmp .Lsysenter_flags_fixed GLOBAL(__end_entry_SYSENTER_compat) ENDPROC(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * on 64-bit kernels running on AMD CPUs. * * The SYSCALL instruction, in principle, should *only* occur in the * vDSO. In practice, it appears that this really is the case. * As evidence: * * - The calling convention for SYSCALL has changed several times without * anyone noticing. * * - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, anything * user task that did SYSCALL without immediately reloading SS * would randomly crash. * * - Most programmers do not directly target AMD CPUs, and the 32-bit * SYSCALL instruction does not exist on Intel CPUs. Even on AMD * CPUs, Linux disables the SYSCALL instruction on 32-bit kernels * because the SYSCALL instruction in legacy/native 32-bit mode (as * opposed to compat mode) is sufficiently poorly designed as to be * essentially unusable. * * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves * RFLAGS to R11, then loads new SS, CS, and RIP from previously * programmed MSRs. RFLAGS gets masked by a value from another MSR * (so CLD and CLAC are not needed). SYSCALL does not save anything on * the stack and does not change RSP. * * Note: RFLAGS saving+masking-with-MSR happens only in Long mode * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it). * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes * or target CS descriptor's L bit (SYSCALL does not read segment descriptors). * * Arguments: * eax system call number * ecx return address * ebx arg1 * ebp arg2 (note: not saved in the stack frame, should not be touched) * edx arg3 * esi arg4 * edi arg5 * esp user stack * 0(%esp) arg6 */ ENTRY(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs /* Stash user ESP */ movl %esp, %r8d /* Use %rsp as scratch reg. 
User ESP is stashed in r8 */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ pushq $__USER32_DS /* pt_regs->ss */ pushq %r8 /* pt_regs->sp */ pushq %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_compat_after_hwframe) movl %eax, %eax /* discard orig_ax high bits */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rbp /* pt_regs->cx (stashed in bp) */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ pushq $0 /* pt_regs->r9 = 0 */ pushq $0 /* pt_regs->r10 = 0 */ pushq $0 /* pt_regs->r11 = 0 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ pushq $0 /* pt_regs->r12 = 0 */ pushq $0 /* pt_regs->r13 = 0 */ pushq $0 /* pt_regs->r14 = 0 */ pushq $0 /* pt_regs->r15 = 0 */ /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV /* Opportunistic SYSRET */ sysret32_from_system_call: TRACE_IRQS_ON /* User mode traces as IRQs on. */ movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ movq RIP(%rsp), %rcx /* pt_regs->ip (in rcx) */ addq $RAX, %rsp /* Skip r8-r15 */ popq %rax /* pt_regs->rax */ popq %rdx /* Skip pt_regs->cx */ popq %rdx /* pt_regs->dx */ popq %rsi /* pt_regs->si */ popq %rdi /* pt_regs->di */ /* * USERGS_SYSRET32 does: * GSBASE = user's GS base * EIP = ECX * RFLAGS = R11 * CS = __USER32_CS * SS = __USER_DS * * ECX will not match pt_regs->cx, but we're returning to a vDSO * trampoline that will fix up RCX, so this is okay. * * R12-R15 are callee-saved, so they contain whatever was in them * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ movq RSP-ORIG_RAX(%rsp), %rsp /* * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored * on the process stack which is not mapped to userspace and * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 * switch until after after the last reference to the process * stack. * * %r8/%r9 are zeroed before the sysret, thus safe to clobber. */ SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 xorq %r8, %r8 xorq %r9, %r9 xorq %r10, %r10 swapgs sysretl END(entry_SYSCALL_compat) /* * 32-bit legacy system call entry. * * 32-bit x86 Linux system calls traditionally used the INT $0x80 * instruction. INT $0x80 lands here. * * This entry point can be used by 32-bit and 64-bit programs to perform * 32-bit system calls. Instances of INT $0x80 can be found inline in * various programs and libraries. It is also used by the vDSO's * __kernel_vsyscall fallback for hardware that doesn't support a faster * entry method. Restarted 32-bit system calls also fall back to INT * $0x80 regardless of what instruction was originally used to do the * system call. * * This is considered a slow path. It is not used by most libc * implementations on modern hardware except during process startup. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp arg6 */ ENTRY(entry_INT80_compat) /* * Interrupts are off on entry. 
*/ ASM_CLAC /* Do this early to minimize exposure */ SWAPGS /* * User tracing code (ptrace or signal handlers) might assume that * the saved RAX contains a 32-bit number when we're invoking a 32-bit * syscall. Just in case the high bits are nonzero, zero-extend * the syscall number. (This could almost certainly be deleted * with no ill effects.) */ movl %eax, %eax pushq %rax /* pt_regs->orig_ax */ /* switch to thread stack expects orig_ax to be pushed */ call switch_to_thread_stack pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ pushq $0 /* pt_regs->r9 = 0 */ pushq $0 /* pt_regs->r10 = 0 */ pushq $0 /* pt_regs->r11 = 0 */ pushq %rbx /* pt_regs->rbx */ pushq %rbp /* pt_regs->rbp */ pushq %r12 /* pt_regs->r12 */ pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ cld /* * User mode is traced as though IRQs are on, and the interrupt * gate turned them off. */ TRACE_IRQS_OFF movq %rsp, %rdi call do_int80_syscall_32 .Lsyscall_32_done: /* Go back to user mode. */ TRACE_IRQS_ON jmp swapgs_restore_regs_and_return_to_usermode END(entry_INT80_compat) ENTRY(stub32_clone) /* * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr). * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val). * * The native 64-bit kernel's sys_clone() implements the latter, * so we need to swap arguments here before calling it: */ xchg %r8, %rcx jmp sys_clone ENDPROC(stub32_clone)
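Editor's note: the register swap in stub32_clone above is easier to see as a C shim. The sketch below is illustrative only; sys_clone64 and compat_clone_shim are hypothetical names standing in for the native 64-bit handler and the compat wrapper.

/* Hypothetical sketch, not kernel code. */
long sys_clone64(unsigned long flags, unsigned long newsp,
                 int *parent_tidptr, int *child_tidptr, unsigned long tls);

/* The 32-bit ABI passes tls before child_tidptr; the native 64-bit handler
 * expects the opposite order, so the wrapper exchanges only the last two
 * arguments, which is what the xchg %r8, %rcx above does in registers. */
long compat_clone_shim(unsigned long flags, unsigned long newsp,
                       int *parent_tidptr, unsigned long tls,
                       int *child_tidptr)
{
        return sys_clone64(flags, newsp, parent_tidptr, child_tidptr, tls);
}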
liva/minimal-linux
26,805
arch/x86/entry/entry_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1991,1992 Linus Torvalds * * entry_32.S contains the system-call and low-level fault and trap handling routines. * * Stack layout while running C code: * ptrace needs to have all registers on the stack. * If the order here is changed, it needs to be * updated in fork.c:copy_process(), signal.c:do_signal(), * ptrace.c and ptrace.h * * 0(%esp) - %ebx * 4(%esp) - %ecx * 8(%esp) - %edx * C(%esp) - %esi * 10(%esp) - %edi * 14(%esp) - %ebp * 18(%esp) - %eax * 1C(%esp) - %ds * 20(%esp) - %es * 24(%esp) - %fs * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS * 2C(%esp) - orig_eax * 30(%esp) - %eip * 34(%esp) - %cs * 38(%esp) - %eflags * 3C(%esp) - %oldesp * 40(%esp) - %oldss */ #include <linux/linkage.h> #include <linux/err.h> #include <asm/thread_info.h> #include <asm/irqflags.h> #include <asm/errno.h> #include <asm/segment.h> #include <asm/smp.h> #include <asm/percpu.h> #include <asm/processor-flags.h> #include <asm/irq_vectors.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/frame.h> #include <asm/nospec-branch.h> .section .entry.text, "ax" /* * We use macros for low-level operations which need to be overridden * for paravirtualization. The following will never clobber any registers: * INTERRUPT_RETURN (aka. "iret") * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). * * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). * Allowing a register to be clobbered can shrink the paravirt replacement * enough to patch inline, increasing performance. */ #ifdef CONFIG_PREEMPT # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF #else # define preempt_stop(clobbers) # define resume_kernel restore_all #endif .macro TRACE_IRQS_IRET #ifdef CONFIG_TRACE_IRQFLAGS testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off? jz 1f TRACE_IRQS_ON 1: #endif .endm /* * User gs save/restore * * %gs is used for userland TLS and kernel only uses it for stack * canary which is required to be at %gs:20 by gcc. Read the comment * at the top of stackprotector.h for more info. * * Local labels 98 and 99 are used. 
*/ #ifdef CONFIG_X86_32_LAZY_GS /* unfortunately push/pop can't be no-op */ .macro PUSH_GS pushl $0 .endm .macro POP_GS pop=0 addl $(4 + \pop), %esp .endm .macro POP_GS_EX .endm /* all the rest are no-op */ .macro PTGS_TO_GS .endm .macro PTGS_TO_GS_EX .endm .macro GS_TO_REG reg .endm .macro REG_TO_PTGS reg .endm .macro SET_KERNEL_GS reg .endm #else /* CONFIG_X86_32_LAZY_GS */ .macro PUSH_GS pushl %gs .endm .macro POP_GS pop=0 98: popl %gs .if \pop <> 0 add $\pop, %esp .endif .endm .macro POP_GS_EX .pushsection .fixup, "ax" 99: movl $0, (%esp) jmp 98b .popsection _ASM_EXTABLE(98b, 99b) .endm .macro PTGS_TO_GS 98: mov PT_GS(%esp), %gs .endm .macro PTGS_TO_GS_EX .pushsection .fixup, "ax" 99: movl $0, PT_GS(%esp) jmp 98b .popsection _ASM_EXTABLE(98b, 99b) .endm .macro GS_TO_REG reg movl %gs, \reg .endm .macro REG_TO_PTGS reg movl \reg, PT_GS(%esp) .endm .macro SET_KERNEL_GS reg movl $(__KERNEL_STACK_CANARY), \reg movl \reg, %gs .endm #endif /* CONFIG_X86_32_LAZY_GS */ .macro SAVE_ALL pt_regs_ax=%eax cld PUSH_GS pushl %fs pushl %es pushl %ds pushl \pt_regs_ax pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx movl $(__USER_DS), %edx movl %edx, %ds movl %edx, %es movl $(__KERNEL_PERCPU), %edx movl %edx, %fs SET_KERNEL_GS %edx .endm /* * This is a sneaky trick to help the unwinder find pt_regs on the stack. The * frame pointer is replaced with an encoded pointer to pt_regs. The encoding * is just clearing the MSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the * original rbp. */ .macro ENCODE_FRAME_POINTER #ifdef CONFIG_FRAME_POINTER mov %esp, %ebp andl $0x7fffffff, %ebp #endif .endm .macro RESTORE_INT_REGS popl %ebx popl %ecx popl %edx popl %esi popl %edi popl %ebp popl %eax .endm .macro RESTORE_REGS pop=0 RESTORE_INT_REGS 1: popl %ds 2: popl %es 3: popl %fs POP_GS \pop .pushsection .fixup, "ax" 4: movl $0, (%esp) jmp 1b 5: movl $0, (%esp) jmp 2b 6: movl $0, (%esp) jmp 3b .popsection _ASM_EXTABLE(1b, 4b) _ASM_EXTABLE(2b, 5b) _ASM_EXTABLE(3b, 6b) POP_GS_EX .endm /* * %eax: prev task * %edx: next task */ ENTRY(__switch_to_asm) /* * Save callee-saved registers * This must match the order in struct inactive_task_frame */ pushl %ebp pushl %ebx pushl %edi pushl %esi /* switch stack */ movl %esp, TASK_threadsp(%eax) movl TASK_threadsp(%edx), %esp #ifdef CONFIG_CC_STACKPROTECTOR movl TASK_stack_canary(%edx), %ebx movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif #ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated * with userspace addresses. On CPUs where those concerns * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW #endif /* restore callee-saved registers */ popl %esi popl %edi popl %ebx popl %ebp jmp __switch_to END(__switch_to_asm) /* * The unwinder expects the last frame on the stack to always be at the same * offset from the end of the page, which allows it to validate the stack. * Calling schedule_tail() directly would break that convention because its an * asmlinkage function so its argument has to be pushed on the stack. This * wrapper creates a proper "end of stack" frame header before the call. 
*/ ENTRY(schedule_tail_wrapper) FRAME_BEGIN pushl %eax call schedule_tail popl %eax FRAME_END ret ENDPROC(schedule_tail_wrapper) /* * A newly forked process directly context switches into this address. * * eax: prev task we switched from * ebx: kernel thread func (NULL for user thread) * edi: kernel thread arg */ ENTRY(ret_from_fork) call schedule_tail_wrapper testl %ebx, %ebx jnz 1f /* kernel threads are uncommon */ 2: /* When we fork, we trace the syscall return in the child, too. */ movl %esp, %eax call syscall_return_slowpath jmp restore_all /* kernel thread */ 1: movl %edi, %eax CALL_NOSPEC %ebx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() * syscall. */ movl $0, PT_EAX(%esp) jmp 2b END(ret_from_fork) /* * Return to user mode is not as complex as all this looks, * but we want the default path for a system call return to * go as quickly as possible which is why some of this is * less clear than it otherwise should be. */ # userspace resumption stub bypassing syscall exit tracing ALIGN ret_from_exception: preempt_stop(CLBR_ANY) ret_from_intr: #ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax #else /* * We can be coming here from child spawned by kernel_thread(). */ movl PT_CS(%esp), %eax andl $SEGMENT_RPL_MASK, %eax #endif cmpl $USER_RPL, %eax jb resume_kernel # not returning to v8086 or userspace ENTRY(resume_userspace) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF movl %esp, %eax call prepare_exit_to_usermode jmp restore_all END(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) .Lneed_resched: cmpl $0, PER_CPU_VAR(__preempt_count) jnz restore_all testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all call preempt_schedule_irq jmp .Lneed_resched END(resume_kernel) #endif GLOBAL(__begin_SYSENTER_singlestep_region) /* * All code from here through __end_SYSENTER_singlestep_region is subject * to being single-stepped if a user program sets TF and executes SYSENTER. * There is absolutely nothing that we can do to prevent this from happening * (thanks Intel!). To keep our handling of this situation as simple as * possible, we handle TF just like AC and NT, except that our #DB handler * will ignore all of the single-step traps generated in this range. */ #ifdef CONFIG_XEN /* * Xen doesn't set %esp to be precisely what the normal SYSENTER * entry point expects, so fix it up before using the normal path. */ ENTRY(xen_sysenter_target) addl $5*4, %esp /* remove xen-provided frame */ jmp .Lsysenter_past_esp #endif /* * 32-bit SYSENTER entry. * * 32-bit system calls through the vDSO's __kernel_vsyscall enter here * if X86_FEATURE_SEP is available. This is the preferred system call * entry on 32-bit systems. * * The SYSENTER instruction, in principle, should *only* occur in the * vDSO. In practice, a small number of Android devices were shipped * with a copy of Bionic that inlined a SYSENTER instruction. This * never happened in any of Google's Bionic versions -- it only happened * in a narrow range of Intel-provided versions. * * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs. * IF and VM in RFLAGS are cleared (IOW: interrupts are off). * SYSENTER does not save anything on the stack, * and does not save old EIP (!!!), ESP, or EFLAGS. 
* * To avoid losing track of EFLAGS.VM (and thus potentially corrupting * user and/or vm86 state), we explicitly disable the SYSENTER * instruction in vm86 mode by reprogramming the MSRs. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp user stack * 0(%ebp) arg6 */ ENTRY(entry_SYSENTER_32) movl TSS_sysenter_sp0(%esp), %esp .Lsysenter_past_esp: pushl $__USER_DS /* pt_regs->ss */ pushl %ebp /* pt_regs->sp (stashed in bp) */ pushfl /* pt_regs->flags (except IF = 0) */ orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ pushl $__USER_CS /* pt_regs->cs */ pushl $0 /* pt_regs->ip = 0 (placeholder) */ pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */ /* * SYSENTER doesn't filter flags, so we need to clear NT, AC * and TF ourselves. To save a few cycles, we can check whether * either was set instead of doing an unconditional popfq. * This needs to happen before enabling interrupts so that * we don't get preempted with NT set. * * If TF is set, we will single-step all the way to here -- do_debug * will ignore all the traps. (Yes, this is slow, but so is * single-stepping in general. This allows us to avoid having * a more complicated code to handle the case where a user program * forces us to single-step through the SYSENTER entry code.) * * NB.: .Lsysenter_fix_flags is a label with the code under it moved * out-of-line as an optimization: NT is unlikely to be set in the * majority of the cases and instead of polluting the I$ unnecessarily, * we're keeping that code behind a branch which will predict as * not-taken and therefore its instructions won't be fetched. */ testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp) jnz .Lsysenter_fix_flags .Lsysenter_flags_fixed: /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF movl %esp, %eax call do_fast_syscall_32 /* XEN PV guests always use IRET path */ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV /* Opportunistic SYSEXIT */ TRACE_IRQS_ON /* User mode traces as IRQs on. */ movl PT_EIP(%esp), %edx /* pt_regs->ip */ movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ 1: mov PT_FS(%esp), %fs PTGS_TO_GS popl %ebx /* pt_regs->bx */ addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ popl %esi /* pt_regs->si */ popl %edi /* pt_regs->di */ popl %ebp /* pt_regs->bp */ popl %eax /* pt_regs->ax */ /* * Restore all flags except IF. (We restore IF separately because * STI gives a one-instruction window in which we won't be interrupted, * whereas POPF does not.) */ addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */ btr $X86_EFLAGS_IF_BIT, (%esp) popfl /* * Return back to the vDSO, which will pop ecx and edx. * Don't bother with DS and ES (they already contain __USER_DS). */ sti sysexit .pushsection .fixup, "ax" 2: movl $0, PT_FS(%esp) jmp 1b .popsection _ASM_EXTABLE(1b, 2b) PTGS_TO_GS_EX .Lsysenter_fix_flags: pushl $X86_EFLAGS_FIXED popfl jmp .Lsysenter_flags_fixed GLOBAL(__end_SYSENTER_singlestep_region) ENDPROC(entry_SYSENTER_32) /* * 32-bit legacy system call entry. * * 32-bit x86 Linux system calls traditionally used the INT $0x80 * instruction. INT $0x80 lands here. * * This entry point can be used by any 32-bit perform system calls. * Instances of INT $0x80 can be found inline in various programs and * libraries. It is also used by the vDSO's __kernel_vsyscall * fallback for hardware that doesn't support a faster entry method. 
* Restarted 32-bit system calls also fall back to INT $0x80 * regardless of what instruction was originally used to do the system * call. (64-bit programs can use INT $0x80 as well, but they can * only run on 64-bit kernels and therefore land in * entry_INT80_compat.) * * This is considered a slow path. It is not used by most libc * implementations on modern hardware except during process startup. * * Arguments: * eax system call number * ebx arg1 * ecx arg2 * edx arg3 * esi arg4 * edi arg5 * ebp arg6 */ ENTRY(entry_INT80_32) ASM_CLAC pushl %eax /* pt_regs->orig_ax */ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */ /* * User mode is traced as though IRQs are on, and the interrupt gate * turned them off. */ TRACE_IRQS_OFF movl %esp, %eax call do_int80_syscall_32 .Lsyscall_32_done: restore_all: TRACE_IRQS_IRET .Lrestore_all_notrace: #ifdef CONFIG_X86_ESPFIX32 ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS /* * Warning: PT_OLDSS(%esp) contains the wrong/random values if we * are returning to the kernel. * See comments in process.c:copy_thread() for details. */ movb PT_OLDSS(%esp), %ah movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax je .Lldt_ss # returning to user-space with LDT SS #endif .Lrestore_nocheck: RESTORE_REGS 4 # skip orig_eax/error_code .Lirq_return: INTERRUPT_RETURN .section .fixup, "ax" ENTRY(iret_exc ) pushl $0 # no error code pushl $do_iret_error jmp common_exception .previous _ASM_EXTABLE(.Lirq_return, iret_exc) #ifdef CONFIG_X86_ESPFIX32 .Lldt_ss: /* * Setup and switch to ESPFIX stack * * We're returning to userspace with a 16 bit stack. The CPU will not * restore the high word of ESP for us on executing iret... This is an * "official" bug of all the x86-compatible CPUs, which we can work * around to make dosemu and wine happy. We do this by preloading the * high word of ESP with the high word of the userspace ESP while * compensating for the offset by changing to the ESPFIX segment with * a base address that matches for the difference. */ #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) mov %esp, %edx /* load kernel esp */ mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov %dx, %ax /* eax: new kernel esp */ sub %eax, %edx /* offset (low word is 0) */ shr $16, %edx mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ pushl $__ESPFIX_SS pushl %eax /* new kernel esp */ /* * Disable interrupts, but do not irqtrace this section: we * will soon execute iret and the tracer was already set to * the irqstate after the IRET: */ DISABLE_INTERRUPTS(CLBR_ANY) lss (%esp), %esp /* switch to espfix segment */ jmp .Lrestore_nocheck #endif ENDPROC(entry_INT80_32) .macro FIXUP_ESPFIX_STACK /* * Switch back for ESPFIX stack to the normal zerobased stack * * We can't call C functions using the ESPFIX stack. This code reads * the high word of the segment base from the GDT and swiches to the * normal stack and adjusts ESP with the matching offset. 
*/ #ifdef CONFIG_X86_ESPFIX32 /* fixup the stack */ mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ shl $16, %eax addl %esp, %eax /* the adjusted stack pointer */ pushl $__KERNEL_DS pushl %eax lss (%esp), %esp /* switch to the normal stack segment */ #endif .endm .macro UNWIND_ESPFIX_STACK #ifdef CONFIG_X86_ESPFIX32 movl %ss, %eax /* see if on espfix stack */ cmpw $__ESPFIX_SS, %ax jne 27f movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es /* switch to normal stack */ FIXUP_ESPFIX_STACK 27: #endif .endm /* * Build the entry stubs with some assembler magic. * We pack 1 stub into every 8-byte block. */ .align 8 ENTRY(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ vector=vector+1 jmp common_interrupt .align 8 .endr END(irq_entries_start) /* * the CPU automatically disables interrupts when executing an IRQ vector, * so IRQ-flags tracing has to follow that: */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: ASM_CLAC addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ SAVE_ALL ENCODE_FRAME_POINTER TRACE_IRQS_OFF movl %esp, %eax call do_IRQ jmp ret_from_intr ENDPROC(common_interrupt) #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ ASM_CLAC; \ pushl $~(nr); \ SAVE_ALL; \ ENCODE_FRAME_POINTER; \ TRACE_IRQS_OFF \ movl %esp, %eax; \ call fn; \ jmp ret_from_intr; \ ENDPROC(name) #define BUILD_INTERRUPT(name, nr) \ BUILD_INTERRUPT3(name, nr, smp_##name); \ /* The include is where all of the SMP etc. interrupts come from */ #include <asm/entry_arch.h> ENTRY(coprocessor_error) ASM_CLAC pushl $0 pushl $do_coprocessor_error jmp common_exception END(coprocessor_error) ENTRY(simd_coprocessor_error) ASM_CLAC pushl $0 #ifdef CONFIG_X86_INVD_BUG /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ ALTERNATIVE "pushl $do_general_protection", \ "pushl $do_simd_coprocessor_error", \ X86_FEATURE_XMM #else pushl $do_simd_coprocessor_error #endif jmp common_exception END(simd_coprocessor_error) ENTRY(device_not_available) ASM_CLAC pushl $-1 # mark this as an int pushl $do_device_not_available jmp common_exception END(device_not_available) #ifdef CONFIG_PARAVIRT ENTRY(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) END(native_iret) #endif ENTRY(overflow) ASM_CLAC pushl $0 pushl $do_overflow jmp common_exception END(overflow) ENTRY(bounds) ASM_CLAC pushl $0 pushl $do_bounds jmp common_exception END(bounds) ENTRY(invalid_op) ASM_CLAC pushl $0 pushl $do_invalid_op jmp common_exception END(invalid_op) ENTRY(coprocessor_segment_overrun) ASM_CLAC pushl $0 pushl $do_coprocessor_segment_overrun jmp common_exception END(coprocessor_segment_overrun) ENTRY(invalid_TSS) ASM_CLAC pushl $do_invalid_TSS jmp common_exception END(invalid_TSS) ENTRY(segment_not_present) ASM_CLAC pushl $do_segment_not_present jmp common_exception END(segment_not_present) ENTRY(stack_segment) ASM_CLAC pushl $do_stack_segment jmp common_exception END(stack_segment) ENTRY(alignment_check) ASM_CLAC pushl $do_alignment_check jmp common_exception END(alignment_check) ENTRY(divide_error) ASM_CLAC pushl $0 # no error code pushl $do_divide_error jmp common_exception END(divide_error) #ifdef CONFIG_X86_MCE ENTRY(machine_check) ASM_CLAC pushl $0 pushl machine_check_vector jmp common_exception END(machine_check) #endif ENTRY(spurious_interrupt_bug) ASM_CLAC pushl $0 pushl $do_spurious_interrupt_bug jmp common_exception END(spurious_interrupt_bug) #ifdef 
CONFIG_XEN ENTRY(xen_hypervisor_callback) pushl $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL ENCODE_FRAME_POINTER TRACE_IRQS_OFF /* * Check to see if we got the event in the critical * region in xen_iret_direct, after we've reenabled * events and checked for pending events. This simulates * iret instruction's behaviour where it delivers a * pending interrupt when enabling interrupts: */ movl PT_EIP(%esp), %eax cmpl $xen_iret_start_crit, %eax jb 1f cmpl $xen_iret_end_crit, %eax jae 1f jmp xen_iret_crit_fixup ENTRY(xen_do_upcall) 1: mov %esp, %eax call xen_evtchn_do_upcall #ifndef CONFIG_PREEMPT call xen_maybe_preempt_hcall #endif jmp ret_from_intr ENDPROC(xen_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. * We get here for two reasons: * 1. Fault while reloading DS, ES, FS or GS * 2. Fault while executing IRET * Category 1 we fix up by reattempting the load, and zeroing the segment * register if the load fails. * Category 2 we fix up by jumping to do_iret_error. We cannot use the * normal Linux return path in this case because if we use the IRET hypercall * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by maintaining a status value in EAX. */ ENTRY(xen_failsafe_callback) pushl %eax movl $1, %eax 1: mov 4(%esp), %ds 2: mov 8(%esp), %es 3: mov 12(%esp), %fs 4: mov 16(%esp), %gs /* EAX == 0 => Category 1 (Bad segment) EAX != 0 => Category 2 (Bad IRET) */ testl %eax, %eax popl %eax lea 16(%esp), %esp jz 5f jmp iret_exc 5: pushl $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL ENCODE_FRAME_POINTER jmp ret_from_exception .section .fixup, "ax" 6: xorl %eax, %eax movl %eax, 4(%esp) jmp 1b 7: xorl %eax, %eax movl %eax, 8(%esp) jmp 2b 8: xorl %eax, %eax movl %eax, 12(%esp) jmp 3b 9: xorl %eax, %eax movl %eax, 16(%esp) jmp 4b .previous _ASM_EXTABLE(1b, 6b) _ASM_EXTABLE(2b, 7b) _ASM_EXTABLE(3b, 8b) _ASM_EXTABLE(4b, 9b) ENDPROC(xen_failsafe_callback) BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, xen_evtchn_do_upcall) #endif /* CONFIG_XEN */ #if IS_ENABLED(CONFIG_HYPERV) BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, hyperv_vector_handler) #endif /* CONFIG_HYPERV */ ENTRY(page_fault) ASM_CLAC pushl $do_page_fault ALIGN jmp common_exception END(page_fault) common_exception: /* the function address is in %gs's slot on the stack */ pushl %fs pushl %es pushl %ds pushl %eax pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx ENCODE_FRAME_POINTER cld movl $(__KERNEL_PERCPU), %ecx movl %ecx, %fs UNWIND_ESPFIX_STACK GS_TO_REG %ecx movl PT_GS(%esp), %edi # get the function address movl PT_ORIG_EAX(%esp), %edx # get the error code movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart REG_TO_PTGS %ecx SET_KERNEL_GS %ecx movl $(__USER_DS), %ecx movl %ecx, %ds movl %ecx, %es TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer CALL_NOSPEC %edi jmp ret_from_exception END(common_exception) ENTRY(debug) /* * #DB can happen at the first instruction of * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this * happens, then we will be running on a very small stack. We * need to detect this condition and switch to the thread * stack before calling any C code at all. * * If you edit this code, keep in mind that NMIs can happen in here. */ ASM_CLAC pushl $-1 # mark this as an int SAVE_ALL ENCODE_FRAME_POINTER xorl %edx, %edx # error code 0 movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? 
*/ movl PER_CPU_VAR(cpu_entry_area), %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ cmpl $SIZEOF_entry_stack, %ecx jb .Ldebug_from_sysenter_stack TRACE_IRQS_OFF call do_debug jmp ret_from_exception .Ldebug_from_sysenter_stack: /* We're on the SYSENTER stack. Switch off. */ movl %esp, %ebx movl PER_CPU_VAR(cpu_current_top_of_stack), %esp TRACE_IRQS_OFF call do_debug movl %ebx, %esp jmp ret_from_exception END(debug) /* * NMI is doubly nasty. It can happen on the first instruction of * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 * switched stacks. We handle both conditions by simply checking whether we * interrupted kernel code running on the SYSENTER stack. */ ENTRY(nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 pushl %eax movl %ss, %eax cmpw $__ESPFIX_SS, %ax popl %eax je .Lnmi_espfix_stack #endif pushl %eax # pt_regs->orig_ax SAVE_ALL ENCODE_FRAME_POINTER xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ call do_nmi jmp .Lrestore_all_notrace .Lnmi_from_sysenter_stack: /* * We're on the SYSENTER stack. Switch off. No one (not even debug) * is using the thread stack right now, so it's safe for us to use it. */ movl %esp, %ebx movl PER_CPU_VAR(cpu_current_top_of_stack), %esp call do_nmi movl %ebx, %esp jmp .Lrestore_all_notrace #ifdef CONFIG_X86_ESPFIX32 .Lnmi_espfix_stack: /* * create the pointer to lss back */ pushl %ss pushl %esp addl $4, (%esp) /* copy the iret frame of 12 bytes */ .rept 3 pushl 16(%esp) .endr pushl %eax SAVE_ALL ENCODE_FRAME_POINTER FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx, %edx # zero error code call do_nmi RESTORE_REGS lss 12+4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif END(nmi) ENTRY(int3) ASM_CLAC pushl $-1 # mark this as an int SAVE_ALL ENCODE_FRAME_POINTER TRACE_IRQS_OFF xorl %edx, %edx # zero error code movl %esp, %eax # pt_regs pointer call do_int3 jmp ret_from_exception END(int3) ENTRY(general_protection) pushl $do_general_protection jmp common_exception END(general_protection) #ifdef CONFIG_KVM_GUEST ENTRY(async_page_fault) ASM_CLAC pushl $do_async_page_fault jmp common_exception END(async_page_fault) #endif ENTRY(rewind_stack_do_exit) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp call do_exit 1: jmp 1b END(rewind_stack_do_exit)
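Editor's note: both the #DB and NMI paths in entry_32.S above decide whether they interrupted code running on the small entry (SYSENTER) stack before calling any C code. A minimal C rendering of that test follows; on_entry_stack, entry_stack_end and entry_stack_size are illustrative names for the values the assembly derives from cpu_entry_area.

/* Hypothetical sketch, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool on_entry_stack(uintptr_t esp, uintptr_t entry_stack_end,
                           size_t entry_stack_size)
{
        /* Mirrors the asm: compute (end of entry_stack) - esp and do an
         * unsigned compare against the stack size, so an esp above the end
         * wraps around and is rejected as well. */
        return (entry_stack_end - esp) < entry_stack_size;
}

When the test is true, the handlers stash %esp and switch to the thread stack (cpu_current_top_of_stack) before calling do_debug or do_nmi, then switch back before returning.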
liva/minimal-linux
9,990
arch/x86/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ld script for the x86 kernel * * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> * * Modernisation, unification and other changes and fixes: * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org> * * * Don't define absolute symbols until and unless you know that symbol * value is should remain constant even if kernel image is relocated * at run time. Absolute symbols are not relocated. If symbol value should * change if kernel is relocated, make the symbol section relative and * put it inside the section definition. */ #ifdef CONFIG_X86_32 #define LOAD_OFFSET __PAGE_OFFSET #else #define LOAD_OFFSET __START_KERNEL_map #endif #include <asm-generic/vmlinux.lds.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page_types.h> #include <asm/orc_lookup.h> #include <asm/cache.h> #include <asm/boot.h> #undef i386 /* in case the preprocessor is a 32bit one */ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) jiffies = jiffies_64; #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) jiffies_64 = jiffies; #endif #if defined(CONFIG_X86_64) /* * On 64-bit, align RODATA to 2MB so we retain large page mappings for * boundaries spanning kernel text, rodata and data sections. * * However, kernel identity mappings will have different RWX permissions * to the pages mapping to text and to the pages padding (which are freed) the * text section. Hence kernel identity mappings will be broken to smaller * pages. For 64-bit, kernel text and kernel identity mappings are different, * so we can enable protection checks as well as retain 2MB large page * mappings for kernel text. */ #define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE); #define X64_ALIGN_RODATA_END \ . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); #else #define X64_ALIGN_RODATA_BEGIN #define X64_ALIGN_RODATA_END #define ALIGN_ENTRY_TEXT_BEGIN #define ALIGN_ENTRY_TEXT_END #endif PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_X86_64 #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif init PT_LOAD FLAGS(7); /* RWE */ #endif note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { #ifdef CONFIG_X86_32 . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET); #else . = __START_KERNEL; phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET); #endif /* Text and read-only data */ .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; _stext = .; /* bootstrapping code */ HEAD_TEXT . = ALIGN(8); TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN ENTRY_TEXT IRQENTRY_TEXT ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) #ifdef CONFIG_X86_64 . = ALIGN(PAGE_SIZE); _entry_trampoline = .; *(.entry_trampoline) . = ALIGN(PAGE_SIZE); ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); #endif #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; *(.text.__x86.indirect_thunk) __indirect_thunk_end = .; #endif /* End of text section */ _etext = .; } :text = 0x9090 NOTES :text :note EXCEPTION_TABLE(16) :text = 0x9090 /* .text should occupy whole number of pages */ . 
= ALIGN(PAGE_SIZE); X64_ALIGN_RODATA_BEGIN RO_DATA(PAGE_SIZE) X64_ALIGN_RODATA_END /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Start of data section */ _sdata = .; /* init_task */ INIT_TASK_DATA(THREAD_SIZE) #ifdef CONFIG_X86_32 /* 32 bit has nosave before _edata */ NOSAVE_DATA #endif PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) DATA_DATA CONSTRUCTORS /* rarely changed data like cpu maps */ READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES) /* End of data section */ _edata = .; } :data BUG_TABLE ORC_UNWIND_TABLE . = ALIGN(PAGE_SIZE); __vvar_page = .; .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { /* work around gold bug 13023 */ __vvar_beginning_hack = .; /* Place all vvars at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) \ . = __vvar_beginning_hack + offset; \ *(.vvar_ ## name) #define __VVAR_KERNEL_LDS #include <asm/vvar.h> #undef __VVAR_KERNEL_LDS #undef EMIT_VVAR /* * Pad the rest of the page with zeros. Otherwise the loader * can leave garbage here. */ . = __vvar_beginning_hack + PAGE_SIZE; } :data . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); /* Init code and data - will be freed after init */ . = ALIGN(PAGE_SIZE); .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { __init_begin = .; /* paired with __init_end */ } #if defined(CONFIG_X86_64) && defined(CONFIG_SMP) /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should * start another segment - init. */ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START, "per-CPU data too large - increase CONFIG_PHYSICAL_START") #endif INIT_TEXT_SECTION(PAGE_SIZE) #ifdef CONFIG_X86_64 :init #endif /* * Section for code used exclusively before alternatives are run. All * references to such code must be patched out by alternatives, normally * by using X86_FEATURE_ALWAYS CPU feature bit. * * See static_cpu_has() for an example. */ .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) { *(.altinstr_aux) } INIT_DATA_SECTION(16) .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; *(.x86_cpu_dev.init) __x86_cpu_dev_end = .; } #ifdef CONFIG_X86_INTEL_MID .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \ LOAD_OFFSET) { __x86_intel_mid_dev_start = .; *(.x86_intel_mid_dev.init) __x86_intel_mid_dev_end = .; } #endif /* * start address and size of operations which during runtime * can be patched with virtualization friendly instructions or * baremetal native ones. Think page table operations. * Details in paravirt_types.h */ . = ALIGN(8); .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { __parainstructions = .; *(.parainstructions) __parainstructions_end = .; } /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" * Think locking instructions on spinlocks. */ . = ALIGN(8); .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } /* * And here are the replacement instructions. The linker sticks * them as binary blobs. The .altinstructions has enough data to * get the address and the length of them to patch the kernel safely. */ .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { *(.altinstr_replacement) } /* * struct iommu_table_entry entries are injected in this section. * It is an array of IOMMUs which during run time gets sorted depending * on its dependency order. 
After rootfs_initcall is complete * this section can be safely removed. */ .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) { __iommu_table = .; *(.iommu_table) __iommu_table_end = .; } . = ALIGN(8); .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { __apicdrivers = .; *(.apicdrivers); __apicdrivers_end = .; } . = ALIGN(8); /* * .exit.text is discard at runtime, not link time, to deal with * references from .altinstructions and .eh_frame */ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT } .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) PERCPU_SECTION(INTERNODE_CACHE_BYTES) #endif . = ALIGN(PAGE_SIZE); /* freed after init ends here */ .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) { __init_end = .; } /* * smp_locks might be freed after init * start/end must be page aligned */ . = ALIGN(PAGE_SIZE); .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) . = ALIGN(PAGE_SIZE); __smp_locks_end = .; } #ifdef CONFIG_X86_64 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { NOSAVE_DATA } #endif /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) *(.bss) . = ALIGN(PAGE_SIZE); __bss_stop = .; } . = ALIGN(PAGE_SIZE); .brk : AT(ADDR(.brk) - LOAD_OFFSET) { __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.brk_reservation) /* areas brk users have reserved */ __brk_limit = .; } . = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */ _end = .; STABS_DEBUG DWARF_DEBUG /* Sections to be discarded */ DISCARDS /DISCARD/ : { *(.eh_frame) } } #ifdef CONFIG_X86_32 /* * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: */ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #else /* * Per-cpu symbols which need to be offset from __per_cpu_load * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ . = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP . = ASSERT((irq_stack_union == 0), "irq_stack_union is not at start of per-cpu area"); #endif #endif /* CONFIG_X86_32 */ #ifdef CONFIG_KEXEC_CORE #include <asm/kexec.h> . = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, "kexec control code size is too big"); #endif
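The linker script above brackets several build-time tables with start/end symbols (for example __x86_cpu_dev_start/__x86_cpu_dev_end around .x86_cpu_dev.init). A sketch of how such a pair is typically walked from C; the element type is a stand-in, not the real struct cpu_dev:

#include <stddef.h>

extern const void *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];

static size_t nr_cpu_dev_entries(void)
{
	const void **p;
	size_t n = 0;

	/* Every object the compiler dropped into .x86_cpu_dev.init lands
	 * between the two linker-defined symbols. */
	for (p = __x86_cpu_dev_start; p < __x86_cpu_dev_end; p++)
		n++;
	return n;
}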
liva/minimal-linux
5,131
arch/x86/kernel/ftrace_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017 Steven Rostedt, VMware Inc. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/segment.h> #include <asm/export.h> #include <asm/ftrace.h> #include <asm/nospec-branch.h> #ifdef CC_USING_FENTRY # define function_hook __fentry__ EXPORT_SYMBOL(__fentry__) #else # define function_hook mcount EXPORT_SYMBOL(mcount) #endif #ifdef CONFIG_DYNAMIC_FTRACE /* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */ #if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER) # define USING_FRAME_POINTER #endif #ifdef USING_FRAME_POINTER # define MCOUNT_FRAME 1 /* using frame = true */ #else # define MCOUNT_FRAME 0 /* using frame = false */ #endif ENTRY(function_hook) ret END(function_hook) ENTRY(ftrace_caller) #ifdef USING_FRAME_POINTER # ifdef CC_USING_FENTRY /* * Frame pointers are of ip followed by bp. * Since fentry is an immediate jump, we are left with * parent-ip, function-ip. We need to add a frame with * parent-ip followed by ebp. */ pushl 4(%esp) /* parent ip */ pushl %ebp movl %esp, %ebp pushl 2*4(%esp) /* function ip */ # endif /* For mcount, the function ip is directly above */ pushl %ebp movl %esp, %ebp #endif pushl %eax pushl %ecx pushl %edx pushl $0 /* Pass NULL as regs pointer */ #ifdef USING_FRAME_POINTER /* Load parent ebp into edx */ movl 4*4(%esp), %edx #else /* There's no frame pointer, load the appropriate stack addr instead */ lea 4*4(%esp), %edx #endif movl (MCOUNT_FRAME+4)*4(%esp), %eax /* load the rip */ /* Get the parent ip */ movl 4(%edx), %edx /* edx has ebp */ movl function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call ftrace_call: call ftrace_stub addl $4, %esp /* skip NULL pointer */ popl %edx popl %ecx popl %eax #ifdef USING_FRAME_POINTER popl %ebp # ifdef CC_USING_FENTRY addl $4,%esp /* skip function ip */ popl %ebp /* this is the orig bp */ addl $4, %esp /* skip parent ip */ # endif #endif .Lftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: jmp ftrace_stub #endif /* This is weak to keep gas from relaxing the jumps */ WEAK(ftrace_stub) ret END(ftrace_caller) ENTRY(ftrace_regs_caller) /* * i386 does not save SS and ESP when coming from kernel. * Instead, to get sp, &regs->sp is used (see ptrace.h). * Unfortunately, that means eflags must be at the same location * as the current return ip is. We move the return ip into the * regs->ip location, and move flags into the return ip location. 
*/ pushl $__KERNEL_CS pushl 4(%esp) /* Save the return ip */ pushl $0 /* Load 0 into orig_ax */ pushl %gs pushl %fs pushl %es pushl %ds pushl %eax /* Get flags and place them into the return ip slot */ pushf popl %eax movl %eax, 8*4(%esp) pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx movl 12*4(%esp), %eax /* Load ip (1st parameter) */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ #ifdef CC_USING_FENTRY movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */ #else movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ #endif movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) call ftrace_stub addl $4, %esp /* Skip pt_regs */ /* restore flags */ push 14*4(%esp) popf /* Move return ip back to its original location */ movl 12*4(%esp), %eax movl %eax, 14*4(%esp) popl %ebx popl %ecx popl %edx popl %esi popl %edi popl %ebp popl %eax popl %ds popl %es popl %fs popl %gs /* use lea to not affect flags */ lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */ jmp .Lftrace_ret #else /* ! CONFIG_DYNAMIC_FTRACE */ ENTRY(function_hook) cmpl $__PAGE_OFFSET, %esp jb ftrace_stub /* Paging not enabled yet? */ cmpl $ftrace_stub, ftrace_trace_function jnz .Ltrace #ifdef CONFIG_FUNCTION_GRAPH_TRACER cmpl $ftrace_stub, ftrace_graph_return jnz ftrace_graph_caller cmpl $ftrace_graph_entry_stub, ftrace_graph_entry jnz ftrace_graph_caller #endif .globl ftrace_stub ftrace_stub: ret /* taken from glibc */ .Ltrace: pushl %eax pushl %ecx pushl %edx movl 0xc(%esp), %eax movl 0x4(%ebp), %edx subl $MCOUNT_INSN_SIZE, %eax movl ftrace_trace_function, %ecx CALL_NOSPEC %ecx popl %edx popl %ecx popl %eax jmp ftrace_stub END(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) pushl %eax pushl %ecx pushl %edx movl 3*4(%esp), %eax /* Even with frame pointers, fentry doesn't have one here */ #ifdef CC_USING_FENTRY lea 4*4(%esp), %edx movl $0, %ecx #else lea 0x4(%ebp), %edx movl (%ebp), %ecx #endif subl $MCOUNT_INSN_SIZE, %eax call prepare_ftrace_return popl %edx popl %ecx popl %eax ret END(ftrace_graph_caller) .globl return_to_handler return_to_handler: pushl %eax pushl %edx #ifdef CC_USING_FENTRY movl $0, %eax #else movl %ebp, %eax #endif call ftrace_return_to_handler movl %eax, %ecx popl %edx popl %eax JMP_NOSPEC %ecx #endif
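In the !CONFIG_DYNAMIC_FTRACE path above, function_hook decides whether any tracer is installed by comparing ftrace_trace_function against ftrace_stub. In C terms this is the usual "default no-op function pointer" pattern; a simplified sketch with a two-argument callback kept for brevity (not the exact kernel typedef):

typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

static void trace_stub(unsigned long ip, unsigned long parent_ip) { }

/* Tracing is "off" as long as the pointer still aims at the stub. */
static trace_fn trace_function = trace_stub;

static void mcount_hook(unsigned long ip, unsigned long parent_ip)
{
	if (trace_function != trace_stub)	/* a real tracer was installed */
		trace_function(ip, parent_ip);
}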
liva/minimal-linux
12,830
arch/x86/kernel/head_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * Copyright (C) 2000 Karsten Keil <kkeil@suse.de> * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de> * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com> */ #include <linux/linkage.h> #include <linux/threads.h> #include <linux/init.h> #include <asm/segment.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/cache.h> #include <asm/processor-flags.h> #include <asm/percpu.h> #include <asm/nops.h> #include "../entry/calling.h" #include <asm/export.h> #ifdef CONFIG_PARAVIRT #include <asm/asm-offsets.h> #include <asm/paravirt.h> #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg #else #define GET_CR2_INTO(reg) movq %cr2, reg #define INTERRUPT_RETURN iretq #endif /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE * because we need identity-mapped pages. * */ #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE) PGD_START_KERNEL = pgd_index(__START_KERNEL_map) #endif L3_START_KERNEL = pud_index(__START_KERNEL_map) .text __HEAD .code64 .globl startup_64 startup_64: UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table * for us. These identity mapped page tables map all of the * kernel pages and possibly all of memory. * * %rsi holds a physical pointer to real_mode_data. * * We come here either directly from a 64bit bootloader, or from * arch/x86/boot/compressed/head_64.S. * * We only come here initially at boot nothing else comes here. * * Since we may be loaded at an address different from what we were * compiled to run at we first fixup the physical addresses in our page * tables and then reload them. */ /* Set up the stack for verify_cpu(), similar to initial_stack below */ leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp /* Sanitize CPU configuration */ call verify_cpu /* * Perform pagetable fixups. Additionally, if SME is active, encrypt * the kernel and retrieve the modifier (SME encryption mask if SME * is active) to be added to the initial pgdir entry that will be * programmed into CR3. */ leaq _text(%rip), %rdi pushq %rsi call __startup_64 popq %rsi /* Form the CR3 value being sure to include the CR3 modifier */ addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f ENTRY(secondary_startup_64) UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. * * %rsi holds a physical pointer to real_mode_data. * * We come here either from startup_64 (using physical addresses) * or from trampoline.S (using virtual addresses). * * Using virtual addresses from trampoline.S removes the need * to have any identity mapped pages in the kernel page table * after the boot processor executes this code. */ /* Sanitize CPU configuration */ call verify_cpu /* * Retrieve the modifier (SME encryption mask if SME is active) to be * added to the initial pgdir entry that will be programmed into CR3. 
*/ pushq %rsi call __startup_secondary_64 popq %rsi /* Form the CR3 value being sure to include the CR3 modifier */ addq $(init_top_pgt - __START_KERNEL_map), %rax 1: /* Enable PAE mode, PGE and LA57 */ movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx #ifdef CONFIG_X86_5LEVEL orl $X86_CR4_LA57, %ecx #endif movq %rcx, %cr4 /* Setup early boot stage 4-/5-level pagetables. */ addq phys_base(%rip), %rax movq %rax, %cr3 /* Ensure I am executing from virtual addresses */ movq $1f, %rax jmp *%rax 1: UNWIND_HINT_EMPTY /* Check if nx is implemented */ movl $0x80000001, %eax cpuid movl %edx,%edi /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ btl $20,%edi /* No Execute supported? */ jnc 1f btsl $_EFER_NX, %eax btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ movl $CR0_STATE, %eax /* Make changes effective */ movq %rax, %cr0 /* Setup a boot time stack */ movq initial_stack(%rip), %rsp /* zero EFLAGS after setting rsp */ pushq $0 popfq /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace * addresses where we're currently running on. We have to do that here * because in 32bit we couldn't load a 64bit linear address. */ lgdt early_gdt_descr(%rip) /* set up data segments */ xorl %eax,%eax movl %eax,%ds movl %eax,%ss movl %eax,%es /* * We don't really need to load %fs or %gs, but load them anyway * to kill any stale realmode selectors. This allows execution * under VT hardware. */ movl %eax,%fs movl %eax,%gs /* Set up %gs. * * The base of %gs always points to the bottom of the irqstack * union. If the stack protector canary is enabled, it is * located at %gs:40. Note that, on SMP, the boot cpu uses * init data section till per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movl initial_gs(%rip),%eax movl initial_gs+4(%rip),%edx wrmsr /* rsi is pointer to real mode structure with interesting info. pass it to C */ movq %rsi, %rdi .Ljump_to_C_code: /* * Jump to run C code and to be on a real kernel address. * Since we are running on identity-mapped space we have to jump * to the full 64bit address, this is only possible as indirect * jump. In addition we need to ensure %cs is set so we make this * a far return. * * Note: do not change to far jump indirect with 64bit offset. * * AMD does not support far jump indirect with 64bit offset. * AMD64 Architecture Programmer's Manual, Volume 3: states only * JMP FAR mem16:16 FF /5 Far jump indirect, * with the target specified by a far pointer in memory. * JMP FAR mem16:32 FF /5 Far jump indirect, * with the target specified by a far pointer in memory. * * Intel64 does support 64bit offset. * Software Developer Manual Vol 2: states: * FF /5 JMP m16:16 Jump far, absolute indirect, * address given in m16:16 * FF /5 JMP m16:32 Jump far, absolute indirect, * address given in m16:32. * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, * address given in m16:64. */ pushq $.Lafter_lret # put return address on stack for unwinder xorq %rbp, %rbp # clear frame pointer movq initial_code(%rip), %rax pushq $__KERNEL_CS # set correct cs pushq %rax # target address in negative space lretq .Lafter_lret: END(secondary_startup_64) #include "verify_cpu.S" #ifdef CONFIG_HOTPLUG_CPU /* * Boot CPU0 entry point. It's called from play_dead(). Everything has been set * up already except stack. We just set up stack here. Then call * start_secondary() via .Ljump_to_C_code. 
*/ ENTRY(start_cpu0) movq initial_stack(%rip), %rsp UNWIND_HINT_EMPTY jmp .Ljump_to_C_code ENDPROC(start_cpu0) #endif /* Both SMP bootup and ACPI suspend change these variables */ __REFDATA .balign 8 GLOBAL(initial_code) .quad x86_64_start_kernel GLOBAL(initial_gs) .quad INIT_PER_CPU_VAR(irq_stack_union) GLOBAL(initial_stack) /* * The SIZEOF_PTREGS gap is a convention which helps the in-kernel * unwinder reliably detect the end of the stack. */ .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS __FINITDATA __INIT ENTRY(early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 UNWIND_HINT_IRET_REGS pushq $0 # Dummy error code, to make stack frame uniform .else UNWIND_HINT_IRET_REGS offset=8 .endif pushq $i # 72(%rsp) Vector number jmp early_idt_handler_common UNWIND_HINT_IRET_REGS i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr UNWIND_HINT_IRET_REGS offset=16 END(early_idt_handler_array) early_idt_handler_common: /* * The stack is the hardware frame, an error code or zero, and the * vector number. */ cld incl early_recursion_flag(%rip) /* The vector number is currently in the pt_regs->di slot. */ pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* RSI = vector number */ movq %rdi, 8(%rsp) /* pt_regs->di = RDI */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq %rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */ pushq %rbx /* pt_regs->bx */ pushq %rbp /* pt_regs->bp */ pushq %r12 /* pt_regs->r12 */ pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ UNWIND_HINT_REGS cmpq $14,%rsi /* Page fault? */ jnz 10f GET_CR2_INTO(%rdi) /* Can clobber any volatile register if pv */ call early_make_pgtable andl %eax,%eax jz 20f /* All good */ 10: movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */ call early_fixup_exception 20: decl early_recursion_flag(%rip) jmp restore_regs_and_return_to_kernel END(early_idt_handler_common) __INITDATA .balign 4 GLOBAL(early_recursion_flag) .long 0 #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ GLOBAL(name) #ifdef CONFIG_PAGE_TABLE_ISOLATION /* * Each PGD needs to be 8k long and 8k aligned. We do not * ever go out to userspace with these, so we do not * strictly *need* the second page, but this allows us to * have a single set_pgd() implementation that does not * need to worry about whether it has 4k or 8k to work * with. 
* * This ensures PGDs are 8k long: */ #define PTI_USER_PGD_FILL 512 /* This ensures they are 8k-aligned: */ #define NEXT_PGD_PAGE(name) \ .balign 2 * PAGE_SIZE; \ GLOBAL(name) #else #define NEXT_PGD_PAGE(name) NEXT_PAGE(name) #define PTI_USER_PGD_FILL 0 #endif /* Automate the creation of 1 to 1 mapping pmd entries */ #define PMDS(START, PERM, COUNT) \ i = 0 ; \ .rept (COUNT) ; \ .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ i = i + 1 ; \ .endr __INITDATA NEXT_PGD_PAGE(early_top_pgt) .fill 511,8,0 #ifdef CONFIG_X86_5LEVEL .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #else .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #endif .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 .data #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .fill 511, 8, 0 NEXT_PAGE(level2_ident_pgt) /* Since I easily can, map the first 1G. * Don't set NX because code runs from these pages. */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) #else NEXT_PGD_PAGE(init_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 #endif #ifdef CONFIG_X86_5LEVEL NEXT_PAGE(level4_kernel_pgt) .fill 511,8,0 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #endif NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC NEXT_PAGE(level2_kernel_pgt) /* * 512 MB kernel mapping. We spend a full page on this pagetable * anyway. * * The kernel code+data+bss must not be bigger than that. * * (NOTE: at +512MB starts the module area, see MODULES_VADDR. * If you want to increase this then increase MODULES_VADDR * too.) */ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) .fill 506,8,0 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ .fill 5,8,0 NEXT_PAGE(level1_fixmap_pgt) .fill 512,8,0 #undef PMDS .data .align 16 .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 early_gdt_descr_base: .quad INIT_PER_CPU_VAR(gdt_page) ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 EXPORT_SYMBOL(phys_base) __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE EXPORT_SYMBOL(empty_zero_page)
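The PMDS(START, PERM, COUNT) assembler macro above emits COUNT page-table entries, each mapping the next 2 MB region starting at START with permission bits PERM. The same arithmetic as a C sketch; PMD_SHIFT is restated here as 21 (x86-64 2 MB large pages) purely for illustration:

#include <stdint.h>

#define SKETCH_PMD_SHIFT 21

static void fill_pmds(uint64_t *pmd, uint64_t start, uint64_t perm, int count)
{
	for (int i = 0; i < count; i++)
		pmd[i] = start + ((uint64_t)i << SKETCH_PMD_SHIFT) + perm;
}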
liva/minimal-linux
14,663
arch/x86/kernel/head_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Copyright (C) 1991, 1992 Linus Torvalds * * Enhanced CPU detection and feature setting code by Mike Jagdis * and Martin Mares, November 1997. */ .text #include <linux/threads.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/pgtable_types.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/setup.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include <asm/cpufeatures.h> #include <asm/percpu.h> #include <asm/nops.h> #include <asm/bootparam.h> #include <asm/export.h> #include <asm/pgtable_32.h> /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) /* * References to members of the new_cpu_data structure. */ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model #define X86_MASK new_cpu_data+CPUINFO_x86_mask #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id #define SIZEOF_PTREGS 17*4 /* * Worst-case size of the kernel mapping we need to make: * a relocatable kernel can live anywhere in lowmem, so we need to be able * to map all of lowmem. */ KERNEL_PAGES = LOWMEM_PAGES INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. * CS and DS must be 4 GB flat segments, but we don't depend on * any particular GDT layout, because we load our own as soon as we * can. */ __HEAD ENTRY(startup_32) movl pa(initial_stack),%ecx /* test KEEP_SEGMENTS flag to see if the bootloader is asking us to not reload segments */ testb $KEEP_SEGMENTS, BP_loadflags(%esi) jnz 2f /* * Set segments to known values. */ lgdt pa(boot_gdt_descr) movl $(__BOOT_DS),%eax movl %eax,%ds movl %eax,%es movl %eax,%fs movl %eax,%gs movl %eax,%ss 2: leal -__PAGE_OFFSET(%ecx),%esp /* * Clear BSS first so that there are no surprises... */ cld xorl %eax,%eax movl $pa(__bss_start),%edi movl $pa(__bss_stop),%ecx subl %edi,%ecx shrl $2,%ecx rep ; stosl /* * Copy bootup parameters out of the way. * Note: %esi still has the pointer to the real-mode data. * With the kexec as boot loader, parameter segment might be loaded beyond * kernel image and might not even be addressable by early boot page tables. * (kexec on panic case). Hence copy out the parameters before initializing * page tables. */ movl $pa(boot_params),%edi movl $(PARAM_SIZE/4),%ecx cld rep movsl movl pa(boot_params) + NEW_CL_POINTER,%esi andl %esi,%esi jz 1f # No command line movl $pa(boot_command_line),%edi movl $(COMMAND_LINE_SIZE/4),%ecx rep movsl 1: #ifdef CONFIG_OLPC /* save OFW's pgdir table for later use when calling into OFW */ movl %cr3, %eax movl %eax, pa(olpc_ofw_pgd) #endif #ifdef CONFIG_MICROCODE /* Early load ucode on BSP. */ call load_ucode_bsp #endif /* Create early pagetables. */ call mk_early_pgtbl_32 /* Do early initialization of the fixmap area */ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax #ifdef CONFIG_X86_PAE #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) #else movl %eax,pa(initial_page_table+0xffc) #endif #ifdef CONFIG_PARAVIRT /* This is can only trip for a broken bootloader... 
*/ cmpw $0x207, pa(boot_params + BP_version) jb .Ldefault_entry /* Paravirt-compatible boot parameters. Look to see what architecture we're booting under. */ movl pa(boot_params + BP_hardware_subarch), %eax cmpl $num_subarch_entries, %eax jae .Lbad_subarch movl pa(subarch_entries)(,%eax,4), %eax subl $__PAGE_OFFSET, %eax jmp *%eax .Lbad_subarch: WEAK(xen_entry) /* Unknown implementation; there's really nothing we can do at this point. */ ud2a __INITDATA subarch_entries: .long .Ldefault_entry /* normal x86/PC */ .long xen_entry /* Xen hypervisor */ .long .Ldefault_entry /* Moorestown MID */ num_subarch_entries = (. - subarch_entries) / 4 .previous #else jmp .Ldefault_entry #endif /* CONFIG_PARAVIRT */ #ifdef CONFIG_HOTPLUG_CPU /* * Boot CPU0 entry point. It's called from play_dead(). Everything has been set * up already except stack. We just set up stack here. Then call * start_secondary(). */ ENTRY(start_cpu0) movl initial_stack, %ecx movl %ecx, %esp call *(initial_code) 1: jmp 1b ENDPROC(start_cpu0) #endif /* * Non-boot CPU entry point; entered from trampoline.S * We can't lgdt here, because lgdt itself uses a data segment, but * we know the trampoline has already loaded the boot_gdt for us. * * If cpu hotplug is not supported then this code can go in init section * which will be freed later */ ENTRY(startup_32_smp) cld movl $(__BOOT_DS),%eax movl %eax,%ds movl %eax,%es movl %eax,%fs movl %eax,%gs movl pa(initial_stack),%ecx movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp #ifdef CONFIG_MICROCODE /* Early load ucode on AP. */ call load_ucode_ap #endif .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 /* * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave * bits like NT set. This would confuse the debugger if this code is traced. So * initialize them properly now before switching to protected mode. That means * DF in particular (even though we have cleared it earlier after copying the * command line) because GCC expects it. */ pushl $0 popfl /* * New page tables may be in 4Mbyte page mode and may be using the global pages. * * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists * if and only if CPUID exists and has flags other than the FPU flag set. */ movl $-1,pa(X86_CPUID) # preset CPUID level movl $X86_EFLAGS_ID,%ecx pushl %ecx popfl # set EFLAGS=ID pushfl popl %eax # get EFLAGS testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remained set? jz .Lenable_paging # hw disallowed setting of ID bit # which means no CPUID and no CR4 xorl %eax,%eax cpuid movl %eax,pa(X86_CPUID) # save largest std CPUID function movl $1,%eax cpuid andl $~1,%edx # Ignore CPUID.FPU jz .Lenable_paging # No flags or only CPUID.FPU = no CR4 movl pa(mmu_cr4_features),%eax movl %eax,%cr4 testb $X86_CR4_PAE, %al # check if PAE is enabled jz .Lenable_paging /* Check if extended functions are implemented */ movl $0x80000000, %eax cpuid /* Value must be in the range 0x80000001 to 0x8000ffff */ subl $0x80000001, %eax cmpl $(0x8000ffff-0x80000001), %eax ja .Lenable_paging /* Clear bogus XD_DISABLE bits */ call verify_cpu mov $0x80000001, %eax cpuid /* Execute Disable bit supported? */ btl $(X86_FEATURE_NX & 31), %edx jnc .Lenable_paging /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_NX, %eax /* Make changes effective */ wrmsr .Lenable_paging: /* * Enable paging */ movl $pa(initial_page_table), %eax movl %eax,%cr3 /* set the page table pointer.. 
*/ movl $CR0_STATE,%eax movl %eax,%cr0 /* ..and set paging (PG) bit */ ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 1: /* Shift the stack pointer to a virtual address */ addl $__PAGE_OFFSET, %esp /* * start system 32-bit setup. We need to re-do some of the things done * in 16-bit mode for the "real" operations. */ movl setup_once_ref,%eax andl %eax,%eax jz 1f # Did we do this already? call *%eax 1: /* * Check if it is 486 */ movb $4,X86 # at least 486 cmpl $-1,X86_CPUID je .Lis486 /* get vendor info */ xorl %eax,%eax # call CPUID with 0 -> return vendor ID cpuid movl %eax,X86_CPUID # save CPUID level movl %ebx,X86_VENDOR_ID # lo 4 chars movl %edx,X86_VENDOR_ID+4 # next 4 chars movl %ecx,X86_VENDOR_ID+8 # last 4 chars orl %eax,%eax # do we have processor info as well? je .Lis486 movl $1,%eax # Use the CPUID instruction to get CPU type cpuid movb %al,%cl # save reg for future use andb $0x0f,%ah # mask processor family movb %ah,X86 andb $0xf0,%al # mask model shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask mask revision movb %cl,X86_MASK movl %edx,X86_CAPABILITY .Lis486: movl $0x50022,%ecx # set AM, WP, NE and MP movl %cr0,%eax andl $0x80000011,%eax # Save PG,PE,ET orl %ecx,%eax movl %eax,%cr0 lgdt early_gdt_descr ljmp $(__KERNEL_CS),$1f 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. movl $(__USER_DS),%eax # DS/ES contains default USER segment movl %eax,%ds movl %eax,%es movl $(__KERNEL_PERCPU), %eax movl %eax,%fs # set this cpu's percpu movl $(__KERNEL_STACK_CANARY),%eax movl %eax,%gs xorl %eax,%eax # Clear LDT lldt %ax call *(initial_code) 1: jmp 1b ENDPROC(startup_32_smp) #include "verify_cpu.S" /* * setup_once * * The setup work we only want to run on the BSP. * * Warning: %esi is live across this function. */ __INIT setup_once: #ifdef CONFIG_CC_STACKPROTECTOR /* * Configure the stack canary. The linker can't handle this by * relocation. Manually set base address in stack canary * segment descriptor. */ movl $gdt_page,%eax movl $stack_canary,%ecx movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) #endif andl $0,setup_once_ref /* Once is enough, thanks */ ret ENTRY(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip # 24(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number jmp early_idt_handler_common i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr ENDPROC(early_idt_handler_array) early_idt_handler_common: /* * The stack is the hardware frame, an error code or zero, and the * vector number. 
*/ cld incl %ss:early_recursion_flag /* The vector number is in pt_regs->gs */ cld pushl %fs /* pt_regs->fs (__fsh varies by model) */ pushl %es /* pt_regs->es (__esh varies by model) */ pushl %ds /* pt_regs->ds (__dsh varies by model) */ pushl %eax /* pt_regs->ax */ pushl %ebp /* pt_regs->bp */ pushl %edi /* pt_regs->di */ pushl %esi /* pt_regs->si */ pushl %edx /* pt_regs->dx */ pushl %ecx /* pt_regs->cx */ pushl %ebx /* pt_regs->bx */ /* Fix up DS and ES */ movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es /* Load the vector number into EDX */ movl PT_GS(%esp), %edx /* Load GS into pt_regs->gs (and maybe clobber __gsh) */ movw %gs, PT_GS(%esp) movl %esp, %eax /* args are pt_regs (EAX), trapnr (EDX) */ call early_fixup_exception popl %ebx /* pt_regs->bx */ popl %ecx /* pt_regs->cx */ popl %edx /* pt_regs->dx */ popl %esi /* pt_regs->si */ popl %edi /* pt_regs->di */ popl %ebp /* pt_regs->bp */ popl %eax /* pt_regs->ax */ popl %ds /* pt_regs->ds (always ignores __dsh) */ popl %es /* pt_regs->es (always ignores __esh) */ popl %fs /* pt_regs->fs (always ignores __fsh) */ popl %gs /* pt_regs->gs (always ignores __gsh) */ decl %ss:early_recursion_flag addl $4, %esp /* pop pt_regs->orig_ax */ iret ENDPROC(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ ENTRY(early_ignore_irq) cld #ifdef CONFIG_PRINTK pushl %eax pushl %ecx pushl %edx pushl %es pushl %ds movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es cmpl $2,early_recursion_flag je hlt_loop incl early_recursion_flag pushl 16(%esp) pushl 24(%esp) pushl 32(%esp) pushl 40(%esp) pushl $int_msg call printk call dump_stack addl $(5*4),%esp popl %ds popl %es popl %edx popl %ecx popl %eax #endif iret hlt_loop: hlt jmp hlt_loop ENDPROC(early_ignore_irq) __INITDATA .align 4 GLOBAL(early_recursion_flag) .long 0 __REFDATA .align 4 ENTRY(initial_code) .long i386_start_kernel ENTRY(setup_once_ref) .long setup_once /* * BSS section */ __PAGE_ALIGNED_BSS .align PAGE_SIZE #ifdef CONFIG_X86_PAE .globl initial_pg_pmd initial_pg_pmd: .fill 1024*KPMDS,4,0 #else .globl initial_page_table initial_page_table: .fill 1024,4,0 #endif initial_pg_fixmap: .fill 1024,4,0 .globl empty_zero_page empty_zero_page: .fill 4096,1,0 .globl swapper_pg_dir swapper_pg_dir: .fill 1024,4,0 EXPORT_SYMBOL(empty_zero_page) /* * This starts the data section. */ #ifdef CONFIG_X86_PAE __PAGE_ALIGNED_DATA /* Page-aligned for the benefit of paravirt? */ .align PAGE_SIZE ENTRY(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0 # elif KPMDS == 2 .long 0,0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 # elif KPMDS == 1 .long 0,0 .long 0,0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 # else # error "Kernel PMDs should be 1, 2 or 3" # endif .align PAGE_SIZE /* needs to be page-sized too */ #endif .data .balign 4 ENTRY(initial_stack) /* * The SIZEOF_PTREGS gap is a convention which helps the in-kernel * unwinder reliably detect the end of the stack. */ .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \ TOP_OF_KERNEL_STACK_PADDING; __INITRODATA int_msg: .asciz "Unknown interrupt or fault at: %p %p %p\n" #include "../../x86/xen/xen-head.S" /* * The IDT and GDT 'descriptors' are a strange 48-bit object * only used by the lidt and lgdt instructions. 
They are not * like usual segment descriptors - they consist of a 16-bit * segment size, and 32-bit linear address value: */ .data .globl boot_gdt_descr ALIGN # early boot GDT descriptor (must use 1:1 address mapping) .word 0 # 32 bit align gdt_desc.address boot_gdt_descr: .word __BOOT_DS+7 .long boot_gdt - __PAGE_OFFSET # boot GDT descriptor (later on used by CPU#0): .word 0 # 32 bit align gdt_desc.address ENTRY(early_gdt_descr) .word GDT_ENTRIES*8-1 .long gdt_page /* Overwritten for secondary CPUs */ /* * The boot_gdt must mirror the equivalent in setup.S and is * used only for booting. */ .align L1_CACHE_BYTES ENTRY(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
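The 48-bit lgdt/lidt operand described just above is a packed "limit + linear base" pair. As a C sketch (mirroring, but not identical to, the kernel's struct desc_ptr):

#include <stdint.h>

struct gdt_descr_sketch {
	uint16_t size;		/* table size in bytes, minus one */
	uint32_t address;	/* 32-bit linear base address of the table */
} __attribute__((packed));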
liva/minimal-linux
8,198
arch/x86/kernel/ftrace_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Steven Rostedt, Red Hat Inc */ #include <linux/linkage.h> #include <asm/ptrace.h> #include <asm/ftrace.h> #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/unwind_hints.h> .code64 .section .entry.text, "ax" #ifdef CC_USING_FENTRY # define function_hook __fentry__ EXPORT_SYMBOL(__fentry__) #else # define function_hook mcount EXPORT_SYMBOL(mcount) #endif #ifdef CONFIG_FRAME_POINTER # ifdef CC_USING_FENTRY /* Save parent and function stack frames (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16*2) # else /* Save just function stack frame (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16) # endif #else /* No need to save a stack frame */ # define MCOUNT_FRAME_SIZE 0 #endif /* CONFIG_FRAME_POINTER */ /* Size of stack used to save mcount regs in save_mcount_regs */ #define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE) /* * gcc -pg option adds a call to 'mcount' in most functions. * When -mfentry is used, the call is to 'fentry' and not 'mcount' * and is done before the function's stack frame is set up. * They both require a set of regs to be saved before calling * any C code and restored before returning back to the function. * * On boot up, all these calls are converted into nops. When tracing * is enabled, the call can jump to either ftrace_caller or * ftrace_regs_caller. Callbacks (tracing functions) that require * ftrace_regs_caller (like kprobes) need to have pt_regs passed to * it. For this reason, the size of the pt_regs structure will be * allocated on the stack and the required mcount registers will * be saved in the locations that pt_regs has them in. */ /* * @added: the amount of stack added before calling this * * After this is called, the following registers contain: * * %rdi - holds the address that called the trampoline * %rsi - holds the parent function (traced function's return address) * %rdx - holds the original %rbp */ .macro save_mcount_regs added=0 #ifdef CONFIG_FRAME_POINTER /* Save the original rbp */ pushq %rbp /* * Stack traces will stop at the ftrace trampoline if the frame pointer * is not set up properly. If fentry is used, we need to save a frame * pointer for the parent as well as the function traced, because the * fentry is called before the stack frame is set up, where as mcount * is called afterward. */ #ifdef CC_USING_FENTRY /* Save the parent pointer (skip orig rbp and our return address) */ pushq \added+8*2(%rsp) pushq %rbp movq %rsp, %rbp /* Save the return address (now skip orig rbp, rbp and parent) */ pushq \added+8*3(%rsp) #else /* Can't assume that rip is before this (unless added was zero) */ pushq \added+8(%rsp) #endif pushq %rbp movq %rsp, %rbp #endif /* CONFIG_FRAME_POINTER */ /* * We add enough stack to save all regs. */ subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp movq %rax, RAX(%rsp) movq %rcx, RCX(%rsp) movq %rdx, RDX(%rsp) movq %rsi, RSI(%rsp) movq %rdi, RDI(%rsp) movq %r8, R8(%rsp) movq %r9, R9(%rsp) /* * Save the original RBP. Even though the mcount ABI does not * require this, it helps out callers. 
*/ #ifdef CONFIG_FRAME_POINTER movq MCOUNT_REG_SIZE-8(%rsp), %rdx #else movq %rbp, %rdx #endif movq %rdx, RBP(%rsp) /* Copy the parent address into %rsi (second parameter) */ #ifdef CC_USING_FENTRY movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi #else /* %rdx contains original %rbp */ movq 8(%rdx), %rsi #endif /* Move RIP to its proper location */ movq MCOUNT_REG_SIZE+\added(%rsp), %rdi movq %rdi, RIP(%rsp) /* * Now %rdi (the first parameter) has the return address of * where ftrace_call returns. But the callbacks expect the * address of the call itself. */ subq $MCOUNT_INSN_SIZE, %rdi .endm .macro restore_mcount_regs movq R9(%rsp), %r9 movq R8(%rsp), %r8 movq RDI(%rsp), %rdi movq RSI(%rsp), %rsi movq RDX(%rsp), %rdx movq RCX(%rsp), %rcx movq RAX(%rsp), %rax /* ftrace_regs_caller can modify %rbp */ movq RBP(%rsp), %rbp addq $MCOUNT_REG_SIZE, %rsp .endm #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(function_hook) retq ENDPROC(function_hook) ENTRY(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs GLOBAL(ftrace_caller_op_ptr) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* regs go into 4th parameter (but make it NULL) */ movq $0, %rcx GLOBAL(ftrace_call) call ftrace_stub restore_mcount_regs /* * The copied trampoline must call ftrace_epilogue as it * still may need to call the function graph tracer. * * The code up to this label is copied into trampolines so * think twice before adding any new code or changing the * layout here. */ GLOBAL(ftrace_epilogue) #ifdef CONFIG_FUNCTION_GRAPH_TRACER GLOBAL(ftrace_graph_call) jmp ftrace_stub #endif /* This is weak to keep gas from relaxing the jumps */ WEAK(ftrace_stub) retq ENDPROC(ftrace_caller) ENTRY(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ pushfq /* added 8 bytes to save flags */ save_mcount_regs 8 /* save_mcount_regs fills in first two parameters */ GLOBAL(ftrace_regs_caller_op_ptr) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* Save the rest of pt_regs */ movq %r15, R15(%rsp) movq %r14, R14(%rsp) movq %r13, R13(%rsp) movq %r12, R12(%rsp) movq %r11, R11(%rsp) movq %r10, R10(%rsp) movq %rbx, RBX(%rsp) /* Copy saved flags */ movq MCOUNT_REG_SIZE(%rsp), %rcx movq %rcx, EFLAGS(%rsp) /* Kernel segments */ movq $__KERNEL_DS, %rcx movq %rcx, SS(%rsp) movq $__KERNEL_CS, %rcx movq %rcx, CS(%rsp) /* Stack - skipping return address and flags */ leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx movq %rcx, RSP(%rsp) /* regs go into 4th parameter */ leaq (%rsp), %rcx GLOBAL(ftrace_regs_call) call ftrace_stub /* Copy flags back to SS, to restore them */ movq EFLAGS(%rsp), %rax movq %rax, MCOUNT_REG_SIZE(%rsp) /* Handlers can change the RIP */ movq RIP(%rsp), %rax movq %rax, MCOUNT_REG_SIZE+8(%rsp) /* restore the rest of pt_regs */ movq R15(%rsp), %r15 movq R14(%rsp), %r14 movq R13(%rsp), %r13 movq R12(%rsp), %r12 movq R10(%rsp), %r10 movq RBX(%rsp), %rbx restore_mcount_regs /* Restore flags */ popfq /* * As this jmp to ftrace_epilogue can be a short jump * it must not be copied into the trampoline. * The trampoline will add the code to jump * to the return. */ GLOBAL(ftrace_regs_caller_end) jmp ftrace_epilogue ENDPROC(ftrace_regs_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(function_hook) cmpq $ftrace_stub, ftrace_trace_function jnz trace fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER cmpq $ftrace_stub, ftrace_graph_return jnz ftrace_graph_caller cmpq $ftrace_graph_entry_stub, ftrace_graph_entry jnz ftrace_graph_caller #endif GLOBAL(ftrace_stub) retq trace: /* save_mcount_regs fills in first two parameters */ save_mcount_regs /* * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the * ip and parent ip are used and the list function is called when * function tracing is enabled. */ movq ftrace_trace_function, %r8 CALL_NOSPEC %r8 restore_mcount_regs jmp fgraph_trace ENDPROC(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) /* Saves rbp into %rdx and fills first parameter */ save_mcount_regs #ifdef CC_USING_FENTRY leaq MCOUNT_REG_SIZE+8(%rsp), %rsi movq $0, %rdx /* No framepointers needed */ #else /* Save address of the return address of traced function */ leaq 8(%rdx), %rsi /* ftrace does sanity checks against frame pointers */ movq (%rdx), %rdx #endif call prepare_ftrace_return restore_mcount_regs retq ENDPROC(ftrace_graph_caller) ENTRY(return_to_handler) UNWIND_HINT_EMPTY subq $24, %rsp /* Save the return values */ movq %rax, (%rsp) movq %rdx, 8(%rsp) movq %rbp, %rdi call ftrace_return_to_handler movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax addq $24, %rsp JMP_NOSPEC %rdi END(return_to_handler) #endif
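ftrace_regs_caller above builds a full pt_regs frame because some callbacks ask for register state. With the in-kernel ftrace API of this era, such a callback is registered roughly as follows (sketch; error handling and module boilerplate omitted, and a real callback must also be safe to run from the trace path, e.g. built notrace):

#include <linux/ftrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* inspect or modify regs here */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,	/* routes calls via ftrace_regs_caller */
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}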
liva/minimal-linux
5,697
arch/x86/kernel/relocate_kernel_32.S
/* * relocate_kernel.S - put the kernel image in place to boot * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/kexec.h> #include <asm/processor-flags.h> /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 2) /* * control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for * jumping back */ #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define ESP DATA(0x0) #define CR0 DATA(0x4) #define CR3 DATA(0x8) #define CR4 DATA(0xc) /* other data */ #define CP_VA_CONTROL_PAGE DATA(0x10) #define CP_PA_PGD DATA(0x14) #define CP_PA_SWAP_PAGE DATA(0x18) #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c) .text .globl relocate_kernel relocate_kernel: /* Save the CPU context, used for jumping back */ pushl %ebx pushl %esi pushl %edi pushl %ebp pushf movl 20+8(%esp), %ebp /* list of pages */ movl PTR(VA_CONTROL_PAGE)(%ebp), %edi movl %esp, ESP(%edi) movl %cr0, %eax movl %eax, CR0(%edi) movl %cr3, %eax movl %eax, CR3(%edi) movl %cr4, %eax movl %eax, CR4(%edi) /* read the arguments and say goodbye to the stack */ movl 20+4(%esp), %ebx /* page_list */ movl 20+8(%esp), %ebp /* list of pages */ movl 20+12(%esp), %edx /* start address */ movl 20+16(%esp), %ecx /* cpu_has_pae */ movl 20+20(%esp), %esi /* preserve_context */ /* zero out flags, and disable interrupts */ pushl $0 popfl /* save some information for jumping back */ movl PTR(VA_CONTROL_PAGE)(%ebp), %edi movl %edi, CP_VA_CONTROL_PAGE(%edi) movl PTR(PA_PGD)(%ebp), %eax movl %eax, CP_PA_PGD(%edi) movl PTR(PA_SWAP_PAGE)(%ebp), %eax movl %eax, CP_PA_SWAP_PAGE(%edi) movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi) /* * get physical address of control page now * this is impossible after page table switch */ movl PTR(PA_CONTROL_PAGE)(%ebp), %edi /* switch to new set of page tables */ movl PTR(PA_PGD)(%ebp), %eax movl %eax, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%edi), %esp /* jump to identity mapped page */ movl %edi, %eax addl $(identity_mapped - relocate_kernel), %eax pushl %eax ret identity_mapped: /* set return address to 0 if not preserving context */ pushl $0 /* store the start address on the stack */ pushl %edx /* * Set cr0 to a known state: * - Paging disabled * - Alignment check disabled * - Write protect disabled * - No task switch * - Don't do FP software emulation. * - Proctected mode enabled */ movl %cr0, %eax andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax orl $(X86_CR0_PE), %eax movl %eax, %cr0 /* clear cr4 if applicable */ testl %ecx, %ecx jz 1f /* * Set cr4 to a known state: * Setting everything to zero seems safe. */ xorl %eax, %eax movl %eax, %cr4 jmp 1f 1: /* Flush the TLB (needed?) */ xorl %eax, %eax movl %eax, %cr3 movl CP_PA_SWAP_PAGE(%edi), %eax pushl %eax pushl %ebx call swap_pages addl $8, %esp /* * To be certain of avoiding problems with self-modifying code * I need to execute a serializing instruction here. * So I flush the TLB, it's handy, and not processor dependent. 
*/ xorl %eax, %eax movl %eax, %cr3 /* * set all of the registers to known values * leave %esp alone */ testl %esi, %esi jnz 1f xorl %edi, %edi xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %esi, %esi xorl %ebp, %ebp ret 1: popl %edx movl CP_PA_SWAP_PAGE(%edi), %esp addl $PAGE_SIZE, %esp 2: call *%edx /* get the re-entry point of the peer system */ movl 0(%esp), %ebp call 1f 1: popl %ebx subl $(1b - relocate_kernel), %ebx movl CP_VA_CONTROL_PAGE(%ebx), %edi lea PAGE_SIZE(%ebx), %esp movl CP_PA_SWAP_PAGE(%ebx), %eax movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx pushl %eax pushl %edx call swap_pages addl $8, %esp movl CP_PA_PGD(%ebx), %eax movl %eax, %cr3 movl %cr0, %eax orl $X86_CR0_PG, %eax movl %eax, %cr0 lea PAGE_SIZE(%edi), %esp movl %edi, %eax addl $(virtual_mapped - relocate_kernel), %eax pushl %eax ret virtual_mapped: movl CR4(%edi), %eax movl %eax, %cr4 movl CR3(%edi), %eax movl %eax, %cr3 movl CR0(%edi), %eax movl %eax, %cr0 movl ESP(%edi), %esp movl %ebp, %eax popf popl %ebp popl %edi popl %esi popl %ebx ret /* Do the copies */ swap_pages: movl 8(%esp), %edx movl 4(%esp), %ecx pushl %ebp pushl %ebx pushl %edi pushl %esi movl %ecx, %ebx jmp 1f 0: /* top, read another word from the indirection page */ movl (%ebx), %ecx addl $4, %ebx 1: testb $0x1, %cl /* is it a destination page */ jz 2f movl %ecx, %edi andl $0xfffff000, %edi jmp 0b 2: testb $0x2, %cl /* is it an indirection page */ jz 2f movl %ecx, %ebx andl $0xfffff000, %ebx jmp 0b 2: testb $0x4, %cl /* is it the done indicator */ jz 2f jmp 3f 2: testb $0x8, %cl /* is it the source indicator */ jz 0b /* Ignore it otherwise */ movl %ecx, %esi /* For every source page do a copy */ andl $0xfffff000, %esi movl %edi, %eax movl %esi, %ebp movl %edx, %edi movl $1024, %ecx rep ; movsl movl %ebp, %edi movl %eax, %esi movl $1024, %ecx rep ; movsl movl %eax, %edi movl %edx, %esi movl $1024, %ecx rep ; movsl lea PAGE_SIZE(%ebp), %esi jmp 0b 3: popl %esi popl %edi popl %ebx popl %ebp ret .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel
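swap_pages above walks the kexec indirection list: the low bits of each word say whether the entry names a destination page (0x1), another indirection page (0x2), the end of the list (0x4) or a source page to copy (0x8), matching the IND_* constants from include/linux/kexec.h (restated below). A simplified C sketch of the same decode loop, single level and without the swap page:

#include <stdint.h>
#include <string.h>

#define IND_DESTINATION	0x1	/* set the current destination page */
#define IND_INDIRECTION	0x2	/* continue at another indirection page */
#define IND_DONE	0x4	/* end of the list */
#define IND_SOURCE	0x8	/* copy this source page to the destination */

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_PAGE_MASK (~(uintptr_t)(SKETCH_PAGE_SIZE - 1))

static void walk_kexec_list(const uintptr_t *entry)
{
	uint8_t *dest = 0;	/* a well-formed list sets IND_DESTINATION first */

	for (;;) {
		uintptr_t word = *entry++;
		uintptr_t page = word & SKETCH_PAGE_MASK;

		if (word & IND_DESTINATION)
			dest = (uint8_t *)page;
		else if (word & IND_INDIRECTION)
			entry = (const uintptr_t *)page;
		else if (word & IND_DONE)
			break;
		else if (word & IND_SOURCE) {
			memcpy(dest, (const void *)page, SKETCH_PAGE_SIZE);
			dest += SKETCH_PAGE_SIZE;	/* destination advances per page */
		}
	}
}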
liva/minimal-linux
5,653
arch/x86/kernel/relocate_kernel_64.S
/* * relocate_kernel.S - put the kernel image in place to boot * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/kexec.h> #include <asm/processor-flags.h> #include <asm/pgtable_types.h> /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 3) #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) /* * control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for * jumping back */ #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define RSP DATA(0x0) #define CR0 DATA(0x8) #define CR3 DATA(0x10) #define CR4 DATA(0x18) /* other data */ #define CP_PA_TABLE_PAGE DATA(0x20) #define CP_PA_SWAP_PAGE DATA(0x28) #define CP_PA_BACKUP_PAGES_MAP DATA(0x30) .text .align PAGE_SIZE .code64 .globl relocate_kernel relocate_kernel: /* * %rdi indirection_page * %rsi page_list * %rdx start address * %rcx preserve_context * %r8 sme_active */ /* Save the CPU context, used for jumping back */ pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushf movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 movq %rsp, RSP(%r11) movq %cr0, %rax movq %rax, CR0(%r11) movq %cr3, %rax movq %rax, CR3(%r11) movq %cr4, %rax movq %rax, CR4(%r11) /* zero out flags, and disable interrupts */ pushq $0 popfq /* Save SME active flag */ movq %r8, %r12 /* * get physical address of control page now * this is impossible after page table switch */ movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 /* get physical address of page table now too */ movq PTR(PA_TABLE_PAGE)(%rsi), %r9 /* get physical address of swap page now */ movq PTR(PA_SWAP_PAGE)(%rsi), %r10 /* save some information for jumping back */ movq %r9, CP_PA_TABLE_PAGE(%r11) movq %r10, CP_PA_SWAP_PAGE(%r11) movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11) /* Switch to the identity mapped page tables */ movq %r9, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%r8), %rsp /* jump to identity mapped page */ addq $(identity_mapped - relocate_kernel), %r8 pushq %r8 ret identity_mapped: /* set return address to 0 if not preserving context */ pushq $0 /* store the start address on the stack */ pushq %rdx /* * Set cr0 to a known state: * - Paging enabled * - Alignment check disabled * - Write protect disabled * - No task switch * - Don't do FP software emulation. * - Proctected mode enabled */ movq %cr0, %rax andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax orl $(X86_CR0_PG | X86_CR0_PE), %eax movq %rax, %cr0 /* * Set cr4 to a known state: * - physical address extension enabled */ movl $X86_CR4_PAE, %eax movq %rax, %cr4 jmp 1f 1: /* Flush the TLB (needed?) */ movq %r9, %cr3 /* * If SME is active, there could be old encrypted cache line * entries that will conflict with the now unencrypted memory * used by kexec. Flush the caches before copying the kernel. */ testq %r12, %r12 jz 1f wbinvd 1: movq %rcx, %r11 call swap_pages /* * To be certain of avoiding problems with self-modifying code * I need to execute a serializing instruction here. * So I flush the TLB by reloading %cr3 here, it's handy, * and not processor dependent. 
*/ movq %cr3, %rax movq %rax, %cr3 /* * set all of the registers to known values * leave %rsp alone */ testq %r11, %r11 jnz 1f xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %esi, %esi xorl %edi, %edi xorl %ebp, %ebp xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d ret 1: popq %rdx leaq PAGE_SIZE(%r10), %rsp call *%rdx /* get the re-entry point of the peer system */ movq 0(%rsp), %rbp call 1f 1: popq %r8 subq $(1b - relocate_kernel), %r8 movq CP_PA_SWAP_PAGE(%r8), %r10 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi movq CP_PA_TABLE_PAGE(%r8), %rax movq %rax, %cr3 lea PAGE_SIZE(%r8), %rsp call swap_pages movq $virtual_mapped, %rax pushq %rax ret virtual_mapped: movq RSP(%r8), %rsp movq CR4(%r8), %rax movq %rax, %cr4 movq CR3(%r8), %rax movq CR0(%r8), %r8 movq %rax, %cr3 movq %r8, %cr0 movq %rbp, %rax popf popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret /* Do the copies */ swap_pages: movq %rdi, %rcx /* Put the page_list in %rcx */ xorl %edi, %edi xorl %esi, %esi jmp 1f 0: /* top, read another word for the indirection page */ movq (%rbx), %rcx addq $8, %rbx 1: testb $0x1, %cl /* is it a destination page? */ jz 2f movq %rcx, %rdi andq $0xfffffffffffff000, %rdi jmp 0b 2: testb $0x2, %cl /* is it an indirection page? */ jz 2f movq %rcx, %rbx andq $0xfffffffffffff000, %rbx jmp 0b 2: testb $0x4, %cl /* is it the done indicator? */ jz 2f jmp 3f 2: testb $0x8, %cl /* is it the source indicator? */ jz 0b /* Ignore it otherwise */ movq %rcx, %rsi /* For ever source page do a copy */ andq $0xfffffffffffff000, %rsi movq %rdi, %rdx movq %rsi, %rax movq %r10, %rdi movl $512, %ecx rep ; movsq movq %rax, %rdi movq %rdx, %rsi movl $512, %ecx rep ; movsq movq %rdx, %rdi movq %r10, %rsi movl $512, %ecx rep ; movsq lea PAGE_SIZE(%rax), %rsi jmp 0b 3: ret .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel
liva/minimal-linux
3,810
arch/x86/kernel/verify_cpu.S
/* * * verify_cpu.S - Code for cpu long mode and SSE verification. This * code has been borrowed from boot/setup.S and was introduced by * Andi Kleen. * * Copyright (c) 2007 Andi Kleen (ak@suse.de) * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com) * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com) * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com) * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. * * This is a common code for verification whether CPU supports * long mode and SSE or not. It is not called directly instead this * file is included at various places and compiled in that context. * This file is expected to run in 32bit code. Currently: * * arch/x86/boot/compressed/head_64.S: Boot cpu verification * arch/x86/kernel/trampoline_64.S: secondary processor verification * arch/x86/kernel/head_32.S: processor startup * * verify_cpu, returns the status of longmode and SSE in register %eax. * 0: Success 1: Failure * * On Intel, the XD_DISABLE flag will be cleared as a side-effect. * * The caller needs to check for the error code and take the action * appropriately. Either display a message or halt. */ #include <asm/cpufeatures.h> #include <asm/msr-index.h> ENTRY(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf #ifndef __x86_64__ pushfl # standard way to check for cpuid popl %eax movl %eax,%ebx xorl $0x200000,%eax pushl %eax popfl pushfl popl %eax cmpl %eax,%ebx jz .Lverify_cpu_no_longmode # cpu has no cpuid #endif movl $0x0,%eax # See if cpuid 1 is implemented cpuid cmpl $0x1,%eax jb .Lverify_cpu_no_longmode # no cpuid 1 xor %di,%di cmpl $0x68747541,%ebx # AuthenticAMD jnz .Lverify_cpu_noamd cmpl $0x69746e65,%edx jnz .Lverify_cpu_noamd cmpl $0x444d4163,%ecx jnz .Lverify_cpu_noamd mov $1,%di # cpu is from AMD jmp .Lverify_cpu_check .Lverify_cpu_noamd: cmpl $0x756e6547,%ebx # GenuineIntel? 
jnz .Lverify_cpu_check cmpl $0x49656e69,%edx jnz .Lverify_cpu_check cmpl $0x6c65746e,%ecx jnz .Lverify_cpu_check # only call IA32_MISC_ENABLE when: # family > 6 || (family == 6 && model >= 0xd) movl $0x1, %eax # check CPU family and model cpuid movl %eax, %ecx andl $0x0ff00f00, %eax # mask family and extended family shrl $8, %eax cmpl $6, %eax ja .Lverify_cpu_clear_xd # family > 6, ok jb .Lverify_cpu_check # family < 6, skip andl $0x000f00f0, %ecx # mask model and extended model shrl $4, %ecx cmpl $0xd, %ecx jb .Lverify_cpu_check # family == 6, model < 0xd, skip .Lverify_cpu_clear_xd: movl $MSR_IA32_MISC_ENABLE, %ecx rdmsr btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE jnc .Lverify_cpu_check # only write MSR if bit was changed wrmsr .Lverify_cpu_check: movl $0x1,%eax # Does the cpu have what it takes cpuid andl $REQUIRED_MASK0,%edx xorl $REQUIRED_MASK0,%edx jnz .Lverify_cpu_no_longmode movl $0x80000000,%eax # See if extended cpuid is implemented cpuid cmpl $0x80000001,%eax jb .Lverify_cpu_no_longmode # no extended cpuid movl $0x80000001,%eax # Does the cpu have what it takes cpuid andl $REQUIRED_MASK1,%edx xorl $REQUIRED_MASK1,%edx jnz .Lverify_cpu_no_longmode .Lverify_cpu_sse_test: movl $1,%eax cpuid andl $SSE_MASK,%edx cmpl $SSE_MASK,%edx je .Lverify_cpu_sse_ok test %di,%di jz .Lverify_cpu_no_longmode # only try to force SSE on AMD movl $MSR_K7_HWCR,%ecx rdmsr btr $15,%eax # enable SSE wrmsr xor %di,%di # don't loop jmp .Lverify_cpu_sse_test # try again .Lverify_cpu_no_longmode: popf # Restore caller passed flags movl $1,%eax ret .Lverify_cpu_sse_ok: popf # Restore caller passed flags xorl %eax, %eax ret ENDPROC(verify_cpu)
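verify_cpu reduces to a handful of CPUID probes: the vendor string on leaf 0, the feature masks on leaf 1, and the long-mode bit on extended leaf 0x80000001; only the AMD SSE-forcing path touches MSR_K7_HWCR, which needs ring 0. The user-space sketch below, using GCC/Clang's <cpuid.h>, reproduces just the CPUID side; SSE2 stands in here for the kernel's REQUIRED_MASK0/1 feature masks.

/* User-space sketch of the CPUID checks in verify_cpu (MSR part omitted). */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0: highest standard leaf plus the vendor string, as in the asm. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 1)
		return 1;                               /* no leaf 1 -> give up */

	int is_amd   = (ebx == 0x68747541 && edx == 0x69746e65 && ecx == 0x444d4163);
	int is_intel = (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e);

	/* Leaf 1: SSE2 is EDX bit 26 (stand-in for the REQUIRED_MASK0 test). */
	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	int has_sse2 = !!(edx & (1u << 26));

	/* Extended leaf 0x80000001: long mode is EDX bit 29. */
	unsigned int has_lm = 0;
	if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) && eax >= 0x80000001) {
		__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
		has_lm = !!(edx & (1u << 29));
	}

	printf("vendor: %s, sse2: %d, long mode: %d\n",
	       is_amd ? "AMD" : is_intel ? "Intel" : "other", has_sse2, has_lm);
	return (has_sse2 && has_lm) ? 0 : 1;
}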
liva/minimal-linux
2,407
arch/x86/boot/compressed/mem_encrypt.S
/* * AMD Memory Encryption Support * * Copyright (C) 2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/processor-flags.h> #include <asm/msr.h> #include <asm/asm-offsets.h> .text .code32 ENTRY(get_sev_encryption_bit) xor %eax, %eax #ifdef CONFIG_AMD_MEM_ENCRYPT push %ebx push %ecx push %edx push %edi /* * RIP-relative addressing is needed to access the encryption bit * variable. Since we are running in 32-bit mode we need this call/pop * sequence to get the proper relative addressing. */ call 1f 1: popl %edi subl $1b, %edi movl enc_bit(%edi), %eax cmpl $0, %eax jge .Lsev_exit /* Check if running under a hypervisor */ movl $1, %eax cpuid bt $31, %ecx /* Check the hypervisor bit */ jnc .Lno_sev movl $0x80000000, %eax /* CPUID to check the highest leaf */ cpuid cmpl $0x8000001f, %eax /* See if 0x8000001f is available */ jb .Lno_sev /* * Check for the SEV feature: * CPUID Fn8000_001F[EAX] - Bit 1 * CPUID Fn8000_001F[EBX] - Bits 5:0 * Pagetable bit position used to indicate encryption */ movl $0x8000001f, %eax cpuid bt $1, %eax /* Check if SEV is available */ jnc .Lno_sev movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */ rdmsr bt $MSR_AMD64_SEV_ENABLED_BIT, %eax /* Check if SEV is active */ jnc .Lno_sev movl %ebx, %eax andl $0x3f, %eax /* Return the encryption bit location */ movl %eax, enc_bit(%edi) jmp .Lsev_exit .Lno_sev: xor %eax, %eax movl %eax, enc_bit(%edi) .Lsev_exit: pop %edi pop %edx pop %ecx pop %ebx #endif /* CONFIG_AMD_MEM_ENCRYPT */ ret ENDPROC(get_sev_encryption_bit) .code64 ENTRY(get_sev_encryption_mask) xor %rax, %rax #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp push %rdx movq %rsp, %rbp /* Save current stack pointer */ call get_sev_encryption_bit /* Get the encryption bit position */ testl %eax, %eax jz .Lno_sev_mask xor %rdx, %rdx bts %rax, %rdx /* Create the encryption mask */ mov %rdx, %rax /* ... and return it */ .Lno_sev_mask: movq %rbp, %rsp /* Restore original stack pointer */ pop %rdx pop %rbp #endif ret ENDPROC(get_sev_encryption_mask) .data enc_bit: .int 0xffffffff
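get_sev_encryption_bit relies on CPUID Fn8000_001F (EAX bit 1 = SEV supported, EBX bits 5:0 = page-table bit used for encryption) before confirming activation through MSR_AMD64_SEV, and get_sev_encryption_mask then sets that single bit in a 64-bit word. The sketch below covers only the CPUID-derived part, since the MSR read requires kernel mode; it is an illustration, not the kernel's code.

/* Sketch: deriving the SEV C-bit mask from CPUID Fn8000_001F (user space). */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t mask = 0;

	if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) && eax >= 0x8000001f) {
		__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
		if (eax & (1u << 1)) {                  /* SEV supported */
			unsigned int bit = ebx & 0x3f;  /* encryption bit position */
			mask = 1ull << bit;             /* the "bts %rax, %rdx" step */
		}
	}
	printf("SEV encryption mask (if supported): %#llx\n",
	       (unsigned long long)mask);
	return 0;
}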
liva/minimal-linux
1,216
arch/x86/boot/compressed/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) #undef i386 #include <asm/cache.h> #include <asm/page_types.h> #ifdef CONFIG_X86_64 OUTPUT_ARCH(i386:x86-64) ENTRY(startup_64) #else OUTPUT_ARCH(i386) ENTRY(startup_32) #endif SECTIONS { /* Be careful parts of head_64.S assume startup_32 is at * address 0. */ . = 0; .head.text : { _head = . ; HEAD_TEXT _ehead = . ; } .rodata..compressed : { *(.rodata..compressed) } .text : { _text = .; /* Text */ *(.text) *(.text.*) _etext = . ; } .rodata : { _rodata = . ; *(.rodata) /* read-only data */ *(.rodata.*) _erodata = . ; } .got : { _got = .; KEEP(*(.got.plt)) KEEP(*(.got)) _egot = .; } .data : { _data = . ; *(.data) *(.data.*) _edata = . ; } . = ALIGN(L1_CACHE_BYTES); .bss : { _bss = . ; *(.bss) *(.bss.*) *(COMMON) . = ALIGN(8); /* For convenience during zeroing */ _ebss = .; } #ifdef CONFIG_X86_64 . = ALIGN(PAGE_SIZE); .pgtable : { _pgtable = . ; *(.pgtable) _epgtable = . ; } #endif . = ALIGN(PAGE_SIZE); /* keep ZO size page aligned */ _end = .; }
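Symbols such as _text, _bss and _ebss defined by this script are consumed from the decompressor's assembly and C (the BSS clear in head_*.S runs from _bss to _ebss). Below is a minimal sketch of the usual way such linker-provided symbols are referenced from C; the names match the script, but the snippet only links in a build whose linker script actually defines them.

/* Sketch: referencing linker-script symbols from C (illustration only). */
#include <stddef.h>
#include <string.h>

/* Declared as arrays so only the addresses are used, never any "value". */
extern char _bss[], _ebss[];
extern char _text[], _etext[];

static void clear_bss(void)
{
	memset(_bss, 0, (size_t)(_ebss - _bss));   /* same range head_*.S zeroes */
}

static size_t text_size(void)
{
	return (size_t)(_etext - _text);
}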
liva/minimal-linux
12,648
arch/x86/boot/compressed/head_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/boot/head.S * * Copyright (C) 1991, 1992, 1993 Linus Torvalds */ /* * head.S contains the 32-bit startup code. * * NOTE!!! Startup happens at absolute address 0x00001000, which is also where * the page directory will exist. The startup code will be overwritten by * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ .code32 .text #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/boot.h> #include <asm/msr.h> #include <asm/processor-flags.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> /* * Locally defined symbols should be marked hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot __HEAD .code32 ENTRY(startup_32) /* * 32bit entry is 0 and it is ABI so immutable! * If we come here directly from a bootloader, * kernel(text+data+bss+brk) ramdisk, zero_page, command line * all need to be under the 4G limit. */ cld /* * Test KEEP_SEGMENTS flag to see if the bootloader is asking * us to not reload segments */ testb $KEEP_SEGMENTS, BP_loadflags(%esi) jnz 1f cli movl $(__BOOT_DS), %eax movl %eax, %ds movl %eax, %es movl %eax, %ss 1: /* * Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %ebp subl $1b, %ebp /* setup a stack and make sure cpu supports long mode. */ movl $boot_stack_end, %eax addl %ebp, %eax movl %eax, %esp call verify_cpu testl %eax, %eax jnz no_longmode /* * Compute the delta between where we were compiled to run at * and where the code will actually run at. * * %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. */ #ifdef CONFIG_RELOCATABLE movl %ebp, %ebx movl BP_kernel_alignment(%esi), %eax decl %eax addl %eax, %ebx notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx jge 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ movl BP_init_size(%esi), %eax subl $_end, %eax addl %eax, %ebx /* * Prepare for entering 64 bit mode */ /* Load new GDT with the 64bit segments using 32bit descriptor */ addl %ebp, gdt+2(%ebp) lgdt gdt(%ebp) /* Enable PAE mode */ movl %cr4, %eax orl $X86_CR4_PAE, %eax movl %eax, %cr4 /* * Build early 4G boot pagetable */ /* * If SEV is active then set the encryption mask in the page tables. * This will insure that when the kernel is copied and decompressed * it will be done so encrypted. 
*/ call get_sev_encryption_bit xorl %edx, %edx testl %eax, %eax jz 1f subl $32, %eax /* Encryption bit is always above bit 31 */ bts %eax, %edx /* Set encryption mask for page tables */ 1: /* Initialize Page tables to 0 */ leal pgtable(%ebx), %edi xorl %eax, %eax movl $(BOOT_INIT_PGT_SIZE/4), %ecx rep stosl /* Build Level 4 */ leal pgtable + 0(%ebx), %edi leal 0x1007 (%edi), %eax movl %eax, 0(%edi) addl %edx, 4(%edi) /* Build Level 3 */ leal pgtable + 0x1000(%ebx), %edi leal 0x1007(%edi), %eax movl $4, %ecx 1: movl %eax, 0x00(%edi) addl %edx, 0x04(%edi) addl $0x00001000, %eax addl $8, %edi decl %ecx jnz 1b /* Build Level 2 */ leal pgtable + 0x2000(%ebx), %edi movl $0x00000183, %eax movl $2048, %ecx 1: movl %eax, 0(%edi) addl %edx, 4(%edi) addl $0x00200000, %eax addl $8, %edi decl %ecx jnz 1b /* Enable the boot page tables */ leal pgtable(%ebx), %eax movl %eax, %cr3 /* Enable Long mode in EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr /* After gdt is loaded */ xorl %eax, %eax lldt %ax movl $__BOOT_TSS, %eax ltr %ax /* * Setup for the jump to 64bit mode * * When the jump is performend we will be in long mode but * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use * the new gdt/idt that has __KERNEL_CS with CS.L = 1. * We place all of the values on our mini stack so lret can * used to perform that far jump. */ pushl $__KERNEL_CS leal startup_64(%ebp), %eax #ifdef CONFIG_EFI_MIXED movl efi32_config(%ebp), %ebx cmp $0, %ebx jz 1f leal handover_entry(%ebp), %eax 1: #endif pushl %eax /* Enter paged protected Mode, activating Long Mode */ movl $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */ movl %eax, %cr0 /* Jump from 32bit compatibility mode into 64bit mode. */ lret ENDPROC(startup_32) #ifdef CONFIG_EFI_MIXED .org 0x190 ENTRY(efi32_stub_entry) add $0x4, %esp /* Discard return address */ popl %ecx popl %edx popl %esi leal (BP_scratch+4)(%esi), %esp call 1f 1: pop %ebp subl $1b, %ebp movl %ecx, efi32_config(%ebp) movl %edx, efi32_config+8(%ebp) sgdtl efi32_boot_gdt(%ebp) leal efi32_config(%ebp), %eax movl %eax, efi_config(%ebp) jmp startup_32 ENDPROC(efi32_stub_entry) #endif .code64 .org 0x200 ENTRY(startup_64) /* * 64bit entry is 0x200 and it is ABI so immutable! * We come here either from startup_32 or directly from a * 64bit bootloader. * If we come here from a bootloader, kernel(text+data+bss+brk), * ramdisk, zero_page, command line could be above 4G. * We depend on an identity mapped page table being provided * that maps our entire kernel(text+data+bss+brk), zero page * and command line. */ /* Setup data segments. */ xorl %eax, %eax movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* * Compute the decompressed kernel start address. It is where * we were loaded at aligned to a 2M boundary. %rbp contains the * decompressed kernel start address. * * If it is a relocatable kernel then decompress and run the kernel * from load address aligned to 2MB addr, otherwise decompress and * run the kernel from LOAD_PHYSICAL_ADDR * * We cannot rely on the calculation done in 32-bit mode, since we * may have been invoked via the 64-bit entry point. */ /* Start with the delta to where the kernel will run at. 
*/ #ifdef CONFIG_RELOCATABLE leaq startup_32(%rip) /* - $startup_32 */, %rbp movl BP_kernel_alignment(%rsi), %eax decl %eax addq %rax, %rbp notq %rax andq %rax, %rbp cmpq $LOAD_PHYSICAL_ADDR, %rbp jge 1f #endif movq $LOAD_PHYSICAL_ADDR, %rbp 1: /* Target address to relocate to for decompression */ movl BP_init_size(%rsi), %ebx subl $_end, %ebx addq %rbp, %rbx /* Set up the stack */ leaq boot_stack_end(%rbx), %rsp #ifdef CONFIG_X86_5LEVEL /* * Check if we need to enable 5-level paging. * RSI holds real mode data and need to be preserved across * a function call. */ pushq %rsi call l5_paging_required popq %rsi /* If l5_paging_required() returned zero, we're done here. */ cmpq $0, %rax je lvl5 /* * At this point we are in long mode with 4-level paging enabled, * but we want to enable 5-level paging. * * The problem is that we cannot do it directly. Setting LA57 in * long mode would trigger #GP. So we need to switch off long mode * first. * * NOTE: This is not going to work if bootloader put us above 4G * limit. * * The first step is go into compatibility mode. */ /* Clear additional page table */ leaq lvl5_pgtable(%rbx), %rdi xorq %rax, %rax movq $(PAGE_SIZE/8), %rcx rep stosq /* * Setup current CR3 as the first and only entry in a new top level * page table. */ movq %cr3, %rdi leaq 0x7 (%rdi), %rax movq %rax, lvl5_pgtable(%rbx) /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ pushq $__KERNEL32_CS leaq compatible_mode(%rip), %rax pushq %rax lretq lvl5: #endif /* Zero EFLAGS */ pushq $0 popfq /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ pushq %rsi leaq (_bss-8)(%rip), %rsi leaq (_bss-8)(%rbx), %rdi movq $_bss /* - $startup_32 */, %rcx shrq $3, %rcx std rep movsq cld popq %rsi /* * Jump to the relocated address. */ leaq relocated(%rbx), %rax jmp *%rax #ifdef CONFIG_EFI_STUB /* The entry point for the PE/COFF executable is efi_pe_entry. */ ENTRY(efi_pe_entry) movq %rcx, efi64_config(%rip) /* Handle */ movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */ leaq efi64_config(%rip), %rax movq %rax, efi_config(%rip) call 1f 1: popq %rbp subq $1b, %rbp /* * Relocate efi_config->call(). */ addq %rbp, efi64_config+40(%rip) movq %rax, %rdi call make_boot_params cmpq $0,%rax je fail mov %rax, %rsi leaq startup_32(%rip), %rax movl %eax, BP_code32_start(%rsi) jmp 2f /* Skip the relocation */ handover_entry: call 1f 1: popq %rbp subq $1b, %rbp /* * Relocate efi_config->call(). */ movq efi_config(%rip), %rax addq %rbp, 40(%rax) 2: movq efi_config(%rip), %rdi call efi_main movq %rax,%rsi cmpq $0,%rax jne 2f fail: /* EFI init failed, so hang. */ hlt jmp fail 2: movl BP_code32_start(%esi), %eax leaq startup_64(%rax), %rax jmp *%rax ENDPROC(efi_pe_entry) .org 0x390 ENTRY(efi64_stub_entry) movq %rdi, efi64_config(%rip) /* Handle */ movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */ leaq efi64_config(%rip), %rax movq %rax, efi_config(%rip) movq %rdx, %rsi jmp handover_entry ENDPROC(efi64_stub_entry) #endif .text relocated: /* * Clear BSS (stack is currently empty) */ xorl %eax, %eax leaq _bss(%rip), %rdi leaq _ebss(%rip), %rcx subq %rdi, %rcx shrq $3, %rcx rep stosq /* * Adjust our own GOT */ leaq _got(%rip), %rdx leaq _egot(%rip), %rcx 1: cmpq %rcx, %rdx jae 2f addq %rbx, (%rdx) addq $8, %rdx jmp 1b 2: /* * Do the extraction, and jump to the new kernel.. 
*/ pushq %rsi /* Save the real mode argument */ movq %rsi, %rdi /* real mode address */ leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ leaq input_data(%rip), %rdx /* input_data */ movl $z_input_len, %ecx /* input_len */ movq %rbp, %r8 /* output target address */ movq $z_output_len, %r9 /* decompressed length, end of relocs */ call extract_kernel /* returns kernel location in %rax */ popq %rsi /* * Jump to the decompressed kernel. */ jmp *%rax .code32 #ifdef CONFIG_X86_5LEVEL compatible_mode: /* Setup data and stack segments */ movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %ss /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 /* Point CR3 to 5-level paging */ leal lvl5_pgtable(%ebx), %eax movl %eax, %cr3 /* Enable PAE and LA57 mode */ movl %cr4, %eax orl $(X86_CR4_PAE | X86_CR4_LA57), %eax movl %eax, %cr4 /* Calculate address we are running at */ call 1f 1: popl %edi subl $1b, %edi /* Prepare stack for far return to Long Mode */ pushl $__KERNEL_CS leal lvl5(%edi), %eax push %eax /* Enable paging back */ movl $(X86_CR0_PG | X86_CR0_PE), %eax movl %eax, %cr0 lret #endif no_longmode: /* This isn't an x86-64 CPU so hang */ 1: hlt jmp 1b #include "../../kernel/verify_cpu.S" .data gdt: .word gdt_end - gdt .long gdt .word 0 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */ .quad 0x00af9a000000ffff /* __KERNEL_CS */ .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ gdt_end: #ifdef CONFIG_EFI_STUB efi_config: .quad 0 #ifdef CONFIG_EFI_MIXED .global efi32_config efi32_config: .fill 5,8,0 .quad efi64_thunk .byte 0 #endif .global efi64_config efi64_config: .fill 5,8,0 .quad efi_call .byte 1 #endif /* CONFIG_EFI_STUB */ /* * Stack and heap for uncompression */ .bss .balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: .fill BOOT_STACK_SIZE, 1, 0 boot_stack_end: /* * Space for page tables (not in .bss so not zeroed) */ .section ".pgtable","a",@nobits .balign 4096 pgtable: .fill BOOT_PGT_SIZE, 1, 0 #ifdef CONFIG_X86_5LEVEL lvl5_pgtable: .fill PAGE_SIZE, 1, 0 #endif
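The early boot page tables built in startup_32 above identity-map the first 4 GiB: one level-4 entry, four level-3 entries, and 2048 level-2 entries of 2 MiB each with flags 0x183 (present, writable, PS for a 2 MiB page, global), with the SEV encryption mask OR-ed into each entry when SEV is active. The loop below is a C rendering of the "Build Level 2" step only, as an illustration.

/* Sketch: generating the 2048 identity-mapped 2 MiB PDEs ("Build Level 2"). */
#include <stdint.h>

#define PDE_FLAGS 0x183ull   /* present | RW | PS (2 MiB page) | global */

static void build_level2(uint64_t *pmd, uint64_t enc_mask)
{
	/* 4 tables x 512 entries = 2048 entries covering 0..4 GiB. */
	for (unsigned int i = 0; i < 2048; i++)
		pmd[i] = ((uint64_t)i << 21) | PDE_FLAGS | enc_mask;   /* 2 MiB steps */
}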
liva/minimal-linux
6,277
arch/x86/boot/compressed/head_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/boot/head.S * * Copyright (C) 1991, 1992, 1993 Linus Torvalds */ /* * head.S contains the 32-bit startup code. * * NOTE!!! Startup happens at absolute address 0x00001000, which is also where * the page directory will exist. The startup code will be overwritten by * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ .text #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/boot.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> /* * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X * relocation to get the symbol address in PIC. When the compressed x86 * kernel isn't built as PIC, the linker optimizes R_386_GOT32X * relocations to their fixed symbol addresses. However, when the * compressed x86 kernel is loaded at a different address, it leads * to the following load failure: * * Failed to allocate space for phdrs * * during the decompression stage. * * If the compressed x86 kernel is relocatable at run-time, it should be * compiled with -fPIE, instead of -fPIC, if possible and should be built as * Position Independent Executable (PIE) so that linker won't optimize * R_386_GOT32X relocation to its fixed symbol address. Older * linkers generate R_386_32 relocations against locally defined symbols, * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle * R_386_32 relocations when relocating the kernel. To generate * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as * hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot __HEAD ENTRY(startup_32) cld /* * Test KEEP_SEGMENTS flag to see if the bootloader is asking * us to not reload segments */ testb $KEEP_SEGMENTS, BP_loadflags(%esi) jnz 1f cli movl $__BOOT_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %fs movl %eax, %gs movl %eax, %ss 1: /* * Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %ebp subl $1b, %ebp /* * %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. */ #ifdef CONFIG_RELOCATABLE movl %ebp, %ebx movl BP_kernel_alignment(%esi), %eax decl %eax addl %eax, %ebx notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx jge 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ movl BP_init_size(%esi), %eax subl $_end, %eax addl %eax, %ebx /* Set up the stack */ leal boot_stack_end(%ebx), %esp /* Zero EFLAGS */ pushl $0 popfl /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. 
*/ pushl %esi leal (_bss-4)(%ebp), %esi leal (_bss-4)(%ebx), %edi movl $(_bss - startup_32), %ecx shrl $2, %ecx std rep movsl cld popl %esi /* * Jump to the relocated address. */ leal relocated(%ebx), %eax jmp *%eax ENDPROC(startup_32) #ifdef CONFIG_EFI_STUB /* * We don't need the return address, so set up the stack so efi_main() can find * its arguments. */ ENTRY(efi_pe_entry) add $0x4, %esp call 1f 1: popl %esi subl $1b, %esi popl %ecx movl %ecx, efi32_config(%esi) /* Handle */ popl %ecx movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */ /* Relocate efi_config->call() */ leal efi32_config(%esi), %eax add %esi, 40(%eax) pushl %eax call make_boot_params cmpl $0, %eax je fail movl %esi, BP_code32_start(%eax) popl %ecx pushl %eax pushl %ecx jmp 2f /* Skip efi_config initialization */ ENDPROC(efi_pe_entry) ENTRY(efi32_stub_entry) add $0x4, %esp popl %ecx popl %edx call 1f 1: popl %esi subl $1b, %esi movl %ecx, efi32_config(%esi) /* Handle */ movl %edx, efi32_config+8(%esi) /* EFI System table pointer */ /* Relocate efi_config->call() */ leal efi32_config(%esi), %eax add %esi, 40(%eax) pushl %eax 2: call efi_main cmpl $0, %eax movl %eax, %esi jne 2f fail: /* EFI init failed, so hang. */ hlt jmp fail 2: movl BP_code32_start(%esi), %eax leal startup_32(%eax), %eax jmp *%eax ENDPROC(efi32_stub_entry) #endif .text relocated: /* * Clear BSS (stack is currently empty) */ xorl %eax, %eax leal _bss(%ebx), %edi leal _ebss(%ebx), %ecx subl %edi, %ecx shrl $2, %ecx rep stosl /* * Adjust our own GOT */ leal _got(%ebx), %edx leal _egot(%ebx), %ecx 1: cmpl %ecx, %edx jae 2f addl %ebx, (%edx) addl $4, %edx jmp 1b 2: /* * Do the extraction, and jump to the new kernel.. */ /* push arguments for extract_kernel: */ pushl $z_output_len /* decompressed length, end of relocs */ movl BP_init_size(%esi), %eax subl $_end, %eax movl %ebx, %ebp subl %eax, %ebp pushl %ebp /* output address */ pushl $z_input_len /* input_len */ leal input_data(%ebx), %eax pushl %eax /* input_data */ leal boot_heap(%ebx), %eax pushl %eax /* heap area */ pushl %esi /* real mode pointer */ call extract_kernel /* returns kernel location in %eax */ addl $24, %esp /* * Jump to the extracted kernel. */ xorl %ebx, %ebx jmp *%eax #ifdef CONFIG_EFI_STUB .data efi32_config: .fill 5,8,0 .long efi_call_phys .long 0 .byte 0 #endif /* * Stack and heap for uncompression */ .bss .balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: .fill BOOT_STACK_SIZE, 1, 0 boot_stack_end:
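Both the 32-bit path above and the 64-bit path in head_64.S round the load address up to BP_kernel_alignment with the same decl/notl/andl idiom before comparing against LOAD_PHYSICAL_ADDR. The equivalent in C, assuming a power-of-two alignment, is a one-liner:

/* Sketch: the decl/notl/andl alignment trick from startup_32/startup_64. */
#include <stdint.h>

static uint64_t align_up(uint64_t addr, uint64_t align /* power of two */)
{
	return (addr + align - 1) & ~(align - 1);
}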
liva/minimal-linux
3,694
arch/x86/boot/compressed/efi_thunk_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming * * Early support for invoking 32-bit EFI services from a 64-bit kernel. * * Because this thunking occurs before ExitBootServices() we have to * restore the firmware's 32-bit GDT before we make EFI serivce calls, * since the firmware's 32-bit IDT is still currently installed and it * needs to be able to service interrupts. * * On the plus side, we don't have to worry about mangling 64-bit * addresses into 32-bits because we're executing with an identify * mapped pagetable and haven't transitioned to 64-bit virtual addresses * yet. */ #include <linux/linkage.h> #include <asm/msr.h> #include <asm/page_types.h> #include <asm/processor-flags.h> #include <asm/segment.h> .code64 .text ENTRY(efi64_thunk) push %rbp push %rbx subq $8, %rsp leaq efi_exit32(%rip), %rax movl %eax, 4(%rsp) leaq efi_gdt64(%rip), %rax movl %eax, (%rsp) movl %eax, 2(%rax) /* Fixup the gdt base address */ movl %ds, %eax push %rax movl %es, %eax push %rax movl %ss, %eax push %rax /* * Convert x86-64 ABI params to i386 ABI */ subq $32, %rsp movl %esi, 0x0(%rsp) movl %edx, 0x4(%rsp) movl %ecx, 0x8(%rsp) movq %r8, %rsi movl %esi, 0xc(%rsp) movq %r9, %rsi movl %esi, 0x10(%rsp) sgdt save_gdt(%rip) leaq 1f(%rip), %rbx movq %rbx, func_rt_ptr(%rip) /* * Switch to gdt with 32-bit segments. This is the firmware GDT * that was installed when the kernel started executing. This * pointer was saved at the EFI stub entry point in head_64.S. */ leaq efi32_boot_gdt(%rip), %rax lgdt (%rax) pushq $__KERNEL_CS leaq efi_enter32(%rip), %rax pushq %rax lretq 1: addq $32, %rsp lgdt save_gdt(%rip) pop %rbx movl %ebx, %ss pop %rbx movl %ebx, %es pop %rbx movl %ebx, %ds /* * Convert 32-bit status code into 64-bit. */ test %rax, %rax jz 1f movl %eax, %ecx andl $0x0fffffff, %ecx andl $0xf0000000, %eax shl $32, %rax or %rcx, %rax 1: addq $8, %rsp pop %rbx pop %rbp ret ENDPROC(efi64_thunk) ENTRY(efi_exit32) movq func_rt_ptr(%rip), %rax push %rax mov %rdi, %rax ret ENDPROC(efi_exit32) .code32 /* * EFI service pointer must be in %edi. * * The stack should represent the 32-bit calling convention. */ ENTRY(efi_enter32) movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %ss /* Reload pgtables */ movl %cr3, %eax movl %eax, %cr3 /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 /* Disable long mode via EFER */ movl $MSR_EFER, %ecx rdmsr btrl $_EFER_LME, %eax wrmsr call *%edi /* We must preserve return value */ movl %eax, %edi /* * Some firmware will return with interrupts enabled. Be sure to * disable them before we switch GDTs. */ cli movl 56(%esp), %eax movl %eax, 2(%eax) lgdtl (%eax) movl %cr4, %eax btsl $(X86_CR4_PAE_BIT), %eax movl %eax, %cr4 movl %cr3, %eax movl %eax, %cr3 movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr xorl %eax, %eax lldt %ax movl 60(%esp), %eax pushl $__KERNEL_CS pushl %eax /* Enable paging */ movl %cr0, %eax btsl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 lret ENDPROC(efi_enter32) .data .balign 8 .global efi32_boot_gdt efi32_boot_gdt: .word 0 .quad 0 save_gdt: .word 0 .quad 0 func_rt_ptr: .quad 0 .global efi_gdt64 efi_gdt64: .word efi_gdt64_end - efi_gdt64 .long 0 /* Filled out by user */ .word 0 .quad 0x0000000000000000 /* NULL descriptor */ .quad 0x00af9a000000ffff /* __KERNEL_CS */ .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ efi_gdt64_end:
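At the end of efi64_thunk the 32-bit EFI status is widened to the 64-bit EFI_STATUS convention: the error/class nibble living in bits 31:28 of the 32-bit value moves to bits 63:60, the low 28 bits stay put, and EFI_SUCCESS (0) is passed through unchanged. A C rendering of that andl/shl/or sequence, as a sketch:

/* Sketch: widening a 32-bit EFI status to 64 bits, mirroring the asm above. */
#include <stdint.h>

static uint64_t efi_status_32_to_64(uint32_t status32)
{
	if (status32 == 0)
		return 0;                                    /* EFI_SUCCESS stays 0 */
	return ((uint64_t)(status32 & 0xf0000000u) << 32)    /* bits 31:28 -> 63:60 */
	     |  (status32 & 0x0fffffffu);                     /* low 28 bits unchanged */
}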
liva/minimal-linux
2,348
arch/x86/boot/compressed/efi_stub_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * EFI call stub for IA32. * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. Note that this implementation is different from the one in * arch/x86/platform/efi/efi_stub_32.S because we're _already_ in physical * mode at this point. */ #include <linux/linkage.h> #include <asm/page_types.h> /* * efi_call_phys(void *, ...) is a function with variable parameters. * All the callers of this function assure that all the parameters are 4-bytes. */ /* * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. * So we'd better save all of them at the beginning of this function and restore * at the end no matter how many we use, because we can not assure EFI runtime * service functions will comply with gcc calling convention, too. */ .text ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * the values of these registers are the same. And, the corresponding * GDT entries are identical. So I will do nothing about segment reg * and GDT, but change GDT base register in prelog and epilog. */ /* * 1. Because we haven't been relocated by this point we need to * use relative addressing. */ call 1f 1: popl %edx subl $1b, %edx /* * 2. Now on the top of stack is the return * address in the caller of efi_call_phys(), then parameter 1, * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ popl %ecx movl %ecx, saved_return_addr(%edx) /* get the function pointer into ECX*/ popl %ecx movl %ecx, efi_rt_function_ptr(%edx) /* * 3. Call the physical function. */ call *%ecx /* * 4. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. We need to calculate our address * again because %ecx and %edx are not preserved across EFI function * calls. */ call 1f 1: popl %edx subl $1b, %edx movl efi_rt_function_ptr(%edx), %ecx pushl %ecx /* * 10. Push the saved return address onto the stack and return. */ movl saved_return_addr(%edx), %ecx pushl %ecx ret ENDPROC(efi_call_phys) .previous .data saved_return_addr: .long 0 efi_rt_function_ptr: .long 0
liva/minimal-linux
2,905
arch/x86/entry/vdso/vdso-layout.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/vdso.h> /* * Linker script for vDSO. This is an ELF shared object prelinked to * its virtual address, and with only one read-only segment. * This script controls its layout. */ #if defined(BUILD_VDSO64) # define SHDR_SIZE 64 #elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32) # define SHDR_SIZE 40 #else # error unknown VDSO target #endif #define NUM_FAKE_SHDRS 13 SECTIONS { /* * User/kernel shared data is before the vDSO. This may be a little * uglier than putting it after the vDSO, but it avoids issues with * non-allocatable things that dangle past the end of the PT_LOAD * segment. */ vvar_start = . - 3 * PAGE_SIZE; vvar_page = vvar_start; /* Place all vvars at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset; #define __VVAR_KERNEL_LDS #include <asm/vvar.h> #undef __VVAR_KERNEL_LDS #undef EMIT_VVAR pvclock_page = vvar_start + PAGE_SIZE; hvclock_page = vvar_start + 2 * PAGE_SIZE; . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) *(.data*) *(.sdata*) *(.got.plt) *(.got) *(.gnu.linkonce.d.*) *(.bss*) *(.dynbss*) *(.gnu.linkonce.b.*) /* * Ideally this would live in a C file, but that won't * work cleanly for x32 until we start building the x32 * C code using an x32 toolchain. */ VDSO_FAKE_SECTION_TABLE_START = .; . = . + NUM_FAKE_SHDRS * SHDR_SIZE; VDSO_FAKE_SECTION_TABLE_END = .; } :text .fake_shstrtab : { *(.fake_shstrtab) } :text .note : { *(.note.*) } :text :note .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text /* * Text is well-separated from actual data: there's plenty of * stuff that isn't used at runtime in between. */ .text : { *(.text*) } :text =0x90909090, /* * At the end so that eu-elflint stays happy when vdso2c strips * these. A better implementation would avoid allocating space * for these. */ .altinstructions : { *(.altinstructions) } :text .altinstr_replacement : { *(.altinstr_replacement) } :text /DISCARD/ : { *(.discard) *(.discard.*) *(__bug_table) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; }
liva/minimal-linux
4,861
arch/x86/entry/vdso/vdso32/sigreturn.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/unistd_32.h> #include <asm/asm-offsets.h> #ifndef SYSCALL_ENTER_KERNEL #define SYSCALL_ENTER_KERNEL int $0x80 #endif .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: .LSTART_sigreturn: popl %eax /* XXX does this mean it needs unwind info? */ movl $__NR_sigreturn, %eax SYSCALL_ENTER_KERNEL .LEND_sigreturn: nop .size __kernel_sigreturn,.-.LSTART_sigreturn .globl __kernel_rt_sigreturn .type __kernel_rt_sigreturn,@function ALIGN __kernel_rt_sigreturn: .LSTART_rt_sigreturn: movl $__NR_rt_sigreturn, %eax SYSCALL_ENTER_KERNEL .LEND_rt_sigreturn: nop .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn .previous .section .eh_frame,"a",@progbits .LSTARTFRAMEDLSI1: .long .LENDCIEDLSI1-.LSTARTCIEDLSI1 .LSTARTCIEDLSI1: .long 0 /* CIE ID */ .byte 1 /* Version number */ .string "zRS" /* NUL-terminated augmentation string */ .uleb128 1 /* Code alignment factor */ .sleb128 -4 /* Data alignment factor */ .byte 8 /* Return address register column */ .uleb128 1 /* Augmentation value length */ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */ .byte 0 /* DW_CFA_nop */ .align 4 .LENDCIEDLSI1: .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */ .LSTARTFDEDLSI1: .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */ /* HACK: The dwarf2 unwind routines will subtract 1 from the return address to get an address in the middle of the presumed call instruction. Since we didn't get here via a call, we need to include the nop before the real start to make up for it. */ .long .LSTART_sigreturn-1-. /* PC-relative start address */ .long .LEND_sigreturn-.LSTART_sigreturn+1 .uleb128 0 /* Augmentation */ /* What follows are the instructions for the table generation. We record the locations of each register saved. This is complicated by the fact that the "CFA" is always assumed to be the value of the stack pointer in the caller. This means that we must define the CFA of this body of code to be the saved value of the stack pointer in the sigcontext. Which also means that there is no fixed relation to the other saved registers, which means that we must use DW_CFA_expression to compute their addresses. It also means that when we adjust the stack with the popl, we have to do it all over again. */ #define do_cfa_expr(offset) \ .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ .uleb128 1f-0f; /* length */ \ 0: .byte 0x74; /* DW_OP_breg4 */ \ .sleb128 offset; /* offset */ \ .byte 0x06; /* DW_OP_deref */ \ 1: #define do_expr(regno, offset) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno; /* regno */ \ .uleb128 1f-0f; /* length */ \ 0: .byte 0x74; /* DW_OP_breg4 */ \ .sleb128 offset; /* offset */ \ 1: do_cfa_expr(IA32_SIGCONTEXT_sp+4) do_expr(0, IA32_SIGCONTEXT_ax+4) do_expr(1, IA32_SIGCONTEXT_cx+4) do_expr(2, IA32_SIGCONTEXT_dx+4) do_expr(3, IA32_SIGCONTEXT_bx+4) do_expr(5, IA32_SIGCONTEXT_bp+4) do_expr(6, IA32_SIGCONTEXT_si+4) do_expr(7, IA32_SIGCONTEXT_di+4) do_expr(8, IA32_SIGCONTEXT_ip+4) .byte 0x42 /* DW_CFA_advance_loc 2 -- nop; popl eax. 
*/ do_cfa_expr(IA32_SIGCONTEXT_sp) do_expr(0, IA32_SIGCONTEXT_ax) do_expr(1, IA32_SIGCONTEXT_cx) do_expr(2, IA32_SIGCONTEXT_dx) do_expr(3, IA32_SIGCONTEXT_bx) do_expr(5, IA32_SIGCONTEXT_bp) do_expr(6, IA32_SIGCONTEXT_si) do_expr(7, IA32_SIGCONTEXT_di) do_expr(8, IA32_SIGCONTEXT_ip) .align 4 .LENDFDEDLSI1: .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */ .LSTARTFDEDLSI2: .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */ /* HACK: See above wrt unwind library assumptions. */ .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */ .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1 .uleb128 0 /* Augmentation */ /* What follows are the instructions for the table generation. We record the locations of each register saved. This is slightly less complicated than the above, since we don't modify the stack pointer in the process. */ do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp) do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax) do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx) do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx) do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx) do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp) do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si) do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di) do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip) .align 4 .LENDFDEDLSI2: .previous
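The do_cfa_expr/do_expr macros above emit tiny DWARF expressions whose operands are LEB128-encoded via the .uleb128/.sleb128 directives. For reference, here is a small C encoder pair for that variable-length format; it is generic LEB128, not vDSO code, and the signed variant assumes arithmetic right shift on signed values (true for gcc/clang).

/* Sketch: ULEB128/SLEB128 encoders matching .uleb128/.sleb128 above. */
#include <stdint.h>
#include <stddef.h>

static size_t encode_uleb128(uint64_t v, uint8_t *out)
{
	size_t n = 0;
	do {
		uint8_t byte = v & 0x7f;
		v >>= 7;
		if (v)
			byte |= 0x80;              /* more bytes follow */
		out[n++] = byte;
	} while (v);
	return n;
}

static size_t encode_sleb128(int64_t v, uint8_t *out)
{
	size_t n = 0;
	for (;;) {
		uint8_t byte = v & 0x7f;
		v >>= 7;                           /* arithmetic shift keeps the sign */
		/* finished when the remaining value and the byte's sign bit agree */
		if ((v == 0 && !(byte & 0x40)) || (v == -1 && (byte & 0x40))) {
			out[n++] = byte;
			return n;
		}
		out[n++] = byte | 0x80;
	}
}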
liva/minimal-linux
2,410
arch/x86/entry/vdso/vdso32/system_call.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * AT_SYSINFO entry point */ #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> .text .globl __kernel_vsyscall .type __kernel_vsyscall,@function ALIGN __kernel_vsyscall: CFI_STARTPROC /* * Reshuffle regs so that all of any of the entry instructions * will preserve enough state. * * A really nice entry sequence would be: * pushl %edx * pushl %ecx * movl %esp, %ecx * * Unfortunately, naughty Android versions between July and December * 2015 actually hardcode the traditional Linux SYSENTER entry * sequence. That is severely broken for a number of reasons (ask * anyone with an AMD CPU, for example). Nonetheless, we try to keep * it working approximately as well as it ever worked. * * This link may eludicate some of the history: * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7 * personally, I find it hard to understand what's going on there. * * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE. * Execute an indirect call to the address in the AT_SYSINFO auxv * entry. That is the ONLY correct way to make a fast 32-bit system * call on Linux. (Open-coding int $0x80 is also fine, but it's * slow.) */ pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx, 0 pushl %edx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET edx, 0 pushl %ebp CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebp, 0 #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" #ifdef CONFIG_X86_64 /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \ SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 #else ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP #endif /* Enter using int $0x80 */ int $0x80 GLOBAL(int80_landing_pad) /* * Restore EDX and ECX in case they were clobbered. EBP is not * clobbered (the kernel restores it), but it's cleaner and * probably faster to pop it than to adjust ESP using addl. */ popl %ebp CFI_RESTORE ebp CFI_ADJUST_CFA_OFFSET -4 popl %edx CFI_RESTORE edx CFI_ADJUST_CFA_OFFSET -4 popl %ecx CFI_RESTORE ecx CFI_ADJUST_CFA_OFFSET -4 ret CFI_ENDPROC .size __kernel_vsyscall,.-__kernel_vsyscall .previous
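The comment above is emphatic that 32-bit user code should reach __kernel_vsyscall through the AT_SYSINFO auxv entry instead of open-coding SYSENTER. A hedged sketch of that pattern follows; it only makes sense compiled as 32-bit x86 (e.g. gcc -m32), getpid's number 20 is the i386 value, and the register handling relies on __kernel_vsyscall preserving %ecx/%edx/%ebp as shown above.

/* Sketch: calling __kernel_vsyscall via AT_SYSINFO (assumes a 32-bit x86 build). */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vsyscall = getauxval(AT_SYSINFO);
	if (!vsyscall)
		return 1;                  /* no AT_SYSINFO (e.g. a 64-bit process) */

	long pid;
	/* i386 convention: syscall number in %eax, result back in %eax; getpid = 20. */
	asm volatile("call *%1"
		     : "=a"(pid)
		     : "r"(vsyscall), "a"(20L)
		     : "ecx", "edx", "cc", "memory");
	printf("getpid via AT_SYSINFO: %ld\n", pid);
	return 0;
}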
liva/minimal-linux
1,715
arch/x86/entry/vdso/vdso32/note.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. * Here we can supply some information useful to userland. */ #include <linux/version.h> #include <linux/elfnote.h> /* Ideally this would use UTS_NAME, but using a quoted string here doesn't work. Remember to change this when changing the kernel's name. */ ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END #ifdef CONFIG_XEN /* * Add a special note telling glibc's dynamic linker a fake hardware * flavor that it will use to choose the search path for libraries in the * same way it uses real hardware capabilities like "mmx". * We supply "nosegneg" as the fake capability, to indicate that we * do not like negative offsets in instructions using segment overrides, * since we implement those inefficiently. This makes it possible to * install libraries optimized to avoid those access patterns in someplace * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file * corresponding to the bits here is needed to make ldconfig work right. * It should contain: * hwcap 1 nosegneg * to match the mapping of bit to name that we give here. * * At runtime, the fake hardware feature will be considered to be present * if its bit is set in the mask word. So, we start with the mask 0, and * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen. */ #include "../../xen/vdso.h" /* Defines VDSO_NOTE_NONEGSEG_BIT. */ ELFNOTE_START(GNU, 2, "a") .long 1 /* ncaps */ VDSO32_NOTE_MASK: /* Symbol used by arch/x86/xen/setup.c */ .long 0 /* mask */ .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */ ELFNOTE_END #endif
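ELFNOTE_START/ELFNOTE_END expand to the standard ELF note record: a three-word header (namesz, descsz, type) followed by the name and descriptor, each padded to 4 bytes. As a reference, this is the layout the Linux version note above ends up with; the structs are an illustration of the generic format, not kernel definitions.

/* Sketch: on-disk layout of ELFNOTE_START(Linux, 0, "a") ... ELFNOTE_END above. */
#include <stdint.h>

struct elf_note_hdr {
	uint32_t n_namesz;           /* name length, including the trailing NUL */
	uint32_t n_descsz;           /* descriptor length */
	uint32_t n_type;             /* note type (0 for the version note) */
};

struct linux_version_note {
	struct elf_note_hdr hdr;     /* { 6, 4, 0 } for this note */
	char name[8];                /* "Linux\0" padded to a 4-byte boundary */
	uint32_t linux_version_code; /* the .long LINUX_VERSION_CODE payload */
};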
liva/minimal-linux
2,974
arch/x86/kernel/acpi/wakeup_64.S
.text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/pgtable_types.h> #include <asm/page_types.h> #include <asm/msr.h> #include <asm/asm-offsets.h> #include <asm/frame.h> # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 .code64 /* * Hooray, we are in Long 64-bit mode (but still running in low memory) */ ENTRY(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax jne bogus_64_magic movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds movw %ax, %es movw %ax, %fs movw %ax, %gs movq saved_rsp, %rsp movq saved_rbx, %rbx movq saved_rdi, %rdi movq saved_rsi, %rsi movq saved_rbp, %rbp movq saved_rip, %rax jmp *%rax ENDPROC(wakeup_long64) bogus_64_magic: jmp bogus_64_magic ENTRY(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp xorl %eax, %eax call save_processor_state movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) movq %rsi, pt_regs_si(%rax) movq %rdi, pt_regs_di(%rax) movq %rbx, pt_regs_bx(%rax) movq %rcx, pt_regs_cx(%rax) movq %rdx, pt_regs_dx(%rax) movq %r8, pt_regs_r8(%rax) movq %r9, pt_regs_r9(%rax) movq %r10, pt_regs_r10(%rax) movq %r11, pt_regs_r11(%rax) movq %r12, pt_regs_r12(%rax) movq %r13, pt_regs_r13(%rax) movq %r14, pt_regs_r14(%rax) movq %r15, pt_regs_r15(%rax) pushfq popq pt_regs_flags(%rax) movq $.Lresume_point, saved_rip(%rip) movq %rsp, saved_rsp movq %rbp, saved_rbp movq %rbx, saved_rbx movq %rdi, saved_rdi movq %rsi, saved_rsi addq $8, %rsp movl $3, %edi xorl %eax, %eax call x86_acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp .Lresume_point .align 4 .Lresume_point: /* We don't restore %rax, it must be 0 anyway */ movq $saved_context, %rax movq saved_context_cr4(%rax), %rbx movq %rbx, %cr4 movq saved_context_cr3(%rax), %rbx movq %rbx, %cr3 movq saved_context_cr2(%rax), %rbx movq %rbx, %cr2 movq saved_context_cr0(%rax), %rbx movq %rbx, %cr0 pushq pt_regs_flags(%rax) popfq movq pt_regs_sp(%rax), %rsp movq pt_regs_bp(%rax), %rbp movq pt_regs_si(%rax), %rsi movq pt_regs_di(%rax), %rdi movq pt_regs_bx(%rax), %rbx movq pt_regs_cx(%rax), %rcx movq pt_regs_dx(%rax), %rdx movq pt_regs_r8(%rax), %r8 movq pt_regs_r9(%rax), %r9 movq pt_regs_r10(%rax), %r10 movq pt_regs_r11(%rax), %r11 movq pt_regs_r12(%rax), %r12 movq pt_regs_r13(%rax), %r13 movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 #ifdef CONFIG_KASAN /* * The suspend path may have poisoned some areas deeper in the stack, * which we now need to unpoison. */ movq %rsp, %rdi call kasan_unpoison_task_stack_below #endif xorl %eax, %eax addq $8, %rsp FRAME_END jmp restore_processor_state ENDPROC(do_suspend_lowlevel) .data ENTRY(saved_rbp) .quad 0 ENTRY(saved_rsi) .quad 0 ENTRY(saved_rdi) .quad 0 ENTRY(saved_rbx) .quad 0 ENTRY(saved_rip) .quad 0 ENTRY(saved_rsp) .quad 0 ENTRY(saved_magic) .quad 0
liva/minimal-linux
1,717
arch/x86/kernel/acpi/wakeup_32.S
.text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 .code32 ALIGN ENTRY(wakeup_pmode_return) wakeup_pmode_return: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %fs movw %ax, %gs movw $__USER_DS, %ax movw %ax, %ds movw %ax, %es # reload the gdt, as we need the full 32 bit address lidt saved_idt lldt saved_ldt ljmp $(__KERNEL_CS), $1f 1: movl %cr3, %eax movl %eax, %cr3 wbinvd # and restore the stack ... but you need gdt for this to work movl saved_context_esp, %esp movl %cs:saved_magic, %eax cmpl $0x12345678, %eax jne bogus_magic # jump to place where we left off movl saved_eip, %eax jmp *%eax bogus_magic: jmp bogus_magic save_registers: sidt saved_idt sldt saved_ldt str saved_tss leal 4(%esp), %eax movl %eax, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags movl $ret_point, saved_eip ret restore_registers: movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl ret ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers pushl $3 call x86_acpi_enter_sleep_state addl $4, %esp # In case of S3 failure, we'll emerge here. Jump # to ret_point to recover jmp ret_point .p2align 4,,7 ret_point: call restore_registers call restore_processor_state ret .data ALIGN ENTRY(saved_magic) .long 0 ENTRY(saved_eip) .long 0 # saved registers saved_idt: .long 0,0 saved_ldt: .long 0 saved_tss: .long 0
liva/minimal-linux
4,185
arch/x86/realmode/rm/reboot.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include "realmode.h" /* * The following code and data reboots the machine by switching to real * mode and jumping to the BIOS reset entry point, as if the CPU has * really been reset. The previous version asked the keyboard * controller to pulse the CPU reset line, which is more thorough, but * doesn't work with at least one type of 486 motherboard. It is easy * to stop this code working; hence the copious comments. * * This code is called with the restart type (0 = BIOS, 1 = APM) in * the primary argument register (%eax for 32 bit, %edi for 64 bit). */ .section ".text32", "ax" .code32 ENTRY(machine_real_restart_asm) #ifdef CONFIG_X86_64 /* Switch to trampoline GDT as it is guaranteed < 4 GiB */ movl $__KERNEL_DS, %eax movl %eax, %ds lgdtl pa_tr_gdt /* Disable paging to drop us out of long mode */ movl %cr0, %eax andl $~X86_CR0_PG, %eax movl %eax, %cr0 ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off GLOBAL(machine_real_restart_paging_off) xorl %eax, %eax xorl %edx, %edx movl $MSR_EFER, %ecx wrmsr movl %edi, %eax #endif /* CONFIG_X86_64 */ /* Set up the IDT for real mode. */ lidtl pa_machine_real_restart_idt /* * Set up a GDT from which we can load segment descriptors for real * mode. The GDT is not used in real mode; it is just needed here to * prepare the descriptors. */ lgdtl pa_machine_real_restart_gdt /* * Load the data segment registers with 16-bit compatible values */ movl $16, %ecx movl %ecx, %ds movl %ecx, %es movl %ecx, %fs movl %ecx, %gs movl %ecx, %ss ljmpw $8, $1f /* * This is 16-bit protected mode code to disable paging and the cache, * switch to real mode and jump to the BIOS reset code. * * The instruction that switches to real mode by writing to CR0 must be * followed immediately by a far jump instruction, which set CS to a * valid value for real mode, and flushes the prefetch queue to avoid * running instructions that have already been decoded in protected * mode. * * Clears all the flags except ET, especially PG (paging), PE * (protected-mode enable) and TS (task switch for coprocessor state * save). Flushes the TLB after paging has been disabled. Sets CD and * NW, to disable the cache on a 486, and invalidates the cache. This * is more like the state of a 486 after reset. I don't know if * something else should be done for other chips. * * More could be done here to set up the registers as if a CPU reset had * occurred; hopefully real BIOSs don't assume much. This is not the * actual BIOS entry point, anyway (that is at 0xfffffff0). * * Most of this work is probably excessive, but it is what is tested. */ .text .code16 .balign 16 machine_real_restart_asm16: 1: xorl %ecx, %ecx movl %cr0, %edx andl $0x00000011, %edx orl $0x60000000, %edx movl %edx, %cr0 movl %ecx, %cr3 movl %cr0, %edx testl $0x60000000, %edx /* If no cache bits -> no wbinvd */ jz 2f wbinvd 2: andb $0x10, %dl movl %edx, %cr0 LJMPW_RM(3f) 3: andw %ax, %ax jz bios apm: movw $0x1000, %ax movw %ax, %ss movw $0xf000, %sp movw $0x5307, %ax movw $0x0001, %bx movw $0x0003, %cx int $0x15 /* This should never return... 
*/ bios: ljmpw $0xf000, $0xfff0 .section ".rodata", "a" .balign 16 GLOBAL(machine_real_restart_idt) .word 0xffff /* Length - real mode default value */ .long 0 /* Base - real mode default value */ END(machine_real_restart_idt) .balign 16 GLOBAL(machine_real_restart_gdt) /* Self-pointer */ .word 0xffff /* Length - real mode default value */ .long pa_machine_real_restart_gdt .word 0 /* * 16-bit code segment pointing to real_mode_seg * Selector value 8 */ .word 0xffff /* Limit */ .long 0x9b000000 + pa_real_mode_base .word 0 /* * 16-bit data segment with the selector value 16 = 0x10 and * base value 0x100; since this is consistent with real mode * semantics we don't have to reload the segments once CR0.PE = 0. */ .quad GDT_ENTRY(0x0093, 0x100, 0xffff) END(machine_real_restart_gdt)
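The restart GDT above ends with GDT_ENTRY(0x0093, 0x100, 0xffff), a 16-bit data segment with base 0x100 and limit 0xffff. The helper below packs a descriptor in the style of that GDT_ENTRY(flags, base, limit) macro so constants like this can be checked offline; it is a stand-alone sketch, and the bit layout is the standard x86 descriptor format.

/* Sketch: packing an x86 segment descriptor, GDT_ENTRY(flags, base, limit) style. */
#include <stdint.h>
#include <stdio.h>

static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
	return ((base  & 0xff000000ull) << (56 - 24))    /* base 31:24         */
	     | ((flags & 0x0000f0ffull) << 40)           /* access + flag bits */
	     | ((limit & 0x000f0000ull) << (48 - 16))    /* limit 19:16        */
	     | ((base  & 0x00ffffffull) << 16)           /* base 23:0          */
	     |  (limit & 0x0000ffffull);                 /* limit 15:0         */
}

int main(void)
{
	/* The 16-bit data segment used above: type 0x93, base 0x100, limit 0xffff. */
	printf("%#018llx\n",
	       (unsigned long long)gdt_entry(0x0093, 0x100, 0xffff));
	return 0;                /* prints 0x000093000100ffff */
}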
liva/minimal-linux
1,871
arch/x86/realmode/rm/trampoline_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Trampoline.S Derived from Setup.S by Linus Torvalds * * 4 Jan 1997 Michael Chastain: changed to gnu as. * * This is only used for booting secondary CPUs in SMP machine * * Entry: CS:IP point to the start of our code, we are * in real mode with no stack, but the rest of the * trampoline page to make our stack and everything else * is a mystery. * * We jump into arch/x86/kernel/head_32.S. * * On entry to trampoline_start, the processor is in real mode * with 16-bit addressing and 16-bit data. CS has some value * and IP is zero. Thus, we load CS to the physical segment * of the real mode code before doing anything further. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include "realmode.h" .text .code16 .balign PAGE_SIZE ENTRY(trampoline_start) wbinvd # Needed for NUMA-Q should be harmless for others LJMPW_RM(1f) 1: mov %cs, %ax # Code and data in the same place mov %ax, %ds cli # We should be safe anyway movl tr_start, %eax # where we need to go movl $0xA5A5A5A5, trampoline_status # write marker for master knows we're running /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default * operand size is 16bit. Use lgdtl instead to force operand size * to 32 bit. */ lidtl tr_idt # load idt with 0, 0 lgdtl tr_gdt # load gdt with whatever is appropriate movw $1, %dx # protected mode (PE) bit lmsw %dx # into protected mode ljmpl $__BOOT_CS, $pa_startup_32 .section ".text32","ax" .code32 ENTRY(startup_32) # note: also used from wakeup_asm.S jmp *%eax .bss .balign 8 GLOBAL(trampoline_header) tr_start: .space 4 tr_gdt_pad: .space 2 tr_gdt: .space 6 END(trampoline_header) #include "trampoline_common.S"
liva/minimal-linux
4,454
arch/x86/realmode/rm/trampoline_64.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 *	Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 *	4 Jan 1997 Michael Chastain: changed to gnu as.
 *	15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *	Entry: CS:IP point to the start of our code, we are
 *	in real mode with no stack, but the rest of the
 *	trampoline page to make our stack and everything else
 *	is a mystery.
 *
 *	On entry to trampoline_start, the processor is in real mode
 *	with 16-bit addressing and 16-bit data. CS has some value
 *	and IP is zero. Thus, data addresses need to be absolute
 *	(no relocation) and are taken with regard to r_base.
 *
 *	With the addition of trampoline_level4_pgt this code can
 *	now enter a 64bit kernel that lives at arbitrary 64bit
 *	physical addresses.
 *
 *	If you work on this file, check the object module with objdump
 *	--full-contents --reloc to make sure there are no relocation
 *	entries.
 */

#include <linux/linkage.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include <asm/realmode.h>
#include "realmode.h"

	.text
	.code16

	.balign	PAGE_SIZE
ENTRY(trampoline_start)
	cli			# We should be safe anyway
	wbinvd

	LJMPW_RM(1f)
1:
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	movl	$0xA5A5A5A5, trampoline_status
	# write marker for master knows we're running

	# Setup stack
	movl	$rm_stack_end, %esp

	call	verify_cpu		# Verify the cpu supports long mode
	testl	%eax, %eax		# Check for return code
	jnz	no_longmode

	/*
	 * GDT tables in non default location kernel can be beyond 16MB and
	 * lgdt will not be able to load the address as in real mode default
	 * operand size is 16bit. Use lgdtl instead to force operand size
	 * to 32 bit.
	 */

	lidtl	tr_idt			# load idt with 0, 0
	lgdtl	tr_gdt			# load gdt with whatever is appropriate

	movw	$__KERNEL_DS, %dx	# Data segment descriptor

	# Enable protected mode
	movl	$X86_CR0_PE, %eax	# protected mode (PE) bit
	movl	%eax, %cr0		# into protected mode

	# flush prefetch and jump to startup_32
	ljmpl	$__KERNEL32_CS, $pa_startup_32

no_longmode:
	hlt
	jmp no_longmode
#include "../kernel/verify_cpu.S"

	.section ".text32","ax"
	.code32
	.balign 4
ENTRY(startup_32)
	movl	%edx, %ss
	addl	$pa_real_mode_base, %esp
	movl	%edx, %ds
	movl	%edx, %es
	movl	%edx, %fs
	movl	%edx, %gs

	/*
	 * Check for memory encryption support. This is a safety net in
	 * case BIOS hasn't done the necessary step of setting the bit in
	 * the MSR for this AP. If SME is active and we've gotten this far
	 * then it is safe for us to set the MSR bit and continue. If we
	 * don't we'll eventually crash trying to execute encrypted
	 * instructions.
	 */
	bt	$TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
	jnc	.Ldone
	movl	$MSR_K8_SYSCFG, %ecx
	rdmsr
	bts	$MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
	jc	.Ldone

	/*
	 * Memory encryption is enabled but the SME enable bit for this
	 * CPU has not been set. It is safe to set it, so do so.
	 */
	wrmsr

.Ldone:
	movl	pa_tr_cr4, %eax
	movl	%eax, %cr4		# Enable PAE mode

	# Setup trampoline 4 level pagetables
	movl	$pa_trampoline_pgd, %eax
	movl	%eax, %cr3

	# Set up EFER
	movl	pa_tr_efer, %eax
	movl	pa_tr_efer + 4, %edx
	movl	$MSR_EFER, %ecx
	wrmsr

	# Enable paging and in turn activate Long Mode
	movl	$(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
	movl	%eax, %cr0

	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */
	ljmpl	$__KERNEL_CS, $pa_startup_64

	.section ".text64","ax"
	.code64
	.balign 4
ENTRY(startup_64)
	# Now jump into the kernel using virtual addresses
	jmpq	*tr_start(%rip)

	.section ".rodata","a"
	# Duplicate the global descriptor table
	# so the kernel can live anywhere
	.balign	16
	.globl tr_gdt
tr_gdt:
	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
	.long	pa_tr_gdt
	.short	0
	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
	.quad	0x00af9b000000ffff	# __KERNEL_CS
	.quad	0x00cf93000000ffff	# __KERNEL_DS
tr_gdt_end:

	.bss
	.balign	PAGE_SIZE
GLOBAL(trampoline_pgd)		.space	PAGE_SIZE

	.balign	8
GLOBAL(trampoline_header)
	tr_start:		.space	8
	GLOBAL(tr_efer)		.space	8
	GLOBAL(tr_cr4)		.space	4
	GLOBAL(tr_flags)	.space	4
END(trampoline_header)

#include "trampoline_common.S"
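The trampoline_header block above (tr_start, tr_efer, tr_cr4, tr_flags) is filled in by C code through a matching structure; the authoritative definition lives in the kernel's asm/realmode.h, which is not part of this file. As a hedged sketch, a structure mirroring the .space layout above would look like this (the struct name and types here are illustrative, only the field sizes are taken from the assembly):

#include <stdint.h>

/* Sketch only: mirrors the .space directives in trampoline_header above.
 * The real kernel definition is struct trampoline_header in
 * arch/x86/include/asm/realmode.h; sizes below follow the assembly
 * (8 + 8 + 4 + 4 bytes), so the whole block is 24 bytes. */
struct trampoline_header_sketch {
	uint64_t start;   /* tr_start: 64-bit entry point jumped to via jmpq above */
	uint64_t efer;    /* tr_efer:  EFER value loaded with wrmsr above          */
	uint32_t cr4;     /* tr_cr4:   CR4 value (PAE etc.) loaded above           */
	uint32_t flags;   /* tr_flags: TH_FLAGS_SME_ACTIVE_BIT is tested above     */
};

_Static_assert(sizeof(struct trampoline_header_sketch) == 24,
	       "layout must match the 24-byte assembly block");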
liva/minimal-linux
3,765
arch/x86/realmode/rm/wakeup_asm.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ACPI wakeup real mode startup stub
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/msr-index.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/processor-flags.h>
#include "realmode.h"
#include "wakeup.h"

	.code16

/* This should match the structure in wakeup.h */
	.section ".data", "aw"

	.balign	16
GLOBAL(wakeup_header)
	video_mode:	.short	0	/* Video mode number */
	pmode_entry:	.long	0
	pmode_cs:	.short	__KERNEL_CS
	pmode_cr0:	.long	0	/* Saved %cr0 */
	pmode_cr3:	.long	0	/* Saved %cr3 */
	pmode_cr4:	.long	0	/* Saved %cr4 */
	pmode_efer:	.quad	0	/* Saved EFER */
	pmode_gdt:	.quad	0
	pmode_misc_en:	.quad	0	/* Saved MISC_ENABLE MSR */
	pmode_behavior:	.long	0	/* Wakeup behavior flags */
	realmode_flags:	.long	0
	real_magic:	.long	0
	signature:	.long	WAKEUP_HEADER_SIGNATURE
END(wakeup_header)

	.text
	.code16

	.balign	16
ENTRY(wakeup_start)
	cli
	cld

	LJMPW_RM(3f)
3:
	/* Apparently some dimwit BIOS programmers don't know how to
	   program a PM to RM transition, and we might end up here with
	   junk in the data segment descriptor registers.  The only way
	   to repair that is to go into PM and fix it ourselves... */
	movw	$16, %cx
	lgdtl	%cs:wakeup_gdt
	movl	%cr0, %eax
	orb	$X86_CR0_PE, %al
	movl	%eax, %cr0
	ljmpw	$8, $2f
2:
	movw	%cx, %ds
	movw	%cx, %es
	movw	%cx, %ss
	movw	%cx, %fs
	movw	%cx, %gs

	andb	$~X86_CR0_PE, %al
	movl	%eax, %cr0
	LJMPW_RM(3f)
3:
	/* Set up segments */
	movw	%cs, %ax
	movw	%ax, %ss
	movl	$rm_stack_end, %esp
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs

	lidtl	wakeup_idt

	/* Clear the EFLAGS */
	pushl	$0
	popfl

	/* Check header signature... */
	movl	signature, %eax
	cmpl	$WAKEUP_HEADER_SIGNATURE, %eax
	jne	bogus_real_magic

	/* Check we really have everything... */
	movl	end_signature, %eax
	cmpl	$REALMODE_END_SIGNATURE, %eax
	jne	bogus_real_magic

	/* Call the C code */
	calll	main

	/* Restore MISC_ENABLE before entering protected mode, in case
	   BIOS decided to clear XD_DISABLE during S3. */
	movl	pmode_behavior, %edi
	btl	$WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %edi
	jnc	1f

	movl	pmode_misc_en, %eax
	movl	pmode_misc_en + 4, %edx
	movl	$MSR_IA32_MISC_ENABLE, %ecx
	wrmsr
1:

	/* Do any other stuff... */

#ifndef CONFIG_64BIT
	/* This could also be done in C code... */
	movl	pmode_cr3, %eax
	movl	%eax, %cr3

	btl	$WAKEUP_BEHAVIOR_RESTORE_CR4, %edi
	jnc	1f
	movl	pmode_cr4, %eax
	movl	%eax, %cr4
1:
	btl	$WAKEUP_BEHAVIOR_RESTORE_EFER, %edi
	jnc	1f
	movl	pmode_efer, %eax
	movl	pmode_efer + 4, %edx
	movl	$MSR_EFER, %ecx
	wrmsr
1:

	lgdtl	pmode_gdt

	/* This really couldn't... */
	movl	pmode_entry, %eax
	movl	pmode_cr0, %ecx
	movl	%ecx, %cr0
	ljmpl	$__KERNEL_CS, $pa_startup_32
	/* -> jmp *%eax in trampoline_32.S */
#else
	jmp	trampoline_start
#endif

bogus_real_magic:
1:
	hlt
	jmp	1b

	.section ".rodata","a"

	/*
	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
	 * that is, with limits set to 4 GB.  At least the Lenovo
	 * Thinkpad X61 is known to need this for the video BIOS
	 * initialization quirk to work; this is likely to also
	 * be the case for other laptops or integrated video devices.
	 */

	.balign	16
GLOBAL(wakeup_gdt)
	.word	3*8-1		/* Self-descriptor */
	.long	pa_wakeup_gdt
	.word	0

	.word	0xffff		/* 16-bit code segment @ real_mode_base */
	.long	0x9b000000 + pa_real_mode_base
	.word	0x008f		/* big real mode */

	.word	0xffff		/* 16-bit data segment @ real_mode_base */
	.long	0x93000000 + pa_real_mode_base
	.word	0x008f		/* big real mode */
END(wakeup_gdt)

	.section ".rodata","a"
	.balign	8

	/* This is the standard real-mode IDT */
	.balign	16
GLOBAL(wakeup_idt)
	.word	0xffff		/* limit */
	.long	0		/* address */
	.word	0
END(wakeup_idt)
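The comment at the top of wakeup_header says the data block must match the structure declared in wakeup.h, which is not reproduced here. A hedged C sketch of a structure matching the field sizes laid out in the assembly (.short/.long/.quad) follows; because a 32-bit field directly follows a 16-bit one, the structure has to be packed. Field names copy the assembly labels, the struct name and exact types are illustrative:

#include <stdint.h>

/* Sketch only: mirrors the wakeup_header layout above; the authoritative
 * definition lives in the realmode wakeup.h header. Packed, because
 * pmode_entry (.long) immediately follows video_mode (.short). */
struct wakeup_header_sketch {
	uint16_t video_mode;      /* Video mode number */
	uint32_t pmode_entry;     /* protected-mode entry point */
	uint16_t pmode_cs;
	uint32_t pmode_cr0;       /* Saved %cr0 */
	uint32_t pmode_cr3;       /* Saved %cr3 */
	uint32_t pmode_cr4;       /* Saved %cr4 */
	uint64_t pmode_efer;      /* Saved EFER */
	uint64_t pmode_gdt;
	uint64_t pmode_misc_en;   /* Saved MISC_ENABLE MSR */
	uint32_t pmode_behavior;  /* Wakeup behavior flags */
	uint32_t realmode_flags;
	uint32_t real_magic;
	uint32_t signature;       /* WAKEUP_HEADER_SIGNATURE */
} __attribute__((packed));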
lizhirui/DreamCore
1,879
software/coremark_app/startup.S
# SPDX-License-Identifier: Apache-2.0 # Copyright 2019 Western Digital Corporation or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #Simple start up file for the reference design .section ".text.init" .global _start .type _start, @function _start: #clear minstret csrw minstret, zero csrw minstreth, zero #clear registers li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10,0 li x11,0 li x12,0 li x13,0 li x14,0 li x15,0 li x16,0 li x17,0 li x18,0 li x19,0 li x20,0 li x21,0 li x22,0 li x23,0 li x24,0 li x25,0 li x26,0 li x27,0 li x28,0 li x29,0 li x30,0 li x31,0 #cache configuration #li t1, 0x55555655 #csrw 0x7c0, t1 #setup MEIP and MTIP #li t0, (1<<7 | 1<<11) #csrw mie, t0 #li t0, (1<<3) #csrw mstatus, t0 # initialize global pointer .option push .option norelax la gp, __global_pointer$ .option pop la sp, _sp la t0, __bss_start la t1, __bss_end zero_bss: sw x0, 0(t0) addi t0, t0, 4 blt t0, t1, zero_bss # #hart id csrr a0, mhartid li a1, 1 1: bgeu a0, a1, 1b # argc = argv = 0 li a0, 0 li a1, 0 call main fence csrw 0x804, 1 # loop here 2: j 2b .global send_char send_char: csrw 0x810, a0 send_char_wait: csrr t0, 0x810 srl t0, t0, 31 bnez t0, send_char_wait ret
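The send_char routine above writes the character to the custom CSR 0x810 and then spins until bit 31 of that CSR reads back as zero. Assuming a GNU-style RISC-V toolchain (the file already relies on GNU directives such as .option norelax), the same routine can be expressed in C with inline assembly; the CSR number and the busy-bit position are taken directly from the assembly above, everything else is illustrative:

#include <stdint.h>

/* Sketch of send_char above in C: write the byte to the UART-like CSR 0x810,
 * then busy-wait while bit 31 (busy flag) stays set. */
static void send_char_c(uint32_t c)
{
    uint32_t status;

    __asm__ volatile("csrw 0x810, %0" : : "r"(c));
    do {
        __asm__ volatile("csrr %0, 0x810" : "=r"(status));
    } while (status >> 31);   /* bit 31 set => transmitter still busy */
}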
lizhirui/DreamCore
1,879
software/dhrystone_app/startup.S
# SPDX-License-Identifier: Apache-2.0 # Copyright 2019 Western Digital Corporation or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #Simple start up file for the reference design .section ".text.init" .global _start .type _start, @function _start: #clear minstret csrw minstret, zero csrw minstreth, zero #clear registers li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10,0 li x11,0 li x12,0 li x13,0 li x14,0 li x15,0 li x16,0 li x17,0 li x18,0 li x19,0 li x20,0 li x21,0 li x22,0 li x23,0 li x24,0 li x25,0 li x26,0 li x27,0 li x28,0 li x29,0 li x30,0 li x31,0 #cache configuration #li t1, 0x55555655 #csrw 0x7c0, t1 #setup MEIP and MTIP #li t0, (1<<7 | 1<<11) #csrw mie, t0 #li t0, (1<<3) #csrw mstatus, t0 # initialize global pointer .option push .option norelax la gp, __global_pointer$ .option pop la sp, _sp la t0, __bss_start la t1, __bss_end zero_bss: sw x0, 0(t0) addi t0, t0, 4 blt t0, t1, zero_bss # #hart id csrr a0, mhartid li a1, 1 1: bgeu a0, a1, 1b # argc = argv = 0 li a0, 0 li a1, 0 call main fence csrw 0x804, 1 # loop here 2: j 2b .global send_char send_char: csrw 0x810, a0 send_char_wait: csrr t0, 0x810 srl t0, t0, 31 bnez t0, send_char_wait ret
lizhirui/DreamCore
1,481
software/bootloader/main/src/startup.S
//#define USE_MODEL .section .text.entry .global _start _start: csrw minstret, zero csrw minstreth, zero li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10, 0 li x11, 0 li x12, 0 li x13, 0 li x14, 0 li x15, 0 li x16, 0 li x17, 0 li x18, 0 li x19, 0 li x20, 0 li x21, 0 li x22, 0 li x23, 0 li x24, 0 li x25, 0 li x26, 0 li x27, 0 li x28, 0 li x29, 0 li x30, 0 li x31, 0 .option push .option norelax la gp, __global_pointer$ .option pop la sp, __stack_default la t0, __bss_start la t1, __bss_end zero_bss: sw x0, 0(t0) addi t0, t0, 4 blt t0, t1, zero_bss li a0, 0 li a1, 0 call main #ifdef USE_MODEL .global send_char send_char: csrw 0x800, a0 ret .global read_char read_char: csrr a0, 0x800 li t0, 0x80000000 and t0, t0, a0 bnez t0, clear_char ret clear_char: li t0, 0x80000000 csrw 0x800, t0 andi a0, a0, 0xff li t0, 0x40000000 or a0, a0, t0 ret #else .global send_char send_char: csrw 0x810, a0 send_char_wait: csrr t0, 0x810 srl t0, t0, 31 bnez t0, send_char_wait ret .global read_char read_char: csrr a0, 0x810 li t0, 0x40000000 and t0, t0, a0 bnez t0, clear_char ret clear_char: li t0, 0x80000000 csrw 0x810, t0 ret #endif
lizhirui/DreamCore
1,048
software/hello_world_app/main/src/startup.S
//#define USE_MODEL .section .text.entry .global _start _start: csrw minstret, zero csrw minstreth, zero li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10, 0 li x11, 0 li x12, 0 li x13, 0 li x14, 0 li x15, 0 li x16, 0 li x17, 0 li x18, 0 li x19, 0 li x20, 0 li x21, 0 li x22, 0 li x23, 0 li x24, 0 li x25, 0 li x26, 0 li x27, 0 li x28, 0 li x29, 0 li x30, 0 li x31, 0 .option push .option norelax la gp, __global_pointer$ .option pop la sp, __stack_default la t0, __bss_start la t1, __bss_end zero_bss: sw x0, 0(t0) addi t0, t0, 4 blt t0, t1, zero_bss li a0, 0 li a1, 0 call main #ifdef USE_MODEL .global send_char send_char: csrw 0x800, a0 ret #else .global send_char send_char: csrw 0x810, a0 send_char_wait: csrr t0, 0x810 srl t0, t0, 31 bnez t0, send_char_wait ret #endif
lizhirui/DreamCore
2,077
software/ext_int_test_app/main/src/startup.S
//#define USE_MODEL .section .text.entry .global _start _start: csrw minstret, zero csrw minstreth, zero la t0, trap_entry csrw mtvec, t0 li t0, 0x800 csrw mie, t0 li t0, 0x00000008 csrw mstatus, t0 li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10, 0 li x11, 0 li x12, 0 li x13, 0 li x14, 0 li x15, 0 li x16, 0 li x17, 0 li x18, 0 li x19, 0 li x20, 0 li x21, 0 li x22, 0 li x23, 0 li x24, 0 li x25, 0 li x26, 0 li x27, 0 li x28, 0 li x29, 0 li x30, 0 li x31, 0 .option push .option norelax la gp, __global_pointer$ .option pop la sp, __stack_default la t0, __bss_start la t1, __bss_end zero_bss: sw x0, 0(t0) addi t0, t0, 4 blt t0, t1, zero_bss li a0, 0 li a1, 0 call main #ifdef USE_MODEL .global send_char send_char: csrw 0x800, a0 ret #else .global send_char send_char: csrw 0x810, a0 send_char_wait: csrr t0, 0x810 srl t0, t0, 31 bnez t0, send_char_wait ret #endif trap_entry: addi sp, sp, -17 * 4 sw x1, 0 * 4(sp) sw x4, 1 * 4(sp) sw x5, 2 * 4(sp) sw x6, 3 * 4(sp) sw x7, 4 * 4(sp) sw x10, 5 * 4(sp) sw x11, 6 * 4(sp) sw x12, 7 * 4(sp) sw x13, 8 * 4(sp) sw x14, 9 * 4(sp) sw x15, 10 * 4(sp) sw x16, 11 * 4(sp) sw x17, 12 * 4(sp) sw x28, 13 * 4(sp) sw x29, 14 * 4(sp) sw x30, 15 * 4(sp) sw x31, 16 * 4(sp) call trap_entry_c lw x1, 0 * 4(sp) lw x4, 1 * 4(sp) lw x5, 2 * 4(sp) lw x6, 3 * 4(sp) lw x7, 4 * 4(sp) lw x10, 5 * 4(sp) lw x11, 6 * 4(sp) lw x12, 7 * 4(sp) lw x13, 8 * 4(sp) lw x14, 9 * 4(sp) lw x15, 10 * 4(sp) lw x16, 11 * 4(sp) lw x17, 12 * 4(sp) lw x28, 13 * 4(sp) lw x29, 14 * 4(sp) lw x30, 15 * 4(sp) lw x31, 16 * 4(sp) addi sp, sp, 17 * 4 mret .global read_mcycle read_mcycle: csrr a0, mcycle ret
lizhirui/DreamCore
1,984
software/rtthread_app/bsp/MyRISCVCore/MyRISCVCore/board/startup.S
/* * Copyright (c) 2020-2020, AnnikaChip Development Team * * Change Logs: * Date Author Notes * 2020-11-08 lizhirui first version * */ #include "riscv_encoding.h" .section .init .globl _start .type _start,@function /** * Reset Handler called on controller reset */ _start: /* Initialize all registers */ li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10, 0 li x11, 0 li x12, 0 li x13, 0 li x14, 0 li x15, 0 li x16, 0 li x17, 0 li x18, 0 li x19, 0 li x20, 0 li x21, 0 li x22, 0 li x23, 0 li x24, 0 li x25, 0 li x26, 0 li x27, 0 li x28, 0 li x29, 0 li x30, 0 li x31, 0 /* Initialize Normal Stack defined in linker.ld*/ la sp, _sp /* ===== Startup Stage 1 ===== */ /* Disable Global Interrupt */ csrc CSR_MSTATUS, MSTATUS_MIE /* Initialize GP */ .option push .option norelax la gp, __global_pointer$ .option pop /* * Set Exception Entry MTVEC to exc_entry * Due to settings above, Exception and NMI * will share common entry. */ la t0, exc_entry csrw CSR_MTVEC, t0 /* ===== Startup Stage 2 ===== */ /* Disable mcycle and minstret counter */ csrci CSR_MCOUNTEREN, 0x5 /* Load data section */ la a0, _data_lma la a1, _data la a2, _edata bgeu a1, a2, 2f 1: lw t0, (a0) sw t0, (a1) addi a0, a0, 4 addi a1, a1, 4 bltu a1, a2, 1b 2: /* Clear bss section */ la a0, __bss_start la a1, _end bgeu a0, a1, 2f 1: sw zero, (a0) addi a0, a0, 4 bltu a0, a1, 1b 2: /* Call global constructors */ la a0, __libc_fini_array call atexit /* Call C/C++ constructor start up code */ call __libc_init_array /* ===== Call Main Function ===== */ /* argc = argv = envp = 0 */ li a0, 0 li a1, 0 li a2, 0 call entry 1: j 1b
lizhirui/DreamCore
6,553
software/rtthread_app/bsp/MyRISCVCore/MyRISCVCore/board/intexc.S
/* * Copyright (c) 2019 Nuclei Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /****************************************************************************** * \file intexc_gd32vf103.S * \brief NMSIS Interrupt and Exception Handling Template File * for Device gd32vf103 * \version V1.00 * \date 7 Jan 2020 * ******************************************************************************/ #include "riscv_encoding.h" /** * \brief Global interrupt disabled * \details * This function disable global interrupt. * \remarks * - All the interrupt requests will be ignored by CPU. */ .macro DISABLE_MIE csrc CSR_MSTATUS, MSTATUS_MIE .endm /** * \brief Macro for context save * \details * This macro save ABI defined caller saved registers in the stack. * \remarks * - This Macro could use to save context when you enter to interrupt * or exception */ /* Save caller registers */ .macro SAVE_CONTEXT /* Allocate stack space for context saving */ #ifndef __riscv_32e addi sp, sp, -20*REGBYTES #else addi sp, sp, -14*REGBYTES #endif /* __riscv_32e */ STORE x1, 0*REGBYTES(sp) STORE x4, 1*REGBYTES(sp) STORE x5, 2*REGBYTES(sp) STORE x6, 3*REGBYTES(sp) STORE x7, 4*REGBYTES(sp) STORE x10, 5*REGBYTES(sp) STORE x11, 6*REGBYTES(sp) STORE x12, 7*REGBYTES(sp) STORE x13, 8*REGBYTES(sp) STORE x14, 9*REGBYTES(sp) STORE x15, 10*REGBYTES(sp) #ifndef __riscv_32e STORE x16, 14*REGBYTES(sp) STORE x17, 15*REGBYTES(sp) STORE x28, 16*REGBYTES(sp) STORE x29, 17*REGBYTES(sp) STORE x30, 18*REGBYTES(sp) STORE x31, 19*REGBYTES(sp) #endif /* __riscv_32e */ .endm /** * \brief Macro for restore caller registers * \details * This macro restore ABI defined caller saved registers from stack. * \remarks * - You could use this macro to restore context before you want return * from interrupt or exeception */ /* Restore caller registers */ .macro RESTORE_CONTEXT LOAD x1, 0*REGBYTES(sp) LOAD x4, 1*REGBYTES(sp) LOAD x5, 2*REGBYTES(sp) LOAD x6, 3*REGBYTES(sp) LOAD x7, 4*REGBYTES(sp) LOAD x10, 5*REGBYTES(sp) LOAD x11, 6*REGBYTES(sp) LOAD x12, 7*REGBYTES(sp) LOAD x13, 8*REGBYTES(sp) LOAD x14, 9*REGBYTES(sp) LOAD x15, 10*REGBYTES(sp) #ifndef __riscv_32e LOAD x16, 14*REGBYTES(sp) LOAD x17, 15*REGBYTES(sp) LOAD x28, 16*REGBYTES(sp) LOAD x29, 17*REGBYTES(sp) LOAD x30, 18*REGBYTES(sp) LOAD x31, 19*REGBYTES(sp) /* De-allocate the stack space */ addi sp, sp, 20*REGBYTES #else /* De-allocate the stack space */ addi sp, sp, 14*REGBYTES #endif /* __riscv_32e */ .endm /** * \brief Macro for save necessary CSRs to stack * \details * This macro store MCAUSE, MEPC, MSUBM to stack. */ .macro SAVE_CSR_CONTEXT /* Store CSR mcause to stack using pushmcause */ //csrrwi x0, CSR_PUSHMCAUSE, 11 /* Store CSR mepc to stack using pushmepc */ //csrrwi x0, CSR_PUSHMEPC, 12 /* Store CSR msub to stack using pushmsub */ //csrrwi x0, CSR_PUSHMSUBM, 13 .endm /** * \brief Macro for restore necessary CSRs from stack * \details * This macro restore MSUBM, MEPC, MCAUSE from stack. 
*/ .macro RESTORE_CSR_CONTEXT //LOAD x5, 13*REGBYTES(sp) //csrw CSR_MSUBM, x5 LOAD x5, 12*REGBYTES(sp) csrw CSR_MEPC, x5 LOAD x5, 11*REGBYTES(sp) csrw CSR_MCAUSE, x5 .endm /** * \brief Exception/NMI Entry * \details * This function provide common entry functions for exception/nmi. * \remarks * This function provide a default exception/nmi entry. * ABI defined caller save register and some CSR registers * to be saved before enter interrupt handler and be restored before return. */ .section .text.trap /* In CLIC mode, the exeception entry must be 64bytes aligned */ .align 6 .global exc_entry .weak exc_entry exc_entry: /* Save the caller saving registers (context) */ SAVE_CONTEXT /* Save the necessary CSR registers */ SAVE_CSR_CONTEXT /* * Set the exception handler function arguments * argument 1: mcause value * argument 2: current stack point(SP) value */ csrr a0, mcause mv a1, sp /* * TODO: Call the exception handler function * By default, the function template is provided in * system_Device.c, you can adjust it as you want */ call core_exception_handler /* Restore the necessary CSR registers */ RESTORE_CSR_CONTEXT /* Restore the caller saving registers (context) */ RESTORE_CONTEXT /* Return to regular code */ mret /** * \brief Non-Vector Interrupt Entry * \details * This function provide common entry functions for handling * non-vector interrupts * \remarks * This function provide a default non-vector interrupt entry. * ABI defined caller save register and some CSR registers need * to be saved before enter interrupt handler and be restored before return. */ .section .text.irq /* In CLIC mode, the interrupt entry must be 4bytes aligned */ .align 2 .global irq_entry .weak irq_entry /* This label will be set to MTVT2 register */ irq_entry: /* Save the caller saving registers (context) */ SAVE_CONTEXT /* Save the necessary CSR registers */ SAVE_CSR_CONTEXT /* This special CSR read/write operation, which is actually * claim the CLIC to find its pending highest ID, if the ID * is not 0, then automatically enable the mstatus.MIE, and * jump to its vector-entry-label, and update the link register */ //csrrw ra, CSR_JALMNXTI, ra /* Critical section with interrupts disabled */ DISABLE_MIE /* Restore the necessary CSR registers */ RESTORE_CSR_CONTEXT /* Restore the caller saving registers (context) */ RESTORE_CONTEXT /* Return to regular code */ mret /* Default Handler for Exceptions / Interrupts */ .global default_intexc_handler .weak default_intexc_handler Undef_Handler: default_intexc_handler: 1: j 1b
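exc_entry above hands mcause in a0 and the trap-time stack pointer in a1 to core_exception_handler, whose implementation lives in the C part of the BSP (per the TODO comment, typically the system_<Device>.c template). As a hedged sketch only, a handler with that calling convention could separate interrupts from synchronous exceptions via the top bit of mcause on RV32; the macro names and mask value below are illustrative, not part of this file:

#include <stdint.h>

#define MCAUSE_INT_FLAG   (1UL << 31)  /* RV32: MSB of mcause set => interrupt  */
#define MCAUSE_CODE_MASK  0x7FFUL      /* low bits carry the cause/interrupt id */

/* Sketch only: matches the call core_exception_handler(mcause, sp) made from
 * exc_entry/irq_entry above; the real implementation is BSP-specific. */
unsigned long core_exception_handler(unsigned long mcause, unsigned long sp)
{
    if (mcause & MCAUSE_INT_FLAG) {
        /* asynchronous interrupt: dispatch on (mcause & MCAUSE_CODE_MASK) */
    } else {
        /* synchronous exception: illegal instruction, load/store fault, ... */
    }
    return sp;  /* hand the (possibly unchanged) stack pointer back */
}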
lizhirui/DreamCore
5,089
software/rtthread_app/libcpu/common/context_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018/10/28 Bernard The unify RISC-V porting implementation * 2018/12/27 Jesven Add SMP support */ #include "cpuport.h" #ifdef RT_USING_SMP #define rt_hw_interrupt_disable rt_hw_local_irq_disable #define rt_hw_interrupt_enable rt_hw_local_irq_enable #endif /* * rt_base_t rt_hw_interrupt_disable(void); */ .globl rt_hw_interrupt_disable rt_hw_interrupt_disable: csrrci a0, mstatus, 8 ret /* * void rt_hw_interrupt_enable(rt_base_t level); */ .globl rt_hw_interrupt_enable rt_hw_interrupt_enable: csrw mstatus, a0 ret /* * #ifdef RT_USING_SMP * void rt_hw_context_switch_to(rt_ubase_t to, stuct rt_thread *to_thread); * #else * void rt_hw_context_switch_to(rt_ubase_t to); * #endif * a0 --> to * a1 --> to_thread */ .globl rt_hw_context_switch_to rt_hw_context_switch_to: LOAD sp, (a0) #ifdef RT_USING_SMP mv a0, a1 jal rt_cpus_lock_status_restore #endif LOAD a0, 2 * REGBYTES(sp) csrw mstatus, a0 j rt_hw_context_switch_exit /* * #ifdef RT_USING_SMP * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread); * #else * void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to); * #endif * * a0 --> from * a1 --> to * a2 --> to_thread */ .globl rt_hw_context_switch rt_hw_context_switch: /* saved from thread context * x1/ra -> sp(0) * x1/ra -> sp(1) * mstatus.mie -> sp(2) * x(i) -> sp(i-4) */ addi sp, sp, -32 * REGBYTES STORE sp, (a0) STORE x1, 0 * REGBYTES(sp) STORE x1, 1 * REGBYTES(sp) csrr a0, mstatus andi a0, a0, 8 beqz a0, save_mpie li a0, 0x80 save_mpie: STORE a0, 2 * REGBYTES(sp) STORE x4, 4 * REGBYTES(sp) STORE x5, 5 * REGBYTES(sp) STORE x6, 6 * REGBYTES(sp) STORE x7, 7 * REGBYTES(sp) STORE x8, 8 * REGBYTES(sp) STORE x9, 9 * REGBYTES(sp) STORE x10, 10 * REGBYTES(sp) STORE x11, 11 * REGBYTES(sp) STORE x12, 12 * REGBYTES(sp) STORE x13, 13 * REGBYTES(sp) STORE x14, 14 * REGBYTES(sp) STORE x15, 15 * REGBYTES(sp) STORE x16, 16 * REGBYTES(sp) STORE x17, 17 * REGBYTES(sp) STORE x18, 18 * REGBYTES(sp) STORE x19, 19 * REGBYTES(sp) STORE x20, 20 * REGBYTES(sp) STORE x21, 21 * REGBYTES(sp) STORE x22, 22 * REGBYTES(sp) STORE x23, 23 * REGBYTES(sp) STORE x24, 24 * REGBYTES(sp) STORE x25, 25 * REGBYTES(sp) STORE x26, 26 * REGBYTES(sp) STORE x27, 27 * REGBYTES(sp) STORE x28, 28 * REGBYTES(sp) STORE x29, 29 * REGBYTES(sp) STORE x30, 30 * REGBYTES(sp) STORE x31, 31 * REGBYTES(sp) /* restore to thread context * sp(0) -> epc; * sp(1) -> ra; * sp(i) -> x(i+2) */ LOAD sp, (a1) #ifdef RT_USING_SMP mv a0, a2 jal rt_cpus_lock_status_restore #endif /*RT_USING_SMP*/ j rt_hw_context_switch_exit #ifdef RT_USING_SMP /* * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread); * * a0 --> context * a1 --> from * a2 --> to * a3 --> to_thread */ .globl rt_hw_context_switch_interrupt rt_hw_context_switch_interrupt: STORE a0, 0(a1) LOAD sp, 0(a2) move a0, a3 call rt_cpus_lock_status_restore j rt_hw_context_switch_exit #endif .global rt_hw_context_switch_exit rt_hw_context_switch_exit: #ifdef RT_USING_SMP #ifdef RT_USING_SIGNALS mv a0, sp csrr t0, mhartid /* switch interrupt stack of current cpu */ la sp, __stack_start__ addi t1, t0, 1 li t2, __STACKSIZE__ mul t1, t1, t2 add sp, sp, t1 /* sp = (cpuid + 1) * __STACKSIZE__ + __stack_start__ */ call rt_signal_check mv sp, a0 #endif #endif /* resw ra to mepc */ LOAD a0, 0 * REGBYTES(sp) csrw mepc, a0 LOAD x1, 1 * REGBYTES(sp) li t0, 0x00001800 csrw mstatus, t0 LOAD a0, 2 
* REGBYTES(sp) csrs mstatus, a0 LOAD x4, 4 * REGBYTES(sp) LOAD x5, 5 * REGBYTES(sp) LOAD x6, 6 * REGBYTES(sp) LOAD x7, 7 * REGBYTES(sp) LOAD x8, 8 * REGBYTES(sp) LOAD x9, 9 * REGBYTES(sp) LOAD x10, 10 * REGBYTES(sp) LOAD x11, 11 * REGBYTES(sp) LOAD x12, 12 * REGBYTES(sp) LOAD x13, 13 * REGBYTES(sp) LOAD x14, 14 * REGBYTES(sp) LOAD x15, 15 * REGBYTES(sp) LOAD x16, 16 * REGBYTES(sp) LOAD x17, 17 * REGBYTES(sp) LOAD x18, 18 * REGBYTES(sp) LOAD x19, 19 * REGBYTES(sp) LOAD x20, 20 * REGBYTES(sp) LOAD x21, 21 * REGBYTES(sp) LOAD x22, 22 * REGBYTES(sp) LOAD x23, 23 * REGBYTES(sp) LOAD x24, 24 * REGBYTES(sp) LOAD x25, 25 * REGBYTES(sp) LOAD x26, 26 * REGBYTES(sp) LOAD x27, 27 * REGBYTES(sp) LOAD x28, 28 * REGBYTES(sp) LOAD x29, 29 * REGBYTES(sp) LOAD x30, 30 * REGBYTES(sp) LOAD x31, 31 * REGBYTES(sp) addi sp, sp, 32 * REGBYTES mret
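The save and restore paths above fix the frame layout: word 0 is loaded into mepc, word 1 into the thread's ra, word 2 is the MPIE value OR-ed into mstatus, and words 4..31 hold x4..x31 (slot 10 is a0, the thread argument). Below is a hedged C sketch of how a brand-new thread's initial frame could be laid out so that rt_hw_context_switch_exit can consume it; RT-Thread's actual rt_hw_stack_init plays this role, and the function and type names here are illustrative, assuming RV32 (REGBYTES == 4):

#include <stdint.h>
#include <string.h>

#define FRAME_WORDS 32               /* matches "addi sp, sp, -32 * REGBYTES" */
typedef uint32_t reg_t;              /* assumption: RV32, REGBYTES == 4       */

/* Sketch only: build an initial frame the restore path above can pop. */
static reg_t *stack_frame_init(void (*entry)(void *), void *param,
                               reg_t *stack_top, void (*exit_fn)(void))
{
    reg_t *frame = stack_top - FRAME_WORDS;

    memset(frame, 0, FRAME_WORDS * sizeof(reg_t));
    frame[0]  = (reg_t)entry;    /* loaded into mepc, becomes PC after mret   */
    frame[1]  = (reg_t)exit_fn;  /* restored into ra: where entry() returns   */
    frame[2]  = 0x80;            /* MPIE bit csrs'ed into mstatus on exit     */
    frame[10] = (reg_t)param;    /* slot 10 is x10/a0: the thread argument    */

    return frame;                /* value to store as the thread's saved sp   */
}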
lizhirui/DreamCore
5,319
software/rtthread_app/libcpu/risc-v/myriscvcore/context_gcc.S
/* * Copyright (c) 2019-Present Nuclei Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2020/03/26 Huaqi First Nuclei RISC-V porting implementation */ #include "riscv_encoding.h" #ifndef __riscv_32e #define RT_SAVED_REGNUM 30 #else #define RT_SAVED_REGNUM 14 #endif #define RT_CONTEXT_SIZE (RT_SAVED_REGNUM * REGBYTES) .extern rt_interrupt_from_thread .extern rt_interrupt_to_thread .section .text /* * void rt_hw_context_switch_to(rt_ubase_t to); * a0 --> to_thread */ .globl rt_hw_context_switch_to /* Start the first task. This also clears the bit that indicates the FPU is in use in case the FPU was used before the scheduler was started - which would otherwise result in the unnecessary leaving of space in the stack for lazy saving of FPU registers. */ .align 3 rt_hw_context_switch_to: /* Setup Interrupt Stack using The stack that was used by entry() before the scheduler is started is no longer required after the scheduler is started. Interrupt stack pointer is stored in CSR_MSCRATCH */ la t0, _sp csrw CSR_MSCRATCH, t0 LOAD sp, 0x0(a0) /* Read sp from first TCB member(a0) */ /* Pop PC from stack and set MEPC */ LOAD t0, 0 * REGBYTES(sp) csrw CSR_MEPC, t0 /* Pop mstatus from stack and set it */ LOAD t0, (RT_SAVED_REGNUM - 1) * REGBYTES(sp) csrw CSR_MSTATUS, t0 /* Interrupt still disable here */ /* Restore Registers from Stack */ LOAD x1, 1 * REGBYTES(sp) /* RA */ LOAD x5, 2 * REGBYTES(sp) LOAD x6, 3 * REGBYTES(sp) LOAD x7, 4 * REGBYTES(sp) LOAD x8, 5 * REGBYTES(sp) LOAD x9, 6 * REGBYTES(sp) LOAD x10, 7 * REGBYTES(sp) LOAD x11, 8 * REGBYTES(sp) LOAD x12, 9 * REGBYTES(sp) LOAD x13, 10 * REGBYTES(sp) LOAD x14, 11 * REGBYTES(sp) LOAD x15, 12 * REGBYTES(sp) #ifndef __riscv_32e LOAD x16, 13 * REGBYTES(sp) LOAD x17, 14 * REGBYTES(sp) LOAD x18, 15 * REGBYTES(sp) LOAD x19, 16 * REGBYTES(sp) LOAD x20, 17 * REGBYTES(sp) LOAD x21, 18 * REGBYTES(sp) LOAD x22, 19 * REGBYTES(sp) LOAD x23, 20 * REGBYTES(sp) LOAD x24, 21 * REGBYTES(sp) LOAD x25, 22 * REGBYTES(sp) LOAD x26, 23 * REGBYTES(sp) LOAD x27, 24 * REGBYTES(sp) LOAD x28, 25 * REGBYTES(sp) LOAD x29, 26 * REGBYTES(sp) LOAD x30, 27 * REGBYTES(sp) LOAD x31, 28 * REGBYTES(sp) #endif addi sp, sp, RT_CONTEXT_SIZE mret .align 2 .global msip_handler msip_handler: addi sp, sp, -RT_CONTEXT_SIZE STORE x1, 1 * REGBYTES(sp) /* RA */ STORE x5, 2 * REGBYTES(sp) STORE x6, 3 * REGBYTES(sp) STORE x7, 4 * REGBYTES(sp) STORE x8, 5 * REGBYTES(sp) STORE x9, 6 * REGBYTES(sp) STORE x10, 7 * REGBYTES(sp) STORE x11, 8 * REGBYTES(sp) STORE x12, 9 * REGBYTES(sp) STORE x13, 10 * REGBYTES(sp) STORE x14, 11 * REGBYTES(sp) STORE x15, 12 * REGBYTES(sp) #ifndef __riscv_32e STORE x16, 13 * REGBYTES(sp) STORE x17, 14 * REGBYTES(sp) STORE x18, 15 * REGBYTES(sp) STORE x19, 16 * REGBYTES(sp) STORE x20, 17 * REGBYTES(sp) STORE x21, 18 * REGBYTES(sp) STORE x22, 19 * REGBYTES(sp) STORE x23, 20 * REGBYTES(sp) STORE x24, 21 * REGBYTES(sp) STORE x25, 22 * REGBYTES(sp) STORE x26, 23 * REGBYTES(sp) STORE x27, 24 * REGBYTES(sp) STORE x28, 25 * REGBYTES(sp) STORE x29, 26 * REGBYTES(sp) STORE x30, 27 * REGBYTES(sp) STORE x31, 28 * REGBYTES(sp) #endif /* Push mstatus to stack */ csrr t0, CSR_MSTATUS STORE t0, (RT_SAVED_REGNUM - 1) * REGBYTES(sp) /* Push additional registers */ /* Store sp to task stack */ LOAD t0, rt_interrupt_from_thread STORE sp, 0(t0) csrr t0, CSR_MEPC STORE t0, 0(sp) jal rt_hw_taskswitch /* Switch task context */ LOAD t0, rt_interrupt_to_thread LOAD sp, 0x0(t0) /* Pop PC from stack and set MEPC */ LOAD t0, 0 * REGBYTES(sp) 
csrw CSR_MEPC, t0 /* Pop additional registers */ /* Pop mstatus from stack and set it */ LOAD t0, (RT_SAVED_REGNUM - 1) * REGBYTES(sp) csrw CSR_MSTATUS, t0 /* Interrupt still disable here */ /* Restore Registers from Stack */ LOAD x1, 1 * REGBYTES(sp) /* RA */ LOAD x5, 2 * REGBYTES(sp) LOAD x6, 3 * REGBYTES(sp) LOAD x7, 4 * REGBYTES(sp) LOAD x8, 5 * REGBYTES(sp) LOAD x9, 6 * REGBYTES(sp) LOAD x10, 7 * REGBYTES(sp) LOAD x11, 8 * REGBYTES(sp) LOAD x12, 9 * REGBYTES(sp) LOAD x13, 10 * REGBYTES(sp) LOAD x14, 11 * REGBYTES(sp) LOAD x15, 12 * REGBYTES(sp) #ifndef __riscv_32e LOAD x16, 13 * REGBYTES(sp) LOAD x17, 14 * REGBYTES(sp) LOAD x18, 15 * REGBYTES(sp) LOAD x19, 16 * REGBYTES(sp) LOAD x20, 17 * REGBYTES(sp) LOAD x21, 18 * REGBYTES(sp) LOAD x22, 19 * REGBYTES(sp) LOAD x23, 20 * REGBYTES(sp) LOAD x24, 21 * REGBYTES(sp) LOAD x25, 22 * REGBYTES(sp) LOAD x26, 23 * REGBYTES(sp) LOAD x27, 24 * REGBYTES(sp) LOAD x28, 25 * REGBYTES(sp) LOAD x29, 26 * REGBYTES(sp) LOAD x30, 27 * REGBYTES(sp) LOAD x31, 28 * REGBYTES(sp) #endif addi sp, sp, RT_CONTEXT_SIZE mret
lizhirui/DreamCore
4,618
software/rtthread_app/libcpu/risc-v/myriscvcore/interrupt_gcc.S
/* * Copyright (c) 2019-Present Nuclei Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2020/03/26 hqfang First Nuclei RISC-V porting implementation */ #include "riscv_encoding.h" .section .text.entry .align 8 /** * \brief Global interrupt disabled * \details * This function disable global interrupt. * \remarks * - All the interrupt requests will be ignored by CPU. */ .macro DISABLE_MIE csrc CSR_MSTATUS, MSTATUS_MIE .endm /** * \brief Macro for context save * \details * This macro save ABI defined caller saved registers in the stack. * \remarks * - This Macro could use to save context when you enter to interrupt * or exception */ /* Save caller registers */ .macro SAVE_CONTEXT csrrw sp, CSR_MSCRATCH, sp /* Allocate stack space for context saving */ #ifndef __riscv_32e addi sp, sp, -20*REGBYTES #else addi sp, sp, -14*REGBYTES #endif /* __riscv_32e */ STORE x1, 0*REGBYTES(sp) STORE x4, 1*REGBYTES(sp) STORE x5, 2*REGBYTES(sp) STORE x6, 3*REGBYTES(sp) STORE x7, 4*REGBYTES(sp) STORE x10, 5*REGBYTES(sp) STORE x11, 6*REGBYTES(sp) STORE x12, 7*REGBYTES(sp) STORE x13, 8*REGBYTES(sp) STORE x14, 9*REGBYTES(sp) STORE x15, 10*REGBYTES(sp) #ifndef __riscv_32e STORE x16, 14*REGBYTES(sp) STORE x17, 15*REGBYTES(sp) STORE x28, 16*REGBYTES(sp) STORE x29, 17*REGBYTES(sp) STORE x30, 18*REGBYTES(sp) STORE x31, 19*REGBYTES(sp) #endif /* __riscv_32e */ .endm /** * \brief Macro for restore caller registers * \details * This macro restore ABI defined caller saved registers from stack. * \remarks * - You could use this macro to restore context before you want return * from interrupt or exeception */ /* Restore caller registers */ .macro RESTORE_CONTEXT LOAD x1, 0*REGBYTES(sp) LOAD x4, 1*REGBYTES(sp) LOAD x5, 2*REGBYTES(sp) LOAD x6, 3*REGBYTES(sp) LOAD x7, 4*REGBYTES(sp) LOAD x10, 5*REGBYTES(sp) LOAD x11, 6*REGBYTES(sp) LOAD x12, 7*REGBYTES(sp) LOAD x13, 8*REGBYTES(sp) LOAD x14, 9*REGBYTES(sp) LOAD x15, 10*REGBYTES(sp) #ifndef __riscv_32e LOAD x16, 14*REGBYTES(sp) LOAD x17, 15*REGBYTES(sp) LOAD x28, 16*REGBYTES(sp) LOAD x29, 17*REGBYTES(sp) LOAD x30, 18*REGBYTES(sp) LOAD x31, 19*REGBYTES(sp) /* De-allocate the stack space */ addi sp, sp, 20*REGBYTES #else /* De-allocate the stack space */ addi sp, sp, 14*REGBYTES #endif /* __riscv_32e */ csrrw sp, CSR_MSCRATCH, sp .endm /** * \brief Macro for save necessary CSRs to stack * \details * This macro store MCAUSE, MEPC, to stack. */ .macro SAVE_CSR_CONTEXT /* Store CSR mcause to stack using pushmcause */ csrr x5, CSR_MCAUSE STORE x5, 11*REGBYTES(sp) /* Store CSR mepc to stack using pushmepc */ csrr x5, CSR_MEPC STORE x5, 12*REGBYTES(sp) /* Reserved */ STORE x0, 13*REGBYTES(sp) .endm /** * \brief Macro for restore necessary CSRs from stack * \details * This macro restore MEPC, MCAUSE from stack. */ .macro RESTORE_CSR_CONTEXT LOAD x5, 12*REGBYTES(sp) csrw CSR_MEPC, x5 LOAD x5, 11*REGBYTES(sp) csrw CSR_MCAUSE, x5 .endm /** * \brief Exception/NMI Entry * \details * This function provide common entry functions for exception/nmi. * \remarks * This function provide a default exception/nmi entry. * ABI defined caller save register and some CSR registers * to be saved before enter interrupt handler and be restored before return. 
*/ .section .text.trap /* In CLIC mode, the exeception entry must be 64bytes aligned */ .align 6 .global riscv_trap_handler_entry riscv_trap_handler_entry: /* Save the caller saving registers (context) */ SAVE_CONTEXT /* Save the necessary CSR registers */ SAVE_CSR_CONTEXT /* * Set the exception handler function arguments * argument 1: mcause value * argument 2: current stack point(SP) value */ csrr a0, mcause mv a1, sp addi t0, zero, 1 slli t0, t0, 31 ori t0, t0, 3 beq a0, t0, msip_handler_entry /* * TODO: Call the exception handler function * By default, the function template is provided in * system_Device.c, you can adjust it as you want */ call core_exception_handler /* Restore the necessary CSR registers */ RESTORE_CSR_CONTEXT /* Restore the caller saving registers (context) */ RESTORE_CONTEXT /* Return to regular code */ mret msip_handler_entry: RESTORE_CSR_CONTEXT RESTORE_CONTEXT j msip_handler
ljessendk/CanFestival
7,288
examples/gene_SYNC_HCS12/vectors.s
;;/* M68HC11 Interrupt vectors table ;; Copyright (C) 1999 Free Software Foundation, Inc. ;; Written by Stephane Carrez (stcarrez@worldnet.fr) ;; ;;This file is free software; you can redistribute it and/or modify it ;;under the terms of the GNU General Public License as published by the ;;Free Software Foundation; either version 2, or (at your option) any ;;later version. ;; ;;In addition to the permissions in the GNU General Public License, the ;;Free Software Foundation gives you unlimited permission to link the ;;compiled version of this file with other programs, and to distribute ;;those programs without any restriction coming from the use of this ;;file. (The General Public License restrictions do apply in other ;;respects; for example, they cover modification of the file, and ;;distribution when not linked into another program.) ;; ;;This file is distributed in the hope that it will be useful, but ;;WITHOUT ANY WARRANTY; without even the implied warranty of ;;MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;;General Public License for more details. ;; ;;You should have received a copy of the GNU General Public License ;;along with this program; see the file COPYING. If not, write to ;;the Free Software Foundation, 59 Temple Place - Suite 330, ;;Boston, MA 02111-1307, USA. ;; ---------------------------------------------- ;; Modified by Francis Dupin for MC9S12DP256. April 2003 ;; francis.dupin@inrets.fr ;;*/ .sect .text .globl _start .globl can4HdlTra .globl can4HdlRcv .globl can4HdlErr .globl can4HdlWup .globl can3HdlTra .globl can3HdlRcv .globl can3HdlErr .globl can3HdlWup .globl can2HdlTra .globl can2HdlRcv .globl can2HdlErr .globl can2HdlWup .globl can1HdlTra .globl can1HdlRcv .globl can1HdlErr .globl can1HdlWup .globl can0HdlTra .globl can0HdlRcv .globl can0HdlErr .globl can0HdlWup .globl timerOvflHdl .globl timer3IC .globl timer2IC .globl timer1IC .globl timer0IC ;; Default interrupt handler. .sect .text def: rti ;; ;; Interrupt vectors are in a specific section that is ;; mapped at 0xff00. For the example program, the reset handler ;; points to the generic crt0 entry point. 
;; .sect .vectors .globl vectors vectors: .word def ; ff00 .word def ; ff02 .word def ; ff04 .word def ; ff06 .word def ; ff08 .word def ; ff0a .word def ; ff0c .word def ; ff0e .word def ; ff10 .word def ; ff12 .word def ; ff14 .word def ; ff16 .word def ; ff18 .word def ; ff1a .word def ; ff1c .word def ; ff1e .word def ; ff20 .word def ; ff22 .word def ; ff24 .word def ; ff26 .word def ; ff28 .word def ; ff2a .word def ; ff2c .word def ; ff2e .word def ; ff30 .word def ; ff32 .word def ; ff34 .word def ; ff36 .word def ; ff38 .word def ; ff3a .word def ; ff3c .word def ; ff3e .word def ; ff40 .word def ; ff42 .word def ; ff44 .word def ; ff46 .word def ; ff48 .word def ; ff4a .word def ; ff4c .word def ; ff4e .word def ; ff50 .word def ; ff52 .word def ; ff54 .word def ; ff56 .word def ; ff58 .word def ; ff5a .word def ; ff5c .word def ; ff5e .word def ; ff60 .word def ; ff62 .word def ; ff64 .word def ; ff66 .word def ; ff68 .word def ; ff6a .word def ; ff6c .word def ; ff6e .word def ; ff70 .word def ; ff72 .word def ; ff74 .word def ; ff76 .word def ; ff78 .word def ; ff7a .word def ; ff7c .word def ; ff7e .word def ; ff80 .word def ; ff82 .word def ; ff84 .word def ; ff86 .word def ; ff88 .word def ; ff8a .word def ; ff8c .word def ; ff8e ;; MSCAN .word can4HdlTra ; ff90 (MSCAN4 Transmit) .word can4HdlRcv ; ff92 (MSCAN4 Receive) .word can4HdlErr ; ff94 (MSCAN4 Errors) .word can4HdlWup ; ff96 (MSCAN4 Wake up) .word can3HdlTra ; ff98 (MSCAN3 Transmit) .word can3HdlRcv ; ff9a (MSCAN3 Receive) .word can3HdlErr ; ff9c (MSCAN3 Errors) .word can3HdlWup ; ff9e (MSCAN3 Wake up) .word can2HdlTra ; ffa0 (MSCAN2 Transmit) .word can2HdlRcv ; ffa2 (MSCAN2 Receive) .word can2HdlErr ; ffa4 (MSCAN2 Errors) .word can2HdlWup ; ffa6 (MSCAN2 Wake up) .word can1HdlTra ; ffa8 (MSCAN1 Transmit) .word can1HdlRcv ; ffaa (MSCAN1 Receive) .word can1HdlErr ; ffac (MSCAN1 Errors) .word can1HdlWup ; ffae (MSCAN1 Wake up) .word can0HdlTra ; ffb0 (MSCAN0 Transmit) .word can0HdlRcv ; ffb2 (MSCAN0 Receive) .word can0HdlErr ; ffb4 (MSCAN0 Errors) .word can0HdlWup ; ffb6 (MSCAN0 Wake up) .word def ; ffb8 .word def ; ffba .word def ; ffbc .word def ; ffbe .word def ; ffc0 .word def ; ffc2 .word def ; ffc4 .word def ; ffc6 .word def ; ffc8 .word def ; ffca (Modulus Down Counter underflow) .word def ; ffcc .word def ; ffce .word def ; ffd0 .word def ; ffd2 .word def ; ffd4 ;; SCI .word def ; ffd6 ;; SPI .word def ; ffd8 .word def ; ffda (PAII) .word def ; ffdc (PAOVI) .word def ;ffde (TOI) ;; Timer Output Compare .word def ; ffe0 Timer Chanel 7 .word def ; ffe2 Timer Chanel 6 .word def ; ffe4 Timer Chanel 5 .word timer4Hdl ; ffe6 Timer Chanel 4 used by Canopen .word def ; ffe8 Timer Chanel 3 .word def ; ffea Timer Chanel 2 .word def ; ffec Timer Chanel 1 .word def ; ffee Timer Chanel 0 ;; Misc .word def ; fff0 (RTII) .word def ; fff2 (IRQ) .word def ; fff4 (XIRQ) .word def ; fff6 (SWI) .word def ; fff8 (ILL) .word def ; fffa (COP Failure) .word def ; fffc (COP Clock monitor) .word _start ; fffe (reset)
lizhirui/DreamCore
3,020
software/rtthread_app/components/lwp/arch/arm/cortex-m7/lwp_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-10-30 heyuanjie first version */ .cpu cortex-m7 .syntax unified .thumb .text /* * void* lwp_get_sys_api(rt_uint32_t number); */ .global lwp_get_sys_api .global lwp_get_kernel_sp .global lwp_set_kernel_sp /* * void lwp_user_entry(args, text, data); */ .global lwp_user_entry .type lwp_user_entry, % function lwp_user_entry: PUSH {R0-R3} @; push text&data addr. MOV R0, SP @; v1 = SP BL lwp_set_kernel_sp @; lwp_set_kernel_sp(v1) @; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 @; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} @; pop app address to R1. @; set data address. MOV R9, R2 @; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 /* * void SVC_Handler(void); */ .global SVC_Handler .type SVC_Handler, % function SVC_Handler: PUSH {LR} @; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} @; push app SP. @; get SVC number. mov R0, R7 @; get kernel system API BL lwp_get_sys_api PUSH {R0} @; push api @; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} @; pop api to R2. POP {R1} @; pop app SP to R1. stmfd r0!, {r1} @; save app SP to kernel SP @;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} @; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} @; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} @; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] @; update LR STR R2, [R0, #24] @; update api to PC MSR PSP, R0 @; update SP, API is executed with kernel SP @; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} @; 0xFFFFFFED ORR LR, LR, #0x10 BX LR /* * void svc_exit(void); */ .global svc_exit .type svc_exit, % function svc_exit: @; get user SP. PUSH {R0} @; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] @; load pc add r3, #32 @; exception_stack_frame size MSR PSP, R3 @; restore app stack pointer @; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 @; return to lwp. ORR R1, R1, #0x01 @; only Thumb-mode. BX R1 @; return to user app.
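SVC_Handler above takes the system-call number from R7 (mov R0, R7) and resolves it through lwp_get_sys_api before running the API on the kernel stack. On the user side, a call therefore has to place the number in R7 and the arguments in R0-R3 before issuing SVC. A hedged sketch for a GCC/Clang Cortex-M (Thumb) toolchain follows; the stub name and the one-argument shape are illustrative, not part of this file:

/* Sketch only: user-side system call stub matching the R7-based dispatch
 * in SVC_Handler above. */
static inline long lwp_syscall1(long number, long arg0)
{
    register long r0 __asm__("r0") = arg0;    /* first argument / return value */
    register long r7 __asm__("r7") = number;  /* picked up via "mov R0, R7"    */

    __asm__ volatile("svc 0"
                     : "+r"(r0)
                     : "r"(r7)
                     : "memory");
    return r0;
}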
lizhirui/DreamCore
2,972
software/rtthread_app/components/lwp/arch/arm/cortex-m7/lwp_iar.S
;/* ; * Copyright (c) 2006-2018, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2018-10-30 heyuanjie first version ; */ SECTION .text:CODE(2) THUMB REQUIRE8 PRESERVE8 ;/* ; * void* lwp_get_sys_api(rt_uint32_t number); ; */ IMPORT lwp_get_sys_api IMPORT lwp_get_kernel_sp IMPORT lwp_set_kernel_sp ;/* ; * void lwp_user_entry(args, text, data); ; */ EXPORT lwp_user_entry lwp_user_entry: PUSH {R0-R3} ; push text&data addr. MOV R0, SP ; v1 = SP BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1) ; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 ; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} ; pop app address to R1. ; set data address. MOV R9, R2 ; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 ;/* ; * void SVC_Handler(void); ; */ EXPORT SVC_Handler SVC_Handler: PUSH {LR} ; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} ; push app SP. ; get SVC number. mov R0, R7 ; get kernel system API BL lwp_get_sys_api PUSH {R0} ; push api ; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} ; pop api to R2. POP {R1} ; pop app SP to R1. stmfd r0!, {r1} ; save app SP to kernel SP ;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} ; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] ; update LR STR R2, [R0, #24] ; update api to PC MSR PSP, R0 ; update SP, API is executed with kernel SP ; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} ; 0xFFFFFFED ORR LR, LR, #0x10 BX LR ;/* ; * void svc_exit(void); ; */ EXPORT svc_exit svc_exit: ; get user SP. PUSH {R0} ; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] ; load pc add r3, r3, #32 ; exception_stack_frame size MSR PSP, R3 ; restore app stack pointer ; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 ; return to lwp. ORR R1, R1, #0x01 ; only Thumb-mode. BX R1 ; return to user app. END
lizhirui/DreamCore
3,071
software/rtthread_app/components/lwp/arch/arm/cortex-m7/lwp_rvds.S
;/* ; * Copyright (c) 2006-2018, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2018-10-30 heyuanjie first version ; */ AREA |.text|, CODE, READONLY, ALIGN=2 THUMB REQUIRE8 PRESERVE8 ;/* ; * void* lwp_get_sys_api(rt_uint32_t number); ; */ IMPORT lwp_get_sys_api IMPORT lwp_get_kernel_sp IMPORT lwp_set_kernel_sp ;/* ; * void lwp_user_entry(args, text, data); ; */ lwp_user_entry PROC EXPORT lwp_user_entry PUSH {R0-R3} ; push text&data addr. MOV R0, SP ; v1 = SP BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1) ; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 ; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} ; pop app address to R1. ; set data address. MOV R9, R2 ; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 ; never reach here! ENDP ;/* ; * void SVC_Handler(void); ; */ SVC_Handler PROC EXPORT SVC_Handler PUSH {LR} ; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} ; push app SP. ; get SVC number. mov R0, R7 ; get kernel system API BL lwp_get_sys_api PUSH {R0} ; push api ; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} ; pop api to R2. POP {R1} ; pop app SP to R1. stmfd r0!, {r1} ; save app SP to kernel SP ;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} ; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] ; update LR STR R2, [R0, #24] ; update api to PC MSR PSP, R0 ; update SP, API is executed with kernel SP ; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} ; 0xFFFFFFED ORR LR, LR, #0x10 BX LR ENDP ;/* ; * void svc_exit(void); ; */ svc_exit PROC EXPORT svc_exit ; get user SP. PUSH {R0} ; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] ; load pc add r3, #32 ; exception_stack_frame size MSR PSP, R3 ; restore app stack pointer ; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 ; return to lwp. ORR R1, R1, #0x01 ; only Thumb-mode. BX R1 ; return to user app. ENDP ALIGN END
lizhirui/DreamCore
1,381
software/rtthread_app/components/lwp/arch/arm/cortex-a/lwp_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-12-10 Jesven first version */ #define Mode_USR 0x10 #define Mode_FIQ 0x11 #define Mode_IRQ 0x12 #define Mode_SVC 0x13 #define Mode_MON 0x16 #define Mode_ABT 0x17 #define Mode_UDF 0x1B #define Mode_SYS 0x1F #define A_Bit 0x100 #define I_Bit 0x80 @; when I bit is set, IRQ is disabled #define F_Bit 0x40 @; when F bit is set, FIQ is disabled #define T_Bit 0x20 .cpu cortex-a9 .syntax unified .text /* * void lwp_user_entry(args, text, data); */ .global lwp_user_entry .type lwp_user_entry, % function lwp_user_entry: mrs r9, cpsr bic r9, #0x1f orr r9, #Mode_USR cpsid i msr spsr, r9 /* set data address. */ mov r9, r2 movs pc, r1 /* * void SVC_Handler(void); */ .global vector_swi .type vector_swi, % function vector_swi: push {lr} mrs lr, spsr push {r4, r5, lr} cpsie i push {r0 - r3, r12} and r0, r7, #0xff bl lwp_get_sys_api cmp r0, #0 /* r0 = api */ mov lr, r0 pop {r0 - r3, r12} beq svc_exit blx lr svc_exit: cpsid i pop {r4, r5, lr} msr spsr_cxsf, lr pop {lr} movs pc, lr
lizhirui/DreamCore
1,545
software/rtthread_app/components/lwp/arch/arm/arm926/lwp_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-12-10 Jesven first version */ #define Mode_USR 0x10 #define Mode_FIQ 0x11 #define Mode_IRQ 0x12 #define Mode_SVC 0x13 #define Mode_MON 0x16 #define Mode_ABT 0x17 #define Mode_UDF 0x1B #define Mode_SYS 0x1F #define A_Bit 0x100 #define I_Bit 0x80 @; when I bit is set, IRQ is disabled #define F_Bit 0x40 @; when F bit is set, FIQ is disabled #define T_Bit 0x20 .cpu arm9 .syntax unified .text /* * void lwp_user_entry(args, text, data); */ .global lwp_user_entry .type lwp_user_entry, % function lwp_user_entry: mrs r9, cpsr mov r8, r9 bic r9, #0x1f orr r9, #Mode_USR orr r8, #I_Bit msr cpsr_c, r8 msr spsr, r9 /* set data address. */ mov r9, r2 movs pc, r1 /* * void SVC_Handler(void); */ .global SVC_Handler .type SVC_Handler, % function SVC_Handler: push {lr} mrs lr, spsr push {r4, r5, lr} mrs r4, cpsr bic r4, #I_Bit msr cpsr_c, r4 push {r0 - r3, r12} and r0, r7, #0xff bl lwp_get_sys_api cmp r0, #0 /* r0 = api */ mov r4, r0 pop {r0 - r3, r12} beq svc_exit ldr lr, = svc_exit bx r4 svc_exit: mrs r4, cpsr orr r4, #I_Bit msr cpsr_c, r4 pop {r4, r5, lr} msr spsr_cxsf, lr pop {lr} movs pc, lr
lizhirui/DreamCore
3,020
software/rtthread_app/components/lwp/arch/arm/cortex-m3/lwp_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-10-30 heyuanjie first version */ .cpu cortex-m3 .syntax unified .thumb .text /* * void* lwp_get_sys_api(rt_uint32_t number); */ .global lwp_get_sys_api .global lwp_get_kernel_sp .global lwp_set_kernel_sp /* * void lwp_user_entry(args, text, data); */ .global lwp_user_entry .type lwp_user_entry, % function lwp_user_entry: PUSH {R0-R3} @; push text&data addr. MOV R0, SP @; v1 = SP BL lwp_set_kernel_sp @; lwp_set_kernel_sp(v1) @; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 @; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} @; pop app address to R1. @; set data address. MOV R9, R2 @; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 /* * void SVC_Handler(void); */ .global SVC_Handler .type SVC_Handler, % function SVC_Handler: PUSH {LR} @; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} @; push app SP. @; get SVC number. mov R0, R7 @; get kernel system API BL lwp_get_sys_api PUSH {R0} @; push api @; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} @; pop api to R2. POP {R1} @; pop app SP to R1. stmfd r0!, {r1} @; save app SP to kernel SP @;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} @; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} @; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} @; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] @; update LR STR R2, [R0, #24] @; update api to PC MSR PSP, R0 @; update SP, API is executed with kernel SP @; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} @; 0xFFFFFFED ORR LR, LR, #0x10 BX LR /* * void svc_exit(void); */ .global svc_exit .type svc_exit, % function svc_exit: @; get user SP. PUSH {R0} @; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] @; load pc add r3, #32 @; exception_stack_frame size MSR PSP, R3 @; restore app stack pointer @; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 @; return to lwp. ORR R1, R1, #0x01 @; only Thumb-mode. BX R1 @; return to user app.
lizhirui/DreamCore
2,972
software/rtthread_app/components/lwp/arch/arm/cortex-m3/lwp_iar.S
;/* ; * Copyright (c) 2006-2018, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2018-10-30 heyuanjie first version ; */ SECTION .text:CODE(2) THUMB REQUIRE8 PRESERVE8 ;/* ; * void* lwp_get_sys_api(rt_uint32_t number); ; */ IMPORT lwp_get_sys_api IMPORT lwp_get_kernel_sp IMPORT lwp_set_kernel_sp ;/* ; * void lwp_user_entry(args, text, data); ; */ EXPORT lwp_user_entry lwp_user_entry: PUSH {R0-R3} ; push text&data addr. MOV R0, SP ; v1 = SP BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1) ; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 ; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} ; pop app address to R1. ; set data address. MOV R9, R2 ; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 ;/* ; * void SVC_Handler(void); ; */ EXPORT SVC_Handler SVC_Handler: PUSH {LR} ; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} ; push app SP. ; get SVC number. mov R0, R7 ; get kernel system API BL lwp_get_sys_api PUSH {R0} ; push api ; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} ; pop api to R2. POP {R1} ; pop app SP to R1. stmfd r0!, {r1} ; save app SP to kernel SP ;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} ; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] ; update LR STR R2, [R0, #24] ; update api to PC MSR PSP, R0 ; update SP, API is executed with kernel SP ; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} ; 0xFFFFFFED ORR LR, LR, #0x10 BX LR ;/* ; * void svc_exit(void); ; */ EXPORT svc_exit svc_exit: ; get user SP. PUSH {R0} ; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] ; load pc add r3, r3, #32 ; exception_stack_frame size MSR PSP, R3 ; restore app stack pointer ; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 ; return to lwp. ORR R1, R1, #0x01 ; only Thumb-mode. BX R1 ; return to user app. END
lizhirui/DreamCore
3,071
software/rtthread_app/components/lwp/arch/arm/cortex-m3/lwp_rvds.S
;/* ; * Copyright (c) 2006-2018, RT-Thread Development Team ; * ; * SPDX-License-Identifier: Apache-2.0 ; * ; * Change Logs: ; * Date Author Notes ; * 2018-10-30 heyuanjie first version ; */ AREA |.text|, CODE, READONLY, ALIGN=2 THUMB REQUIRE8 PRESERVE8 ;/* ; * void* lwp_get_sys_api(rt_uint32_t number); ; */ IMPORT lwp_get_sys_api IMPORT lwp_get_kernel_sp IMPORT lwp_set_kernel_sp ;/* ; * void lwp_user_entry(args, text, data); ; */ lwp_user_entry PROC EXPORT lwp_user_entry PUSH {R0-R3} ; push text&data addr. MOV R0, SP ; v1 = SP BL lwp_set_kernel_sp ; lwp_set_kernel_sp(v1) ; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 ; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} ; pop app address to R1. ; set data address. MOV R9, R2 ; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 ; never reach here! ENDP ;/* ; * void SVC_Handler(void); ; */ SVC_Handler PROC EXPORT SVC_Handler PUSH {LR} ; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} ; push app SP. ; get SVC number. mov R0, R7 ; get kernel system API BL lwp_get_sys_api PUSH {R0} ; push api ; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} ; pop api to R2. POP {R1} ; pop app SP to R1. stmfd r0!, {r1} ; save app SP to kernel SP ;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} ; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} ; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} ; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] ; update LR STR R2, [R0, #24] ; update api to PC MSR PSP, R0 ; update SP, API is executed with kernel SP ; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} ; 0xFFFFFFED ORR LR, LR, #0x10 BX LR ENDP ;/* ; * void svc_exit(void); ; */ svc_exit PROC EXPORT svc_exit ; get user SP. PUSH {R0} ; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] ; load pc add r3, #32 ; exception_stack_frame size MSR PSP, R3 ; restore app stack pointer ; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 ; return to lwp. ORR R1, R1, #0x01 ; only Thumb-mode. BX R1 ; return to user app. ENDP ALIGN END
lizhirui/DreamCore
3,020
software/rtthread_app/components/lwp/arch/arm/cortex-m4/lwp_gcc.S
/* * Copyright (c) 2006-2018, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2018-10-30 heyuanjie first version */ .cpu cortex-m4 .syntax unified .thumb .text /* * void* lwp_get_sys_api(rt_uint32_t number); */ .global lwp_get_sys_api .global lwp_get_kernel_sp .global lwp_set_kernel_sp /* * void lwp_user_entry(args, text, data); */ .global lwp_user_entry .type lwp_user_entry, % function lwp_user_entry: PUSH {R0-R3} @; push text&data addr. MOV R0, SP @; v1 = SP BL lwp_set_kernel_sp @; lwp_set_kernel_sp(v1) @; set CPU to user-thread mode. MRS R2, CONTROL ORR R2, R2, #0x03 @; use PSP, user-thread mode. MSR CONTROL, R2 POP {R0-R3} @; pop app address to R1. @; set data address. MOV R9, R2 @; run app, only Thumb-mode. ORR R1, R1, #0x01 BX R1 /* * void SVC_Handler(void); */ .global SVC_Handler .type SVC_Handler, % function SVC_Handler: PUSH {LR} @; get user SP. TST LR, #0x4 ITE EQ MRSEQ R1, MSP MRSNE R1, PSP PUSH {R1} @; push app SP. @; get SVC number. mov R0, R7 @; get kernel system API BL lwp_get_sys_api PUSH {R0} @; push api @; get kernel SP to R0. BL lwp_get_kernel_sp POP {R2} @; pop api to R2. POP {R1} @; pop app SP to R1. stmfd r0!, {r1} @; save app SP to kernel SP @;push app parm5~6 to kernel SP STMFD R0!, {R4 - R5} @; copy R1(app SP) to R0(kernel SP). push {r8-r11} LDMFD R1, {R4 - R11} @; pop exception_stack_frame to r4 - r11 register STMFD R0!, {R4 - R11} @; push exception_stack_frame to server SP. pop {r8-r11} LDR R3, =svc_exit STR R3, [R0, #20] @; update LR STR R2, [R0, #24] @; update api to PC MSR PSP, R0 @; update SP, API is executed with kernel SP @; set to thread-privilege mode. MRS R3, CONTROL BIC R3, R3, #0x01 ORR R3, R3, #0x02 MSR CONTROL, R3 POP {LR} @; 0xFFFFFFED ORR LR, LR, #0x10 BX LR /* * void svc_exit(void); */ .global svc_exit .type svc_exit, % function svc_exit: @; get user SP. PUSH {R0} @; push result to SP. BL lwp_get_kernel_sp ldr r3, [r0, #-4] pop {r0} ldr lr, [r3, #20] ldr r1, [r3, #24] @; load pc add r3, #32 @; exception_stack_frame size MSR PSP, R3 @; restore app stack pointer @; restore to PSP & thread-unprivilege mode. MRS R2, CONTROL ORR R2, R2, #0x03 MSR CONTROL, R2 @; return to lwp. ORR R1, R1, #0x01 @; only Thumb-mode. BX R1 @; return to user app.
lizhirui/DreamCore
2,972
software/rtthread_app/components/lwp/arch/arm/cortex-m4/lwp_iar.S
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2018-10-30     heyuanjie    first version
; */

    SECTION .text:CODE(2)
    THUMB
    REQUIRE8
    PRESERVE8

;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
    IMPORT lwp_get_sys_api
    IMPORT lwp_get_kernel_sp
    IMPORT lwp_set_kernel_sp

;/*
; * void lwp_user_entry(args, text, data);
; */
    EXPORT lwp_user_entry
lwp_user_entry:
    PUSH {R0-R3}            ; push text&data addr.
    MOV R0, SP              ; v1 = SP
    BL lwp_set_kernel_sp    ; lwp_set_kernel_sp(v1)

    ; set CPU to user-thread mode.
    MRS R2, CONTROL
    ORR R2, R2, #0x03       ; use PSP, user-thread mode.
    MSR CONTROL, R2

    POP {R0-R3}             ; pop app address to R1.
    ; set data address.
    MOV R9, R2

    ; run app, only Thumb-mode.
    ORR R1, R1, #0x01
    BX R1

;/*
; * void SVC_Handler(void);
; */
    EXPORT SVC_Handler
SVC_Handler:
    PUSH {LR}

    ; get user SP.
    TST LR, #0x4
    ITE EQ
    MRSEQ R1, MSP
    MRSNE R1, PSP
    PUSH {R1}               ; push app SP.

    ; get SVC number.
    mov R0, R7
    ; get kernel system API
    BL lwp_get_sys_api
    PUSH {R0}               ; push api

    ; get kernel SP to R0.
    BL lwp_get_kernel_sp

    POP {R2}                ; pop api to R2.
    POP {R1}                ; pop app SP to R1.
    stmfd r0!, {r1}         ; save app SP to kernel SP

    ;push app parm5~6 to kernel SP
    STMFD R0!, {R4 - R5}

    ; copy R1(app SP) to R0(kernel SP).
    push {r8-r11}
    LDMFD R1, {R4 - R11}    ; pop exception_stack_frame to r4 - r11 register
    STMFD R0!, {R4 - R11}   ; push exception_stack_frame to server SP.
    pop {r8-r11}

    LDR R3, =svc_exit
    STR R3, [R0, #20]       ; update LR
    STR R2, [R0, #24]       ; update api to PC
    MSR PSP, R0             ; update SP, API is executed with kernel SP

    ; set to thread-privilege mode.
    MRS R3, CONTROL
    BIC R3, R3, #0x01
    ORR R3, R3, #0x02
    MSR CONTROL, R3

    POP {LR}                ; 0xFFFFFFED
    ORR LR, LR, #0x10
    BX LR

;/*
; * void svc_exit(void);
; */
    EXPORT svc_exit
svc_exit:
    ; get user SP.
    PUSH {R0}               ; push result to SP.
    BL lwp_get_kernel_sp
    ldr r3, [r0, #-4]
    pop {r0}

    ldr lr, [r3, #20]
    ldr r1, [r3, #24]       ; load pc
    add r3, r3, #32         ; exception_stack_frame size
    MSR PSP, R3             ; restore app stack pointer

    ; restore to PSP & thread-unprivilege mode.
    MRS R2, CONTROL
    ORR R2, R2, #0x03
    MSR CONTROL, R2

    ; return to lwp.
    ORR R1, R1, #0x01       ; only Thumb-mode.
    BX R1                   ; return to user app.

    END
lizhirui/DreamCore
3,071
software/rtthread_app/components/lwp/arch/arm/cortex-m4/lwp_rvds.S
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2018-10-30     heyuanjie    first version
; */

    AREA |.text|, CODE, READONLY, ALIGN=2
    THUMB
    REQUIRE8
    PRESERVE8

;/*
; * void* lwp_get_sys_api(rt_uint32_t number);
; */
    IMPORT lwp_get_sys_api
    IMPORT lwp_get_kernel_sp
    IMPORT lwp_set_kernel_sp

;/*
; * void lwp_user_entry(args, text, data);
; */
lwp_user_entry    PROC
    EXPORT lwp_user_entry

    PUSH {R0-R3}            ; push text&data addr.
    MOV R0, SP              ; v1 = SP
    BL lwp_set_kernel_sp    ; lwp_set_kernel_sp(v1)

    ; set CPU to user-thread mode.
    MRS R2, CONTROL
    ORR R2, R2, #0x03       ; use PSP, user-thread mode.
    MSR CONTROL, R2

    POP {R0-R3}             ; pop app address to R1.
    ; set data address.
    MOV R9, R2

    ; run app, only Thumb-mode.
    ORR R1, R1, #0x01
    BX R1

    ; never reach here!
    ENDP

;/*
; * void SVC_Handler(void);
; */
SVC_Handler    PROC
    EXPORT SVC_Handler

    PUSH {LR}

    ; get user SP.
    TST LR, #0x4
    ITE EQ
    MRSEQ R1, MSP
    MRSNE R1, PSP
    PUSH {R1}               ; push app SP.

    ; get SVC number.
    mov R0, R7
    ; get kernel system API
    BL lwp_get_sys_api
    PUSH {R0}               ; push api

    ; get kernel SP to R0.
    BL lwp_get_kernel_sp

    POP {R2}                ; pop api to R2.
    POP {R1}                ; pop app SP to R1.
    stmfd r0!, {r1}         ; save app SP to kernel SP

    ;push app parm5~6 to kernel SP
    STMFD R0!, {R4 - R5}

    ; copy R1(app SP) to R0(kernel SP).
    push {r8-r11}
    LDMFD R1, {R4 - R11}    ; pop exception_stack_frame to r4 - r11 register
    STMFD R0!, {R4 - R11}   ; push exception_stack_frame to server SP.
    pop {r8-r11}

    LDR R3, =svc_exit
    STR R3, [R0, #20]       ; update LR
    STR R2, [R0, #24]       ; update api to PC
    MSR PSP, R0             ; update SP, API is executed with kernel SP

    ; set to thread-privilege mode.
    MRS R3, CONTROL
    BIC R3, R3, #0x01
    ORR R3, R3, #0x02
    MSR CONTROL, R3

    POP {LR}                ; 0xFFFFFFED
    ORR LR, LR, #0x10
    BX LR
    ENDP

;/*
; * void svc_exit(void);
; */
svc_exit    PROC
    EXPORT svc_exit

    ; get user SP.
    PUSH {R0}               ; push result to SP.
    BL lwp_get_kernel_sp
    ldr r3, [r0, #-4]
    pop {r0}

    ldr lr, [r3, #20]
    ldr r1, [r3, #24]       ; load pc
    add r3, #32             ; exception_stack_frame size
    MSR PSP, R3             ; restore app stack pointer

    ; restore to PSP & thread-unprivilege mode.
    MRS R2, CONTROL
    ORR R2, R2, #0x03
    MSR CONTROL, R2

    ; return to lwp.
    ORR R1, R1, #0x01       ; only Thumb-mode.
    BX R1                   ; return to user app.
    ENDP

    ALIGN
    END
lizhirui/DreamCore
1,380
software/rtthread_app/components/lwp/arch/arm/cortex-a9/lwp_gcc.S
/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 */

#define Mode_USR    0x10
#define Mode_FIQ    0x11
#define Mode_IRQ    0x12
#define Mode_SVC    0x13
#define Mode_MON    0x16
#define Mode_ABT    0x17
#define Mode_UDF    0x1B
#define Mode_SYS    0x1F

#define A_Bit       0x100
#define I_Bit       0x80    @; when I bit is set, IRQ is disabled
#define F_Bit       0x40    @; when F bit is set, FIQ is disabled
#define T_Bit       0x20

.cpu cortex-a9
.syntax unified
.text

/*
 * void lwp_user_entry(args, text, data);
 */
.global lwp_user_entry
.type lwp_user_entry, % function
lwp_user_entry:
    mrs r9, cpsr
    bic r9, #0x1f
    orr r9, #Mode_USR
    cpsid i
    msr spsr, r9

    /* set data address. */
    mov r9, r2
    movs pc, r1

/*
 * void vector_swi(void);
 */
.global vector_swi
.type vector_swi, % function
vector_swi:
    push {lr}
    mrs lr, spsr
    push {r4, r5, lr}

    cpsie i

    push {r0 - r3, r12}
    and r0, r7, #0xff
    bl lwp_get_sys_api
    cmp r0, #0          /* r0 = api */
    mov lr, r0
    pop {r0 - r3, r12}
    beq svc_exit
    blx lr

svc_exit:
    cpsid i
    pop {r4, r5, lr}
    msr spsr_cxsf, lr
    pop {lr}
    movs pc, lr
lizhirui/DreamCore
4,073
model/tests/riscv-tests/benchmarks/common/crt.S
# See LICENSE for license details. #include "encoding.h" #if __riscv_xlen == 64 # define LREG ld # define SREG sd # define REGBYTES 8 #else # define LREG lw # define SREG sw # define REGBYTES 4 #endif .section ".text.init" .globl _start _start: li x1, 0 li x2, 0 li x3, 0 li x4, 0 li x5, 0 li x6, 0 li x7, 0 li x8, 0 li x9, 0 li x10,0 li x11,0 li x12,0 li x13,0 li x14,0 li x15,0 li x16,0 li x17,0 li x18,0 li x19,0 li x20,0 li x21,0 li x22,0 li x23,0 li x24,0 li x25,0 li x26,0 li x27,0 li x28,0 li x29,0 li x30,0 li x31,0 # enable FPU and accelerator if present li t0, MSTATUS_FS | MSTATUS_XS csrs mstatus, t0 # make sure XLEN agrees with compilation choice li t0, 1 slli t0, t0, 31 #if __riscv_xlen == 64 bgez t0, 1f #else bltz t0, 1f #endif 2: li a0, 1 sw a0, tohost, t0 j 2b 1: #ifdef __riscv_flen # initialize FPU if we have one la t0, 1f csrw mtvec, t0 fssr x0 fmv.s.x f0, x0 fmv.s.x f1, x0 fmv.s.x f2, x0 fmv.s.x f3, x0 fmv.s.x f4, x0 fmv.s.x f5, x0 fmv.s.x f6, x0 fmv.s.x f7, x0 fmv.s.x f8, x0 fmv.s.x f9, x0 fmv.s.x f10,x0 fmv.s.x f11,x0 fmv.s.x f12,x0 fmv.s.x f13,x0 fmv.s.x f14,x0 fmv.s.x f15,x0 fmv.s.x f16,x0 fmv.s.x f17,x0 fmv.s.x f18,x0 fmv.s.x f19,x0 fmv.s.x f20,x0 fmv.s.x f21,x0 fmv.s.x f22,x0 fmv.s.x f23,x0 fmv.s.x f24,x0 fmv.s.x f25,x0 fmv.s.x f26,x0 fmv.s.x f27,x0 fmv.s.x f28,x0 fmv.s.x f29,x0 fmv.s.x f30,x0 fmv.s.x f31,x0 1: #endif # initialize trap vector la t0, trap_entry csrw mtvec, t0 # initialize global pointer .option push .option norelax la gp, __global_pointer$ .option pop la tp, _end + 63 and tp, tp, -64 # get core id csrr a0, mhartid # for now, assume only 1 core li a1, 1 1:bgeu a0, a1, 1b # give each core 128KB of stack + TLS #define STKSHIFT 17 add sp, a0, 1 sll sp, sp, STKSHIFT add sp, sp, tp sll a2, a0, STKSHIFT add tp, tp, a2 j _init .align 2 trap_entry: addi sp, sp, -272 SREG x1, 1*REGBYTES(sp) SREG x2, 2*REGBYTES(sp) SREG x3, 3*REGBYTES(sp) SREG x4, 4*REGBYTES(sp) SREG x5, 5*REGBYTES(sp) SREG x6, 6*REGBYTES(sp) SREG x7, 7*REGBYTES(sp) SREG x8, 8*REGBYTES(sp) SREG x9, 9*REGBYTES(sp) SREG x10, 10*REGBYTES(sp) SREG x11, 11*REGBYTES(sp) SREG x12, 12*REGBYTES(sp) SREG x13, 13*REGBYTES(sp) SREG x14, 14*REGBYTES(sp) SREG x15, 15*REGBYTES(sp) SREG x16, 16*REGBYTES(sp) SREG x17, 17*REGBYTES(sp) SREG x18, 18*REGBYTES(sp) SREG x19, 19*REGBYTES(sp) SREG x20, 20*REGBYTES(sp) SREG x21, 21*REGBYTES(sp) SREG x22, 22*REGBYTES(sp) SREG x23, 23*REGBYTES(sp) SREG x24, 24*REGBYTES(sp) SREG x25, 25*REGBYTES(sp) SREG x26, 26*REGBYTES(sp) SREG x27, 27*REGBYTES(sp) SREG x28, 28*REGBYTES(sp) SREG x29, 29*REGBYTES(sp) SREG x30, 30*REGBYTES(sp) SREG x31, 31*REGBYTES(sp) csrr a0, mcause csrr a1, mepc mv a2, sp jal handle_trap csrw mepc, a0 # Remain in M-mode after eret li t0, MSTATUS_MPP csrs mstatus, t0 LREG x1, 1*REGBYTES(sp) LREG x2, 2*REGBYTES(sp) LREG x3, 3*REGBYTES(sp) LREG x4, 4*REGBYTES(sp) LREG x5, 5*REGBYTES(sp) LREG x6, 6*REGBYTES(sp) LREG x7, 7*REGBYTES(sp) LREG x8, 8*REGBYTES(sp) LREG x9, 9*REGBYTES(sp) LREG x10, 10*REGBYTES(sp) LREG x11, 11*REGBYTES(sp) LREG x12, 12*REGBYTES(sp) LREG x13, 13*REGBYTES(sp) LREG x14, 14*REGBYTES(sp) LREG x15, 15*REGBYTES(sp) LREG x16, 16*REGBYTES(sp) LREG x17, 17*REGBYTES(sp) LREG x18, 18*REGBYTES(sp) LREG x19, 19*REGBYTES(sp) LREG x20, 20*REGBYTES(sp) LREG x21, 21*REGBYTES(sp) LREG x22, 22*REGBYTES(sp) LREG x23, 23*REGBYTES(sp) LREG x24, 24*REGBYTES(sp) LREG x25, 25*REGBYTES(sp) LREG x26, 26*REGBYTES(sp) LREG x27, 27*REGBYTES(sp) LREG x28, 28*REGBYTES(sp) LREG x29, 29*REGBYTES(sp) LREG x30, 30*REGBYTES(sp) LREG x31, 31*REGBYTES(sp) addi sp, sp, 272 
mret .section ".tohost","aw",@progbits .align 6 .globl tohost tohost: .dword 0 .align 6 .globl fromhost fromhost: .dword 0
lizhirui/DreamCore
1,159
model/tests/riscv-tests/isa/rv64ud/structural.S
# See LICENSE for license details. #***************************************************************************** # structural.S #----------------------------------------------------------------------------- # # This test verifies that the FPU correctly obviates structural hazards on its # writeback port (e.g. fadd followed by fsgnj) # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN li x12, 1 li x2, 0x3FF0000000000000 li x1, 0x3F800000 #define TEST(testnum, nops) \ test_ ## testnum: \ li TESTNUM, testnum; \ fmv.d.x f4, x0 ;\ fmv.s.x f3, x0 ;\ fmv.d.x f2, x2 ;\ fmv.s.x f1, x1 ;\ j 1f ;\ .align 5 ;\ 1:fmul.d f4, f2, f2 ;\ nops ;\ fsgnj.s f3, f1, f1 ;\ fmv.x.d x4, f4 ;\ fmv.x.s x5, f3 ;\ beq x1, x5, 2f ;\ j fail;\ 2:beq x2, x4, 2f ;\ j fail; \ 2:fmv.d.x f2, zero ;\ fmv.s.x f1, zero ;\ TEST(1,;) TEST(2,nop) TEST(3,nop;nop) TEST(4,nop;nop;nop) TEST(5,nop;nop;nop;nop) TEST(6,nop;nop;nop;nop;nop) TEST(7,nop;nop;nop;nop;nop;nop) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,044
model/tests/riscv-tests/isa/rv64ud/fmin.S
# See LICENSE for license details. #***************************************************************************** # fmin.S #----------------------------------------------------------------------------- # # Test f{min|max}.d instructinos. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #if __riscv_xlen == 32 # Replace the function with the 32-bit variant defined in test_macros.h #undef TEST_FP_OP2_D #define TEST_FP_OP2_D TEST_FP_OP2_D32 #endif #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_FP_OP2_D( 2, fmin.d, 0, 1.0, 2.5, 1.0 ); TEST_FP_OP2_D( 3, fmin.d, 0, -1235.1, -1235.1, 1.1 ); TEST_FP_OP2_D( 4, fmin.d, 0, -1235.1, 1.1, -1235.1 ); TEST_FP_OP2_D( 5, fmin.d, 0, -1235.1, NaN, -1235.1 ); TEST_FP_OP2_D( 6, fmin.d, 0, 0.00000001, 3.14159265, 0.00000001 ); TEST_FP_OP2_D( 7, fmin.d, 0, -2.0, -1.0, -2.0 ); TEST_FP_OP2_D(12, fmax.d, 0, 2.5, 2.5, 1.0 ); TEST_FP_OP2_D(13, fmax.d, 0, 1.1, -1235.1, 1.1 ); TEST_FP_OP2_D(14, fmax.d, 0, 1.1, 1.1, -1235.1 ); TEST_FP_OP2_D(15, fmax.d, 0, -1235.1, NaN, -1235.1 ); TEST_FP_OP2_D(16, fmax.d, 0, 3.14159265, 3.14159265, 0.00000001 ); TEST_FP_OP2_D(17, fmax.d, 0, -1.0, -1.0, -2.0 ); # FMAX(sNaN, x) = x TEST_FP_OP2_D(20, fmax.d, 0x10, 1.0, sNaN, 1.0); # FMAX(qNaN, qNaN) = canonical NaN TEST_FP_OP2_D(21, fmax.d, 0x00, qNaN, NaN, NaN); # -0.0 < +0.0 TEST_FP_OP2_D(30, fmin.d, 0, -0.0, -0.0, 0.0 ); TEST_FP_OP2_D(31, fmin.d, 0, -0.0, 0.0, -0.0 ); TEST_FP_OP2_D(32, fmax.d, 0, 0.0, -0.0, 0.0 ); TEST_FP_OP2_D(33, fmax.d, 0, 0.0, 0.0, -0.0 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
1,803
model/tests/riscv-tests/isa/rv64ud/fmadd.S
# See LICENSE for license details. #***************************************************************************** # fmadd.S #----------------------------------------------------------------------------- # # Test f[n]m{add|sub}.s and f[n]m{add|sub}.d instructions. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #if __riscv_xlen == 32 # Replace the function with the 32-bit variant defined in test_macros.h #undef TEST_FP_OP3_D #define TEST_FP_OP3_D TEST_FP_OP3_D32 #endif #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_FP_OP3_D( 2, fmadd.d, 0, 3.5, 1.0, 2.5, 1.0 ); TEST_FP_OP3_D( 3, fmadd.d, 1, 1236.1999999999999, -1.0, -1235.1, 1.1 ); TEST_FP_OP3_D( 4, fmadd.d, 0, -12.0, 2.0, -5.0, -2.0 ); TEST_FP_OP3_D( 5, fnmadd.d, 0, -3.5, 1.0, 2.5, 1.0 ); TEST_FP_OP3_D( 6, fnmadd.d, 1, -1236.1999999999999, -1.0, -1235.1, 1.1 ); TEST_FP_OP3_D( 7, fnmadd.d, 0, 12.0, 2.0, -5.0, -2.0 ); TEST_FP_OP3_D( 8, fmsub.d, 0, 1.5, 1.0, 2.5, 1.0 ); TEST_FP_OP3_D( 9, fmsub.d, 1, 1234, -1.0, -1235.1, 1.1 ); TEST_FP_OP3_D(10, fmsub.d, 0, -8.0, 2.0, -5.0, -2.0 ); TEST_FP_OP3_D(11, fnmsub.d, 0, -1.5, 1.0, 2.5, 1.0 ); TEST_FP_OP3_D(12, fnmsub.d, 1, -1234, -1.0, -1235.1, 1.1 ); TEST_FP_OP3_D(13, fnmsub.d, 0, 8.0, 2.0, -5.0, -2.0 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
3,659
model/tests/riscv-tests/isa/rv64ud/move.S
# See LICENSE for license details. #***************************************************************************** # move.S #----------------------------------------------------------------------------- # # This test verifies that fmv.d.x, fmv.x.d, and fsgnj[x|n].d work properly. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #TODO: make 32-bit compatible version #define TEST_FSGNJD(n, insn, new_sign, rs1_sign, rs2_sign) \ TEST_CASE(n, a0, 0x123456789abcdef0 | (-(new_sign) << 63), \ li a1, ((rs1_sign) << 63) | 0x123456789abcdef0; \ li a2, -(rs2_sign); \ fmv.d.x f1, a1; \ fmv.d.x f2, a2; \ insn f0, f1, f2; \ fmv.x.d a0, f0) TEST_FSGNJD(10, fsgnj.d, 0, 0, 0) TEST_FSGNJD(11, fsgnj.d, 1, 0, 1) TEST_FSGNJD(12, fsgnj.d, 0, 1, 0) TEST_FSGNJD(13, fsgnj.d, 1, 1, 1) TEST_FSGNJD(20, fsgnjn.d, 1, 0, 0) TEST_FSGNJD(21, fsgnjn.d, 0, 0, 1) TEST_FSGNJD(22, fsgnjn.d, 1, 1, 0) TEST_FSGNJD(23, fsgnjn.d, 0, 1, 1) TEST_FSGNJD(30, fsgnjx.d, 0, 0, 0) TEST_FSGNJD(31, fsgnjx.d, 1, 0, 1) TEST_FSGNJD(32, fsgnjx.d, 1, 1, 0) TEST_FSGNJD(33, fsgnjx.d, 0, 1, 1) // Test fsgnj.s in conjunction with double-precision moves #define TEST_FSGNJS(n, rd, rs1, rs2) \ TEST_CASE(n, a0, (rd) | (-((rd) >> 31) << 32), \ li a1, rs1; \ li a2, rs2; \ fmv.d.x f1, a1; \ fmv.d.x f2, a2; \ fsgnj.s f0, f1, f2; \ fmv.x.s a0, f0); \ TEST_CASE(1##n, a0, (rd) | 0xffffffff00000000, \ li a1, rs1; \ li a2, rs2; \ fmv.d.x f1, a1; \ fmv.d.x f2, a2; \ fsgnj.s f0, f1, f2; \ fmv.x.d a0, f0) TEST_FSGNJS(40, 0x7fc00000, 0x7ffffffe12345678, 0) TEST_FSGNJS(41, 0x7fc00000, 0xfffffffe12345678, 0) TEST_FSGNJS(42, 0x7fc00000, 0x7fffffff12345678, 0) TEST_FSGNJS(43, 0x12345678, 0xffffffff12345678, 0) TEST_FSGNJS(50, 0x7fc00000, 0x7ffffffe12345678, 0x80000000) TEST_FSGNJS(51, 0x7fc00000, 0xfffffffe12345678, 0x80000000) TEST_FSGNJS(52, 0x7fc00000, 0x7fffffff12345678, 0x80000000) TEST_FSGNJS(53, 0x12345678, 0xffffffff12345678, 0x80000000) TEST_FSGNJS(60, 0xffc00000, 0x7ffffffe12345678, 0xffffffff80000000) TEST_FSGNJS(61, 0xffc00000, 0xfffffffe12345678, 0xffffffff80000000) TEST_FSGNJS(62, 0x92345678, 0xffffffff12345678, 0xffffffff80000000) TEST_FSGNJS(63, 0x12345678, 0xffffffff12345678, 0x7fffffff80000000) // Test fsgnj.d in conjunction with single-precision moves #define TEST_FSGNJD_SP(n, isnan, rd, rs1, rs2) \ TEST_CASE(n, a0, ((rd) & 0xffffffff) | (-(((rd) >> 31) & 1) << 32), \ li a1, rs1; \ li a2, rs2; \ fmv.d.x f1, a1; \ fmv.d.x f2, a2; \ fsgnj.d f0, f1, f2; \ feq.s a0, f0, f0; \ addi a0, a0, -!(isnan); \ bnez a0, 1f; \ fmv.x.s a0, f0; \ 1:); \ TEST_CASE(1##n, a0, rd, \ li a1, rs1; \ li a2, rs2; \ fmv.d.x f1, a1; \ fmv.d.x f2, a2; \ fsgnj.d f0, f1, f2; \ fmv.x.d a0, f0; \ 1:) TEST_FSGNJD_SP(70, 0, 0xffffffff11111111, 0xffffffff11111111, 0xffffffff11111111) TEST_FSGNJD_SP(71, 1, 0x7fffffff11111111, 0xffffffff11111111, 0x7fffffff11111111) TEST_FSGNJD_SP(72, 0, 0xffffffff11111111, 0xffffffff11111111, 0xffffffff91111111) TEST_FSGNJD_SP(73, 0, 0xffffffff11111111, 0xffffffff11111111, 0x8000000000000000) TEST_FSGNJD_SP(74, 0, 0xffffffff11111111, 0x7fffffff11111111, 0xffffffff11111111) TEST_FSGNJD_SP(75, 1, 0x7fffffff11111111, 0x7fffffff11111111, 0x7fffffff11111111) TEST_FSGNJD_SP(76, 0, 0xffffffff11111111, 0x7fffffff11111111, 0xffffffff91111111) TEST_FSGNJD_SP(77, 0, 0xffffffff11111111, 0x7fffffff11111111, 0x8000000000000000) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
1,028
model/tests/riscv-tests/isa/rv64ud/ldst.S
# See LICENSE for license details. #***************************************************************************** # ldst.S #----------------------------------------------------------------------------- # # This test verifies that flw, fld, fsw, and fsd work properly. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN la s0, tdat TEST_CASE(2, a0, 0x40000000bf800000, fld f2, 0(s0); fsd f2, 16(s0); ld a0, 16(s0)) TEST_CASE(3, a0, 0x40000000bf800000, fld f2, 0(s0); fsw f2, 16(s0); ld a0, 16(s0)) TEST_CASE(4, a0, 0x40000000bf800000, flw f2, 0(s0); fsw f2, 16(s0); ld a0, 16(s0)) TEST_CASE(5, a0, 0xc080000040400000, fld f2, 8(s0); fsd f2, 16(s0); ld a0, 16(s0)) TEST_CASE(6, a0, 0xffffffff40400000, flw f2, 8(s0); fsd f2, 16(s0); ld a0, 16(s0)) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: .word 0xbf800000 .word 0x40000000 .word 0x40400000 .word 0xc0800000 .word 0xdeadbeef .word 0xcafebabe .word 0xabad1dea .word 0x1337d00d RVTEST_DATA_END
lizhirui/DreamCore
1,521
model/tests/riscv-tests/isa/rv64ud/fdiv.S
# See LICENSE for license details. #***************************************************************************** # fdiv.S #----------------------------------------------------------------------------- # # Test f{div|sqrt}.d instructions. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #if __riscv_xlen == 32 # Replace the functions with the 32-bit variants defined in test_macros.h #undef TEST_FP_OP2_D #define TEST_FP_OP2_D TEST_FP_OP2_D32 #undef TEST_FP_OP1_D #define TEST_FP_OP1_D TEST_FP_OP1_D32 #undef TEST_FP_OP1_D_DWORD_RESULT #define TEST_FP_OP1_D_DWORD_RESULT TEST_FP_OP1_D32_DWORD_RESULT #endif #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_FP_OP2_D( 2, fdiv.d, 1, 1.1557273520668288, 3.14159265, 2.71828182 ); TEST_FP_OP2_D( 3, fdiv.d, 1,-0.9991093838555584, -1234, 1235.1 ); TEST_FP_OP2_D( 4, fdiv.d, 0, 3.14159265, 3.14159265, 1.0 ); TEST_FP_OP1_D( 5, fsqrt.d, 1, 1.7724538498928541, 3.14159265 ); TEST_FP_OP1_D( 6, fsqrt.d, 0, 100, 10000 ); TEST_FP_OP1_D_DWORD_RESULT(16, fsqrt.d, 0x10, 0x7FF8000000000000, -1.0 ); TEST_FP_OP1_D( 7, fsqrt.d, 1, 13.076696830622021, 171.0); TEST_FP_OP1_D( 8, fsqrt.d, 1,0.00040099251863345283320230749702, 1.60795e-7); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
1,686
model/tests/riscv-tests/isa/rv64ud/recoding.S
# See LICENSE for license details. #***************************************************************************** # recoding.S #----------------------------------------------------------------------------- # # Test corner cases of John Hauser's microarchitectural recoding scheme. # There are twice as many recoded values as IEEE-754 values; some of these # extras are redundant (e.g. Inf) and others are illegal (subnormals with # too many bits set). # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN # Make sure infinities with different mantissas compare as equal. fld f0, minf, a0 fld f1, three, a0 fmul.d f1, f1, f0 TEST_CASE( 2, a0, 1, feq.d a0, f0, f1) TEST_CASE( 3, a0, 1, fle.d a0, f0, f1) TEST_CASE( 4, a0, 0, flt.d a0, f0, f1) # Likewise, but for zeroes. fcvt.d.w f0, x0 li a0, 1 fcvt.d.w f1, a0 fmul.d f1, f1, f0 TEST_CASE(5, a0, 1, feq.d a0, f0, f1) TEST_CASE(6, a0, 1, fle.d a0, f0, f1) TEST_CASE(7, a0, 0, flt.d a0, f0, f1) # When converting small doubles to single-precision subnormals, # ensure that the extra precision is discarded. flw f0, big, a0 fld f1, tiny, a0 fcvt.s.d f1, f1 fmul.s f0, f0, f1 fmv.x.s a0, f0 lw a1, small TEST_CASE(10, a0, 0, sub a0, a0, a1) # Make sure FSD+FLD correctly saves and restores a single-precision value. flw f0, three, a0 fadd.s f1, f0, f0 fadd.s f0, f0, f0 fsd f0, tiny, a0 fld f0, tiny, a0 TEST_CASE(20, a0, 1, feq.s a0, f0, f1) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN minf: .double -Inf three: .double 3.0 big: .float 1221 small: .float 2.9133121e-37 tiny: .double 2.3860049081905093e-40 RVTEST_DATA_END
lizhirui/DreamCore
1,594
model/tests/riscv-tests/isa/rv64ud/fcmp.S
# See LICENSE for license details. #***************************************************************************** # fcmp.S #----------------------------------------------------------------------------- # # Test f{eq|lt|le}.d instructions. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- #if __riscv_xlen == 32 # Replace the function with the 32-bit variant defined in test_macros.h #undef TEST_FP_CMP_OP_D #define TEST_FP_CMP_OP_D TEST_FP_CMP_OP_D32 #endif TEST_FP_CMP_OP_D( 2, feq.d, 0x00, 1, -1.36, -1.36) TEST_FP_CMP_OP_D( 3, fle.d, 0x00, 1, -1.36, -1.36) TEST_FP_CMP_OP_D( 4, flt.d, 0x00, 0, -1.36, -1.36) TEST_FP_CMP_OP_D( 5, feq.d, 0x00, 0, -1.37, -1.36) TEST_FP_CMP_OP_D( 6, fle.d, 0x00, 1, -1.37, -1.36) TEST_FP_CMP_OP_D( 7, flt.d, 0x00, 1, -1.37, -1.36) # Only sNaN should signal invalid for feq. TEST_FP_CMP_OP_D( 8, feq.d, 0x00, 0, NaN, 0) TEST_FP_CMP_OP_D( 9, feq.d, 0x00, 0, NaN, NaN) TEST_FP_CMP_OP_D(10, feq.d, 0x10, 0, sNaN, 0) # qNaN should signal invalid for fle/flt. TEST_FP_CMP_OP_D(11, flt.d, 0x10, 0, NaN, 0) TEST_FP_CMP_OP_D(12, flt.d, 0x10, 0, NaN, NaN) TEST_FP_CMP_OP_D(13, flt.d, 0x10, 0, sNaN, 0) TEST_FP_CMP_OP_D(14, fle.d, 0x10, 0, NaN, 0) TEST_FP_CMP_OP_D(15, fle.d, 0x10, 0, NaN, NaN) TEST_FP_CMP_OP_D(16, fle.d, 0x10, 0, sNaN, 0) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
1,229
model/tests/riscv-tests/isa/rv64ud/fclass.S
# See LICENSE for license details. #***************************************************************************** # fclass.S #----------------------------------------------------------------------------- # # Test fclass.d instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #if __riscv_xlen == 32 # Replace the function with the 32-bit variant defined in test_macros.h #undef TEST_FCLASS_D #define TEST_FCLASS_D TEST_FCLASS_D32 #endif #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_FCLASS_D( 2, 1 << 0, 0xfff0000000000000 ) TEST_FCLASS_D( 3, 1 << 1, 0xbff0000000000000 ) TEST_FCLASS_D( 4, 1 << 2, 0x800fffffffffffff ) TEST_FCLASS_D( 5, 1 << 3, 0x8000000000000000 ) TEST_FCLASS_D( 6, 1 << 4, 0x0000000000000000 ) TEST_FCLASS_D( 7, 1 << 5, 0x000fffffffffffff ) TEST_FCLASS_D( 8, 1 << 6, 0x3ff0000000000000 ) TEST_FCLASS_D( 9, 1 << 7, 0x7ff0000000000000 ) TEST_FCLASS_D(10, 1 << 8, 0x7ff0000000000001 ) TEST_FCLASS_D(11, 1 << 9, 0x7ff8000000000000 ) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
1,876
model/tests/riscv-tests/isa/rv64ud/fcvt.S
# See LICENSE for license details. #***************************************************************************** # fcvt.S #----------------------------------------------------------------------------- # # Test fcvt.d.{wu|w|lu|l}, fcvt.s.d, and fcvt.d.s instructions. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #if __riscv_xlen == 32 # Replace the function with the 32-bit variant defined in test_macros.h #undef TEST_INT_FP_OP_D #define TEST_INT_FP_OP_D TEST_INT_FP_OP_D32 #undef TEST_FCVT_S_D #define TEST_FCVT_S_D TEST_FCVT_S_D32 #endif #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_INT_FP_OP_D(2, fcvt.d.w, 2.0, 2); TEST_INT_FP_OP_D(3, fcvt.d.w, -2.0, -2); TEST_INT_FP_OP_D(4, fcvt.d.wu, 2.0, 2); TEST_INT_FP_OP_D(5, fcvt.d.wu, 4294967294, -2); #if __riscv_xlen >= 64 TEST_INT_FP_OP_D(6, fcvt.d.l, 2.0, 2); TEST_INT_FP_OP_D(7, fcvt.d.l, -2.0, -2); TEST_INT_FP_OP_D(8, fcvt.d.lu, 2.0, 2); TEST_INT_FP_OP_D(9, fcvt.d.lu, 1.8446744073709552e19, -2); #endif TEST_FCVT_S_D(10, -1.5, -1.5) TEST_FCVT_D_S(11, -1.5, -1.5) #if __riscv_xlen >= 64 TEST_CASE(12, a0, 0x7ff8000000000000, la a1, test_data_22; ld a2, 0(a1); fmv.d.x f2, a2; fcvt.s.d f2, f2; fcvt.d.s f2, f2; fmv.x.d a0, f2; ) #else TEST_CASE_D32(12, a0, a1, 0x7ff8000000000000, la a1, test_data_22; fld f2, 0(a1); fcvt.s.d f2, f2; fcvt.d.s f2, f2; fsd f2, 0(a1); lw a0, 0(a1); lw a1, 4(a1) ) #endif TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA test_data_22: .dword 0x7ffcffffffff8004 RVTEST_DATA_END
lizhirui/DreamCore
4,731
model/tests/riscv-tests/isa/rv64ud/fcvt_w.S
# See LICENSE for license details. #***************************************************************************** # fcvt_w.S #----------------------------------------------------------------------------- # # Test fcvt{wu|w|lu|l}.d instructions. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_FP_INT_OP_D( 2, fcvt.w.d, 0x01, -1, -1.1, rtz); TEST_FP_INT_OP_D( 3, fcvt.w.d, 0x00, -1, -1.0, rtz); TEST_FP_INT_OP_D( 4, fcvt.w.d, 0x01, 0, -0.9, rtz); TEST_FP_INT_OP_D( 5, fcvt.w.d, 0x01, 0, 0.9, rtz); TEST_FP_INT_OP_D( 6, fcvt.w.d, 0x00, 1, 1.0, rtz); TEST_FP_INT_OP_D( 7, fcvt.w.d, 0x01, 1, 1.1, rtz); TEST_FP_INT_OP_D( 8, fcvt.w.d, 0x10, -1<<31, -3e9, rtz); TEST_FP_INT_OP_D( 9, fcvt.w.d, 0x10, (1<<31)-1, 3e9, rtz); TEST_FP_INT_OP_D(12, fcvt.wu.d, 0x10, 0, -3.0, rtz); TEST_FP_INT_OP_D(13, fcvt.wu.d, 0x10, 0, -1.0, rtz); TEST_FP_INT_OP_D(14, fcvt.wu.d, 0x01, 0, -0.9, rtz); TEST_FP_INT_OP_D(15, fcvt.wu.d, 0x01, 0, 0.9, rtz); TEST_FP_INT_OP_D(16, fcvt.wu.d, 0x00, 1, 1.0, rtz); TEST_FP_INT_OP_D(17, fcvt.wu.d, 0x01, 1, 1.1, rtz); TEST_FP_INT_OP_D(18, fcvt.wu.d, 0x10, 0, -3e9, rtz); TEST_FP_INT_OP_D(19, fcvt.wu.d, 0x00, 0xffffffffb2d05e00, 3e9, rtz); #if __riscv_xlen >= 64 TEST_FP_INT_OP_D(22, fcvt.l.d, 0x01, -1, -1.1, rtz); TEST_FP_INT_OP_D(23, fcvt.l.d, 0x00, -1, -1.0, rtz); TEST_FP_INT_OP_D(24, fcvt.l.d, 0x01, 0, -0.9, rtz); TEST_FP_INT_OP_D(25, fcvt.l.d, 0x01, 0, 0.9, rtz); TEST_FP_INT_OP_D(26, fcvt.l.d, 0x00, 1, 1.0, rtz); TEST_FP_INT_OP_D(27, fcvt.l.d, 0x01, 1, 1.1, rtz); TEST_FP_INT_OP_D(28, fcvt.l.d, 0x00,-3000000000, -3e9, rtz); TEST_FP_INT_OP_D(29, fcvt.l.d, 0x00, 3000000000, 3e9, rtz); TEST_FP_INT_OP_D(20, fcvt.l.d, 0x10, -1<<63,-3e19, rtz); TEST_FP_INT_OP_D(21, fcvt.l.d, 0x10, (1<<63)-1, 3e19, rtz); TEST_FP_INT_OP_D(32, fcvt.lu.d, 0x10, 0, -3.0, rtz); TEST_FP_INT_OP_D(33, fcvt.lu.d, 0x10, 0, -1.0, rtz); TEST_FP_INT_OP_D(34, fcvt.lu.d, 0x01, 0, -0.9, rtz); TEST_FP_INT_OP_D(35, fcvt.lu.d, 0x01, 0, 0.9, rtz); TEST_FP_INT_OP_D(36, fcvt.lu.d, 0x00, 1, 1.0, rtz); TEST_FP_INT_OP_D(37, fcvt.lu.d, 0x01, 1, 1.1, rtz); TEST_FP_INT_OP_D(38, fcvt.lu.d, 0x10, 0, -3e9, rtz); TEST_FP_INT_OP_D(39, fcvt.lu.d, 0x00, 3000000000, 3e9, rtz); #endif # test negative NaN, negative infinity conversion TEST_CASE(42, x1, 0x000000007fffffff, la x1, tdat_d; fld f1, 0(x1); fcvt.w.d x1, f1) #if __riscv_xlen >= 64 TEST_CASE(43, x1, 0x7fffffffffffffff, la x1, tdat_d; fld f1, 0(x1); fcvt.l.d x1, f1) #endif TEST_CASE(44, x1, 0xffffffff80000000, la x1, tdat_d; fld f1, 16(x1); fcvt.w.d x1, f1) #if __riscv_xlen >= 64 TEST_CASE(45, x1, 0x8000000000000000, la x1, tdat_d; fld f1, 16(x1); fcvt.l.d x1, f1) #endif # test positive NaN, positive infinity conversion TEST_CASE(52, x1, 0x000000007fffffff, la x1, tdat_d; fld f1, 8(x1); fcvt.w.d x1, f1) #if __riscv_xlen >= 64 TEST_CASE(53, x1, 0x7fffffffffffffff, la x1, tdat_d; fld f1, 8(x1); fcvt.l.d x1, f1) #endif TEST_CASE(54, x1, 0x000000007fffffff, la x1, tdat_d; fld f1, 24(x1); fcvt.w.d x1, f1) #if __riscv_xlen >= 64 TEST_CASE(55, x1, 0x7fffffffffffffff, la x1, tdat_d; fld f1, 24(x1); fcvt.l.d x1, f1) #endif # test NaN, infinity conversions to unsigned integer TEST_CASE(62, x1, 0xffffffffffffffff, la x1, tdat_d; fld f1, 0(x1); fcvt.wu.d x1, f1) TEST_CASE(63, x1, 0xffffffffffffffff, la x1, tdat_d; fld f1, 8(x1); fcvt.wu.d x1, f1) TEST_CASE(64, x1, 0, la x1, tdat_d; fld f1, 16(x1); fcvt.wu.d x1, f1) 
TEST_CASE(65, x1, 0xffffffffffffffff, la x1, tdat_d; fld f1, 24(x1); fcvt.wu.d x1, f1) #if __riscv_xlen >= 64 TEST_CASE(66, x1, 0xffffffffffffffff, la x1, tdat_d; fld f1, 0(x1); fcvt.lu.d x1, f1) TEST_CASE(67, x1, 0xffffffffffffffff, la x1, tdat_d; fld f1, 8(x1); fcvt.lu.d x1, f1) TEST_CASE(68, x1, 0, la x1, tdat_d; fld f1, 16(x1); fcvt.lu.d x1, f1) TEST_CASE(69, x1, 0xffffffffffffffff, la x1, tdat_d; fld f1, 24(x1); fcvt.lu.d x1, f1) #endif TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA # -NaN, NaN, -inf, +inf tdat: .word 0xffffffff .word 0x7fffffff .word 0xff800000 .word 0x7f800000 tdat_d: .dword 0xffffffffffffffff .dword 0x7fffffffffffffff .dword 0xfff0000000000000 .dword 0x7ff0000000000000 RVTEST_DATA_END
lizhirui/DreamCore
1,553
model/tests/riscv-tests/isa/rv64ud/fadd.S
# See LICENSE for license details. #***************************************************************************** # fadd.S #----------------------------------------------------------------------------- # # Test f{add|sub|mul}.d instructions. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64UF RVTEST_CODE_BEGIN #if __riscv_xlen == 32 # Replace the function with the 32-bit variant defined in test_macros.h #undef TEST_FP_OP2_D #define TEST_FP_OP2_D TEST_FP_OP2_D32 #endif #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_FP_OP2_D( 2, fadd.d, 0, 3.5, 2.5, 1.0 ); TEST_FP_OP2_D( 3, fadd.d, 1, -1234, -1235.1, 1.1 ); TEST_FP_OP2_D( 4, fadd.d, 1, 3.14159266, 3.14159265, 0.00000001 ); TEST_FP_OP2_D( 5, fsub.d, 0, 1.5, 2.5, 1.0 ); TEST_FP_OP2_D( 6, fsub.d, 1, -1234, -1235.1, -1.1 ); TEST_FP_OP2_D( 7, fsub.d, 1, 3.1415926400000001, 3.14159265, 0.00000001 ); TEST_FP_OP2_D( 8, fmul.d, 0, 2.5, 2.5, 1.0 ); TEST_FP_OP2_D( 9, fmul.d, 1, 1358.61, -1235.1, -1.1 ); TEST_FP_OP2_D(10, fmul.d, 1, 3.14159265e-8, 3.14159265, 0.00000001 ); # Is the canonical NaN generated for Inf - Inf? TEST_FP_OP2_D(11, fsub.d, 0x10, qNaN, Inf, Inf); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,953
model/tests/riscv-tests/isa/rv64ssvnapot/napot.S
# See LICENSE for license details. #***************************************************************************** # napot.S #----------------------------------------------------------------------------- # # Test Svnapot # #include "riscv_test.h" #include "test_macros.h" #if (DRAM_BASE >> 30 << 30) != DRAM_BASE # error This test requires DRAM_BASE be SV39 superpage-aligned #endif #if __riscv_xlen != 64 # error This test requires RV64 #endif RVTEST_RV64M RVTEST_CODE_BEGIN # Construct the page table #define MY_VA 0x40201010 # VPN 2 == VPN 1 == VPN 0 == 0x1 # Page offset == 0x10 #### # Level 0 PTE contents # PPN la a0, my_data srl a0, a0, 12 # adjust the PPN to be in NAPOT form li a1, ~0xF and a0, a0, a1 ori a0, a0, 0x8 # attributes sll a0, a0, PTE_PPN_SHIFT li a1, PTE_V | PTE_U | PTE_R | PTE_W | PTE_X | PTE_A | PTE_D | PTE_N or a0, a0, a1 # Level 0 PTE address la a1, page_table addi a1, a1, ((MY_VA >> 12) & 0x1FF) * 8 # Level 0 PTE store sd a0, (a1) #### # Level 1 PTE contents la a0, page_table srl a0, a0, 12 sll a0, a0, PTE_PPN_SHIFT li a1, PTE_V or a0, a0, a1 # Level 1 PTE address la a1, page_table addi a1, a1, ((MY_VA >> 21) & 0x1FF) * 8 li a2, 1 << 12 add a1, a1, a2 # Level 1 PTE store sd a0, (a1) #### # Level 2 PTE contents la a0, page_table li a1, 1 << 12 add a0, a0, a1 srl a0, a0, 12 sll a0, a0, PTE_PPN_SHIFT li a1, PTE_V or a0, a0, a1 # Level 2 PTE address la a1, page_table addi a1, a1, ((MY_VA >> 30) & 0x1FF) * 8 li a2, 2 << 12 add a1, a1, a2 # Level 2 PTE store sd a0, (a1) #### # Do a load from the PA that would be written if the PTE were misinterpreted as non-NAPOT la a0, my_data li a1, ~0xFFFF and a0, a0, a1 li a1, 0x8000 | (MY_VA & 0xFFF) or a3, a0, a1 li a1, 0 sw a1, (a3) #### li TESTNUM, 1 ## Turn on VM la a1, page_table li a2, 2 << 12 add a1, a1, a2 srl a1, a1, 12 li a0, (SATP_MODE & ~(SATP_MODE<<1)) * SATP_MODE_SV39 or a0, a0, a1 csrw satp, a0 sfence.vma # Set up MPRV with MPP=S and SUM=1, so loads and stores use S-mode and S can access U pages li a1, ((MSTATUS_MPP & ~(MSTATUS_MPP<<1)) * PRV_S) | MSTATUS_MPRV | MSTATUS_SUM csrs mstatus, a1 # Do a store to MY_VA li a0, MY_VA li a1, 42 sw a1, (a0) # Clear MPRV li a1, MSTATUS_MPRV csrc mstatus, a1 # Do a load from the PA that would be written if the PTE were misinterpreted as non-NAPOT lw a1, (a3) # Check the result li a0, 42 beq a1, a0, die # Do a load from the PA for MY_VA la a0, my_data li a1, MY_VA & 0xFFFF add a0, a0, a1 lw a1, (a0) li a2, 42 # Check the result bne a1, a2, die #### RVTEST_PASS TEST_PASSFAIL .align 2 .global mtvec_handler mtvec_handler: die: RVTEST_FAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA .align 20 page_table: .dword 0 .align 20 my_data: .dword 0 RVTEST_DATA_END
lizhirui/DreamCore
3,981
model/tests/riscv-tests/isa/rv64uc/rvc.S
# See LICENSE for license details. #***************************************************************************** # rvc.S #----------------------------------------------------------------------------- # # Test RVC corner cases. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN .align 2 .option push .option norvc #define RVC_TEST_CASE(n, r, v, code...) \ TEST_CASE (n, r, v, .option push; .option rvc; code; .align 2; .option pop) // Make sure fetching a 4-byte instruction across a page boundary works. li TESTNUM, 2 li a1, 666 TEST_CASE (2, a1, 667, \ j 1f; \ .align 3; \ data: \ .dword 0xfedcba9876543210; \ .dword 0xfedcba9876543210; \ .align 12; \ .skip 4094; \ 1: addi a1, a1, 1) li sp, 0x1234 RVC_TEST_CASE (3, a0, 0x1234 + 1020, c.addi4spn a0, sp, 1020) RVC_TEST_CASE (4, sp, 0x1234 + 496, c.addi16sp sp, 496) RVC_TEST_CASE (5, sp, 0x1234 + 496 - 512, c.addi16sp sp, -512) la a1, data RVC_TEST_CASE (6, a2, 0xfffffffffedcba99, c.lw a0, 4(a1); addi a0, a0, 1; c.sw a0, 4(a1); c.lw a2, 4(a1)) #if __riscv_xlen == 64 RVC_TEST_CASE (7, a2, 0xfedcba9976543211, c.ld a0, 0(a1); addi a0, a0, 1; c.sd a0, 0(a1); c.ld a2, 0(a1)) #endif RVC_TEST_CASE (8, a0, -15, ori a0, x0, 1; c.addi a0, -16) RVC_TEST_CASE (9, a5, -16, ori a5, x0, 1; c.li a5, -16) #if __riscv_xlen == 64 RVC_TEST_CASE (10, a0, 0x76543210, ld a0, (a1); c.addiw a0, -1) #endif RVC_TEST_CASE (11, s0, 0xffffffffffffffe1, c.lui s0, 0xfffe1; c.srai s0, 12) #if __riscv_xlen == 64 RVC_TEST_CASE (12, s0, 0x000fffffffffffe1, c.lui s0, 0xfffe1; c.srli s0, 12) #else RVC_TEST_CASE (12, s0, 0x000fffe1, c.lui s0, 0xfffe1; c.srli s0, 12) #endif RVC_TEST_CASE (14, s0, ~0x11, c.li s0, -2; c.andi s0, ~0x10) RVC_TEST_CASE (15, s1, 14, li s1, 20; li a0, 6; c.sub s1, a0) RVC_TEST_CASE (16, s1, 18, li s1, 20; li a0, 6; c.xor s1, a0) RVC_TEST_CASE (17, s1, 22, li s1, 20; li a0, 6; c.or s1, a0) RVC_TEST_CASE (18, s1, 4, li s1, 20; li a0, 6; c.and s1, a0) #if __riscv_xlen == 64 RVC_TEST_CASE (19, s1, 0xffffffff80000000, li s1, 0x7fffffff; li a0, -1; c.subw s1, a0) RVC_TEST_CASE (20, s1, 0xffffffff80000000, li s1, 0x7fffffff; li a0, 1; c.addw s1, a0) #endif RVC_TEST_CASE (21, s0, 0x12340, li s0, 0x1234; c.slli s0, 4) RVC_TEST_CASE (30, ra, 0, \ li ra, 0; \ c.j 1f; \ c.j 2f; \ 1:c.j 1f; \ 2:j fail; \ 1:) RVC_TEST_CASE (31, x0, 0, \ li a0, 0; \ c.beqz a0, 1f; \ c.j 2f; \ 1:c.j 1f; \ 2:j fail; \ 1:) RVC_TEST_CASE (32, x0, 0, \ li a0, 1; \ c.bnez a0, 1f; \ c.j 2f; \ 1:c.j 1f; \ 2:j fail; \ 1:) RVC_TEST_CASE (33, x0, 0, \ li a0, 1; \ c.beqz a0, 1f; \ c.j 2f; \ 1:c.j fail; \ 2:) RVC_TEST_CASE (34, x0, 0, \ li a0, 0; \ c.bnez a0, 1f; \ c.j 2f; \ 1:c.j fail; \ 2:) RVC_TEST_CASE (35, ra, 0, \ la t0, 1f; \ li ra, 0; \ c.jr t0; \ c.j 2f; \ 1:c.j 1f; \ 2:j fail; \ 1:) RVC_TEST_CASE (36, ra, -2, \ la t0, 1f; \ li ra, 0; \ c.jalr t0; \ c.j 2f; \ 1:c.j 1f; \ 2:j fail; \ 1:sub ra, ra, t0) #if __riscv_xlen == 32 RVC_TEST_CASE (37, ra, -2, \ la t0, 1f; \ li ra, 0; \ c.jal 1f; \ c.j 2f; \ 1:c.j 1f; \ 2:j fail; \ 1:sub ra, ra, t0) #endif la sp, data RVC_TEST_CASE (40, a2, 0xfffffffffedcba99, c.lwsp a0, 12(sp); addi a0, a0, 1; c.swsp a0, 12(sp); c.lwsp a2, 12(sp)) #if __riscv_xlen == 64 RVC_TEST_CASE (41, a2, 0xfedcba9976543211, c.ldsp a0, 8(sp); addi a0, a0, 1; c.sdsp a0, 8(sp); c.ldsp a2, 8(sp)) #endif RVC_TEST_CASE (42, t0, 0x246, li a0, 0x123; c.mv t0, a0; c.add t0, a0) .option pop TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN RVTEST_DATA_END
lizhirui/DreamCore
2,651
model/tests/riscv-tests/isa/rv64ui/xor.S
# See LICENSE for license details. #***************************************************************************** # xor.S #----------------------------------------------------------------------------- # # Test xor instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Logical tests #------------------------------------------------------------- TEST_RR_OP( 2, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_OP( 3, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_OP( 4, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_OP( 5, xor, 0x00ff00ff, 0xf00ff00f, 0xf0f0f0f0 ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 6, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC2_EQ_DEST( 7, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC12_EQ_DEST( 8, xor, 0x00000000, 0xff00ff00 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 9, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_DEST_BYPASS( 10, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_DEST_BYPASS( 11, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 12, 0, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 13, 0, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC12_BYPASS( 14, 0, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 15, 1, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 16, 1, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC12_BYPASS( 17, 2, 0, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 18, 0, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 19, 0, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC21_BYPASS( 20, 0, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 21, 1, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 22, 1, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC21_BYPASS( 23, 2, 0, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_ZEROSRC1( 24, xor, 0xff00ff00, 0xff00ff00 ); TEST_RR_ZEROSRC2( 25, xor, 0x00ff00ff, 0x00ff00ff ); TEST_RR_ZEROSRC12( 26, xor, 0 ); TEST_RR_ZERODEST( 27, xor, 0x11111111, 0x22222222 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
3,122
model/tests/riscv-tests/isa/rv64ui/sub.S
# See LICENSE for license details. #***************************************************************************** # sub.S #----------------------------------------------------------------------------- # # Test sub instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_RR_OP( 2, sub, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 ); TEST_RR_OP( 3, sub, 0x0000000000000000, 0x0000000000000001, 0x0000000000000001 ); TEST_RR_OP( 4, sub, 0xfffffffffffffffc, 0x0000000000000003, 0x0000000000000007 ); TEST_RR_OP( 5, sub, 0x0000000000008000, 0x0000000000000000, 0xffffffffffff8000 ); TEST_RR_OP( 6, sub, 0xffffffff80000000, 0xffffffff80000000, 0x0000000000000000 ); TEST_RR_OP( 7, sub, 0xffffffff80008000, 0xffffffff80000000, 0xffffffffffff8000 ); TEST_RR_OP( 8, sub, 0xffffffffffff8001, 0x0000000000000000, 0x0000000000007fff ); TEST_RR_OP( 9, sub, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 ); TEST_RR_OP( 10, sub, 0x000000007fff8000, 0x000000007fffffff, 0x0000000000007fff ); TEST_RR_OP( 11, sub, 0xffffffff7fff8001, 0xffffffff80000000, 0x0000000000007fff ); TEST_RR_OP( 12, sub, 0x0000000080007fff, 0x000000007fffffff, 0xffffffffffff8000 ); TEST_RR_OP( 13, sub, 0x0000000000000001, 0x0000000000000000, 0xffffffffffffffff ); TEST_RR_OP( 14, sub, 0xfffffffffffffffe, 0xffffffffffffffff, 0x0000000000000001 ); TEST_RR_OP( 15, sub, 0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 16, sub, 2, 13, 11 ); TEST_RR_SRC2_EQ_DEST( 17, sub, 3, 14, 11 ); TEST_RR_SRC12_EQ_DEST( 18, sub, 0, 13 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 19, 0, sub, 2, 13, 11 ); TEST_RR_DEST_BYPASS( 20, 1, sub, 3, 14, 11 ); TEST_RR_DEST_BYPASS( 21, 2, sub, 4, 15, 11 ); TEST_RR_SRC12_BYPASS( 22, 0, 0, sub, 2, 13, 11 ); TEST_RR_SRC12_BYPASS( 23, 0, 1, sub, 3, 14, 11 ); TEST_RR_SRC12_BYPASS( 24, 0, 2, sub, 4, 15, 11 ); TEST_RR_SRC12_BYPASS( 25, 1, 0, sub, 2, 13, 11 ); TEST_RR_SRC12_BYPASS( 26, 1, 1, sub, 3, 14, 11 ); TEST_RR_SRC12_BYPASS( 27, 2, 0, sub, 4, 15, 11 ); TEST_RR_SRC21_BYPASS( 28, 0, 0, sub, 2, 13, 11 ); TEST_RR_SRC21_BYPASS( 29, 0, 1, sub, 3, 14, 11 ); TEST_RR_SRC21_BYPASS( 30, 0, 2, sub, 4, 15, 11 ); TEST_RR_SRC21_BYPASS( 31, 1, 0, sub, 2, 13, 11 ); TEST_RR_SRC21_BYPASS( 32, 1, 1, sub, 3, 14, 11 ); TEST_RR_SRC21_BYPASS( 33, 2, 0, sub, 4, 15, 11 ); TEST_RR_ZEROSRC1( 34, sub, 15, -15 ); TEST_RR_ZEROSRC2( 35, sub, 32, 32 ); TEST_RR_ZEROSRC12( 36, sub, 0 ); TEST_RR_ZERODEST( 37, sub, 16, 30 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,308
model/tests/riscv-tests/isa/rv64ui/lw.S
# See LICENSE for license details. #***************************************************************************** # lw.S #----------------------------------------------------------------------------- # # Test lw instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Basic tests #------------------------------------------------------------- TEST_LD_OP( 2, lw, 0x0000000000ff00ff, 0, tdat ); TEST_LD_OP( 3, lw, 0xffffffffff00ff00, 4, tdat ); TEST_LD_OP( 4, lw, 0x000000000ff00ff0, 8, tdat ); TEST_LD_OP( 5, lw, 0xfffffffff00ff00f, 12, tdat ); # Test with negative offset TEST_LD_OP( 6, lw, 0x0000000000ff00ff, -12, tdat4 ); TEST_LD_OP( 7, lw, 0xffffffffff00ff00, -8, tdat4 ); TEST_LD_OP( 8, lw, 0x000000000ff00ff0, -4, tdat4 ); TEST_LD_OP( 9, lw, 0xfffffffff00ff00f, 0, tdat4 ); # Test with a negative base TEST_CASE( 10, x5, 0x0000000000ff00ff, \ la x1, tdat; \ addi x1, x1, -32; \ lw x5, 32(x1); \ ) # Test with unaligned base TEST_CASE( 11, x5, 0xffffffffff00ff00, \ la x1, tdat; \ addi x1, x1, -3; \ lw x5, 7(x1); \ ) #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_LD_DEST_BYPASS( 12, 0, lw, 0x000000000ff00ff0, 4, tdat2 ); TEST_LD_DEST_BYPASS( 13, 1, lw, 0xfffffffff00ff00f, 4, tdat3 ); TEST_LD_DEST_BYPASS( 14, 2, lw, 0xffffffffff00ff00, 4, tdat1 ); TEST_LD_SRC1_BYPASS( 15, 0, lw, 0x000000000ff00ff0, 4, tdat2 ); TEST_LD_SRC1_BYPASS( 16, 1, lw, 0xfffffffff00ff00f, 4, tdat3 ); TEST_LD_SRC1_BYPASS( 17, 2, lw, 0xffffffffff00ff00, 4, tdat1 ); #------------------------------------------------------------- # Test write-after-write hazard #------------------------------------------------------------- TEST_CASE( 18, x2, 2, \ la x5, tdat; \ lw x2, 0(x5); \ li x2, 2; \ ) TEST_CASE( 19, x2, 2, \ la x5, tdat; \ lw x2, 0(x5); \ nop; \ li x2, 2; \ ) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: tdat1: .word 0x00ff00ff tdat2: .word 0xff00ff00 tdat3: .word 0x0ff00ff0 tdat4: .word 0xf00ff00f RVTEST_DATA_END
lizhirui/DreamCore
2,748
model/tests/riscv-tests/isa/rv64ui/sltu.S
# See LICENSE for license details. #***************************************************************************** # sltu.S #----------------------------------------------------------------------------- # # Test sltu instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_RR_OP( 2, sltu, 0, 0x00000000, 0x00000000 ); TEST_RR_OP( 3, sltu, 0, 0x00000001, 0x00000001 ); TEST_RR_OP( 4, sltu, 1, 0x00000003, 0x00000007 ); TEST_RR_OP( 5, sltu, 0, 0x00000007, 0x00000003 ); TEST_RR_OP( 6, sltu, 1, 0x00000000, 0xffff8000 ); TEST_RR_OP( 7, sltu, 0, 0x80000000, 0x00000000 ); TEST_RR_OP( 8, sltu, 1, 0x80000000, 0xffff8000 ); TEST_RR_OP( 9, sltu, 1, 0x00000000, 0x00007fff ); TEST_RR_OP( 10, sltu, 0, 0x7fffffff, 0x00000000 ); TEST_RR_OP( 11, sltu, 0, 0x7fffffff, 0x00007fff ); TEST_RR_OP( 12, sltu, 0, 0x80000000, 0x00007fff ); TEST_RR_OP( 13, sltu, 1, 0x7fffffff, 0xffff8000 ); TEST_RR_OP( 14, sltu, 1, 0x00000000, 0xffffffff ); TEST_RR_OP( 15, sltu, 0, 0xffffffff, 0x00000001 ); TEST_RR_OP( 16, sltu, 0, 0xffffffff, 0xffffffff ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 17, sltu, 0, 14, 13 ); TEST_RR_SRC2_EQ_DEST( 18, sltu, 1, 11, 13 ); TEST_RR_SRC12_EQ_DEST( 19, sltu, 0, 13 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 20, 0, sltu, 1, 11, 13 ); TEST_RR_DEST_BYPASS( 21, 1, sltu, 0, 14, 13 ); TEST_RR_DEST_BYPASS( 22, 2, sltu, 1, 12, 13 ); TEST_RR_SRC12_BYPASS( 23, 0, 0, sltu, 0, 14, 13 ); TEST_RR_SRC12_BYPASS( 24, 0, 1, sltu, 1, 11, 13 ); TEST_RR_SRC12_BYPASS( 25, 0, 2, sltu, 0, 15, 13 ); TEST_RR_SRC12_BYPASS( 26, 1, 0, sltu, 1, 10, 13 ); TEST_RR_SRC12_BYPASS( 27, 1, 1, sltu, 0, 16, 13 ); TEST_RR_SRC12_BYPASS( 28, 2, 0, sltu, 1, 9, 13 ); TEST_RR_SRC21_BYPASS( 29, 0, 0, sltu, 0, 17, 13 ); TEST_RR_SRC21_BYPASS( 30, 0, 1, sltu, 1, 8, 13 ); TEST_RR_SRC21_BYPASS( 31, 0, 2, sltu, 0, 18, 13 ); TEST_RR_SRC21_BYPASS( 32, 1, 0, sltu, 1, 7, 13 ); TEST_RR_SRC21_BYPASS( 33, 1, 1, sltu, 0, 19, 13 ); TEST_RR_SRC21_BYPASS( 34, 2, 0, sltu, 1, 6, 13 ); TEST_RR_ZEROSRC1( 35, sltu, 1, -1 ); TEST_RR_ZEROSRC2( 36, sltu, 0, -1 ); TEST_RR_ZEROSRC12( 37, sltu, 0 ); TEST_RR_ZERODEST( 38, sltu, 16, 30 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,538
model/tests/riscv-tests/isa/rv64ui/bgeu.S
# See LICENSE for license details. #***************************************************************************** # bgeu.S #----------------------------------------------------------------------------- # # Test bgeu instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Branch tests #------------------------------------------------------------- # Each test checks both forward and backward branches TEST_BR2_OP_TAKEN( 2, bgeu, 0x00000000, 0x00000000 ); TEST_BR2_OP_TAKEN( 3, bgeu, 0x00000001, 0x00000001 ); TEST_BR2_OP_TAKEN( 4, bgeu, 0xffffffff, 0xffffffff ); TEST_BR2_OP_TAKEN( 5, bgeu, 0x00000001, 0x00000000 ); TEST_BR2_OP_TAKEN( 6, bgeu, 0xffffffff, 0xfffffffe ); TEST_BR2_OP_TAKEN( 7, bgeu, 0xffffffff, 0x00000000 ); TEST_BR2_OP_NOTTAKEN( 8, bgeu, 0x00000000, 0x00000001 ); TEST_BR2_OP_NOTTAKEN( 9, bgeu, 0xfffffffe, 0xffffffff ); TEST_BR2_OP_NOTTAKEN( 10, bgeu, 0x00000000, 0xffffffff ); TEST_BR2_OP_NOTTAKEN( 11, bgeu, 0x7fffffff, 0x80000000 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_BR2_SRC12_BYPASS( 12, 0, 0, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 13, 0, 1, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 14, 0, 2, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 15, 1, 0, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 16, 1, 1, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 17, 2, 0, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 18, 0, 0, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 19, 0, 1, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 20, 0, 2, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 21, 1, 0, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 22, 1, 1, bgeu, 0xefffffff, 0xf0000000 ); TEST_BR2_SRC12_BYPASS( 23, 2, 0, bgeu, 0xefffffff, 0xf0000000 ); #------------------------------------------------------------- # Test delay slot instructions not executed nor bypassed #------------------------------------------------------------- TEST_CASE( 24, x1, 3, \ li x1, 1; \ bgeu x1, x0, 1f; \ addi x1, x1, 1; \ addi x1, x1, 1; \ addi x1, x1, 1; \ addi x1, x1, 1; \ 1: addi x1, x1, 1; \ addi x1, x1, 1; \ ) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,175
model/tests/riscv-tests/isa/rv64ui/slti.S
# See LICENSE for license details. #***************************************************************************** # slti.S #----------------------------------------------------------------------------- # # Test slti instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_IMM_OP( 2, slti, 0, 0x0000000000000000, 0x000 ); TEST_IMM_OP( 3, slti, 0, 0x0000000000000001, 0x001 ); TEST_IMM_OP( 4, slti, 1, 0x0000000000000003, 0x007 ); TEST_IMM_OP( 5, slti, 0, 0x0000000000000007, 0x003 ); TEST_IMM_OP( 6, slti, 0, 0x0000000000000000, 0x800 ); TEST_IMM_OP( 7, slti, 1, 0xffffffff80000000, 0x000 ); TEST_IMM_OP( 8, slti, 1, 0xffffffff80000000, 0x800 ); TEST_IMM_OP( 9, slti, 1, 0x0000000000000000, 0x7ff ); TEST_IMM_OP( 10, slti, 0, 0x000000007fffffff, 0x000 ); TEST_IMM_OP( 11, slti, 0, 0x000000007fffffff, 0x7ff ); TEST_IMM_OP( 12, slti, 1, 0xffffffff80000000, 0x7ff ); TEST_IMM_OP( 13, slti, 0, 0x000000007fffffff, 0x800 ); TEST_IMM_OP( 14, slti, 0, 0x0000000000000000, 0xfff ); TEST_IMM_OP( 15, slti, 1, 0xffffffffffffffff, 0x001 ); TEST_IMM_OP( 16, slti, 0, 0xffffffffffffffff, 0xfff ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_IMM_SRC1_EQ_DEST( 17, slti, 1, 11, 13 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_IMM_DEST_BYPASS( 18, 0, slti, 0, 15, 10 ); TEST_IMM_DEST_BYPASS( 19, 1, slti, 1, 10, 16 ); TEST_IMM_DEST_BYPASS( 20, 2, slti, 0, 16, 9 ); TEST_IMM_SRC1_BYPASS( 21, 0, slti, 1, 11, 15 ); TEST_IMM_SRC1_BYPASS( 22, 1, slti, 0, 17, 8 ); TEST_IMM_SRC1_BYPASS( 23, 2, slti, 1, 12, 14 ); TEST_IMM_ZEROSRC1( 24, slti, 0, 0xfff ); TEST_IMM_ZERODEST( 25, slti, 0x00ff00ff, 0xfff ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
4,229
model/tests/riscv-tests/isa/rv64ui/sll.S
# See LICENSE for license details.

#*****************************************************************************
# sll.S
#-----------------------------------------------------------------------------
#
# Test sll instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------

TEST_RR_OP( 2, sll, 0x0000000000000001, 0x0000000000000001, 0 );
TEST_RR_OP( 3, sll, 0x0000000000000002, 0x0000000000000001, 1 );
TEST_RR_OP( 4, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_OP( 5, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_OP( 6, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_OP( 7, sll, 0xffffffffffffffff, 0xffffffffffffffff, 0 );
TEST_RR_OP( 8, sll, 0xfffffffffffffffe, 0xffffffffffffffff, 1 );
TEST_RR_OP( 9, sll, 0xffffffffffffff80, 0xffffffffffffffff, 7 );
TEST_RR_OP( 10, sll, 0xffffffffffffc000, 0xffffffffffffffff, 14 );
TEST_RR_OP( 11, sll, 0xffffffff80000000, 0xffffffffffffffff, 31 );
TEST_RR_OP( 12, sll, 0x0000000021212121, 0x0000000021212121, 0 );
TEST_RR_OP( 13, sll, 0x0000000042424242, 0x0000000021212121, 1 );
TEST_RR_OP( 14, sll, 0x0000001090909080, 0x0000000021212121, 7 );
TEST_RR_OP( 15, sll, 0x0000084848484000, 0x0000000021212121, 14 );
TEST_RR_OP( 16, sll, 0x1090909080000000, 0x0000000021212121, 31 );

# Verify that shifts only use bottom six(rv64) or five(rv32) bits

TEST_RR_OP( 17, sll, 0x0000000021212121, 0x0000000021212121, 0xffffffffffffffc0 );
TEST_RR_OP( 18, sll, 0x0000000042424242, 0x0000000021212121, 0xffffffffffffffc1 );
TEST_RR_OP( 19, sll, 0x0000001090909080, 0x0000000021212121, 0xffffffffffffffc7 );
TEST_RR_OP( 20, sll, 0x0000084848484000, 0x0000000021212121, 0xffffffffffffffce );

#if __riscv_xlen == 64
TEST_RR_OP( 21, sll, 0x8000000000000000, 0x0000000021212121, 0xffffffffffffffff );
TEST_RR_OP( 50, sll, 0x8000000000000000, 0x0000000000000001, 63 );
TEST_RR_OP( 51, sll, 0xffffff8000000000, 0xffffffffffffffff, 39 );
TEST_RR_OP( 52, sll, 0x0909080000000000, 0x0000000021212121, 43 );
#endif

#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------

TEST_RR_SRC1_EQ_DEST( 22, sll, 0x00000080, 0x00000001, 7 );
TEST_RR_SRC2_EQ_DEST( 23, sll, 0x00004000, 0x00000001, 14 );
TEST_RR_SRC12_EQ_DEST( 24, sll, 24, 3 );

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_RR_DEST_BYPASS( 25, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_DEST_BYPASS( 26, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_DEST_BYPASS( 27, 2, sll, 0x0000000080000000, 0x0000000000000001, 31 );

TEST_RR_SRC12_BYPASS( 28, 0, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC12_BYPASS( 29, 0, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC12_BYPASS( 30, 0, 2, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_SRC12_BYPASS( 31, 1, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC12_BYPASS( 32, 1, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC12_BYPASS( 33, 2, 0, sll, 0x0000000080000000, 0x0000000000000001, 31 );

TEST_RR_SRC21_BYPASS( 34, 0, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC21_BYPASS( 35, 0, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC21_BYPASS( 36, 0, 2, sll, 0x0000000080000000, 0x0000000000000001, 31 );
TEST_RR_SRC21_BYPASS( 37, 1, 0, sll, 0x0000000000000080, 0x0000000000000001, 7 );
TEST_RR_SRC21_BYPASS( 38, 1, 1, sll, 0x0000000000004000, 0x0000000000000001, 14 );
TEST_RR_SRC21_BYPASS( 39, 2, 0, sll, 0x0000000080000000, 0x0000000000000001, 31 );

TEST_RR_ZEROSRC1( 40, sll, 0, 15 );
TEST_RR_ZEROSRC2( 41, sll, 32, 32 );
TEST_RR_ZEROSRC12( 42, sll, 0 );
TEST_RR_ZERODEST( 43, sll, 1024, 2048 );

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

RVTEST_DATA_END
lizhirui/DreamCore
4,389
model/tests/riscv-tests/isa/rv64ui/sraw.S
# See LICENSE for license details. #***************************************************************************** # sraw.S #----------------------------------------------------------------------------- # # Test sraw instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_RR_OP( 2, sraw, 0xffffffff80000000, 0xffffffff80000000, 0 ); TEST_RR_OP( 3, sraw, 0xffffffffc0000000, 0xffffffff80000000, 1 ); TEST_RR_OP( 4, sraw, 0xffffffffff000000, 0xffffffff80000000, 7 ); TEST_RR_OP( 5, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_OP( 6, sraw, 0xffffffffffffffff, 0xffffffff80000001, 31 ); TEST_RR_OP( 7, sraw, 0x000000007fffffff, 0x000000007fffffff, 0 ); TEST_RR_OP( 8, sraw, 0x000000003fffffff, 0x000000007fffffff, 1 ); TEST_RR_OP( 9, sraw, 0x0000000000ffffff, 0x000000007fffffff, 7 ); TEST_RR_OP( 10, sraw, 0x000000000001ffff, 0x000000007fffffff, 14 ); TEST_RR_OP( 11, sraw, 0x0000000000000000, 0x000000007fffffff, 31 ); TEST_RR_OP( 12, sraw, 0xffffffff81818181, 0xffffffff81818181, 0 ); TEST_RR_OP( 13, sraw, 0xffffffffc0c0c0c0, 0xffffffff81818181, 1 ); TEST_RR_OP( 14, sraw, 0xffffffffff030303, 0xffffffff81818181, 7 ); TEST_RR_OP( 15, sraw, 0xfffffffffffe0606, 0xffffffff81818181, 14 ); TEST_RR_OP( 16, sraw, 0xffffffffffffffff, 0xffffffff81818181, 31 ); # Verify that shifts only use bottom five bits TEST_RR_OP( 17, sraw, 0xffffffff81818181, 0xffffffff81818181, 0xffffffffffffffe0 ); TEST_RR_OP( 18, sraw, 0xffffffffc0c0c0c0, 0xffffffff81818181, 0xffffffffffffffe1 ); TEST_RR_OP( 19, sraw, 0xffffffffff030303, 0xffffffff81818181, 0xffffffffffffffe7 ); TEST_RR_OP( 20, sraw, 0xfffffffffffe0606, 0xffffffff81818181, 0xffffffffffffffee ); TEST_RR_OP( 21, sraw, 0xffffffffffffffff, 0xffffffff81818181, 0xffffffffffffffff ); # Verify that shifts ignore top 32 (using true 64-bit values) TEST_RR_OP( 44, sraw, 0x0000000012345678, 0xffffffff12345678, 0 ); TEST_RR_OP( 45, sraw, 0x0000000001234567, 0xffffffff12345678, 4 ); TEST_RR_OP( 46, sraw, 0xffffffff92345678, 0x0000000092345678, 0 ); TEST_RR_OP( 47, sraw, 0xfffffffff9234567, 0x0000000092345678, 4 ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 22, sraw, 0xffffffffff000000, 0xffffffff80000000, 7 ); TEST_RR_SRC2_EQ_DEST( 23, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_SRC12_EQ_DEST( 24, sraw, 0, 7 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 25, 0, sraw, 0xffffffffff000000, 0xffffffff80000000, 7 ); TEST_RR_DEST_BYPASS( 26, 1, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_DEST_BYPASS( 27, 2, sraw, 0xffffffffffffffff, 0xffffffff80000000, 31 ); TEST_RR_SRC12_BYPASS( 28, 0, 0, sraw, 0xffffffffff000000, 0xffffffff80000000, 7 ); TEST_RR_SRC12_BYPASS( 29, 0, 1, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_SRC12_BYPASS( 30, 0, 2, sraw, 0xffffffffffffffff, 0xffffffff80000000, 31 ); TEST_RR_SRC12_BYPASS( 31, 1, 0, sraw, 0xffffffffff000000, 0xffffffff80000000, 7 ); TEST_RR_SRC12_BYPASS( 32, 1, 1, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_SRC12_BYPASS( 33, 2, 0, sraw, 0xffffffffffffffff, 0xffffffff80000000, 31 ); TEST_RR_SRC21_BYPASS( 34, 0, 0, sraw, 0xffffffffff000000, 
0xffffffff80000000, 7 ); TEST_RR_SRC21_BYPASS( 35, 0, 1, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_SRC21_BYPASS( 36, 0, 2, sraw, 0xffffffffffffffff, 0xffffffff80000000, 31 ); TEST_RR_SRC21_BYPASS( 37, 1, 0, sraw, 0xffffffffff000000, 0xffffffff80000000, 7 ); TEST_RR_SRC21_BYPASS( 38, 1, 1, sraw, 0xfffffffffffe0000, 0xffffffff80000000, 14 ); TEST_RR_SRC21_BYPASS( 39, 2, 0, sraw, 0xffffffffffffffff, 0xffffffff80000000, 31 ); TEST_RR_ZEROSRC1( 40, sraw, 0, 15 ); TEST_RR_ZEROSRC2( 41, sraw, 32, 32 ); TEST_RR_ZEROSRC12( 42, sraw, 0 ); TEST_RR_ZERODEST( 43, sraw, 1024, 2048 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,345
model/tests/riscv-tests/isa/rv64ui/addi.S
# See LICENSE for license details.

#*****************************************************************************
# addi.S
#-----------------------------------------------------------------------------
#
# Test addi instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------

TEST_IMM_OP( 2, addi, 0x00000000, 0x00000000, 0x000 );
TEST_IMM_OP( 3, addi, 0x00000002, 0x00000001, 0x001 );
TEST_IMM_OP( 4, addi, 0x0000000a, 0x00000003, 0x007 );
TEST_IMM_OP( 5, addi, 0xfffffffffffff800, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 6, addi, 0xffffffff80000000, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 7, addi, 0xffffffff7ffff800, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 8, addi, 0x00000000000007ff, 0x00000000, 0x7ff );
TEST_IMM_OP( 9, addi, 0x000000007fffffff, 0x7fffffff, 0x000 );
TEST_IMM_OP( 10, addi, 0x00000000800007fe, 0x7fffffff, 0x7ff );
TEST_IMM_OP( 11, addi, 0xffffffff800007ff, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 12, addi, 0x000000007ffff7ff, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 13, addi, 0xffffffffffffffff, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 14, addi, 0x0000000000000000, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 15, addi, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfff );
TEST_IMM_OP( 16, addi, 0x0000000080000000, 0x7fffffff, 0x001 );

#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------

TEST_IMM_SRC1_EQ_DEST( 17, addi, 24, 13, 11 );

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_IMM_DEST_BYPASS( 18, 0, addi, 24, 13, 11 );
TEST_IMM_DEST_BYPASS( 19, 1, addi, 23, 13, 10 );
TEST_IMM_DEST_BYPASS( 20, 2, addi, 22, 13, 9 );

TEST_IMM_SRC1_BYPASS( 21, 0, addi, 24, 13, 11 );
TEST_IMM_SRC1_BYPASS( 22, 1, addi, 23, 13, 10 );
TEST_IMM_SRC1_BYPASS( 23, 2, addi, 22, 13, 9 );

TEST_IMM_ZEROSRC1( 24, addi, 32, 32 );
TEST_IMM_ZERODEST( 25, addi, 33, 50 );

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

RVTEST_DATA_END
lizhirui/DreamCore
4,358
model/tests/riscv-tests/isa/rv64ui/sllw.S
# See LICENSE for license details. #***************************************************************************** # sllw.S #----------------------------------------------------------------------------- # # Test sllw instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_RR_OP( 2, sllw, 0x0000000000000001, 0x0000000000000001, 0 ); TEST_RR_OP( 3, sllw, 0x0000000000000002, 0x0000000000000001, 1 ); TEST_RR_OP( 4, sllw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_RR_OP( 5, sllw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_RR_OP( 6, sllw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_RR_OP( 7, sllw, 0xffffffffffffffff, 0xffffffffffffffff, 0 ); TEST_RR_OP( 8, sllw, 0xfffffffffffffffe, 0xffffffffffffffff, 1 ); TEST_RR_OP( 9, sllw, 0xffffffffffffff80, 0xffffffffffffffff, 7 ); TEST_RR_OP( 10, sllw, 0xffffffffffffc000, 0xffffffffffffffff, 14 ); TEST_RR_OP( 11, sllw, 0xffffffff80000000, 0xffffffffffffffff, 31 ); TEST_RR_OP( 12, sllw, 0x0000000021212121, 0x0000000021212121, 0 ); TEST_RR_OP( 13, sllw, 0x0000000042424242, 0x0000000021212121, 1 ); TEST_RR_OP( 14, sllw, 0xffffffff90909080, 0x0000000021212121, 7 ); TEST_RR_OP( 15, sllw, 0x0000000048484000, 0x0000000021212121, 14 ); TEST_RR_OP( 16, sllw, 0xffffffff80000000, 0x0000000021212121, 31 ); # Verify that shifts only use bottom five bits TEST_RR_OP( 17, sllw, 0x0000000021212121, 0x0000000021212121, 0xffffffffffffffe0 ); TEST_RR_OP( 18, sllw, 0x0000000042424242, 0x0000000021212121, 0xffffffffffffffe1 ); TEST_RR_OP( 19, sllw, 0xffffffff90909080, 0x0000000021212121, 0xffffffffffffffe7 ); TEST_RR_OP( 20, sllw, 0x0000000048484000, 0x0000000021212121, 0xffffffffffffffee ); TEST_RR_OP( 21, sllw, 0xffffffff80000000, 0x0000000021212121, 0xffffffffffffffff ); # Verify that shifts ignore top 32 (using true 64-bit values) TEST_RR_OP( 44, sllw, 0x0000000012345678, 0xffffffff12345678, 0 ); TEST_RR_OP( 45, sllw, 0x0000000023456780, 0xffffffff12345678, 4 ); TEST_RR_OP( 46, sllw, 0xffffffff92345678, 0x0000000092345678, 0 ); TEST_RR_OP( 47, sllw, 0xffffffff93456780, 0x0000000099345678, 4 ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 22, sllw, 0x00000080, 0x00000001, 7 ); TEST_RR_SRC2_EQ_DEST( 23, sllw, 0x00004000, 0x00000001, 14 ); TEST_RR_SRC12_EQ_DEST( 24, sllw, 24, 3 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 25, 0, sllw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_RR_DEST_BYPASS( 26, 1, sllw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_RR_DEST_BYPASS( 27, 2, sllw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_RR_SRC12_BYPASS( 28, 0, 0, sllw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_RR_SRC12_BYPASS( 29, 0, 1, sllw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_RR_SRC12_BYPASS( 30, 0, 2, sllw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_RR_SRC12_BYPASS( 31, 1, 0, sllw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_RR_SRC12_BYPASS( 32, 1, 1, sllw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_RR_SRC12_BYPASS( 33, 2, 0, sllw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_RR_SRC21_BYPASS( 34, 0, 0, sllw, 0x0000000000000080, 0x0000000000000001, 7 ); 
TEST_RR_SRC21_BYPASS( 35, 0, 1, sllw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_RR_SRC21_BYPASS( 36, 0, 2, sllw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_RR_SRC21_BYPASS( 37, 1, 0, sllw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_RR_SRC21_BYPASS( 38, 1, 1, sllw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_RR_SRC21_BYPASS( 39, 2, 0, sllw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_RR_ZEROSRC1( 40, sllw, 0, 15 ); TEST_RR_ZEROSRC2( 41, sllw, 32, 32 ); TEST_RR_ZEROSRC12( 42, sllw, 0 ); TEST_RR_ZERODEST( 43, sllw, 1024, 2048 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,345
model/tests/riscv-tests/isa/rv64ui/ld.S
# See LICENSE for license details. #***************************************************************************** # ld.S #----------------------------------------------------------------------------- # # Test ld instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Basic tests #------------------------------------------------------------- TEST_LD_OP( 2, ld, 0x00ff00ff00ff00ff, 0, tdat ); TEST_LD_OP( 3, ld, 0xff00ff00ff00ff00, 8, tdat ); TEST_LD_OP( 4, ld, 0x0ff00ff00ff00ff0, 16, tdat ); TEST_LD_OP( 5, ld, 0xf00ff00ff00ff00f, 24, tdat ); # Test with negative offset TEST_LD_OP( 6, ld, 0x00ff00ff00ff00ff, -24, tdat4 ); TEST_LD_OP( 7, ld, 0xff00ff00ff00ff00, -16, tdat4 ); TEST_LD_OP( 8, ld, 0x0ff00ff00ff00ff0, -8, tdat4 ); TEST_LD_OP( 9, ld, 0xf00ff00ff00ff00f, 0, tdat4 ); # Test with a negative base TEST_CASE( 10, x5, 0x00ff00ff00ff00ff, \ la x1, tdat; \ addi x1, x1, -32; \ ld x5, 32(x1); \ ) # Test with unaligned base TEST_CASE( 11, x5, 0xff00ff00ff00ff00, \ la x1, tdat; \ addi x1, x1, -3; \ ld x5, 11(x1); \ ) #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_LD_DEST_BYPASS( 12, 0, ld, 0x0ff00ff00ff00ff0, 8, tdat2 ); TEST_LD_DEST_BYPASS( 13, 1, ld, 0xf00ff00ff00ff00f, 8, tdat3 ); TEST_LD_DEST_BYPASS( 14, 2, ld, 0xff00ff00ff00ff00, 8, tdat1 ); TEST_LD_SRC1_BYPASS( 15, 0, ld, 0x0ff00ff00ff00ff0, 8, tdat2 ); TEST_LD_SRC1_BYPASS( 16, 1, ld, 0xf00ff00ff00ff00f, 8, tdat3 ); TEST_LD_SRC1_BYPASS( 17, 2, ld, 0xff00ff00ff00ff00, 8, tdat1 ); #------------------------------------------------------------- # Test write-after-write hazard #------------------------------------------------------------- TEST_CASE( 18, x2, 2, \ la x5, tdat; \ ld x2, 0(x5); \ li x2, 2; \ ) TEST_CASE( 19, x2, 2, \ la x5, tdat; \ ld x2, 0(x5); \ nop; \ li x2, 2; \ ) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: tdat1: .dword 0x00ff00ff00ff00ff tdat2: .dword 0xff00ff00ff00ff00 tdat3: .dword 0x0ff00ff00ff00ff0 tdat4: .dword 0xf00ff00ff00ff00f RVTEST_DATA_END
lizhirui/DreamCore
1,680
model/tests/riscv-tests/isa/rv64ui/andi.S
# See LICENSE for license details.

#*****************************************************************************
# andi.S
#-----------------------------------------------------------------------------
#
# Test andi instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------

TEST_IMM_OP( 2, andi, 0xff00ff00, 0xff00ff00, 0xf0f );
TEST_IMM_OP( 3, andi, 0x000000f0, 0x0ff00ff0, 0x0f0 );
TEST_IMM_OP( 4, andi, 0x0000000f, 0x00ff00ff, 0x70f );
TEST_IMM_OP( 5, andi, 0x00000000, 0xf00ff00f, 0x0f0 );

#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------

TEST_IMM_SRC1_EQ_DEST( 6, andi, 0x00000000, 0xff00ff00, 0x0f0 );

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_IMM_DEST_BYPASS( 7, 0, andi, 0x00000700, 0x0ff00ff0, 0x70f );
TEST_IMM_DEST_BYPASS( 8, 1, andi, 0x000000f0, 0x00ff00ff, 0x0f0 );
TEST_IMM_DEST_BYPASS( 9, 2, andi, 0xf00ff00f, 0xf00ff00f, 0xf0f );

TEST_IMM_SRC1_BYPASS( 10, 0, andi, 0x00000700, 0x0ff00ff0, 0x70f );
TEST_IMM_SRC1_BYPASS( 11, 1, andi, 0x000000f0, 0x00ff00ff, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 12, 2, andi, 0x0000000f, 0xf00ff00f, 0x70f );

TEST_IMM_ZEROSRC1( 13, andi, 0, 0x0f0 );
TEST_IMM_ZERODEST( 14, andi, 0x00ff00ff, 0x70f );

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

RVTEST_DATA_END
lizhirui/DreamCore
4,389
model/tests/riscv-tests/isa/rv64ui/srlw.S
# See LICENSE for license details. #***************************************************************************** # srlw.S #----------------------------------------------------------------------------- # # Test srlw instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_RR_OP( 2, srlw, 0xffffffff80000000, 0xffffffff80000000, 0 ); TEST_RR_OP( 3, srlw, 0x0000000040000000, 0xffffffff80000000, 1 ); TEST_RR_OP( 4, srlw, 0x0000000001000000, 0xffffffff80000000, 7 ); TEST_RR_OP( 5, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_OP( 6, srlw, 0x0000000000000001, 0xffffffff80000001, 31 ); TEST_RR_OP( 7, srlw, 0xffffffffffffffff, 0xffffffffffffffff, 0 ); TEST_RR_OP( 8, srlw, 0x000000007fffffff, 0xffffffffffffffff, 1 ); TEST_RR_OP( 9, srlw, 0x0000000001ffffff, 0xffffffffffffffff, 7 ); TEST_RR_OP( 10, srlw, 0x000000000003ffff, 0xffffffffffffffff, 14 ); TEST_RR_OP( 11, srlw, 0x0000000000000001, 0xffffffffffffffff, 31 ); TEST_RR_OP( 12, srlw, 0x0000000021212121, 0x0000000021212121, 0 ); TEST_RR_OP( 13, srlw, 0x0000000010909090, 0x0000000021212121, 1 ); TEST_RR_OP( 14, srlw, 0x0000000000424242, 0x0000000021212121, 7 ); TEST_RR_OP( 15, srlw, 0x0000000000008484, 0x0000000021212121, 14 ); TEST_RR_OP( 16, srlw, 0x0000000000000000, 0x0000000021212121, 31 ); # Verify that shifts only use bottom five bits TEST_RR_OP( 17, srlw, 0x0000000021212121, 0x0000000021212121, 0xffffffffffffffe0 ); TEST_RR_OP( 18, srlw, 0x0000000010909090, 0x0000000021212121, 0xffffffffffffffe1 ); TEST_RR_OP( 19, srlw, 0x0000000000424242, 0x0000000021212121, 0xffffffffffffffe7 ); TEST_RR_OP( 20, srlw, 0x0000000000008484, 0x0000000021212121, 0xffffffffffffffee ); TEST_RR_OP( 21, srlw, 0x0000000000000000, 0x0000000021212121, 0xffffffffffffffff ); # Verify that shifts ignore top 32 (using true 64-bit values) TEST_RR_OP( 44, srlw, 0x0000000012345678, 0xffffffff12345678, 0 ); TEST_RR_OP( 45, srlw, 0x0000000001234567, 0xffffffff12345678, 4 ); TEST_RR_OP( 46, srlw, 0xffffffff92345678, 0x0000000092345678, 0 ); TEST_RR_OP( 47, srlw, 0x0000000009234567, 0x0000000092345678, 4 ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 22, srlw, 0x0000000001000000, 0xffffffff80000000, 7 ); TEST_RR_SRC2_EQ_DEST( 23, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_SRC12_EQ_DEST( 24, srlw, 0, 7 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 25, 0, srlw, 0x0000000001000000, 0xffffffff80000000, 7 ); TEST_RR_DEST_BYPASS( 26, 1, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_DEST_BYPASS( 27, 2, srlw, 0x0000000000000001, 0xffffffff80000000, 31 ); TEST_RR_SRC12_BYPASS( 28, 0, 0, srlw, 0x0000000001000000, 0xffffffff80000000, 7 ); TEST_RR_SRC12_BYPASS( 29, 0, 1, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_SRC12_BYPASS( 30, 0, 2, srlw, 0x0000000000000001, 0xffffffff80000000, 31 ); TEST_RR_SRC12_BYPASS( 31, 1, 0, srlw, 0x0000000001000000, 0xffffffff80000000, 7 ); TEST_RR_SRC12_BYPASS( 32, 1, 1, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_SRC12_BYPASS( 33, 2, 0, srlw, 0x0000000000000001, 0xffffffff80000000, 31 ); TEST_RR_SRC21_BYPASS( 34, 0, 0, srlw, 0x0000000001000000, 
0xffffffff80000000, 7 ); TEST_RR_SRC21_BYPASS( 35, 0, 1, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_SRC21_BYPASS( 36, 0, 2, srlw, 0x0000000000000001, 0xffffffff80000000, 31 ); TEST_RR_SRC21_BYPASS( 37, 1, 0, srlw, 0x0000000001000000, 0xffffffff80000000, 7 ); TEST_RR_SRC21_BYPASS( 38, 1, 1, srlw, 0x0000000000020000, 0xffffffff80000000, 14 ); TEST_RR_SRC21_BYPASS( 39, 2, 0, srlw, 0x0000000000000001, 0xffffffff80000000, 31 ); TEST_RR_ZEROSRC1( 40, srlw, 0, 15 ); TEST_RR_ZEROSRC2( 41, srlw, 32, 32 ); TEST_RR_ZEROSRC12( 42, srlw, 0 ); TEST_RR_ZERODEST( 43, srlw, 1024, 2048 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,310
model/tests/riscv-tests/isa/rv64ui/lhu.S
# See LICENSE for license details. #***************************************************************************** # lhu.S #----------------------------------------------------------------------------- # # Test lhu instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Basic tests #------------------------------------------------------------- TEST_LD_OP( 2, lhu, 0x00000000000000ff, 0, tdat ); TEST_LD_OP( 3, lhu, 0x000000000000ff00, 2, tdat ); TEST_LD_OP( 4, lhu, 0x0000000000000ff0, 4, tdat ); TEST_LD_OP( 5, lhu, 0x000000000000f00f, 6, tdat ); # Test with negative offset TEST_LD_OP( 6, lhu, 0x00000000000000ff, -6, tdat4 ); TEST_LD_OP( 7, lhu, 0x000000000000ff00, -4, tdat4 ); TEST_LD_OP( 8, lhu, 0x0000000000000ff0, -2, tdat4 ); TEST_LD_OP( 9, lhu, 0x000000000000f00f, 0, tdat4 ); # Test with a negative base TEST_CASE( 10, x5, 0x00000000000000ff, \ la x1, tdat; \ addi x1, x1, -32; \ lhu x5, 32(x1); \ ) # Test with unaligned base TEST_CASE( 11, x5, 0x000000000000ff00, \ la x1, tdat; \ addi x1, x1, -5; \ lhu x5, 7(x1); \ ) #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_LD_DEST_BYPASS( 12, 0, lhu, 0x0000000000000ff0, 2, tdat2 ); TEST_LD_DEST_BYPASS( 13, 1, lhu, 0x000000000000f00f, 2, tdat3 ); TEST_LD_DEST_BYPASS( 14, 2, lhu, 0x000000000000ff00, 2, tdat1 ); TEST_LD_SRC1_BYPASS( 15, 0, lhu, 0x0000000000000ff0, 2, tdat2 ); TEST_LD_SRC1_BYPASS( 16, 1, lhu, 0x000000000000f00f, 2, tdat3 ); TEST_LD_SRC1_BYPASS( 17, 2, lhu, 0x000000000000ff00, 2, tdat1 ); #------------------------------------------------------------- # Test write-after-write hazard #------------------------------------------------------------- TEST_CASE( 18, x2, 2, \ la x5, tdat; \ lhu x2, 0(x5); \ li x2, 2; \ ) TEST_CASE( 19, x2, 2, \ la x5, tdat; \ lhu x2, 0(x5); \ nop; \ li x2, 2; \ ) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: tdat1: .half 0x00ff tdat2: .half 0xff00 tdat3: .half 0x0ff0 tdat4: .half 0xf00f RVTEST_DATA_END
lizhirui/DreamCore
2,633
model/tests/riscv-tests/isa/rv64ui/and.S
# See LICENSE for license details. #***************************************************************************** # and.S #----------------------------------------------------------------------------- # # Test and instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Logical tests #------------------------------------------------------------- TEST_RR_OP( 2, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_OP( 3, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_OP( 4, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_OP( 5, and, 0xf000f000, 0xf00ff00f, 0xf0f0f0f0 ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 6, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC2_EQ_DEST( 7, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC12_EQ_DEST( 8, and, 0xff00ff00, 0xff00ff00 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 9, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_DEST_BYPASS( 10, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_DEST_BYPASS( 11, 2, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 12, 0, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 13, 0, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC12_BYPASS( 14, 0, 2, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 15, 1, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC12_BYPASS( 16, 1, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC12_BYPASS( 17, 2, 0, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 18, 0, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 19, 0, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC21_BYPASS( 20, 0, 2, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 21, 1, 0, and, 0x0f000f00, 0xff00ff00, 0x0f0f0f0f ); TEST_RR_SRC21_BYPASS( 22, 1, 1, and, 0x00f000f0, 0x0ff00ff0, 0xf0f0f0f0 ); TEST_RR_SRC21_BYPASS( 23, 2, 0, and, 0x000f000f, 0x00ff00ff, 0x0f0f0f0f ); TEST_RR_ZEROSRC1( 24, and, 0, 0xff00ff00 ); TEST_RR_ZEROSRC2( 25, and, 0, 0x00ff00ff ); TEST_RR_ZEROSRC12( 26, and, 0 ); TEST_RR_ZERODEST( 27, and, 0x11111111, 0x22222222 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,366
model/tests/riscv-tests/isa/rv64ui/bltu.S
# See LICENSE for license details. #***************************************************************************** # bltu.S #----------------------------------------------------------------------------- # # Test bltu instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Branch tests #------------------------------------------------------------- # Each test checks both forward and backward branches TEST_BR2_OP_TAKEN( 2, bltu, 0x00000000, 0x00000001 ); TEST_BR2_OP_TAKEN( 3, bltu, 0xfffffffe, 0xffffffff ); TEST_BR2_OP_TAKEN( 4, bltu, 0x00000000, 0xffffffff ); TEST_BR2_OP_NOTTAKEN( 5, bltu, 0x00000001, 0x00000000 ); TEST_BR2_OP_NOTTAKEN( 6, bltu, 0xffffffff, 0xfffffffe ); TEST_BR2_OP_NOTTAKEN( 7, bltu, 0xffffffff, 0x00000000 ); TEST_BR2_OP_NOTTAKEN( 8, bltu, 0x80000000, 0x7fffffff ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_BR2_SRC12_BYPASS( 9, 0, 0, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 10, 0, 1, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 11, 0, 2, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 12, 1, 0, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 13, 1, 1, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 14, 2, 0, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 15, 0, 0, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 16, 0, 1, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 17, 0, 2, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 18, 1, 0, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 19, 1, 1, bltu, 0xf0000000, 0xefffffff ); TEST_BR2_SRC12_BYPASS( 20, 2, 0, bltu, 0xf0000000, 0xefffffff ); #------------------------------------------------------------- # Test delay slot instructions not executed nor bypassed #------------------------------------------------------------- TEST_CASE( 21, x1, 3, \ li x1, 1; \ bltu x0, x1, 1f; \ addi x1, x1, 1; \ addi x1, x1, 1; \ addi x1, x1, 1; \ addi x1, x1, 1; \ 1: addi x1, x1, 1; \ addi x1, x1, 1; \ ) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,959
model/tests/riscv-tests/isa/rv64ui/slliw.S
# See LICENSE for license details. #***************************************************************************** # slliw.S #----------------------------------------------------------------------------- # # Test slliw instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_IMM_OP( 2, slliw, 0x0000000000000001, 0x0000000000000001, 0 ); TEST_IMM_OP( 3, slliw, 0x0000000000000002, 0x0000000000000001, 1 ); TEST_IMM_OP( 4, slliw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_IMM_OP( 5, slliw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_IMM_OP( 6, slliw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_IMM_OP( 7, slliw, 0xffffffffffffffff, 0xffffffffffffffff, 0 ); TEST_IMM_OP( 8, slliw, 0xfffffffffffffffe, 0xffffffffffffffff, 1 ); TEST_IMM_OP( 9, slliw, 0xffffffffffffff80, 0xffffffffffffffff, 7 ); TEST_IMM_OP( 10, slliw, 0xffffffffffffc000, 0xffffffffffffffff, 14 ); TEST_IMM_OP( 11, slliw, 0xffffffff80000000, 0xffffffffffffffff, 31 ); TEST_IMM_OP( 12, slliw, 0x0000000021212121, 0x0000000021212121, 0 ); TEST_IMM_OP( 13, slliw, 0x0000000042424242, 0x0000000021212121, 1 ); TEST_IMM_OP( 14, slliw, 0xffffffff90909080, 0x0000000021212121, 7 ); TEST_IMM_OP( 15, slliw, 0x0000000048484000, 0x0000000021212121, 14 ); TEST_IMM_OP( 16, slliw, 0xffffffff80000000, 0x0000000021212121, 31 ); # Verify that shifts ignore top 32 (using true 64-bit values) TEST_IMM_OP( 44, slliw, 0x0000000012345678, 0xffffffff12345678, 0 ); TEST_IMM_OP( 45, slliw, 0x0000000023456780, 0xffffffff12345678, 4 ); TEST_IMM_OP( 46, slliw, 0xffffffff92345678, 0x0000000092345678, 0 ); TEST_IMM_OP( 47, slliw, 0xffffffff93456780, 0x0000000099345678, 4 ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_IMM_SRC1_EQ_DEST( 17, slliw, 0x00000080, 0x00000001, 7 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_IMM_DEST_BYPASS( 18, 0, slliw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_IMM_DEST_BYPASS( 19, 1, slliw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_IMM_DEST_BYPASS( 20, 2, slliw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_IMM_SRC1_BYPASS( 21, 0, slliw, 0x0000000000000080, 0x0000000000000001, 7 ); TEST_IMM_SRC1_BYPASS( 22, 1, slliw, 0x0000000000004000, 0x0000000000000001, 14 ); TEST_IMM_SRC1_BYPASS( 23, 2, slliw, 0xffffffff80000000, 0x0000000000000001, 31 ); TEST_IMM_ZEROSRC1( 24, slliw, 0, 31 ); TEST_IMM_ZERODEST( 25, slliw, 31, 28 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,642
model/tests/riscv-tests/isa/rv64ui/sh.S
# See LICENSE for license details. #***************************************************************************** # sh.S #----------------------------------------------------------------------------- # # Test sh instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Basic tests #------------------------------------------------------------- TEST_ST_OP( 2, lh, sh, 0x00000000000000aa, 0, tdat ); TEST_ST_OP( 3, lh, sh, 0xffffffffffffaa00, 2, tdat ); TEST_ST_OP( 4, lw, sh, 0xffffffffbeef0aa0, 4, tdat ); TEST_ST_OP( 5, lh, sh, 0xffffffffffffa00a, 6, tdat ); # Test with negative offset TEST_ST_OP( 6, lh, sh, 0x00000000000000aa, -6, tdat8 ); TEST_ST_OP( 7, lh, sh, 0xffffffffffffaa00, -4, tdat8 ); TEST_ST_OP( 8, lh, sh, 0x0000000000000aa0, -2, tdat8 ); TEST_ST_OP( 9, lh, sh, 0xffffffffffffa00a, 0, tdat8 ); # Test with a negative base TEST_CASE( 10, x5, 0x5678, \ la x1, tdat9; \ li x2, 0x12345678; \ addi x4, x1, -32; \ sh x2, 32(x4); \ lh x5, 0(x1); \ ) # Test with unaligned base TEST_CASE( 11, x5, 0x3098, \ la x1, tdat9; \ li x2, 0x00003098; \ addi x1, x1, -5; \ sh x2, 7(x1); \ la x4, tdat10; \ lh x5, 0(x4); \ ) #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_ST_SRC12_BYPASS( 12, 0, 0, lh, sh, 0xffffffffffffccdd, 0, tdat ); TEST_ST_SRC12_BYPASS( 13, 0, 1, lh, sh, 0xffffffffffffbccd, 2, tdat ); TEST_ST_SRC12_BYPASS( 14, 0, 2, lh, sh, 0xffffffffffffbbcc, 4, tdat ); TEST_ST_SRC12_BYPASS( 15, 1, 0, lh, sh, 0xffffffffffffabbc, 6, tdat ); TEST_ST_SRC12_BYPASS( 16, 1, 1, lh, sh, 0xffffffffffffaabb, 8, tdat ); TEST_ST_SRC12_BYPASS( 17, 2, 0, lh, sh, 0xffffffffffffdaab, 10, tdat ); TEST_ST_SRC21_BYPASS( 18, 0, 0, lh, sh, 0x2233, 0, tdat ); TEST_ST_SRC21_BYPASS( 19, 0, 1, lh, sh, 0x1223, 2, tdat ); TEST_ST_SRC21_BYPASS( 20, 0, 2, lh, sh, 0x1122, 4, tdat ); TEST_ST_SRC21_BYPASS( 21, 1, 0, lh, sh, 0x0112, 6, tdat ); TEST_ST_SRC21_BYPASS( 22, 1, 1, lh, sh, 0x0011, 8, tdat ); TEST_ST_SRC21_BYPASS( 23, 2, 0, lh, sh, 0x3001, 10, tdat ); li a0, 0xbeef la a1, tdat sh a0, 6(a1) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: tdat1: .half 0xbeef tdat2: .half 0xbeef tdat3: .half 0xbeef tdat4: .half 0xbeef tdat5: .half 0xbeef tdat6: .half 0xbeef tdat7: .half 0xbeef tdat8: .half 0xbeef tdat9: .half 0xbeef tdat10: .half 0xbeef RVTEST_DATA_END
lizhirui/DreamCore
3,452
model/tests/riscv-tests/isa/rv64ui/srl.S
# See LICENSE for license details. #***************************************************************************** # srl.S #----------------------------------------------------------------------------- # # Test srl instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- #define TEST_SRL(n, v, a) \ TEST_RR_OP(n, srl, ((v) & ((1 << (__riscv_xlen-1) << 1) - 1)) >> (a), v, a) TEST_SRL( 2, 0xffffffff80000000, 0 ); TEST_SRL( 3, 0xffffffff80000000, 1 ); TEST_SRL( 4, 0xffffffff80000000, 7 ); TEST_SRL( 5, 0xffffffff80000000, 14 ); TEST_SRL( 6, 0xffffffff80000001, 31 ); TEST_SRL( 7, 0xffffffffffffffff, 0 ); TEST_SRL( 8, 0xffffffffffffffff, 1 ); TEST_SRL( 9, 0xffffffffffffffff, 7 ); TEST_SRL( 10, 0xffffffffffffffff, 14 ); TEST_SRL( 11, 0xffffffffffffffff, 31 ); TEST_SRL( 12, 0x0000000021212121, 0 ); TEST_SRL( 13, 0x0000000021212121, 1 ); TEST_SRL( 14, 0x0000000021212121, 7 ); TEST_SRL( 15, 0x0000000021212121, 14 ); TEST_SRL( 16, 0x0000000021212121, 31 ); # Verify that shifts only use bottom six(rv64) or five(rv32) bits TEST_RR_OP( 17, srl, 0x0000000021212121, 0x0000000021212121, 0xffffffffffffffc0 ); TEST_RR_OP( 18, srl, 0x0000000010909090, 0x0000000021212121, 0xffffffffffffffc1 ); TEST_RR_OP( 19, srl, 0x0000000000424242, 0x0000000021212121, 0xffffffffffffffc7 ); TEST_RR_OP( 20, srl, 0x0000000000008484, 0x0000000021212121, 0xffffffffffffffce ); TEST_RR_OP( 21, srl, 0x0000000000000000, 0x0000000021212121, 0xffffffffffffffff ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 22, srl, 0x01000000, 0x80000000, 7 ); TEST_RR_SRC2_EQ_DEST( 23, srl, 0x00020000, 0x80000000, 14 ); TEST_RR_SRC12_EQ_DEST( 24, srl, 0, 7 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 25, 0, srl, 0x01000000, 0x80000000, 7 ); TEST_RR_DEST_BYPASS( 26, 1, srl, 0x00020000, 0x80000000, 14 ); TEST_RR_DEST_BYPASS( 27, 2, srl, 0x00000001, 0x80000000, 31 ); TEST_RR_SRC12_BYPASS( 28, 0, 0, srl, 0x01000000, 0x80000000, 7 ); TEST_RR_SRC12_BYPASS( 29, 0, 1, srl, 0x00020000, 0x80000000, 14 ); TEST_RR_SRC12_BYPASS( 30, 0, 2, srl, 0x00000001, 0x80000000, 31 ); TEST_RR_SRC12_BYPASS( 31, 1, 0, srl, 0x01000000, 0x80000000, 7 ); TEST_RR_SRC12_BYPASS( 32, 1, 1, srl, 0x00020000, 0x80000000, 14 ); TEST_RR_SRC12_BYPASS( 33, 2, 0, srl, 0x00000001, 0x80000000, 31 ); TEST_RR_SRC21_BYPASS( 34, 0, 0, srl, 0x01000000, 0x80000000, 7 ); TEST_RR_SRC21_BYPASS( 35, 0, 1, srl, 0x00020000, 0x80000000, 14 ); TEST_RR_SRC21_BYPASS( 36, 0, 2, srl, 0x00000001, 0x80000000, 31 ); TEST_RR_SRC21_BYPASS( 37, 1, 0, srl, 0x01000000, 0x80000000, 7 ); TEST_RR_SRC21_BYPASS( 38, 1, 1, srl, 0x00020000, 0x80000000, 14 ); TEST_RR_SRC21_BYPASS( 39, 2, 0, srl, 0x00000001, 0x80000000, 31 ); TEST_RR_ZEROSRC1( 40, srl, 0, 15 ); TEST_RR_ZEROSRC2( 41, srl, 32, 32 ); TEST_RR_ZEROSRC12( 42, srl, 0 ); TEST_RR_ZERODEST( 43, srl, 1024, 2048 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
2,610
model/tests/riscv-tests/isa/rv64ui/sb.S
# See LICENSE for license details. #***************************************************************************** # sb.S #----------------------------------------------------------------------------- # # Test sb instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Basic tests #------------------------------------------------------------- TEST_ST_OP( 2, lb, sb, 0xffffffffffffffaa, 0, tdat ); TEST_ST_OP( 3, lb, sb, 0x0000000000000000, 1, tdat ); TEST_ST_OP( 4, lh, sb, 0xffffffffffffefa0, 2, tdat ); TEST_ST_OP( 5, lb, sb, 0x000000000000000a, 3, tdat ); # Test with negative offset TEST_ST_OP( 6, lb, sb, 0xffffffffffffffaa, -3, tdat8 ); TEST_ST_OP( 7, lb, sb, 0x0000000000000000, -2, tdat8 ); TEST_ST_OP( 8, lb, sb, 0xffffffffffffffa0, -1, tdat8 ); TEST_ST_OP( 9, lb, sb, 0x000000000000000a, 0, tdat8 ); # Test with a negative base TEST_CASE( 10, x5, 0x78, \ la x1, tdat9; \ li x2, 0x12345678; \ addi x4, x1, -32; \ sb x2, 32(x4); \ lb x5, 0(x1); \ ) # Test with unaligned base TEST_CASE( 11, x5, 0xffffffffffffff98, \ la x1, tdat9; \ li x2, 0x00003098; \ addi x1, x1, -6; \ sb x2, 7(x1); \ la x4, tdat10; \ lb x5, 0(x4); \ ) #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_ST_SRC12_BYPASS( 12, 0, 0, lb, sb, 0xffffffffffffffdd, 0, tdat ); TEST_ST_SRC12_BYPASS( 13, 0, 1, lb, sb, 0xffffffffffffffcd, 1, tdat ); TEST_ST_SRC12_BYPASS( 14, 0, 2, lb, sb, 0xffffffffffffffcc, 2, tdat ); TEST_ST_SRC12_BYPASS( 15, 1, 0, lb, sb, 0xffffffffffffffbc, 3, tdat ); TEST_ST_SRC12_BYPASS( 16, 1, 1, lb, sb, 0xffffffffffffffbb, 4, tdat ); TEST_ST_SRC12_BYPASS( 17, 2, 0, lb, sb, 0xffffffffffffffab, 5, tdat ); TEST_ST_SRC21_BYPASS( 18, 0, 0, lb, sb, 0x33, 0, tdat ); TEST_ST_SRC21_BYPASS( 19, 0, 1, lb, sb, 0x23, 1, tdat ); TEST_ST_SRC21_BYPASS( 20, 0, 2, lb, sb, 0x22, 2, tdat ); TEST_ST_SRC21_BYPASS( 21, 1, 0, lb, sb, 0x12, 3, tdat ); TEST_ST_SRC21_BYPASS( 22, 1, 1, lb, sb, 0x11, 4, tdat ); TEST_ST_SRC21_BYPASS( 23, 2, 0, lb, sb, 0x01, 5, tdat ); li a0, 0xef la a1, tdat sb a0, 3(a1) TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: tdat1: .byte 0xef tdat2: .byte 0xef tdat3: .byte 0xef tdat4: .byte 0xef tdat5: .byte 0xef tdat6: .byte 0xef tdat7: .byte 0xef tdat8: .byte 0xef tdat9: .byte 0xef tdat10: .byte 0xef RVTEST_DATA_END
lizhirui/DreamCore
2,028
model/tests/riscv-tests/isa/rv64ui/beq.S
# See LICENSE for license details.

#*****************************************************************************
# beq.S
#-----------------------------------------------------------------------------
#
# Test beq instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Branch tests
#-------------------------------------------------------------

# Each test checks both forward and backward branches

TEST_BR2_OP_TAKEN( 2, beq, 0, 0 );
TEST_BR2_OP_TAKEN( 3, beq, 1, 1 );
TEST_BR2_OP_TAKEN( 4, beq, -1, -1 );

TEST_BR2_OP_NOTTAKEN( 5, beq, 0, 1 );
TEST_BR2_OP_NOTTAKEN( 6, beq, 1, 0 );
TEST_BR2_OP_NOTTAKEN( 7, beq, -1, 1 );
TEST_BR2_OP_NOTTAKEN( 8, beq, 1, -1 );

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_BR2_SRC12_BYPASS( 9, 0, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 10, 0, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 11, 0, 2, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 12, 1, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 13, 1, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 14, 2, 0, beq, 0, -1 );

TEST_BR2_SRC12_BYPASS( 15, 0, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 16, 0, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 17, 0, 2, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 18, 1, 0, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 19, 1, 1, beq, 0, -1 );
TEST_BR2_SRC12_BYPASS( 20, 2, 0, beq, 0, -1 );

#-------------------------------------------------------------
# Test delay slot instructions not executed nor bypassed
#-------------------------------------------------------------

TEST_CASE( 21, x1, 3, \
  li x1, 1; \
  beq x0, x0, 1f; \
  addi x1, x1, 1; \
  addi x1, x1, 1; \
  addi x1, x1, 1; \
  addi x1, x1, 1; \
1: addi x1, x1, 1; \
  addi x1, x1, 1; \
)

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

RVTEST_DATA_END
lizhirui/DreamCore
2,750
model/tests/riscv-tests/isa/rv64ui/sd.S
# See LICENSE for license details. #***************************************************************************** # sd.S #----------------------------------------------------------------------------- # # Test sd instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Basic tests #------------------------------------------------------------- TEST_ST_OP( 2, ld, sd, 0x00aa00aa00aa00aa, 0, tdat ); TEST_ST_OP( 3, ld, sd, 0xaa00aa00aa00aa00, 8, tdat ); TEST_ST_OP( 4, ld, sd, 0x0aa00aa00aa00aa0, 16, tdat ); TEST_ST_OP( 5, ld, sd, 0xa00aa00aa00aa00a, 24, tdat ); # Test with negative offset TEST_ST_OP( 6, ld, sd, 0x00aa00aa00aa00aa, -24, tdat8 ); TEST_ST_OP( 7, ld, sd, 0xaa00aa00aa00aa00, -16, tdat8 ); TEST_ST_OP( 8, ld, sd, 0x0aa00aa00aa00aa0, -8, tdat8 ); TEST_ST_OP( 9, ld, sd, 0xa00aa00aa00aa00a, 0, tdat8 ); # Test with a negative base TEST_CASE( 10, x5, 0x1234567812345678, \ la x1, tdat9; \ li x2, 0x1234567812345678; \ addi x4, x1, -32; \ sd x2, 32(x4); \ ld x5, 0(x1); \ ) # Test with unaligned base TEST_CASE( 11, x5, 0x5821309858213098, \ la x1, tdat9; \ li x2, 0x5821309858213098; \ addi x1, x1, -3; \ sd x2, 11(x1); \ la x4, tdat10; \ ld x5, 0(x4); \ ) #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_ST_SRC12_BYPASS( 12, 0, 0, ld, sd, 0xabbccdd, 0, tdat ); TEST_ST_SRC12_BYPASS( 13, 0, 1, ld, sd, 0xaabbccd, 8, tdat ); TEST_ST_SRC12_BYPASS( 14, 0, 2, ld, sd, 0xdaabbcc, 16, tdat ); TEST_ST_SRC12_BYPASS( 15, 1, 0, ld, sd, 0xddaabbc, 24, tdat ); TEST_ST_SRC12_BYPASS( 16, 1, 1, ld, sd, 0xcddaabb, 32, tdat ); TEST_ST_SRC12_BYPASS( 17, 2, 0, ld, sd, 0xccddaab, 40, tdat ); TEST_ST_SRC21_BYPASS( 18, 0, 0, ld, sd, 0x00112233, 0, tdat ); TEST_ST_SRC21_BYPASS( 19, 0, 1, ld, sd, 0x30011223, 8, tdat ); TEST_ST_SRC21_BYPASS( 20, 0, 2, ld, sd, 0x33001122, 16, tdat ); TEST_ST_SRC21_BYPASS( 21, 1, 0, ld, sd, 0x23300112, 24, tdat ); TEST_ST_SRC21_BYPASS( 22, 1, 1, ld, sd, 0x22330011, 32, tdat ); TEST_ST_SRC21_BYPASS( 23, 2, 0, ld, sd, 0x12233001, 40, tdat ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA tdat: tdat1: .dword 0xdeadbeefdeadbeef tdat2: .dword 0xdeadbeefdeadbeef tdat3: .dword 0xdeadbeefdeadbeef tdat4: .dword 0xdeadbeefdeadbeef tdat5: .dword 0xdeadbeefdeadbeef tdat6: .dword 0xdeadbeefdeadbeef tdat7: .dword 0xdeadbeefdeadbeef tdat8: .dword 0xdeadbeefdeadbeef tdat9: .dword 0xdeadbeefdeadbeef tdat10: .dword 0xdeadbeefdeadbeef RVTEST_DATA_END
lizhirui/DreamCore
3,160
model/tests/riscv-tests/isa/rv64ui/subw.S
# See LICENSE for license details. #***************************************************************************** # subw.S #----------------------------------------------------------------------------- # # Test subw instruction. # #include "riscv_test.h" #include "test_macros.h" RVTEST_RV64U RVTEST_CODE_BEGIN #------------------------------------------------------------- # Arithmetic tests #------------------------------------------------------------- TEST_RR_OP( 2, subw, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 ); TEST_RR_OP( 3, subw, 0x0000000000000000, 0x0000000000000001, 0x0000000000000001 ); TEST_RR_OP( 4, subw, 0xfffffffffffffffc, 0x0000000000000003, 0x0000000000000007 ); TEST_RR_OP( 5, subw, 0x0000000000008000, 0x0000000000000000, 0xffffffffffff8000 ); TEST_RR_OP( 6, subw, 0xffffffff80000000, 0xffffffff80000000, 0x0000000000000000 ); TEST_RR_OP( 7, subw, 0xffffffff80008000, 0xffffffff80000000, 0xffffffffffff8000 ); TEST_RR_OP( 8, subw, 0xffffffffffff8001, 0x0000000000000000, 0x0000000000007fff ); TEST_RR_OP( 9, subw, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 ); TEST_RR_OP( 10, subw, 0x000000007fff8000, 0x000000007fffffff, 0x0000000000007fff ); TEST_RR_OP( 11, subw, 0x000000007fff8001, 0xffffffff80000000, 0x0000000000007fff ); TEST_RR_OP( 12, subw, 0xffffffff80007fff, 0x000000007fffffff, 0xffffffffffff8000 ); TEST_RR_OP( 13, subw, 0x0000000000000001, 0x0000000000000000, 0xffffffffffffffff ); TEST_RR_OP( 14, subw, 0xfffffffffffffffe, 0xffffffffffffffff, 0x0000000000000001 ); TEST_RR_OP( 15, subw, 0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff ); #------------------------------------------------------------- # Source/Destination tests #------------------------------------------------------------- TEST_RR_SRC1_EQ_DEST( 16, subw, 2, 13, 11 ); TEST_RR_SRC2_EQ_DEST( 17, subw, 3, 14, 11 ); TEST_RR_SRC12_EQ_DEST( 18, subw, 0, 13 ); #------------------------------------------------------------- # Bypassing tests #------------------------------------------------------------- TEST_RR_DEST_BYPASS( 19, 0, subw, 2, 13, 11 ); TEST_RR_DEST_BYPASS( 20, 1, subw, 3, 14, 11 ); TEST_RR_DEST_BYPASS( 21, 2, subw, 4, 15, 11 ); TEST_RR_SRC12_BYPASS( 22, 0, 0, subw, 2, 13, 11 ); TEST_RR_SRC12_BYPASS( 23, 0, 1, subw, 3, 14, 11 ); TEST_RR_SRC12_BYPASS( 24, 0, 2, subw, 4, 15, 11 ); TEST_RR_SRC12_BYPASS( 25, 1, 0, subw, 2, 13, 11 ); TEST_RR_SRC12_BYPASS( 26, 1, 1, subw, 3, 14, 11 ); TEST_RR_SRC12_BYPASS( 27, 2, 0, subw, 4, 15, 11 ); TEST_RR_SRC21_BYPASS( 28, 0, 0, subw, 2, 13, 11 ); TEST_RR_SRC21_BYPASS( 29, 0, 1, subw, 3, 14, 11 ); TEST_RR_SRC21_BYPASS( 30, 0, 2, subw, 4, 15, 11 ); TEST_RR_SRC21_BYPASS( 31, 1, 0, subw, 2, 13, 11 ); TEST_RR_SRC21_BYPASS( 32, 1, 1, subw, 3, 14, 11 ); TEST_RR_SRC21_BYPASS( 33, 2, 0, subw, 4, 15, 11 ); TEST_RR_ZEROSRC1( 34, subw, 15, -15 ); TEST_RR_ZEROSRC2( 35, subw, 32, 32 ); TEST_RR_ZEROSRC12( 36, subw, 0 ); TEST_RR_ZERODEST( 37, subw, 16, 30 ); TEST_PASSFAIL RVTEST_CODE_END .data RVTEST_DATA_BEGIN TEST_DATA RVTEST_DATA_END
lizhirui/DreamCore
1,829
model/tests/riscv-tests/isa/rv64ui/ori.S
# See LICENSE for license details.

#*****************************************************************************
# ori.S
#-----------------------------------------------------------------------------
#
# Test ori instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------

TEST_IMM_OP( 2, ori, 0xffffffffffffff0f, 0xffffffffff00ff00, 0xf0f );
TEST_IMM_OP( 3, ori, 0x000000000ff00ff0, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_OP( 4, ori, 0x0000000000ff07ff, 0x0000000000ff00ff, 0x70f );
TEST_IMM_OP( 5, ori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );

#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------

TEST_IMM_SRC1_EQ_DEST( 6, ori, 0xff00fff0, 0xff00ff00, 0x0f0 );

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_IMM_DEST_BYPASS( 7, 0, ori, 0x000000000ff00ff0, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_DEST_BYPASS( 8, 1, ori, 0x0000000000ff07ff, 0x0000000000ff00ff, 0x70f );
TEST_IMM_DEST_BYPASS( 9, 2, ori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );

TEST_IMM_SRC1_BYPASS( 10, 0, ori, 0x000000000ff00ff0, 0x000000000ff00ff0, 0x0f0 );
TEST_IMM_SRC1_BYPASS( 11, 1, ori, 0xffffffffffffffff, 0x0000000000ff00ff, 0xf0f );
TEST_IMM_SRC1_BYPASS( 12, 2, ori, 0xfffffffff00ff0ff, 0xfffffffff00ff00f, 0x0f0 );

TEST_IMM_ZEROSRC1( 13, ori, 0x0f0, 0x0f0 );
TEST_IMM_ZERODEST( 14, ori, 0x00ff00ff, 0x70f );

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

RVTEST_DATA_END
lizhirui/DreamCore
2,302
model/tests/riscv-tests/isa/rv64ui/lbu.S
# See LICENSE for license details.

#*****************************************************************************
# lbu.S
#-----------------------------------------------------------------------------
#
# Test lbu instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------

TEST_LD_OP( 2, lbu, 0x00000000000000ff, 0, tdat );
TEST_LD_OP( 3, lbu, 0x0000000000000000, 1, tdat );
TEST_LD_OP( 4, lbu, 0x00000000000000f0, 2, tdat );
TEST_LD_OP( 5, lbu, 0x000000000000000f, 3, tdat );

# Test with negative offset
TEST_LD_OP( 6, lbu, 0x00000000000000ff, -3, tdat4 );
TEST_LD_OP( 7, lbu, 0x0000000000000000, -2, tdat4 );
TEST_LD_OP( 8, lbu, 0x00000000000000f0, -1, tdat4 );
TEST_LD_OP( 9, lbu, 0x000000000000000f, 0, tdat4 );

# Test with a negative base
TEST_CASE( 10, x5, 0x00000000000000ff, \
  la x1, tdat; \
  addi x1, x1, -32; \
  lbu x5, 32(x1); \
)

# Test with unaligned base
TEST_CASE( 11, x5, 0x0000000000000000, \
  la x1, tdat; \
  addi x1, x1, -6; \
  lbu x5, 7(x1); \
)

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_LD_DEST_BYPASS( 12, 0, lbu, 0x00000000000000f0, 1, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lbu, 0x000000000000000f, 1, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lbu, 0x0000000000000000, 1, tdat1 );

TEST_LD_SRC1_BYPASS( 15, 0, lbu, 0x00000000000000f0, 1, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lbu, 0x000000000000000f, 1, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lbu, 0x0000000000000000, 1, tdat1 );

#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------

TEST_CASE( 18, x2, 2, \
  la x5, tdat; \
  lbu x2, 0(x5); \
  li x2, 2; \
)

TEST_CASE( 19, x2, 2, \
  la x5, tdat; \
  lbu x2, 0(x5); \
  nop; \
  li x2, 2; \
)

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

tdat:
tdat1: .byte 0xff
tdat2: .byte 0x00
tdat3: .byte 0xf0
tdat4: .byte 0x0f

RVTEST_DATA_END
lizhirui/DreamCore
2,326
model/tests/riscv-tests/isa/rv64ui/lwu.S
# See LICENSE for license details.

#*****************************************************************************
# lwu.S
#-----------------------------------------------------------------------------
#
# Test lwu instruction.
#

#include "riscv_test.h"
#include "test_macros.h"

RVTEST_RV64U
RVTEST_CODE_BEGIN

#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------

TEST_LD_OP( 2, lwu, 0x0000000000ff00ff, 0, tdat );
TEST_LD_OP( 3, lwu, 0x00000000ff00ff00, 4, tdat );
TEST_LD_OP( 4, lwu, 0x000000000ff00ff0, 8, tdat );
TEST_LD_OP( 5, lwu, 0x00000000f00ff00f, 12, tdat );

# Test with negative offset
TEST_LD_OP( 6, lwu, 0x0000000000ff00ff, -12, tdat4 );
TEST_LD_OP( 7, lwu, 0x00000000ff00ff00, -8, tdat4 );
TEST_LD_OP( 8, lwu, 0x000000000ff00ff0, -4, tdat4 );
TEST_LD_OP( 9, lwu, 0x00000000f00ff00f, 0, tdat4 );

# Test with a negative base
TEST_CASE( 10, x5, 0x0000000000ff00ff, \
  la x1, tdat; \
  addi x1, x1, -32; \
  lwu x5, 32(x1); \
)

# Test with unaligned base
TEST_CASE( 11, x5, 0x00000000ff00ff00, \
  la x1, tdat; \
  addi x1, x1, -3; \
  lwu x5, 7(x1); \
)

#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------

TEST_LD_DEST_BYPASS( 12, 0, lwu, 0x000000000ff00ff0, 4, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lwu, 0x00000000f00ff00f, 4, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lwu, 0x00000000ff00ff00, 4, tdat1 );

TEST_LD_SRC1_BYPASS( 15, 0, lwu, 0x000000000ff00ff0, 4, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lwu, 0x00000000f00ff00f, 4, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lwu, 0x00000000ff00ff00, 4, tdat1 );

#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------

TEST_CASE( 18, x2, 2, \
  la x5, tdat; \
  lwu x2, 0(x5); \
  li x2, 2; \
)

TEST_CASE( 19, x2, 2, \
  la x5, tdat; \
  lwu x2, 0(x5); \
  nop; \
  li x2, 2; \
)

TEST_PASSFAIL

RVTEST_CODE_END

.data
RVTEST_DATA_BEGIN

TEST_DATA

tdat:
tdat1: .word 0x00ff00ff
tdat2: .word 0xff00ff00
tdat3: .word 0x0ff00ff0
tdat4: .word 0xf00ff00f

RVTEST_DATA_END