repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
kushdevteam/bunproj | 833 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/dfminmax.s | .text
.global __hexagon_mindf3
.global __hexagon_maxdf3
.type __hexagon_mindf3,@function
.type __hexagon_maxdf3,@function
.global __qdsp_mindf3 ; .set __qdsp_mindf3, __hexagon_mindf3
.global __qdsp_maxdf3 ; .set __qdsp_maxdf3, __hexagon_maxdf3
.p2align 5
__hexagon_mindf3:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfcmp.gt(r1:0,r3:2)
r5:4 = r1:0
}
{
if (p0) r1:0 = r3:2
if (p1) r1:0 = r3:2
p2 = dfcmp.eq(r1:0,r3:2)
if (!p2.new) jumpr:t r31
}
{
r1:0 = or(r5:4,r3:2)
jumpr r31
}
.size __hexagon_mindf3,.-__hexagon_mindf3
.falign
__hexagon_maxdf3:
{
p0 = dfclass(r1:0,#0x10)
p1 = dfcmp.gt(r3:2,r1:0)
r5:4 = r1:0
}
{
if (p0) r1:0 = r3:2
if (p1) r1:0 = r3:2
p2 = dfcmp.eq(r1:0,r3:2)
if (!p2.new) jumpr:t r31
}
{
r1:0 = and(r5:4,r3:2)
jumpr r31
}
.size __hexagon_maxdf3,.-__hexagon_maxdf3
|
kushdevteam/bunproj | 3,885 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/fastmath2_ldlib_asm.s | .text
.global __hexagon_fast2ldadd_asm
.type __hexagon_fast2ldadd_asm, @function
__hexagon_fast2ldadd_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = MIN(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = add(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = ASL(R1:0, R4)
if(p0) jump .Ldenorma1
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma1:
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
.global __hexagon_fast2ldsub_asm
.type __hexagon_fast2ldsub_asm, @function
__hexagon_fast2ldsub_asm:
.falign
{
R4 = memw(r29+#8)
R5 = memw(r29+#24)
r7 = r0
}
{
R6 = sub(R4, R5):sat
P0 = CMP.GT(R4, R5);
if ( P0.new) R8 = add(R4, #1)
if (!P0.new) R8 = add(R5, #1)
} {
R6 = abs(R6):sat
if ( P0) R4 = #1
if (!P0) R5 = #1
R9 = #62
} {
R6 = min(R6, R9)
R1:0 = memd(r29+#0)
R3:2 = memd(r29+#16)
} {
if (!P0) R4 = add(R6, #1)
if ( P0) R5 = add(R6, #1)
} {
R1:0 = ASR(R1:0, R4)
R3:2 = ASR(R3:2, R5)
} {
R1:0 = sub(R1:0, R3:2)
R3:2 = #0
} {
R4 = clb(R1:0)
R9.L =#0x0001
} {
R8 -= add(R4, #-1)
R4 = add(R4, #-1)
p0 = cmp.gt(R4, #58)
R9.H =#0x8000
} {
if(!p0)memw(r7+#8) = R8
R1:0 = asl(R1:0, R4)
if(p0) jump .Ldenorma_s
} {
memd(r7+#0) = R1:0
jumpr r31
}
.Ldenorma_s:
memd(r7+#0) = R3:2
{
memw(r7+#8) = R9
jumpr r31
}
.text
.global __hexagon_fast2ldmpy_asm
.type __hexagon_fast2ldmpy_asm, @function
__hexagon_fast2ldmpy_asm:
.falign
{
R15:14 = memd(r29+#0)
R3:2 = memd(r29+#16)
R13:12 = #0
}
{
R8= extractu(R2, #31, #1)
R9= extractu(R14, #31, #1)
R13.H = #0x8000
}
{
R11:10 = mpy(R15, R3)
R7:6 = mpy(R15, R8)
R4 = memw(r29+#8)
R5 = memw(r29+#24)
}
{
R11:10 = add(R11:10, R11:10)
R7:6 += mpy(R3, R9)
}
{
R7:6 = asr(R7:6, #30)
R8.L = #0x0001
p1 = cmp.eq(R15:14, R3:2)
}
{
R7:6 = add(R7:6, R11:10)
R4= add(R4, R5)
p2 = cmp.eq(R3:2, R13:12)
}
{
R9 = clb(R7:6)
R8.H = #0x8000
p1 = and(p1, p2)
}
{
R4-= add(R9, #-1)
R9 = add(R9, #-1)
if(p1) jump .Lsat1
}
{
R7:6 = asl(R7:6, R9)
memw(R0+#8) = R4
p0 = cmp.gt(R9, #58)
if(p0.new) jump:NT .Ldenorm1
}
{
memd(R0+#0) = R7:6
jumpr r31
}
.Lsat1:
{
R13:12 = #0
R4+= add(R9, #1)
}
{
R13.H = #0x4000
memw(R0+#8) = R4
}
{
memd(R0+#0) = R13:12
jumpr r31
}
.Ldenorm1:
{
memw(R0+#8) = R8
R15:14 = #0
}
{
memd(R0+#0) = R15:14
jumpr r31
}
|
kushdevteam/bunproj | 872 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/compiler_builtins-0.1.158/src/hexagon/sfsqrt_opt.s | FUNCTION_BEGIN __hexagon_sqrtf
{
r3,p0 = sfinvsqrta(r0)
r5 = sffixupr(r0)
r4 = ##0x3f000000
r1:0 = combine(#0,#0)
}
{
r0 += sfmpy(r3,r5):lib
r1 += sfmpy(r3,r4):lib
r2 = r4
r3 = r5
}
{
r2 -= sfmpy(r0,r1):lib
p1 = sfclass(r5,#1)
}
{
r0 += sfmpy(r0,r2):lib
r1 += sfmpy(r1,r2):lib
r2 = r4
r3 = r5
}
{
r2 -= sfmpy(r0,r1):lib
r3 -= sfmpy(r0,r0):lib
}
{
r0 += sfmpy(r1,r3):lib
r1 += sfmpy(r1,r2):lib
r2 = r4
r3 = r5
}
{
r3 -= sfmpy(r0,r0):lib
if (p1) r0 = or(r0,r5)
}
{
r0 += sfmpy(r1,r3,p0):scale
jumpr r31
}
FUNCTION_END __hexagon_sqrtf
.global __qdsp_sqrtf ; .set __qdsp_sqrtf, __hexagon_sqrtf
.global __hexagon_fast_sqrtf ; .set __hexagon_fast_sqrtf, __hexagon_sqrtf
.global __hexagon_fast2_sqrtf ; .set __hexagon_fast2_sqrtf, __hexagon_sqrtf
|
kushdevteam/bunproj | 7,228 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/lzma-sys-0.1.20/xz-5.2/src/liblzma/check/crc32_x86.S | /*
* Speed-optimized CRC32 using slicing-by-eight algorithm
*
* This uses only i386 instructions, but it is optimized for i686 and later
* (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). For i586
* (e.g. Pentium), slicing-by-four would be better, and even the C version
* of slicing-by-eight built with gcc -march=i586 tends to be a little bit
* better than this. Very few probably run this code on i586 or older x86
* so this shouldn't be a problem in practice.
*
* Authors: Igor Pavlov (original version)
* Lasse Collin (AT&T syntax, PIC support, better portability)
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* This code needs lzma_crc32_table, which can be created using the
* following C code:
uint32_t lzma_crc32_table[8][256];
void
init_table(void)
{
// IEEE-802.3
static const uint32_t poly32 = UINT32_C(0xEDB88320);
// Castagnoli
// static const uint32_t poly32 = UINT32_C(0x82F63B78);
// Koopman
// static const uint32_t poly32 = UINT32_C(0xEB31D82E);
for (size_t s = 0; s < 8; ++s) {
for (size_t b = 0; b < 256; ++b) {
uint32_t r = s == 0 ? b : lzma_crc32_table[s - 1][b];
for (size_t i = 0; i < 8; ++i) {
if (r & 1)
r = (r >> 1) ^ poly32;
else
r >>= 1;
}
lzma_crc32_table[s][b] = r;
}
}
}
* The prototype of the CRC32 function:
* extern uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc);
*/
/*
* On some systems, the functions need to be prefixed. The prefix is
* usually an underscore.
*/
#ifndef __USER_LABEL_PREFIX__
# define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC32 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32)
#define LZMA_CRC32_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_table)
/*
* Solaris assembler doesn't have .p2align, and Darwin uses .align
* differently than GNU/Linux and Solaris.
*/
#if defined(__APPLE__) || defined(__MSDOS__)
# define ALIGN(pow2, abs) .align pow2
#else
# define ALIGN(pow2, abs) .align abs
#endif
.text
.globl LZMA_CRC32
#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
&& !defined(__MSDOS__)
.type LZMA_CRC32, @function
#endif
ALIGN(4, 16)
LZMA_CRC32:
/*
* Register usage:
* %eax crc
* %esi buf
* %edi size or buf + size
* %ebx lzma_crc32_table
* %ebp Table index
* %ecx Temporary
* %edx Temporary
*/
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl 0x14(%esp), %esi /* buf */
movl 0x18(%esp), %edi /* size */
movl 0x1C(%esp), %eax /* crc */
/*
* Store the address of lzma_crc32_table to %ebx. This is needed to
* get position-independent code (PIC).
*
* The PIC macro is defined by libtool, while __PIC__ is defined
* by GCC but only on some systems. Testing for both makes it simpler
* to test this code without libtool, and keeps the code working also
* when built with libtool but using something else than GCC.
*
* I understood that libtool may define PIC on Windows even though
* the code in Windows DLLs is not PIC in sense that it is in ELF
* binaries, so we need a separate check to always use the non-PIC
* code on Windows.
*/
#if (!defined(PIC) && !defined(__PIC__)) \
|| (defined(_WIN32) || defined(__CYGWIN__))
/* Not PIC */
movl $ LZMA_CRC32_TABLE, %ebx
#elif defined(__APPLE__)
/* Mach-O */
call .L_get_pc
.L_pic:
leal .L_lzma_crc32_table$non_lazy_ptr-.L_pic(%ebx), %ebx
movl (%ebx), %ebx
#else
/* ELF */
call .L_get_pc
addl $_GLOBAL_OFFSET_TABLE_, %ebx
movl LZMA_CRC32_TABLE@GOT(%ebx), %ebx
#endif
/* Complement the initial value. */
notl %eax
ALIGN(4, 16)
.L_align:
/*
* Check if there is enough input to use slicing-by-eight.
* We need 16 bytes, because the loop pre-reads eight bytes.
*/
cmpl $16, %edi
jb .L_rest
/* Check if we have reached alignment of eight bytes. */
testl $7, %esi
jz .L_slice
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrl $8, %eax
xorl (%ebx, %ebp, 4), %eax
decl %edi
jmp .L_align
ALIGN(2, 4)
.L_slice:
/*
* If we get here, there's at least 16 bytes of aligned input
* available. Make %edi multiple of eight bytes. Store the possible
* remainder over the "size" variable in the argument stack.
*/
movl %edi, 0x18(%esp)
andl $-8, %edi
subl %edi, 0x18(%esp)
/*
* Let %edi be buf + size - 8 while running the main loop. This way
* we can compare for equality to determine when exit the loop.
*/
addl %esi, %edi
subl $8, %edi
/* Read in the first eight aligned bytes. */
xorl (%esi), %eax
movl 4(%esi), %ecx
movzbl %cl, %ebp
.L_loop:
movl 0x0C00(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl 0x0800(%ebx, %ebp, 4), %edx
shrl $16, %ecx
xorl 8(%esi), %edx
movzbl %cl, %ebp
xorl 0x0400(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl (%ebx, %ebp, 4), %edx
movzbl %al, %ebp
/*
* Read the next four bytes, for which the CRC is calculated
* on the next interation of the loop.
*/
movl 12(%esi), %ecx
xorl 0x1C00(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
shrl $16, %eax
xorl 0x1800(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
movzbl %al, %eax
movl 0x1400(%ebx, %eax, 4), %eax
addl $8, %esi
xorl %edx, %eax
xorl 0x1000(%ebx, %ebp, 4), %eax
/* Check for end of aligned input. */
cmpl %edi, %esi
movzbl %cl, %ebp
jne .L_loop
/*
* Process the remaining eight bytes, which we have already
* copied to %ecx and %edx.
*/
movl 0x0C00(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl 0x0800(%ebx, %ebp, 4), %edx
shrl $16, %ecx
movzbl %cl, %ebp
xorl 0x0400(%ebx, %ebp, 4), %edx
movzbl %ch, %ebp
xorl (%ebx, %ebp, 4), %edx
movzbl %al, %ebp
xorl 0x1C00(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
shrl $16, %eax
xorl 0x1800(%ebx, %ebp, 4), %edx
movzbl %ah, %ebp
movzbl %al, %eax
movl 0x1400(%ebx, %eax, 4), %eax
addl $8, %esi
xorl %edx, %eax
xorl 0x1000(%ebx, %ebp, 4), %eax
/* Copy the number of remaining bytes to %edi. */
movl 0x18(%esp), %edi
.L_rest:
/* Check for end of input. */
testl %edi, %edi
jz .L_return
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrl $8, %eax
xorl (%ebx, %ebp, 4), %eax
decl %edi
jmp .L_rest
.L_return:
/* Complement the final value. */
notl %eax
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
#if defined(PIC) || defined(__PIC__)
ALIGN(4, 16)
.L_get_pc:
movl (%esp), %ebx
ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
/* Mach-O PIC */
.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc32_table$non_lazy_ptr:
.indirect_symbol LZMA_CRC32_TABLE
.long 0
#elif defined(_WIN32) || defined(__CYGWIN__)
# ifdef DLL_EXPORT
/* This is equivalent of __declspec(dllexport). */
.section .drectve
.ascii " -export:lzma_crc32"
# endif
#elif !defined(__MSDOS__)
/* ELF */
.size LZMA_CRC32, .-LZMA_CRC32
#endif
/*
* This is needed to support non-executable stack. It's ugly to
* use __linux__ here, but I don't know a way to detect when
* we are using GNU assembler.
*/
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
#endif
|
kushdevteam/bunproj | 6,761 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/lzma-sys-0.1.20/xz-5.2/src/liblzma/check/crc64_x86.S | /*
* Speed-optimized CRC64 using slicing-by-four algorithm
*
* This uses only i386 instructions, but it is optimized for i686 and later
* (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
*
* Authors: Igor Pavlov (original CRC32 assembly code)
* Lasse Collin (CRC64 adaptation of the modified CRC32 code)
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* This code needs lzma_crc64_table, which can be created using the
* following C code:
uint64_t lzma_crc64_table[4][256];
void
init_table(void)
{
// ECMA-182
static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);
for (size_t s = 0; s < 4; ++s) {
for (size_t b = 0; b < 256; ++b) {
uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];
for (size_t i = 0; i < 8; ++i) {
if (r & 1)
r = (r >> 1) ^ poly64;
else
r >>= 1;
}
lzma_crc64_table[s][b] = r;
}
}
}
* The prototype of the CRC64 function:
* extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
*/
/*
* On some systems, the functions need to be prefixed. The prefix is
* usually an underscore.
*/
#ifndef __USER_LABEL_PREFIX__
# define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)
/*
* Solaris assembler doesn't have .p2align, and Darwin uses .align
* differently than GNU/Linux and Solaris.
*/
#if defined(__APPLE__) || defined(__MSDOS__)
# define ALIGN(pow2, abs) .align pow2
#else
# define ALIGN(pow2, abs) .align abs
#endif
.text
.globl LZMA_CRC64
#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
&& !defined(__MSDOS__)
.type LZMA_CRC64, @function
#endif
ALIGN(4, 16)
LZMA_CRC64:
/*
* Register usage:
* %eax crc LSB
* %edx crc MSB
* %esi buf
* %edi size or buf + size
* %ebx lzma_crc64_table
* %ebp Table index
* %ecx Temporary
*/
pushl %ebx
pushl %esi
pushl %edi
pushl %ebp
movl 0x14(%esp), %esi /* buf */
movl 0x18(%esp), %edi /* size */
movl 0x1C(%esp), %eax /* crc LSB */
movl 0x20(%esp), %edx /* crc MSB */
/*
* Store the address of lzma_crc64_table to %ebx. This is needed to
* get position-independent code (PIC).
*
* The PIC macro is defined by libtool, while __PIC__ is defined
* by GCC but only on some systems. Testing for both makes it simpler
* to test this code without libtool, and keeps the code working also
* when built with libtool but using something else than GCC.
*
* I understood that libtool may define PIC on Windows even though
* the code in Windows DLLs is not PIC in sense that it is in ELF
* binaries, so we need a separate check to always use the non-PIC
* code on Windows.
*/
#if (!defined(PIC) && !defined(__PIC__)) \
|| (defined(_WIN32) || defined(__CYGWIN__))
/* Not PIC */
movl $ LZMA_CRC64_TABLE, %ebx
#elif defined(__APPLE__)
/* Mach-O */
call .L_get_pc
.L_pic:
leal .L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
movl (%ebx), %ebx
#else
/* ELF */
call .L_get_pc
addl $_GLOBAL_OFFSET_TABLE_, %ebx
movl LZMA_CRC64_TABLE@GOT(%ebx), %ebx
#endif
/* Complement the initial value. */
notl %eax
notl %edx
.L_align:
/*
* Check if there is enough input to use slicing-by-four.
* We need eight bytes, because the loop pre-reads four bytes.
*/
cmpl $8, %edi
jb .L_rest
/* Check if we have reached alignment of four bytes. */
testl $3, %esi
jz .L_slice
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrdl $8, %edx, %eax
xorl (%ebx, %ebp, 8), %eax
shrl $8, %edx
xorl 4(%ebx, %ebp, 8), %edx
decl %edi
jmp .L_align
.L_slice:
/*
* If we get here, there's at least eight bytes of aligned input
* available. Make %edi multiple of four bytes. Store the possible
* remainder over the "size" variable in the argument stack.
*/
movl %edi, 0x18(%esp)
andl $-4, %edi
subl %edi, 0x18(%esp)
/*
* Let %edi be buf + size - 4 while running the main loop. This way
* we can compare for equality to determine when exit the loop.
*/
addl %esi, %edi
subl $4, %edi
/* Read in the first four aligned bytes. */
movl (%esi), %ecx
.L_loop:
xorl %eax, %ecx
movzbl %cl, %ebp
movl 0x1800(%ebx, %ebp, 8), %eax
xorl %edx, %eax
movl 0x1804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
xorl 0x1000(%ebx, %ebp, 8), %eax
xorl 0x1004(%ebx, %ebp, 8), %edx
shrl $16, %ecx
movzbl %cl, %ebp
xorl 0x0800(%ebx, %ebp, 8), %eax
xorl 0x0804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
addl $4, %esi
xorl (%ebx, %ebp, 8), %eax
xorl 4(%ebx, %ebp, 8), %edx
/* Check for end of aligned input. */
cmpl %edi, %esi
/*
* Copy the next input byte to %ecx. It is slightly faster to
* read it here than at the top of the loop.
*/
movl (%esi), %ecx
jb .L_loop
/*
* Process the remaining four bytes, which we have already
* copied to %ecx.
*/
xorl %eax, %ecx
movzbl %cl, %ebp
movl 0x1800(%ebx, %ebp, 8), %eax
xorl %edx, %eax
movl 0x1804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
xorl 0x1000(%ebx, %ebp, 8), %eax
xorl 0x1004(%ebx, %ebp, 8), %edx
shrl $16, %ecx
movzbl %cl, %ebp
xorl 0x0800(%ebx, %ebp, 8), %eax
xorl 0x0804(%ebx, %ebp, 8), %edx
movzbl %ch, %ebp
addl $4, %esi
xorl (%ebx, %ebp, 8), %eax
xorl 4(%ebx, %ebp, 8), %edx
/* Copy the number of remaining bytes to %edi. */
movl 0x18(%esp), %edi
.L_rest:
/* Check for end of input. */
testl %edi, %edi
jz .L_return
/* Calculate CRC of the next input byte. */
movzbl (%esi), %ebp
incl %esi
movzbl %al, %ecx
xorl %ecx, %ebp
shrdl $8, %edx, %eax
xorl (%ebx, %ebp, 8), %eax
shrl $8, %edx
xorl 4(%ebx, %ebp, 8), %edx
decl %edi
jmp .L_rest
.L_return:
/* Complement the final value. */
notl %eax
notl %edx
popl %ebp
popl %edi
popl %esi
popl %ebx
ret
#if defined(PIC) || defined(__PIC__)
ALIGN(4, 16)
.L_get_pc:
movl (%esp), %ebx
ret
#endif
#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
/* Mach-O PIC */
.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc64_table$non_lazy_ptr:
.indirect_symbol LZMA_CRC64_TABLE
.long 0
#elif defined(_WIN32) || defined(__CYGWIN__)
# ifdef DLL_EXPORT
/* This is equivalent of __declspec(dllexport). */
.section .drectve
.ascii " -export:lzma_crc64"
# endif
#elif !defined(__MSDOS__)
/* ELF */
.size LZMA_CRC64, .-LZMA_CRC64
#endif
/*
* This is needed to support non-executable stack. It's ugly to
* use __linux__ here, but I don't know a way to detect when
* we are using GNU assembler.
*/
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
#endif
|
kushdevteam/bunproj | 15,150 | .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zstd-sys-2.0.16+zstd.1.5.7/zstd/lib/decompress/huf_decompress_amd64.S | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "../common/portability_macros.h"
#if defined(__ELF__) && defined(__GNUC__)
/* Stack marking
* ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
*/
.section .note.GNU-stack,"",%progbits
#if defined(__aarch64__)
/* Mark that this assembly supports BTI & PAC, because it is empty for aarch64.
* See: https://github.com/facebook/zstd/issues/3841
* See: https://gcc.godbolt.org/z/sqr5T4ffK
* See: https://lore.kernel.org/linux-arm-kernel/20200429211641.9279-8-broonie@kernel.org/
* See: https://reviews.llvm.org/D62609
*/
.pushsection .note.gnu.property, "a"
.p2align 3
.long 4 /* size of the name - "GNU\0" */
.long 0x10 /* size of descriptor */
.long 0x5 /* NT_GNU_PROPERTY_TYPE_0 */
.asciz "GNU"
.long 0xc0000000 /* pr_type - GNU_PROPERTY_AARCH64_FEATURE_1_AND */
.long 4 /* pr_datasz - 4 bytes */
.long 3 /* pr_data - GNU_PROPERTY_AARCH64_FEATURE_1_BTI | GNU_PROPERTY_AARCH64_FEATURE_1_PAC */
.p2align 3 /* pr_padding - bring everything to 8 byte alignment */
.popsection
#endif
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
/* Calling convention:
*
* %rdi (or %rcx on Windows) contains the first argument: HUF_DecompressAsmArgs*.
* %rbp isn't maintained (no frame pointer).
* %rsp contains the stack pointer that grows down.
* No red-zone is assumed, only addresses >= %rsp are used.
* All register contents are preserved.
*/
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop)
ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop)
.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop
.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop
.text
/* Sets up register mappings for clarity.
* op[], bits[], dtable & ip[0] each get their own register.
* ip[1,2,3] & olimit alias var[].
* %rax is a scratch register.
*/
#define op0 rsi
#define op1 rbx
#define op2 rcx
#define op3 rdi
#define ip0 r8
#define ip1 r9
#define ip2 r10
#define ip3 r11
#define bits0 rbp
#define bits1 rdx
#define bits2 r12
#define bits3 r13
#define dtable r14
#define olimit r15
/* var[] aliases ip[1,2,3] & olimit
* ip[1,2,3] are saved every iteration.
* olimit is only used in compute_olimit.
*/
#define var0 r15
#define var1 r9
#define var2 r10
#define var3 r11
/* 32-bit var registers */
#define vard0 r15d
#define vard1 r9d
#define vard2 r10d
#define vard3 r11d
/* Calls X(N) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM(X) \
X(0); \
X(1); \
X(2); \
X(3)
/* Calls X(N, idx) for each stream 0, 1, 2, 3. */
#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \
X(0, idx); \
X(1, idx); \
X(2, idx); \
X(3, idx)
/* Define both _HUF_* & HUF_* symbols because MacOS
* C symbols are prefixed with '_' & Linux symbols aren't.
*/
_HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
HUF_decompress4X1_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push 104(%rax) /* ilowest */
push 112(%rax) /* oend */
push %olimit /* olimit space */
subq $24, %rsp
.L_4X1_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rbx, rdx must be saved
* op3 & ip0 mustn't be clobbered
*/
movq %rbx, 0(%rsp)
movq %rdx, 8(%rsp)
movq 32(%rsp), %rax /* rax = oend */
subq %op3, %rax /* rax = oend - op3 */
/* r15 = (oend - op3) / 5 */
movabsq $-3689348814741910323, %rdx
mulq %rdx
movq %rdx, %r15
shrq $2, %r15
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %rbx /* rbx = ip0 - ilowest */
/* rdx = (ip0 - ilowest) / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %rbx
shrq %rbx
addq %rbx, %rdx
shrq $2, %rdx
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* r15 = r15 * 5 */
leaq (%r15, %r15, 4), %r15
/* olimit = op3 + r15 */
addq %op3, %olimit
movq 8(%rsp), %rdx
movq 0(%rsp), %rbx
/* If (op3 + 20 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X1_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X1_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X1_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X1_exit
/* Reads top 11 bits from bits[n]
* Loads dt[bits[n]] into var[n]
*/
#define GET_NEXT_DELT(n) \
movq $53, %var##n; \
shrxq %var##n, %bits##n, %var##n; \
movzwl (%dtable,%var##n,2),%vard##n
/* var[n] must contain the DTable entry computed with GET_NEXT_DELT
* Moves var[n] to %rax
* bits[n] <<= var[n] & 63
* op[n][idx] = %rax >> 8
* %ah is a way to access bits [8, 16) of %rax
*/
#define DECODE_FROM_DELT(n, idx) \
movq %var##n, %rax; \
shlxq %var##n, %bits##n, %bits##n; \
movb %ah, idx(%op##n)
/* Assumes GET_NEXT_DELT has been called.
* Calls DECODE_FROM_DELT then GET_NEXT_DELT
*/
#define DECODE_AND_GET_NEXT(n, idx) \
DECODE_FROM_DELT(n, idx); \
GET_NEXT_DELT(n) \
/* // ctz & nbBytes is stored in bits[n]
* // nbBits is stored in %rax
* ctz = CTZ[bits[n]]
* nbBits = ctz & 7
* nbBytes = ctz >> 3
* op[n] += 5
* ip[n] -= nbBytes
* // Note: x86-64 is little-endian ==> no bswap
* bits[n] = MEM_readST(ip[n]) | 1
* bits[n] <<= nbBits
*/
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
andq $7, %rax; \
shrq $3, %bits##n; \
leaq 5(%op##n), %op##n; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlx %rax, %bits##n, %bits##n
/* Store clobbered variables on the stack */
movq %olimit, 24(%rsp)
movq %ip1, 0(%rsp)
movq %ip2, 8(%rsp)
movq %ip3, 16(%rsp)
/* Call GET_NEXT_DELT for each stream */
FOR_EACH_STREAM(GET_NEXT_DELT)
.p2align 6
.L_4X1_loop_body:
/* Decode 5 symbols in each of the 4 streams (20 total)
* Must have called GET_NEXT_DELT for each stream
*/
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4)
/* Load ip[1,2,3] from stack (var[] aliases them)
* ip[] is needed for RELOAD_BITS
* Each will be stored back to the stack after RELOAD
*/
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Reload each stream & fetch the next table entry
* to prepare for the next iteration
*/
RELOAD_BITS(0)
GET_NEXT_DELT(0)
RELOAD_BITS(1)
movq %ip1, 0(%rsp)
GET_NEXT_DELT(1)
RELOAD_BITS(2)
movq %ip2, 8(%rsp)
GET_NEXT_DELT(2)
RELOAD_BITS(3)
movq %ip3, 16(%rsp)
GET_NEXT_DELT(3)
/* If op3 < olimit: continue the loop */
cmp %op3, 24(%rsp)
ja .L_4X1_loop_body
/* Reload ip[1,2,3] from stack */
movq 0(%rsp), %ip1
movq 8(%rsp), %ip2
movq 16(%rsp), %ip3
/* Re-compute olimit */
jmp .L_4X1_compute_olimit
#undef GET_NEXT_DELT
#undef DECODE_FROM_DELT
#undef DECODE
#undef RELOAD_BITS
.L_4X1_exit:
addq $24, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* olimit */
pop %rax /* oend */
pop %rax /* ilowest */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
_HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
HUF_decompress4X2_usingDTable_internal_fast_asm_loop:
ZSTD_CET_ENDBRANCH
/* Save all registers - even if they are callee saved for simplicity. */
push %rax
push %rbx
push %rcx
push %rdx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
/* Read HUF_DecompressAsmArgs* args from %rax */
#if defined(_WIN32)
movq %rcx, %rax
#else
movq %rdi, %rax
#endif
movq 0(%rax), %ip0
movq 8(%rax), %ip1
movq 16(%rax), %ip2
movq 24(%rax), %ip3
movq 32(%rax), %op0
movq 40(%rax), %op1
movq 48(%rax), %op2
movq 56(%rax), %op3
movq 64(%rax), %bits0
movq 72(%rax), %bits1
movq 80(%rax), %bits2
movq 88(%rax), %bits3
movq 96(%rax), %dtable
push %rax /* argument */
push %rax /* olimit */
push 104(%rax) /* ilowest */
movq 112(%rax), %rax
push %rax /* oend3 */
movq %op3, %rax
push %rax /* oend2 */
movq %op2, %rax
push %rax /* oend1 */
movq %op1, %rax
push %rax /* oend0 */
/* Scratch space */
subq $8, %rsp
.L_4X2_compute_olimit:
/* Computes how many iterations we can do safely
* %r15, %rax may be clobbered
* rdx must be saved
* op[1,2,3,4] & ip0 mustn't be clobbered
*/
movq %rdx, 0(%rsp)
/* We can consume up to 7 input bytes each iteration. */
movq %ip0, %rax /* rax = ip0 */
movq 40(%rsp), %rdx /* rdx = ilowest */
subq %rdx, %rax /* rax = ip0 - ilowest */
movq %rax, %r15 /* r15 = ip0 - ilowest */
/* rdx = rax / 7 */
movabsq $2635249153387078803, %rdx
mulq %rdx
subq %rdx, %r15
shrq %r15
addq %r15, %rdx
shrq $2, %rdx
/* r15 = (ip0 - ilowest) / 7 */
movq %rdx, %r15
/* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */
movq 8(%rsp), %rax /* rax = oend0 */
subq %op0, %rax /* rax = oend0 - op0 */
movq 16(%rsp), %rdx /* rdx = oend1 */
subq %op1, %rdx /* rdx = oend1 - op1 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 24(%rsp), %rax /* rax = oend2 */
subq %op2, %rax /* rax = oend2 - op2 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movq 32(%rsp), %rax /* rax = oend3 */
subq %op3, %rax /* rax = oend3 - op3 */
cmpq %rax, %rdx
cmova %rax, %rdx /* rdx = min(%rdx, %rax) */
movabsq $-3689348814741910323, %rax
mulq %rdx
shrq $3, %rdx /* rdx = rdx / 10 */
/* r15 = min(%rdx, %r15) */
cmpq %rdx, %r15
cmova %rdx, %r15
/* olimit = op3 + 5 * r15 */
movq %r15, %rax
leaq (%op3, %rax, 4), %olimit
addq %rax, %olimit
movq 0(%rsp), %rdx
/* If (op3 + 10 > olimit) */
movq %op3, %rax /* rax = op3 */
cmpq %rax, %olimit /* op3 == olimit */
je .L_4X2_exit
/* If (ip1 < ip0) go to exit */
cmpq %ip0, %ip1
jb .L_4X2_exit
/* If (ip2 < ip1) go to exit */
cmpq %ip1, %ip2
jb .L_4X2_exit
/* If (ip3 < ip2) go to exit */
cmpq %ip2, %ip3
jb .L_4X2_exit
#define DECODE(n, idx) \
movq %bits##n, %rax; \
shrq $53, %rax; \
movzwl 0(%dtable,%rax,4),%r8d; \
movzbl 2(%dtable,%rax,4),%r15d; \
movzbl 3(%dtable,%rax,4),%eax; \
movw %r8w, (%op##n); \
shlxq %r15, %bits##n, %bits##n; \
addq %rax, %op##n
#define RELOAD_BITS(n) \
bsfq %bits##n, %bits##n; \
movq %bits##n, %rax; \
shrq $3, %bits##n; \
andq $7, %rax; \
subq %bits##n, %ip##n; \
movq (%ip##n), %bits##n; \
orq $1, %bits##n; \
shlxq %rax, %bits##n, %bits##n
movq %olimit, 48(%rsp)
.p2align 6
.L_4X2_loop_body:
/* We clobber r8, so store it on the stack */
movq %r8, 0(%rsp)
/* Decode 5 symbols from each of the 4 streams (20 symbols total). */
FOR_EACH_STREAM_WITH_INDEX(DECODE, 0)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 1)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 2)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 3)
FOR_EACH_STREAM_WITH_INDEX(DECODE, 4)
/* Reload r8 */
movq 0(%rsp), %r8
FOR_EACH_STREAM(RELOAD_BITS)
cmp %op3, 48(%rsp)
ja .L_4X2_loop_body
jmp .L_4X2_compute_olimit
#undef DECODE
#undef RELOAD_BITS
.L_4X2_exit:
addq $8, %rsp
/* Restore stack (oend & olimit) */
pop %rax /* oend0 */
pop %rax /* oend1 */
pop %rax /* oend2 */
pop %rax /* oend3 */
pop %rax /* ilowest */
pop %rax /* olimit */
pop %rax /* arg */
/* Save ip / op / bits */
movq %ip0, 0(%rax)
movq %ip1, 8(%rax)
movq %ip2, 16(%rax)
movq %ip3, 24(%rax)
movq %op0, 32(%rax)
movq %op1, 40(%rax)
movq %op2, 48(%rax)
movq %op3, 56(%rax)
movq %bits0, 64(%rax)
movq %bits1, 72(%rax)
movq %bits2, 80(%rax)
movq %bits3, 88(%rax)
/* Restore registers */
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rdx
pop %rcx
pop %rbx
pop %rax
ret
#endif
|
L0czek/aosp-virt-package | 2,102 | guest/vmbase_example/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Statically-built identity-map translation table for the vmbase_example
 * image, mapped read-only. Descriptor encodings follow the VMSAv8-64
 * stage 1 translation table format.
 */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
/* Read-only. */
.set .L_TT_RO, 0x2 << 6
/* Execute-never (bits 53-54). */
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
/* NOTE(review): omits XN, presumably so the image can execute in place — confirm. */
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12 // 2^12 = 4 KiB alignment
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.fill 509, 8, 0x0 // 509 GiB of remaining VA space
0: /* level 2 */
#if defined(VMBASE_EXAMPLE_IS_BIOS)
.quad 0 // 2 MiB not mapped (DT)
.quad .L_BLOCK_MEM_XIP | 0x80200000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80400000 // 2 MiB of writable DRAM
.fill 509, 8, 0x0
#elif defined(VMBASE_EXAMPLE_IS_KERNEL)
.quad .L_BLOCK_MEM_XIP | 0x80000000 // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | 0x80200000 // 2 MiB of writable DRAM
.fill 510, 8, 0x0
#else
#error "Unexpected vmbase_example mode: failed to generate idmap"
#endif
|
L0czek/aosp-virt-package | 976 | guest/vmbase_example/image.ld.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Memory regions for the vmbase_example image. The image (read-only,
 * execute-in-place) and its writable data each get a 2 MiB region; the
 * origins match the level-2 idmap entries in idmap.S for each mode.
 */
MEMORY
{
#if defined(VMBASE_EXAMPLE_IS_BIOS)
image : ORIGIN = 0x80200000, LENGTH = 2M
writable_data : ORIGIN = 0x80400000, LENGTH = 2M
#elif defined(VMBASE_EXAMPLE_IS_KERNEL)
image : ORIGIN = 0x80000000, LENGTH = 2M
writable_data : ORIGIN = 0x80200000, LENGTH = 2M
#else
#error "Unexpected vmbase_example mode: failed to generate image layout"
#endif
}
|
L0czek/aosp-virt-package | 1,745 | guest/pvmfw/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Statically-built identity-map translation table for the pVM firmware.
 * Descriptor encodings follow the VMSAv8-64 stage 1 translation table
 * format.
 */
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
/* Access flag. */
.set .L_TT_AF, 0x1 << 10
/* Not global. */
.set .L_TT_NG, 0x1 << 11
/* Read-only. */
.set .L_TT_RO, 0x2 << 6
/* Execute-never (bits 53-54). */
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
/* NOTE(review): omits XN, presumably so the firmware can execute in place — confirm. */
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.align 12 // 2^12 = 4 KiB alignment
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GB of device mappings
.quad .L_TT_TYPE_TABLE + 0f // Unmapped device memory, and pVM firmware
.fill 510, 8, 0x0 // 510 GB of remaining VA space
/* level 2 — last two 2 MiB slots of the second GiB hold the firmware. */
0: .fill 510, 8, 0x0
.quad .L_BLOCK_MEM_XIP | 0x7fc00000 // pVM firmware image
.quad .L_BLOCK_MEM | 0x7fe00000 // Writable memory for stack, heap &c.
|
L0czek/aosp-virt-package | 2,161 | guest/rialto/idmap.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Initial TTBR0 idmap activated before first memory write.
// Remains active until a new page table is created by early Rust.
//
.set .SZ_1K, 1024
.set .SZ_4K, 4 * .SZ_1K
.set .SZ_1M, 1024 * .SZ_1K
.set .SZ_2M, 2 * .SZ_1M
.set .SZ_1G, 1024 * .SZ_1M
.set .PAGE_SIZE, .SZ_4K
.set .ORIGIN_ADDR, 2 * .SZ_1G
.set .TEXT_ADDR, .ORIGIN_ADDR + (0 * .SZ_2M)
.set .DATA_ADDR, .ORIGIN_ADDR + (1 * .SZ_2M)
.set .L_TT_TYPE_BLOCK, 0x1
.set .L_TT_TYPE_PAGE, 0x3
.set .L_TT_TYPE_TABLE, 0x3
.set .L_TT_AF, 0x1 << 10 // Access flag
.set .L_TT_NG, 0x1 << 11 // Not global
.set .L_TT_RO, 0x2 << 6
.set .L_TT_XN, 0x3 << 53
.set .L_TT_MT_DEV, 0x0 << 2 // MAIR #0 (DEV_nGnRE)
.set .L_TT_MT_MEM, (0x1 << 2) | (0x3 << 8) // MAIR #1 (MEM_WBWA), inner shareable
.set .L_BLOCK_RO, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_RO | .L_TT_XN
.set .L_BLOCK_DEV, .L_TT_TYPE_BLOCK | .L_TT_MT_DEV | .L_TT_AF | .L_TT_XN
.set .L_BLOCK_MEM, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_XN | .L_TT_NG
.set .L_BLOCK_MEM_XIP, .L_TT_TYPE_BLOCK | .L_TT_MT_MEM | .L_TT_AF | .L_TT_NG | .L_TT_RO
.section ".rodata.idmap", "a", %progbits
.global idmap
.balign .PAGE_SIZE
idmap:
/* level 1 */
.quad .L_BLOCK_DEV | 0x0 // 1 GiB of device mappings
.quad 0x0 // 1 GiB unmapped
.quad .L_TT_TYPE_TABLE + 0f // up to 1 GiB of DRAM
.balign .PAGE_SIZE, 0 // unmapped
/* level 2 */
0:
.quad .L_BLOCK_MEM_XIP | .TEXT_ADDR // 2 MiB of DRAM containing image
.quad .L_BLOCK_MEM | .DATA_ADDR // 2 MiB of writable DRAM
.balign .PAGE_SIZE, 0 // unmapped
|
L0czek/aosp-virt-package | 5,154 | libs/libvmbase/entry.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/* Memory attribute indirection: MAIR slot #0 = device, slot #1 = normal memory. */
.set .L_MAIR_DEV_nGnRE, 0x04
.set .L_MAIR_MEM_WBWA, 0xff
.set .Lmairval, .L_MAIR_DEV_nGnRE | (.L_MAIR_MEM_WBWA << 8)
/* 4 KiB granule size for TTBR0_EL1. */
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner shareable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
 * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
 * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
 * cacheable.
 */
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
.set .L_TCR_T0SZ_512, 64 - 39
.set .Ltcrval, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB | .L_TCR_EPD1 | .L_TCR_RGN_OWB
.set .Ltcrval, .Ltcrval | .L_TCR_RGN_IWB | .L_TCR_SH_INNER | .L_TCR_T0SZ_512
/* Stage 1 instruction access cacheability is unaffected. */
.set .L_SCTLR_ELx_I, 0x1 << 12
/* SP alignment fault if SP is not aligned to a 16 byte boundary. */
.set .L_SCTLR_ELx_SA, 0x1 << 3
/* Stage 1 data access cacheability is unaffected. */
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
/* Privileged Access Never is unchanged on taking an exception to EL1. */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* All writable memory regions are treated as XN. */
.set .L_SCTLR_EL1_WXN, 0x1 << 19
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
/* Various IT instructions are disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_ITD, 0x1 << 7
.set .L_SCTLR_EL1_RES1, (0x1 << 11) | (0x1 << 20) | (0x1 << 22) | (0x1 << 28) | (0x1 << 29)
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
/**
 * This is a generic entry point for an image. It carries out the operations required to prepare the
 * loaded image to be run. Specifically, it validates the load address, enables the MMU and caches,
 * zeroes the bss section and copies the data section (using only x28-x30 as scratch), prepares the
 * stacks, enables floating point, and sets up the exception vector. It preserves x0-x3 for the Rust
 * entry point, as these may contain boot parameters.
 */
.section .init.entry, "ax"
.global entry
entry:
/* Load and apply the memory management configuration, ready to enable MMU and caches. */
adr x30, vector_table_panic
msr vbar_el1, x30
/*
 * Our load address is set by the host so validate it before proceeding.
 */
adr x30, entry // actual (PC-relative) address
mov_i x29, entry // link-time address
cmp x29, x30
b.eq 1f
reset_or_hang
1:
adrp x30, idmap
msr ttbr0_el1, x30
mov_i x30, .Lmairval
msr mair_el1, x30
mov_i x30, .Ltcrval
/* Copy the supported PA range into TCR_EL1.IPS. */
mrs x29, id_aa64mmfr0_el1
bfi x30, x29, #32, #4
msr tcr_el1, x30
mov_i x30, .Lsctlrval
/*
 * Ensure everything before this point has completed, then invalidate any potentially stale
 * local TLB entries before they start being used.
 */
isb
tlbi vmalle1
ic iallu
dsb nsh
isb
/*
 * Configure sctlr_el1 to enable MMU and cache and don't proceed until this has completed.
 */
msr sctlr_el1, x30
isb
/* Disable trapping floating point access in EL1. */
mrs x30, cpacr_el1
orr x30, x30, #(0x3 << 20) // CPACR_EL1.FPEN = no trapping
msr cpacr_el1, x30
isb
/* Zero out the bss section, 16 bytes per iteration. */
adr_l x29, bss_begin
adr_l x30, bss_end
0: cmp x29, x30
b.hs 1f
stp xzr, xzr, [x29], #16
b 0b
1: /* Copy the data section from its load address (data_lma), 32 bytes per iteration. */
/* NOTE(review): assumes the data section size is a multiple of 32 bytes — confirm the linker script pads it. */
adr_l x28, data_begin
adr_l x29, data_end
adr_l x30, data_lma
2: cmp x28, x29
/* NOTE(review): b.ge is a signed compare (bss loop above uses unsigned b.hs) — fine for these addresses, but consider b.hs for consistency. */
b.ge 3f
ldp q0, q1, [x30], #32
stp q0, q1, [x28], #32
b 2b
3: /* Prepare the exception handler stack (SP_EL1). */
adr_l x30, init_eh_stack_pointer
msr spsel, #1
mov sp, x30
/* Prepare the main thread stack (SP_EL0). */
adr_l x30, init_stack_pointer
msr spsel, #0
mov sp, x30
/* Set up exception vector. */
adr x30, vector_table_el1
msr vbar_el1, x30
/*
 * Set up Bionic-compatible thread-local storage.
 *
 * Note that TPIDR_EL0 can't be configured from rust_entry because the
 * compiler will dereference it during function entry to access
 * __stack_chk_guard and Rust doesn't support LLVM's
 * __attribute__((no_stack_protector)).
 */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
/* Call into Rust code. */
bl rust_entry
/* Loop forever waiting for interrupts. */
4: wfi
b 4b
|
L0czek/aosp-virt-package | 4,679 | libs/libvmbase/exceptions.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Saves the volatile registers onto the stack. This currently takes 14
* instructions, so it can be used in exception handlers with 18 instructions
* left.
*
* On return, x0 and x1 are initialised to elr_el1 and spsr_el1 respectively,
* which can be used as the first and second arguments of a subsequent call.
*/
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
stp x0, x1, [sp, #-(8 * 24)]!
stp x2, x3, [sp, #8 * 2]
stp x4, x5, [sp, #8 * 4]
stp x6, x7, [sp, #8 * 6]
stp x8, x9, [sp, #8 * 8]
stp x10, x11, [sp, #8 * 10]
stp x12, x13, [sp, #8 * 12]
stp x14, x15, [sp, #8 * 14]
stp x16, x17, [sp, #8 * 16]
str x18, [sp, #8 * 18]
stp x29, x30, [sp, #8 * 20]
/*
 * Save elr_el1 & spsr_el1 so that we can take a nested exception and
 * still be able to unwind.
 */
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm
/**
 * Restores the volatile registers from the stack. This currently takes 14
 * instructions, so it can be used in exception handlers while still leaving 18
 * instructions left; if paired with save_volatile_to_stack, there are 4
 * instructions to spare.
 */
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
ldp x2, x3, [sp, #8 * 2]
ldp x4, x5, [sp, #8 * 4]
ldp x6, x7, [sp, #8 * 6]
ldp x8, x9, [sp, #8 * 8]
ldp x10, x11, [sp, #8 * 10]
ldp x12, x13, [sp, #8 * 12]
ldp x14, x15, [sp, #8 * 14]
ldp x16, x17, [sp, #8 * 16]
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]
/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
/* Restore x0 & x1, and release stack space. */
ldp x0, x1, [sp], #8 * 24
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
 * the work, then switching back to SP0 before returning.
 *
 * Switching to SPx and calling the Rust handler takes 16 instructions. To
 * restore and return we need an additional 16 instructions, so we can implement
 * the whole handler within the allotted 32 instructions.
 */
.macro current_exception_sp0 handler:req
msr spsel, #1
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
msr spsel, #0
eret
.endm
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SPx. It saves volatile registers, calls the Rust handler, restores volatile
 * registers, then returns.
 *
 * This also works for exceptions taken from EL0, if we don't care about
 * non-volatile registers.
 *
 * Saving state and jumping to the Rust handler takes 15 instructions, and
 * restoring and returning also takes 15 instructions, so we can fit the whole
 * handler in 30 instructions, under the limit of 32.
 */
.macro current_exception_spx handler:req
save_volatile_to_stack
bl \handler
restore_volatile_from_stack
eret
.endm
/*
 * EL1 exception vector table: four groups (current EL with SP0, current EL
 * with SPx, lower EL AArch64, lower EL AArch32), each with four entries
 * (sync, IRQ, FIQ, SError). Each entry is 0x80 bytes and the table itself
 * is 0x800-aligned, as the .balign directives below enforce.
 */
.section .text.vector_table_el1, "ax"
.global vector_table_el1
.balign 0x800
vector_table_el1:
sync_cur_sp0:
current_exception_sp0 sync_exception_current
.balign 0x80
irq_cur_sp0:
current_exception_sp0 irq_current
.balign 0x80
fiq_cur_sp0:
current_exception_sp0 fiq_current
.balign 0x80
serr_cur_sp0:
current_exception_sp0 serr_current
.balign 0x80
sync_cur_spx:
current_exception_spx sync_exception_current
.balign 0x80
irq_cur_spx:
current_exception_spx irq_current
.balign 0x80
fiq_cur_spx:
current_exception_spx fiq_current
.balign 0x80
serr_cur_spx:
current_exception_spx serr_current
.balign 0x80
sync_lower_64:
current_exception_spx sync_lower
.balign 0x80
irq_lower_64:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_64:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_64:
current_exception_spx serr_lower
.balign 0x80
sync_lower_32:
current_exception_spx sync_lower
.balign 0x80
irq_lower_32:
current_exception_spx irq_lower
.balign 0x80
fiq_lower_32:
current_exception_spx fiq_lower
.balign 0x80
serr_lower_32:
current_exception_spx serr_lower
|
L0czek/aosp-virt-package | 1,788 | libs/libvmbase/exceptions_panic.S | /*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common.h>
/**
* The following table is intended to trap any fault resulting from the very
* first memory accesses. They assume that PSCI v0.2 is available and provides
* the PSCI_SYSTEM_RESET call in an attempt to gracefully exit but otherwise
* results in the core busy-looping.
*/
/*
 * Same 16-entry layout as vector_table_el1 (4 groups x 4 entries, 0x80
 * bytes each, 0x800-aligned); every entry simply resets or hangs.
 */
.section .text.vector_table_panic, "ax"
.global vector_table_panic
.balign 0x800
vector_table_panic:
sync_cur_sp0_panic:
reset_or_hang
.balign 0x80
irq_cur_sp0_panic:
reset_or_hang
.balign 0x80
fiq_cur_sp0_panic:
reset_or_hang
.balign 0x80
serr_cur_sp0_panic:
reset_or_hang
.balign 0x80
sync_cur_spx_panic:
reset_or_hang
.balign 0x80
irq_cur_spx_panic:
reset_or_hang
.balign 0x80
fiq_cur_spx_panic:
reset_or_hang
.balign 0x80
serr_cur_spx_panic:
reset_or_hang
.balign 0x80
sync_lower_64_panic:
reset_or_hang
.balign 0x80
irq_lower_64_panic:
reset_or_hang
.balign 0x80
fiq_lower_64_panic:
reset_or_hang
.balign 0x80
serr_lower_64_panic:
reset_or_hang
.balign 0x80
sync_lower_32_panic:
reset_or_hang
.balign 0x80
irq_lower_32_panic:
reset_or_hang
.balign 0x80
fiq_lower_32_panic:
reset_or_hang
.balign 0x80
serr_lower_32_panic:
reset_or_hang
|
l2-riderft68/kyberlib | 4,063 | src/avx2/ntt.S | #include "consts.h"
.include "shuffle.inc"
/*
 * mul: multiply registers rh0-rh3 by twiddle factors. The vpmullw low
 * halves of the products go to ymm12-ymm15; vpmulhw overwrites rh0-rh3
 * with the high halves. zl0/zl1 and zh0/zh1 select the registers holding
 * the low and high twiddle words (defaults ymm15 / ymm2).
 */
.macro mul rh0,rh1,rh2,rh3,zl0=15,zl1=15,zh0=2,zh1=2
vpmullw %ymm\zl0,%ymm\rh0,%ymm12
vpmullw %ymm\zl0,%ymm\rh1,%ymm13
vpmullw %ymm\zl1,%ymm\rh2,%ymm14
vpmullw %ymm\zl1,%ymm\rh3,%ymm15
vpmulhw %ymm\zh0,%ymm\rh0,%ymm\rh0
vpmulhw %ymm\zh0,%ymm\rh1,%ymm\rh1
vpmulhw %ymm\zh1,%ymm\rh2,%ymm\rh2
vpmulhw %ymm\zh1,%ymm\rh3,%ymm\rh3
.endm
/*
 * reduce: multiply-high of the low products (ymm12-ymm15) by q, which the
 * entry point keeps in ymm0. The results are folded in by the following
 * `update` (Montgomery-style reduction).
 */
.macro reduce
vpmulhw %ymm0,%ymm12,%ymm12
vpmulhw %ymm0,%ymm13,%ymm13
vpmulhw %ymm0,%ymm14,%ymm14
vpmulhw %ymm0,%ymm15,%ymm15
.endm
/*
 * update: butterfly step. Forms the sums rl+rh and differences rl-rh,
 * then subtracts/adds the reduction terms from ymm12-ymm15. Output
 * registers are rotated: the first sum lands in rln, the remaining sums
 * shift into rl0-rl2, and the differences stay in rh0-rh3.
 */
.macro update rln,rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3
vpaddw %ymm\rh0,%ymm\rl0,%ymm\rln
vpsubw %ymm\rh0,%ymm\rl0,%ymm\rh0
vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl0
vpsubw %ymm\rh1,%ymm\rl1,%ymm\rh1
vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl1
vpsubw %ymm\rh2,%ymm\rl2,%ymm\rh2
vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl2
vpsubw %ymm\rh3,%ymm\rl3,%ymm\rh3
vpsubw %ymm12,%ymm\rln,%ymm\rln
vpaddw %ymm12,%ymm\rh0,%ymm\rh0
vpsubw %ymm13,%ymm\rl0,%ymm\rl0
vpaddw %ymm13,%ymm\rh1,%ymm\rh1
vpsubw %ymm14,%ymm\rl1,%ymm\rl1
vpaddw %ymm14,%ymm\rh2,%ymm\rh2
vpsubw %ymm15,%ymm\rl2,%ymm\rl2
vpaddw %ymm15,%ymm\rh3,%ymm\rh3
.endm
/*
 * level0: first NTT layer. Combines the 64 coefficients at word offsets
 * 64*off+{0..63} with those 128 words higher, using the first zeta pair
 * broadcast from the start of _ZETAS_EXP, and writes both halves back.
 */
.macro level0 off
vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm15
vmovdqa (64*\off+128)*2(%rdi),%ymm8
vmovdqa (64*\off+144)*2(%rdi),%ymm9
vmovdqa (64*\off+160)*2(%rdi),%ymm10
vmovdqa (64*\off+176)*2(%rdi),%ymm11
vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm2
mul 8,9,10,11
vmovdqa (64*\off+ 0)*2(%rdi),%ymm4
vmovdqa (64*\off+ 16)*2(%rdi),%ymm5
vmovdqa (64*\off+ 32)*2(%rdi),%ymm6
vmovdqa (64*\off+ 48)*2(%rdi),%ymm7
reduce
update 3,4,5,6,7,8,9,10,11
vmovdqa %ymm3,(64*\off+ 0)*2(%rdi)
vmovdqa %ymm4,(64*\off+ 16)*2(%rdi)
vmovdqa %ymm5,(64*\off+ 32)*2(%rdi)
vmovdqa %ymm6,(64*\off+ 48)*2(%rdi)
vmovdqa %ymm8,(64*\off+128)*2(%rdi)
vmovdqa %ymm9,(64*\off+144)*2(%rdi)
vmovdqa %ymm10,(64*\off+160)*2(%rdi)
vmovdqa %ymm11,(64*\off+176)*2(%rdi)
.endm
/*
 * levels1t6: NTT layers 1-6 on one 128-coefficient half (off = 0 or 1).
 * All eight data vectors stay in registers between layers; the
 * shuffle8/4/2/1 macros (shuffle.inc) regroup coefficients as the
 * butterfly distance shrinks, and each layer loads its own slice of
 * _ZETAS_EXP.
 */
.macro levels1t6 off
/* level 1 */
vmovdqa (_ZETAS_EXP+224*\off+16)*2(%rsi),%ymm15
vmovdqa (128*\off+ 64)*2(%rdi),%ymm8
vmovdqa (128*\off+ 80)*2(%rdi),%ymm9
vmovdqa (128*\off+ 96)*2(%rdi),%ymm10
vmovdqa (128*\off+112)*2(%rdi),%ymm11
vmovdqa (_ZETAS_EXP+224*\off+32)*2(%rsi),%ymm2
mul 8,9,10,11
vmovdqa (128*\off+ 0)*2(%rdi),%ymm4
vmovdqa (128*\off+ 16)*2(%rdi),%ymm5
vmovdqa (128*\off+ 32)*2(%rdi),%ymm6
vmovdqa (128*\off+ 48)*2(%rdi),%ymm7
reduce
update 3,4,5,6,7,8,9,10,11
/* level 2 */
shuffle8 5,10,7,10
shuffle8 6,11,5,11
vmovdqa (_ZETAS_EXP+224*\off+48)*2(%rsi),%ymm15
vmovdqa (_ZETAS_EXP+224*\off+64)*2(%rsi),%ymm2
mul 7,10,5,11
shuffle8 3,8,6,8
shuffle8 4,9,3,9
reduce
update 4,6,8,3,9,7,10,5,11
/* level 3 */
shuffle4 8,5,9,5
shuffle4 3,11,8,11
vmovdqa (_ZETAS_EXP+224*\off+80)*2(%rsi),%ymm15
vmovdqa (_ZETAS_EXP+224*\off+96)*2(%rsi),%ymm2
mul 9,5,8,11
shuffle4 4,7,3,7
shuffle4 6,10,4,10
reduce
update 6,3,7,4,10,9,5,8,11
/* level 4 */
shuffle2 7,8,10,8
shuffle2 4,11,7,11
vmovdqa (_ZETAS_EXP+224*\off+112)*2(%rsi),%ymm15
vmovdqa (_ZETAS_EXP+224*\off+128)*2(%rsi),%ymm2
mul 10,8,7,11
shuffle2 6,9,4,9
shuffle2 3,5,6,5
reduce
update 3,4,9,6,5,10,8,7,11
/* level 5 */
shuffle1 9,7,5,7
shuffle1 6,11,9,11
vmovdqa (_ZETAS_EXP+224*\off+144)*2(%rsi),%ymm15
vmovdqa (_ZETAS_EXP+224*\off+160)*2(%rsi),%ymm2
mul 5,7,9,11
shuffle1 3,10,6,10
shuffle1 4,8,3,8
reduce
update 4,6,10,3,8,5,7,9,11
/* level 6 — uses two distinct twiddle pairs, so all four zl/zh arguments are given */
vmovdqa (_ZETAS_EXP+224*\off+176)*2(%rsi),%ymm14
vmovdqa (_ZETAS_EXP+224*\off+208)*2(%rsi),%ymm15
vmovdqa (_ZETAS_EXP+224*\off+192)*2(%rsi),%ymm8
vmovdqa (_ZETAS_EXP+224*\off+224)*2(%rsi),%ymm2
mul 10,3,9,11,14,15,8,2
reduce
update 8,4,6,5,7,10,3,9,11
vmovdqa %ymm8,(128*\off+ 0)*2(%rdi)
vmovdqa %ymm4,(128*\off+ 16)*2(%rdi)
vmovdqa %ymm10,(128*\off+ 32)*2(%rdi)
vmovdqa %ymm3,(128*\off+ 48)*2(%rdi)
vmovdqa %ymm6,(128*\off+ 64)*2(%rdi)
vmovdqa %ymm5,(128*\off+ 80)*2(%rdi)
vmovdqa %ymm9,(128*\off+ 96)*2(%rdi)
vmovdqa %ymm11,(128*\off+112)*2(%rdi)
.endm
.text
.global ntt_avx
.global _ntt_avx
/*
 * ntt_avx(r, qdata) — SysV AMD64: rdi = 256 int16 coefficients (32-byte
 * aligned), rsi = constant table (q at _16XQ, twiddles at _ZETAS_EXP).
 * Forward NTT in place. Clobbers ymm0-ymm15; no general-purpose registers
 * are modified.
 */
ntt_avx:
_ntt_avx:
vmovdqa _16XQ*2(%rsi),%ymm0
level0 0
level0 1
levels1t6 0
levels1t6 1
ret
|
l2-riderft68/kyberlib | 2,604 | src/avx2/basemul.S | #include "consts.h"
/*
 * schoolbook \off: multiplies one 64-coefficient slice of rsi (grouped as
 * a0,b0,a1,b1) with the matching slice of rdx (c0,d0,c1,d1) using
 * schoolbook degree-1 multiplication. Products are Montgomery-reduced
 * against q (_16XQ) / qinv (_16XQINV) from the table at rcx; the b*d terms
 * are additionally multiplied by the twiddles at (%r9). Results go to the
 * same slice of rdi. Requires a 32-byte-aligned scratch slot at (%rsp)
 * (used to spill ymm13); set up by basemul_avx below.
 */
.macro schoolbook off
vmovdqa _16XQINV*2(%rcx),%ymm0
vmovdqa (64*\off+ 0)*2(%rsi),%ymm1 # a0
vmovdqa (64*\off+16)*2(%rsi),%ymm2 # b0
vmovdqa (64*\off+32)*2(%rsi),%ymm3 # a1
vmovdqa (64*\off+48)*2(%rsi),%ymm4 # b1
vpmullw %ymm0,%ymm1,%ymm9 # a0.lo
vpmullw %ymm0,%ymm2,%ymm10 # b0.lo
vpmullw %ymm0,%ymm3,%ymm11 # a1.lo
vpmullw %ymm0,%ymm4,%ymm12 # b1.lo
vmovdqa (64*\off+ 0)*2(%rdx),%ymm5 # c0
vmovdqa (64*\off+16)*2(%rdx),%ymm6 # d0
vpmulhw %ymm5,%ymm1,%ymm13 # a0c0.hi
vpmulhw %ymm6,%ymm1,%ymm1 # a0d0.hi
vpmulhw %ymm5,%ymm2,%ymm14 # b0c0.hi
vpmulhw %ymm6,%ymm2,%ymm2 # b0d0.hi
vmovdqa (64*\off+32)*2(%rdx),%ymm7 # c1
vmovdqa (64*\off+48)*2(%rdx),%ymm8 # d1
vpmulhw %ymm7,%ymm3,%ymm15 # a1c1.hi
vpmulhw %ymm8,%ymm3,%ymm3 # a1d1.hi
vpmulhw %ymm7,%ymm4,%ymm0 # b1c1.hi
vpmulhw %ymm8,%ymm4,%ymm4 # b1d1.hi
vmovdqa %ymm13,(%rsp) # spill a0c0.hi; all 16 ymm regs are live here
vpmullw %ymm5,%ymm9,%ymm13 # a0c0.lo
vpmullw %ymm6,%ymm9,%ymm9 # a0d0.lo
vpmullw %ymm5,%ymm10,%ymm5 # b0c0.lo
vpmullw %ymm6,%ymm10,%ymm10 # b0d0.lo
vpmullw %ymm7,%ymm11,%ymm6 # a1c1.lo
vpmullw %ymm8,%ymm11,%ymm11 # a1d1.lo
vpmullw %ymm7,%ymm12,%ymm7 # b1c1.lo
vpmullw %ymm8,%ymm12,%ymm12 # b1d1.lo
vmovdqa _16XQ*2(%rcx),%ymm8
vpmulhw %ymm8,%ymm13,%ymm13
vpmulhw %ymm8,%ymm9,%ymm9
vpmulhw %ymm8,%ymm5,%ymm5
vpmulhw %ymm8,%ymm10,%ymm10
vpmulhw %ymm8,%ymm6,%ymm6
vpmulhw %ymm8,%ymm11,%ymm11
vpmulhw %ymm8,%ymm7,%ymm7
vpmulhw %ymm8,%ymm12,%ymm12
vpsubw (%rsp),%ymm13,%ymm13 # -a0c0
vpsubw %ymm9,%ymm1,%ymm9 # a0d0
vpsubw %ymm5,%ymm14,%ymm5 # b0c0
vpsubw %ymm10,%ymm2,%ymm10 # b0d0
vpsubw %ymm6,%ymm15,%ymm6 # a1c1
vpsubw %ymm11,%ymm3,%ymm11 # a1d1
vpsubw %ymm7,%ymm0,%ymm7 # b1c1
vpsubw %ymm12,%ymm4,%ymm12 # b1d1
vmovdqa (%r9),%ymm0 # zeta.lo
vmovdqa 32(%r9),%ymm1 # zeta.hi
vpmullw %ymm0,%ymm10,%ymm2
vpmullw %ymm0,%ymm12,%ymm3
vpmulhw %ymm1,%ymm10,%ymm10
vpmulhw %ymm1,%ymm12,%ymm12
vpmulhw %ymm8,%ymm2,%ymm2
vpmulhw %ymm8,%ymm3,%ymm3
vpsubw %ymm2,%ymm10,%ymm10 # rb0d0
vpsubw %ymm3,%ymm12,%ymm12 # rb1d1
vpaddw %ymm5,%ymm9,%ymm9 # a0d0 + b0c0
vpaddw %ymm7,%ymm11,%ymm11 # a1d1 + b1c1
vpsubw %ymm13,%ymm10,%ymm13 # a0c0 + rb0d0
vpsubw %ymm12,%ymm6,%ymm6 # a1c1 - rb1d1
vmovdqa %ymm13,(64*\off+ 0)*2(%rdi)
vmovdqa %ymm9,(64*\off+16)*2(%rdi)
vmovdqa %ymm6,(64*\off+32)*2(%rdi)
vmovdqa %ymm11,(64*\off+48)*2(%rdi)
.endm
.text
.global basemul_avx
.global _basemul_avx
/*
 * basemul_avx(r, a, b, qdata) — SysV AMD64: rdi = result, rsi/rdx = input
 * polynomials (256 int16 each, 32-byte aligned), rcx = constant table.
 * Pointwise multiplication in the NTT domain. The original rsp is kept in
 * r8 while the stack is aligned down to 32 bytes with one 32-byte spill
 * slot for `schoolbook`; r9 walks the twiddle table starting at
 * _ZETAS_EXP+176 (the uneven strides skip the unused table sections).
 */
basemul_avx:
_basemul_avx:
mov %rsp,%r8
and $-32,%rsp
sub $32,%rsp
lea (_ZETAS_EXP+176)*2(%rcx),%r9
schoolbook 0
add $32*2,%r9
schoolbook 1
add $192*2,%r9
schoolbook 2
add $32*2,%r9
schoolbook 3
mov %r8,%rsp
ret
|
l2-riderft68/kyberlib | 4,672 | src/avx2/invntt.S | #include "consts.h"
.include "shuffle.inc"
.include "fq.inc"
/*
 * butterfly: inverse-NTT butterfly. Sums rl+rh are written back into the
 * rl registers; the differences rh-rl are multiplied by the twiddles in
 * zl0/zl1 (low words) and zh0/zh1 (high words) and Montgomery-reduced
 * against q in ymm0, overwriting the rh registers. Clobbers ymm12-ymm15.
 */
.macro butterfly rl0,rl1,rl2,rl3,rh0,rh1,rh2,rh3,zl0=2,zl1=2,zh0=3,zh1=3
vpsubw %ymm\rl0,%ymm\rh0,%ymm12
vpaddw %ymm\rh0,%ymm\rl0,%ymm\rl0
vpsubw %ymm\rl1,%ymm\rh1,%ymm13
vpmullw %ymm\zl0,%ymm12,%ymm\rh0
vpaddw %ymm\rh1,%ymm\rl1,%ymm\rl1
vpsubw %ymm\rl2,%ymm\rh2,%ymm14
vpmullw %ymm\zl0,%ymm13,%ymm\rh1
vpaddw %ymm\rh2,%ymm\rl2,%ymm\rl2
vpsubw %ymm\rl3,%ymm\rh3,%ymm15
vpmullw %ymm\zl1,%ymm14,%ymm\rh2
vpaddw %ymm\rh3,%ymm\rl3,%ymm\rl3
vpmullw %ymm\zl1,%ymm15,%ymm\rh3
vpmulhw %ymm\zh0,%ymm12,%ymm12
vpmulhw %ymm\zh0,%ymm13,%ymm13
vpmulhw %ymm\zh1,%ymm14,%ymm14
vpmulhw %ymm\zh1,%ymm15,%ymm15
vpmulhw %ymm0,%ymm\rh0,%ymm\rh0
vpmulhw %ymm0,%ymm\rh1,%ymm\rh1
vpmulhw %ymm0,%ymm\rh2,%ymm\rh2
vpmulhw %ymm0,%ymm\rh3,%ymm\rh3
#
#
vpsubw %ymm\rh0,%ymm12,%ymm\rh0
vpsubw %ymm\rh1,%ymm13,%ymm\rh1
vpsubw %ymm\rh2,%ymm14,%ymm\rh2
vpsubw %ymm\rh3,%ymm15,%ymm\rh3
.endm
/*
 * intt_levels0t5: inverse-NTT layers 0-5 on one 128-coefficient half
 * (off = 0 or 1). Inputs are first scaled by _16XFLO/_16XFHI via
 * fqmulprecomp (fq.inc); twiddles are read back-to-front, reversed with
 * the _REVIDXB/_REVIDXD permutations; red16 keeps coefficients in range
 * after layers 2 and 4. shuffle1/2/4/8 (shuffle.inc) regroup coefficients
 * as the butterfly distance grows.
 */
.macro intt_levels0t5 off
/* level 0 */
vmovdqa _16XFLO*2(%rsi),%ymm2
vmovdqa _16XFHI*2(%rsi),%ymm3
vmovdqa (128*\off+ 0)*2(%rdi),%ymm4
vmovdqa (128*\off+ 32)*2(%rdi),%ymm6
vmovdqa (128*\off+ 16)*2(%rdi),%ymm5
vmovdqa (128*\off+ 48)*2(%rdi),%ymm7
fqmulprecomp 2,3,4
fqmulprecomp 2,3,6
fqmulprecomp 2,3,5
fqmulprecomp 2,3,7
vmovdqa (128*\off+ 64)*2(%rdi),%ymm8
vmovdqa (128*\off+ 96)*2(%rdi),%ymm10
vmovdqa (128*\off+ 80)*2(%rdi),%ymm9
vmovdqa (128*\off+112)*2(%rdi),%ymm11
fqmulprecomp 2,3,8
fqmulprecomp 2,3,10
fqmulprecomp 2,3,9
fqmulprecomp 2,3,11
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+208)*2(%rsi),%ymm15
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+176)*2(%rsi),%ymm1
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+224)*2(%rsi),%ymm2
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+192)*2(%rsi),%ymm3
vmovdqa _REVIDXB*2(%rsi),%ymm12
vpshufb %ymm12,%ymm15,%ymm15
vpshufb %ymm12,%ymm1,%ymm1
vpshufb %ymm12,%ymm2,%ymm2
vpshufb %ymm12,%ymm3,%ymm3
butterfly 4,5,8,9,6,7,10,11,15,1,2,3
/* level 1 */
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+144)*2(%rsi),%ymm2
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+160)*2(%rsi),%ymm3
vmovdqa _REVIDXB*2(%rsi),%ymm1
vpshufb %ymm1,%ymm2,%ymm2
vpshufb %ymm1,%ymm3,%ymm3
butterfly 4,5,6,7,8,9,10,11,2,2,3,3
shuffle1 4,5,3,5
shuffle1 6,7,4,7
shuffle1 8,9,6,9
shuffle1 10,11,8,11
/* level 2 */
vmovdqa _REVIDXD*2(%rsi),%ymm12
vpermd (_ZETAS_EXP+(1-\off)*224+112)*2(%rsi),%ymm12,%ymm2
vpermd (_ZETAS_EXP+(1-\off)*224+128)*2(%rsi),%ymm12,%ymm10
butterfly 3,4,6,8,5,7,9,11,2,2,10,10
vmovdqa _16XV*2(%rsi),%ymm1
red16 3
shuffle2 3,4,10,4
shuffle2 6,8,3,8
shuffle2 5,7,6,7
shuffle2 9,11,5,11
/* level 3 */
vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+80)*2(%rsi),%ymm2
vpermq $0x1B,(_ZETAS_EXP+(1-\off)*224+96)*2(%rsi),%ymm9
butterfly 10,3,6,5,4,8,7,11,2,2,9,9
shuffle4 10,3,9,3
shuffle4 6,5,10,5
shuffle4 4,8,6,8
shuffle4 7,11,4,11
/* level 4 */
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+48)*2(%rsi),%ymm2
vpermq $0x4E,(_ZETAS_EXP+(1-\off)*224+64)*2(%rsi),%ymm7
butterfly 9,10,6,4,3,5,8,11,2,2,7,7
red16 9
shuffle8 9,10,7,10
shuffle8 6,4,9,4
shuffle8 3,5,6,5
shuffle8 8,11,3,11
/* level 5 */
vmovdqa (_ZETAS_EXP+(1-\off)*224+16)*2(%rsi),%ymm2
vmovdqa (_ZETAS_EXP+(1-\off)*224+32)*2(%rsi),%ymm8
butterfly 7,9,6,3,10,4,5,11,2,2,8,8
vmovdqa %ymm7,(128*\off+ 0)*2(%rdi)
vmovdqa %ymm9,(128*\off+ 16)*2(%rdi)
vmovdqa %ymm6,(128*\off+ 32)*2(%rdi)
vmovdqa %ymm3,(128*\off+ 48)*2(%rdi)
vmovdqa %ymm10,(128*\off+ 64)*2(%rdi)
vmovdqa %ymm4,(128*\off+ 80)*2(%rdi)
vmovdqa %ymm5,(128*\off+ 96)*2(%rdi)
vmovdqa %ymm11,(128*\off+112)*2(%rdi)
.endm
/*
 * intt_level6: final inverse-NTT layer, combining coefficients at word
 * offsets 64*off+{0..63} with those 128 words higher, using the first
 * zeta pair of _ZETAS_EXP. Applies red16 only to the off == 0 quarter.
 */
.macro intt_level6 off
/* level 6 */
vmovdqa (64*\off+ 0)*2(%rdi),%ymm4
vmovdqa (64*\off+128)*2(%rdi),%ymm8
vmovdqa (64*\off+ 16)*2(%rdi),%ymm5
vmovdqa (64*\off+144)*2(%rdi),%ymm9
vpbroadcastq (_ZETAS_EXP+0)*2(%rsi),%ymm2
vmovdqa (64*\off+ 32)*2(%rdi),%ymm6
vmovdqa (64*\off+160)*2(%rdi),%ymm10
vmovdqa (64*\off+ 48)*2(%rdi),%ymm7
vmovdqa (64*\off+176)*2(%rdi),%ymm11
vpbroadcastq (_ZETAS_EXP+4)*2(%rsi),%ymm3
butterfly 4,5,6,7,8,9,10,11
.if \off == 0
red16 4
.endif
vmovdqa %ymm4,(64*\off+ 0)*2(%rdi)
vmovdqa %ymm5,(64*\off+ 16)*2(%rdi)
vmovdqa %ymm6,(64*\off+ 32)*2(%rdi)
vmovdqa %ymm7,(64*\off+ 48)*2(%rdi)
vmovdqa %ymm8,(64*\off+128)*2(%rdi)
vmovdqa %ymm9,(64*\off+144)*2(%rdi)
vmovdqa %ymm10,(64*\off+160)*2(%rdi)
vmovdqa %ymm11,(64*\off+176)*2(%rdi)
.endm
.text
.global invntt_avx
.global _invntt_avx
/*
 * invntt_avx(r, qdata) — SysV AMD64: rdi = 256 int16 coefficients
 * (32-byte aligned), rsi = constant table. Inverse NTT in place.
 * Clobbers ymm0-ymm15; no general-purpose registers are modified.
 */
invntt_avx:
_invntt_avx:
vmovdqa _16XQ*2(%rsi),%ymm0
intt_levels0t5 0
intt_levels0t5 1
intt_level6 0
intt_level6 1
ret
|
l2-riderft68/kyberlib | 1,566 | src/avx2/fq.S | #include "consts.h"
.include "fq.inc"
.text
/*
 * reduce128_avx: internal helper — applies red16 (fq.inc) to the 128
 * coefficients at (%rdi). Expects the caller to have preloaded q in ymm0
 * and v in ymm1 (done by reduce_avx). Clobbers ymm2-ymm9 plus whatever
 * red16 uses as scratch.
 */
reduce128_avx:
#load
vmovdqa (%rdi),%ymm2
vmovdqa 32(%rdi),%ymm3
vmovdqa 64(%rdi),%ymm4
vmovdqa 96(%rdi),%ymm5
vmovdqa 128(%rdi),%ymm6
vmovdqa 160(%rdi),%ymm7
vmovdqa 192(%rdi),%ymm8
vmovdqa 224(%rdi),%ymm9
red16 2
red16 3
red16 4
red16 5
red16 6
red16 7
red16 8
red16 9
#store
vmovdqa %ymm2,(%rdi)
vmovdqa %ymm3,32(%rdi)
vmovdqa %ymm4,64(%rdi)
vmovdqa %ymm5,96(%rdi)
vmovdqa %ymm6,128(%rdi)
vmovdqa %ymm7,160(%rdi)
vmovdqa %ymm8,192(%rdi)
vmovdqa %ymm9,224(%rdi)
ret
.global reduce_avx
.global _reduce_avx
/*
 * reduce_avx(r, qdata) — SysV AMD64: rdi = 256 int16 coefficients
 * (32-byte aligned), rsi = constant table (_16XQ, _16XV). Reduces the
 * whole polynomial in place via two reduce128_avx calls; rdi is advanced
 * by 256 bytes (128 coefficients) between them.
 */
reduce_avx:
_reduce_avx:
#consts
vmovdqa _16XQ*2(%rsi),%ymm0
vmovdqa _16XV*2(%rsi),%ymm1
call reduce128_avx
add $256,%rdi
call reduce128_avx
ret
/*
 * tomont128_avx: internal helper — multiplies the 128 coefficients at
 * (%rdi) by the precomputed Montgomery-squared factor via fqmulprecomp
 * (fq.inc). Expects q in ymm0 and the factor's lo/hi words in ymm1/ymm2,
 * preloaded by tomont_avx. Clobbers ymm3-ymm15.
 */
tomont128_avx:
#load
vmovdqa (%rdi),%ymm3
vmovdqa 32(%rdi),%ymm4
vmovdqa 64(%rdi),%ymm5
vmovdqa 96(%rdi),%ymm6
vmovdqa 128(%rdi),%ymm7
vmovdqa 160(%rdi),%ymm8
vmovdqa 192(%rdi),%ymm9
vmovdqa 224(%rdi),%ymm10
fqmulprecomp 1,2,3,11
fqmulprecomp 1,2,4,12
fqmulprecomp 1,2,5,13
fqmulprecomp 1,2,6,14
fqmulprecomp 1,2,7,15
fqmulprecomp 1,2,8,11
fqmulprecomp 1,2,9,12
fqmulprecomp 1,2,10,13
#store
vmovdqa %ymm3,(%rdi)
vmovdqa %ymm4,32(%rdi)
vmovdqa %ymm5,64(%rdi)
vmovdqa %ymm6,96(%rdi)
vmovdqa %ymm7,128(%rdi)
vmovdqa %ymm8,160(%rdi)
vmovdqa %ymm9,192(%rdi)
vmovdqa %ymm10,224(%rdi)
ret
.global tomont_avx
.global _tomont_avx
/*
 * tomont_avx(r, qdata) — SysV AMD64: rdi = 256 int16 coefficients
 * (32-byte aligned), rsi = constant table (_16XQ, _16XMONTSQLO/HI).
 * Converts the whole polynomial to the Montgomery domain in place.
 */
tomont_avx:
_tomont_avx:
#consts
vmovdqa _16XQ*2(%rsi),%ymm0
vmovdqa _16XMONTSQLO*2(%rsi),%ymm1
vmovdqa _16XMONTSQHI*2(%rsi),%ymm2
call tomont128_avx
add $256,%rdi
call tomont128_avx
ret
|
l2-riderft68/kyberlib | 4,329 | src/avx2/shuffle.S | #include "consts.h"
.include "fq.inc"
.include "shuffle.inc"
/*
nttpack_avx:
#load
vmovdqa (%rdi),%ymm4
vmovdqa 32(%rdi),%ymm5
vmovdqa 64(%rdi),%ymm6
vmovdqa 96(%rdi),%ymm7
vmovdqa 128(%rdi),%ymm8
vmovdqa 160(%rdi),%ymm9
vmovdqa 192(%rdi),%ymm10
vmovdqa 224(%rdi),%ymm11
shuffle1 4,5,3,5
shuffle1 6,7,4,7
shuffle1 8,9,6,9
shuffle1 10,11,8,11
shuffle2 3,4,10,4
shuffle2 6,8,3,8
shuffle2 5,7,6,7
shuffle2 9,11,5,11
shuffle4 10,3,9,3
shuffle4 6,5,10,5
shuffle4 4,8,6,8
shuffle4 7,11,4,11
shuffle8 9,10,7,10
shuffle8 6,4,9,4
shuffle8 3,5,6,5
shuffle8 8,11,3,11
#store
vmovdqa %ymm7,(%rdi)
vmovdqa %ymm9,32(%rdi)
vmovdqa %ymm6,64(%rdi)
vmovdqa %ymm3,96(%rdi)
vmovdqa %ymm10,128(%rdi)
vmovdqa %ymm4,160(%rdi)
vmovdqa %ymm5,192(%rdi)
vmovdqa %ymm11,224(%rdi)
ret
*/
.text
#-----------------------------------------------------------------------
# nttunpack128_avx: permute the 128 16-bit coefficients at (%rdi), in
# place.  Inverse of the (commented-out) nttpack_avx permutation above:
# a word-level transpose built from the shuffle8/4/2/1 macros
# (shuffle.inc), each of which interleaves lane blocks of the named
# width between two registers -- args are (src_a, src_b, dst_a, dst_b)
# register numbers.  Note the output registers rotate through the
# cascade, which is why the final stores are not in register order.
# All loads/stores are 32-byte aligned (vmovdqa).
#-----------------------------------------------------------------------
nttunpack128_avx:
#load
vmovdqa (%rdi),%ymm4
vmovdqa 32(%rdi),%ymm5
vmovdqa 64(%rdi),%ymm6
vmovdqa 96(%rdi),%ymm7
vmovdqa 128(%rdi),%ymm8
vmovdqa 160(%rdi),%ymm9
vmovdqa 192(%rdi),%ymm10
vmovdqa 224(%rdi),%ymm11
shuffle8 4,8,3,8
shuffle8 5,9,4,9
shuffle8 6,10,5,10
shuffle8 7,11,6,11
shuffle4 3,5,7,5
shuffle4 8,10,3,10
shuffle4 4,6,8,6
shuffle4 9,11,4,11
shuffle2 7,8,9,8
shuffle2 5,6,7,6
shuffle2 3,4,5,4
shuffle2 10,11,3,11
shuffle1 9,5,10,5
shuffle1 8,4,9,4
shuffle1 7,3,8,3
shuffle1 6,11,7,11
#store
vmovdqa %ymm10,(%rdi)
vmovdqa %ymm5,32(%rdi)
vmovdqa %ymm9,64(%rdi)
vmovdqa %ymm4,96(%rdi)
vmovdqa %ymm8,128(%rdi)
vmovdqa %ymm3,160(%rdi)
vmovdqa %ymm7,192(%rdi)
vmovdqa %ymm11,224(%rdi)
ret
#-----------------------------------------------------------------------
# nttunpack_avx(%rdi = int16_t r[256])
# Applies nttunpack128_avx to both 128-coefficient halves of the
# polynomial (second half starts 256 bytes in).
#-----------------------------------------------------------------------
.global nttunpack_avx
.global _nttunpack_avx
nttunpack_avx:
_nttunpack_avx:
call nttunpack128_avx # coefficients 0..127
add $256,%rdi # advance 128 words = 256 bytes
call nttunpack128_avx # coefficients 128..255
ret
#-----------------------------------------------------------------------
# ntttobytes128_avx: serialize 128 16-bit coefficients at (%rsi) into
# 192 packed bytes at (%rdi) -- two 12-bit values per 3 output bytes.
# Expects ymm0 = 16x q (loaded by ntttobytes_avx from %rdx).
# csubq (macro; second arg is a scratch register) conditionally
# subtracts q so every coefficient fits in 12 bits before packing.
# The shift/or section merges coefficient pairs (12-bit fields via the
# 12/8/4-bit shifts); the shuffle cascade then restores byte order.
# Input is aligned (vmovdqa); output may be unaligned (vmovdqu).
#-----------------------------------------------------------------------
ntttobytes128_avx:
#load
vmovdqa (%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm7
vmovdqa 96(%rsi),%ymm8
vmovdqa 128(%rsi),%ymm9
vmovdqa 160(%rsi),%ymm10
vmovdqa 192(%rsi),%ymm11
vmovdqa 224(%rsi),%ymm12
#csubq
csubq 5,13
csubq 6,13
csubq 7,13
csubq 8,13
csubq 9,13
csubq 10,13
csubq 11,13
csubq 12,13
#bitpack
vpsllw $12,%ymm6,%ymm4
vpor %ymm4,%ymm5,%ymm4
vpsrlw $4,%ymm6,%ymm5
vpsllw $8,%ymm7,%ymm6
vpor %ymm5,%ymm6,%ymm5
vpsrlw $8,%ymm7,%ymm6
vpsllw $4,%ymm8,%ymm7
vpor %ymm6,%ymm7,%ymm6
vpsllw $12,%ymm10,%ymm7
vpor %ymm7,%ymm9,%ymm7
vpsrlw $4,%ymm10,%ymm8
vpsllw $8,%ymm11,%ymm9
vpor %ymm8,%ymm9,%ymm8
vpsrlw $8,%ymm11,%ymm9
vpsllw $4,%ymm12,%ymm10
vpor %ymm9,%ymm10,%ymm9
shuffle1 4,5,3,5
shuffle1 6,7,4,7
shuffle1 8,9,6,9
shuffle2 3,4,8,4
shuffle2 6,5,3,5
shuffle2 7,9,6,9
shuffle4 8,3,7,3
shuffle4 6,4,8,4
shuffle4 5,9,6,9
shuffle8 7,8,5,8
shuffle8 6,3,7,3
shuffle8 4,9,6,9
#store
vmovdqu %ymm5,(%rdi)
vmovdqu %ymm7,32(%rdi)
vmovdqu %ymm6,64(%rdi)
vmovdqu %ymm8,96(%rdi)
vmovdqu %ymm3,128(%rdi)
vmovdqu %ymm9,160(%rdi)
ret
#-----------------------------------------------------------------------
# ntttobytes_avx(%rdi = uint8_t out[384], %rsi = int16_t r[256],
#                %rdx = constant table)
# Serializes the full polynomial: 256 coefficients -> 384 bytes.
# Input advances by 256 bytes (128 words) per half, output by 192 bytes.
#-----------------------------------------------------------------------
.global ntttobytes_avx
.global _ntttobytes_avx
ntttobytes_avx:
_ntttobytes_avx:
#consts
vmovdqa _16XQ*2(%rdx),%ymm0 # ymm0 = 16 copies of q for csubq
call ntttobytes128_avx # first half
add $256,%rsi # next 128 input words
add $192,%rdi # next 192 output bytes
call ntttobytes128_avx # second half
ret
#-----------------------------------------------------------------------
# nttfrombytes128_avx: deserialize 192 packed bytes at (%rsi) into 128
# 16-bit coefficients at (%rdi).  Inverse of ntttobytes128_avx.
# Expects ymm0 = _16XMASK (loaded by nttfrombytes_avx; presumably
# 0x0FFF per 16-bit lane -- confirm in consts.h), used to mask each
# unpacked value to 12 bits via the vpand instructions.
# Input loads are unaligned (vmovdqu); output stores aligned (vmovdqa).
#-----------------------------------------------------------------------
nttfrombytes128_avx:
#load
vmovdqu (%rsi),%ymm4
vmovdqu 32(%rsi),%ymm5
vmovdqu 64(%rsi),%ymm6
vmovdqu 96(%rsi),%ymm7
vmovdqu 128(%rsi),%ymm8
vmovdqu 160(%rsi),%ymm9
shuffle8 4,7,3,7
shuffle8 5,8,4,8
shuffle8 6,9,5,9
shuffle4 3,8,6,8
shuffle4 7,5,3,5
shuffle4 4,9,7,9
shuffle2 6,5,4,5
shuffle2 8,7,6,7
shuffle2 3,9,8,9
shuffle1 4,7,10,7
shuffle1 5,8,4,8
shuffle1 6,9,5,9
#bitunpack
vpsrlw $12,%ymm10,%ymm11
vpsllw $4,%ymm7,%ymm12
vpor %ymm11,%ymm12,%ymm11
vpand %ymm0,%ymm10,%ymm10
vpand %ymm0,%ymm11,%ymm11
vpsrlw $8,%ymm7,%ymm12
vpsllw $8,%ymm4,%ymm13
vpor %ymm12,%ymm13,%ymm12
vpand %ymm0,%ymm12,%ymm12
vpsrlw $4,%ymm4,%ymm13
vpand %ymm0,%ymm13,%ymm13
vpsrlw $12,%ymm8,%ymm14
vpsllw $4,%ymm5,%ymm15
vpor %ymm14,%ymm15,%ymm14
vpand %ymm0,%ymm8,%ymm8
vpand %ymm0,%ymm14,%ymm14
vpsrlw $8,%ymm5,%ymm15
vpsllw $8,%ymm9,%ymm1
vpor %ymm15,%ymm1,%ymm15
vpand %ymm0,%ymm15,%ymm15
vpsrlw $4,%ymm9,%ymm1
vpand %ymm0,%ymm1,%ymm1
#store
vmovdqa %ymm10,(%rdi)
vmovdqa %ymm11,32(%rdi)
vmovdqa %ymm12,64(%rdi)
vmovdqa %ymm13,96(%rdi)
vmovdqa %ymm8,128(%rdi)
vmovdqa %ymm14,160(%rdi)
vmovdqa %ymm15,192(%rdi)
vmovdqa %ymm1,224(%rdi)
ret
#-----------------------------------------------------------------------
# nttfrombytes_avx(%rdi = int16_t r[256], %rsi = const uint8_t in[384],
#                  %rdx = constant table)
# Deserializes the full polynomial: 384 bytes -> 256 coefficients.
# Output advances by 256 bytes (128 words) per half, input by 192 bytes.
#-----------------------------------------------------------------------
.global nttfrombytes_avx
.global _nttfrombytes_avx
nttfrombytes_avx:
_nttfrombytes_avx:
#consts
vmovdqa _16XMASK*2(%rdx),%ymm0 # 12-bit lane mask used by the unpack
call nttfrombytes128_avx # first half
add $256,%rdi # next 128 output words
add $192,%rsi # next 192 input bytes
call nttfrombytes128_avx # second half
ret
|
l3gacyb3ta/compiler-book-following | 451 | out.s | .globl main
# int main(void) -- toy-compiler output computing 20 + 3 = 23.
# System V AMD64; frame-pointer prologue, 32 bytes of locals.
#
# BUG FIX: the original used overlapping stack slots.  A quad store at
# -4(%rbp) writes bytes rbp-4..rbp+3, clobbering the low half of the
# saved %rbp at 0(%rbp), and the quad store at -8(%rbp) then overwrote
# the -4 slot, so `movq -4(%rbp)` reloaded 0 instead of 20 and main
# returned 3 while also corrupting the caller's frame pointer.
# Slots below are disjoint, 8-byte aligned, and all within the 32
# reserved bytes: a @ -8, b @ -16, tmp @ -24, result(int) @ -28.
main:
pushq %rbp
movq %rsp, %rbp
subq $32, %rsp
movq $20, %r10
movq %r10, -8(%rbp)      # a = 20
movl $3, %r10d
movslq %r10d,%r11
movq %r11, -16(%rbp)     # b = (long)3
movq -8(%rbp), %r10
movq %r10, -24(%rbp)     # tmp = a
movq -16(%rbp), %r10
addq %r10, -24(%rbp)     # tmp += b  -> 23
movl -24(%rbp), %r10d
movl %r10d, -28(%rbp)    # result (int) = (int)tmp
movl -28(%rbp), %eax     # return value
movq %rbp, %rsp
popq %rbp
ret
# Unreachable implicit "return 0" epilogue emitted by the compiler;
# kept for fidelity with the original output.
movl $0, %eax
movq %rbp, %rsp
popq %rbp
ret
.section .note.GNU-stack,"",@progbits
|
laduiw/rcore-lab | 676 | os/src/task/switch.S | .altmacro
# TaskContext layout (8-byte slots): [0]=ra, [1]=sp, [2..13]=s0..s11.
# Only callee-saved state is stored: caller-saved registers are already
# spilled by the compiler around the call to __switch.
# SAVE_SN n: store s<n> into slot n+2 of the current context (a0).
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
# LOAD_SN n: load s<n> from slot n+2 of the next context (a1).
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
    # __switch(
    #     current_task_cx_ptr: *mut TaskContext,
    #     next_task_cx_ptr: *const TaskContext
    # )
    # Swaps execution from the current task to the next: returns on the
    # next task's saved ra with the next task's stack and s-registers.
    # save kernel stack of current task
    sd sp, 8(a0)
    # save ra & s0~s11 of current execution
    sd ra, 0(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n + 1
    .endr
    # restore ra & s0~s11 of next execution
    ld ra, 0(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n + 1
    .endr
    # restore kernel stack of next task
    ld sp, 8(a1)
    ret
|
laduiw/rcore-lab | 2,218 | os/src/trap/trap.S | .altmacro
# Trampoline trap entry/exit (mapped into both user and kernel space,
# hence the dedicated .text.trampoline section below).
# TrapContext layout (8-byte slots): [0..31] = x0..x31 (x0/x2/x4 slots
# reserved; sp stored in slot 2), [32] = sstatus, [33] = sepc,
# [34] = kernel_satp, [35] = kernel_sp, [36] = trap_handler address.
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
# __alltraps: trap from user mode.  sscratch holds the user-space
# address of the TrapContext; save all user state there, then switch
# to the kernel address space and jump to the kernel trap handler.
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    # (jr, not call: the handler never returns here -- it re-enters
    # user space through __restore)
    jr t1
# __restore: return to user mode.
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret
.align 2
# __alltraps_k: trap taken while already in kernel mode.  Saves a
# 34-slot frame on the current kernel stack (no address-space switch).
# NOTE(review): here sscratch is expected to hold the address of the
# kernel trap handler (jalr t2 below) -- set up by kernel init; confirm.
__alltraps_k:
    addi sp, sp, -34*8
    sd x1, 1*8(sp)
    sd x3, 3*8(sp)
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    mv a0, sp
    csrr t2, sscratch
    jalr t2
# __restore_k: unwind the kernel trap frame saved by __alltraps_k and
# resume the interrupted kernel code.
__restore_k:
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    addi sp, sp, 34*8
    sret
|
lal-buturabi/zkp-other-works | 8,449 | sp1s/sp1/zkvm/entrypoint/src/memset.s | // This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memset.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contibutors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memset.c"
.globl memset
.p2align 2
.type memset,@function
//-----------------------------------------------------------------------
// void *memset(void *dest /*a0*/, int c /*a1*/, size_t n /*a2*/)
// RV32IM build of musl memset (clang -O3 output); returns dest in a0.
// Strategy: write up to 4 head and 4 tail bytes bytewise (covers n < 9
// and masks any misalignment), then word-align the pointer, splat the
// fill byte across a word, store head/tail words, and finish large
// fills with a 32-byte unrolled word loop.
//-----------------------------------------------------------------------
memset:
beqz a2, .LBB0_9memset
// head/tail bytes: first/last, then 2nd/2nd-last, ... (a3 = dest + n)
sb a1, 0(a0)
add a3, a2, a0
li a4, 3
sb a1, -1(a3)
bltu a2, a4, .LBB0_9memset
sb a1, 1(a0)
sb a1, 2(a0)
sb a1, -2(a3)
li a4, 7
sb a1, -3(a3)
bltu a2, a4, .LBB0_9memset
sb a1, 3(a0)
li a5, 9
sb a1, -4(a3)
bltu a2, a5, .LBB0_9memset
// n >= 9: word fill.  a4 = (-dest) & 3 = bytes to word alignment;
// a3 = aligned cursor, a2 = remaining length rounded down to words.
neg a3, a0
andi a4, a3, 3
add a3, a0, a4
sub a2, a2, a4
andi a2, a2, -4
// splat fill byte: a1 = (c & 0xff) * 0x01010101  (lui 4112 = 0x1010000)
andi a1, a1, 255
lui a4, 4112
addi a4, a4, 257
mul a1, a1, a4
// store first/last aligned words (a4 = one past the last word)
sw a1, 0(a3)
add a4, a3, a2
sw a1, -4(a4)
bltu a2, a5, .LBB0_9memset
sw a1, 4(a3)
sw a1, 8(a3)
sw a1, -12(a4)
li a5, 25
sw a1, -8(a4)
bltu a2, a5, .LBB0_9memset
sw a1, 12(a3)
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, -28(a4)
sw a1, -24(a4)
sw a1, -20(a4)
// skip the 24 or 28 bytes already written at the head (depends on
// whether the aligned cursor is 8-byte aligned: a5 = (a3 & 4) | 24)
andi a5, a3, 4
ori a5, a5, 24
sub a2, a2, a5
li a6, 32
sw a1, -16(a4)
bltu a2, a6, .LBB0_9memset
add a3, a3, a5
li a4, 31
// main loop: 8 word stores (32 bytes) per iteration while a2 > 31
.LBB0_8memset:
sw a1, 0(a3)
sw a1, 4(a3)
sw a1, 8(a3)
sw a1, 12(a3)
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, 28(a3)
addi a2, a2, -32
addi a3, a3, 32
bltu a4, a2, .LBB0_8memset
.LBB0_9memset:
ret
.Lfunc_end0memset:
.size memset, .Lfunc_end0memset-memset
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig |
lal-buturabi/zkp-other-works | 11,854 | sp1s/sp1/zkvm/entrypoint/src/memcpy.s | // This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memcpy.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contibutors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memcpy.c"
.globl memcpy
.p2align 2
.type memcpy,@function
//-----------------------------------------------------------------------
// void *memcpy(void *dest /*a0*/, const void *src /*a1*/, size_t n /*a2*/)
// RV32IM build of musl memcpy (clang -O3 output); returns dest in a0.
// Plan: copy single bytes until src is word-aligned, then copy words --
// directly when dest is also word-aligned, otherwise by recombining
// adjacent aligned src words with shift pairs (>>8/<<24, >>16/<<16,
// >>24/<<8 according to dest & 3) -- and finish the tail bytewise.
// In the word sections a3/a4 are the cursors (roles vary per section)
// and a2 is the count of bytes still to copy.
//-----------------------------------------------------------------------
memcpy:
andi a3, a1, 3
seqz a3, a3
seqz a4, a2
or a3, a3, a4
bnez a3, .LBBmemcpy0_11
// head loop: copy bytes until src is 4-byte aligned or n reaches 0
// (a4/a1 = src cursor, a3/a6 = dest cursor)
addi a5, a1, 1
mv a6, a0
.LBBmemcpy0_2:
lb a7, 0(a1)
addi a4, a1, 1
addi a3, a6, 1
sb a7, 0(a6)
addi a2, a2, -1
andi a1, a5, 3
snez a1, a1
snez a6, a2
and a7, a1, a6
addi a5, a5, 1
mv a1, a4
mv a6, a3
bnez a7, .LBBmemcpy0_2
andi a1, a3, 3
beqz a1, .LBBmemcpy0_12
// src aligned, dest misaligned (a1 = dest & 3): dispatch on a1
.LBBmemcpy0_4:
li a5, 32
bltu a2, a5, .LBBmemcpy0_26
li a5, 3
beq a1, a5, .LBBmemcpy0_19
li a5, 2
beq a1, a5, .LBBmemcpy0_22
li a5, 1
bne a1, a5, .LBBmemcpy0_26
// dest & 3 == 1: store 3 bytes to align dest, then combine aligned
// src words as (prev >> 24) | (next << 8).  In the loop below a1 is
// the dest cursor and a3 the src cursor (advanced 16 bytes/iter).
lw a5, 0(a4)
sb a5, 0(a3)
srli a1, a5, 8
sb a1, 1(a3)
srli a6, a5, 16
addi a1, a3, 3
sb a6, 2(a3)
addi a2, a2, -3
addi a3, a4, 16
li a4, 16
.LBBmemcpy0_9:
lw a6, -12(a3)
srli a5, a5, 24
slli a7, a6, 8
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 24
slli a6, t0, 8
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 24
slli t0, a7, 8
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 24
slli a7, a5, 8
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_9
addi a4, a3, -13
j .LBBmemcpy0_25
// src was already aligned (or n == 0): set cursors a3 = dest, a4 = src
.LBBmemcpy0_11:
mv a3, a0
mv a4, a1
andi a1, a3, 3
bnez a1, .LBBmemcpy0_4
// both pointers word-aligned: 16-byte unrolled word copy
.LBBmemcpy0_12:
li a1, 16
bltu a2, a1, .LBBmemcpy0_15
li a1, 15
.LBBmemcpy0_14:
lw a5, 0(a4)
lw a6, 4(a4)
lw a7, 8(a4)
lw t0, 12(a4)
sw a5, 0(a3)
sw a6, 4(a3)
sw a7, 8(a3)
sw t0, 12(a3)
addi a4, a4, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a1, a2, .LBBmemcpy0_14
// aligned tail: 8 then 4 remaining bytes copied as words
.LBBmemcpy0_15:
andi a1, a2, 8
beqz a1, .LBBmemcpy0_17
lw a1, 0(a4)
lw a5, 4(a4)
sw a1, 0(a3)
sw a5, 4(a3)
addi a3, a3, 8
addi a4, a4, 8
.LBBmemcpy0_17:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
lw a1, 0(a4)
sw a1, 0(a3)
addi a3, a3, 4
addi a4, a4, 4
j .LBBmemcpy0_30
// dest & 3 == 3: store 1 byte to align dest, combine src words as
// (prev >> 8) | (next << 24)
.LBBmemcpy0_19:
lw a5, 0(a4)
addi a1, a3, 1
sb a5, 0(a3)
addi a2, a2, -1
addi a3, a4, 16
li a4, 18
.LBBmemcpy0_20:
lw a6, -12(a3)
srli a5, a5, 8
slli a7, a6, 24
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 8
slli a6, t0, 24
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 8
slli t0, a7, 24
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 8
slli a7, a5, 24
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_20
addi a4, a3, -15
j .LBBmemcpy0_25
// dest & 3 == 2: store 2 bytes to align dest, combine src words as
// (prev >> 16) | (next << 16)
.LBBmemcpy0_22:
lw a5, 0(a4)
sb a5, 0(a3)
srli a6, a5, 8
addi a1, a3, 2
sb a6, 1(a3)
addi a2, a2, -2
addi a3, a4, 16
li a4, 17
.LBBmemcpy0_23:
lw a6, -12(a3)
srli a5, a5, 16
slli a7, a6, 16
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 16
slli a6, t0, 16
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 16
slli t0, a7, 16
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 16
slli a7, a5, 16
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_23
addi a4, a3, -14
.LBBmemcpy0_25:
mv a3, a1
// bytewise tail: copy remaining 16/8/4/2/1 bytes per the bits of a2
.LBBmemcpy0_26:
andi a1, a2, 16
bnez a1, .LBBmemcpy0_35
andi a1, a2, 8
bnez a1, .LBBmemcpy0_36
.LBBmemcpy0_28:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
// copy 4 bytes bytewise
.LBBmemcpy0_29:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
addi a4, a4, 4
addi a5, a3, 4
sb a1, 3(a3)
mv a3, a5
.LBBmemcpy0_30:
andi a1, a2, 2
bnez a1, .LBBmemcpy0_33
andi a1, a2, 1
bnez a1, .LBBmemcpy0_34
.LBBmemcpy0_32:
ret
// copy the last 2 bytes, then possibly the final byte
.LBBmemcpy0_33:
lb a1, 0(a4)
lb a5, 1(a4)
sb a1, 0(a3)
addi a4, a4, 2
addi a1, a3, 2
sb a5, 1(a3)
mv a3, a1
andi a1, a2, 1
beqz a1, .LBBmemcpy0_32
.LBBmemcpy0_34:
lb a1, 0(a4)
sb a1, 0(a3)
ret
// copy 16 bytes bytewise
.LBBmemcpy0_35:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
lb a1, 8(a4)
lb a6, 9(a4)
sb a5, 7(a3)
lb a5, 10(a4)
sb a1, 8(a3)
sb a6, 9(a3)
lb a1, 11(a4)
sb a5, 10(a3)
lb a5, 12(a4)
lb a6, 13(a4)
sb a1, 11(a3)
lb a1, 14(a4)
sb a5, 12(a3)
sb a6, 13(a3)
lb a5, 15(a4)
sb a1, 14(a3)
addi a4, a4, 16
addi a1, a3, 16
sb a5, 15(a3)
mv a3, a1
andi a1, a2, 8
beqz a1, .LBBmemcpy0_28
// copy 8 bytes bytewise
.LBBmemcpy0_36:
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
addi a4, a4, 8
addi a1, a3, 8
sb a5, 7(a3)
mv a3, a1
andi a1, a2, 4
bnez a1, .LBBmemcpy0_29
j .LBBmemcpy0_30
.Lfuncmemcpy_end0:
.size memcpy, .Lfuncmemcpy_end0-memcpy
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig |
lambbear/wlambch3 | 676 | os/src/task/switch.S | .altmacro
# TaskContext layout (8-byte slots): [0]=ra, [1]=sp, [2..13]=s0..s11.
# Only callee-saved state is stored: caller-saved registers are already
# spilled by the compiler around the call to __switch.
# SAVE_SN n: store s<n> into slot n+2 of the current context (a0).
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
# LOAD_SN n: load s<n> from slot n+2 of the next context (a1).
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
    # __switch(
    #     current_task_cx_ptr: *mut TaskContext,
    #     next_task_cx_ptr: *const TaskContext
    # )
    # Swaps execution from the current task to the next: returns on the
    # next task's saved ra with the next task's stack and s-registers.
    # save kernel stack of current task
    sd sp, 8(a0)
    # save ra & s0~s11 of current execution
    sd ra, 0(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n + 1
    .endr
    # restore ra & s0~s11 of next execution
    ld ra, 0(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n + 1
    .endr
    # restore kernel stack of next task
    ld sp, 8(a1)
    ret
|
lambbear/wlambch3 | 1,488 | os/src/trap/trap.S | .altmacro
# User-mode trap entry/exit (single-address-space ch3 variant: the
# TrapContext lives on the kernel stack, no satp switch involved).
# TrapContext layout (8-byte slots): [0..31] = x0..x31 (x0/x4 slots
# unused; user sp stored in slot 2), [32] = sstatus, [33] = sepc.
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
# __alltraps: on entry sscratch holds the kernel stack top.  Swap
# stacks, carve a 34-slot TrapContext frame, save all user state into
# it, then call the Rust trap_handler with a pointer to the frame.
__alltraps:
    csrrw sp, sscratch, sp
    # now sp->kernel stack, sscratch->user stack
    # allocate a TrapContext on kernel stack
    addi sp, sp, -34*8
    # save general-purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they were saved on kernel stack
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it on the kernel stack
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # set input argument of trap_handler(cx: &mut TrapContext)
    mv a0, sp
    call trap_handler
# __restore: falls through from trap_handler's return (and is also
# entered directly to launch a task).  Unwinds the TrapContext frame
# and sret's back to user mode on the user stack.
__restore:
    # now sp->kernel stack(after allocated), sscratch->user stack
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    ld t2, 2*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    csrw sscratch, t2
    # restore general-purpuse registers except sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # release TrapContext on kernel stack
    addi sp, sp, 34*8
    # now sp->kernel stack, sscratch->user stack
    csrrw sp, sscratch, sp
    sret
|
lambdaclass/sp1_poc_forger | 8,449 | crates/zkvm/entrypoint/src/memset.s | // This is musl-libc memset commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memset.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memset.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contributors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memset.c"
.globl memset
.p2align 2
.type memset,@function
memset:
# void *memset(void *dest, int c, size_t n) -- musl memset compiled for rv32im.
# In:  a0 = dest, a1 = fill byte (only low 8 bits used), a2 = n.
# Out: a0 = dest (a0 is never written, so no value needs restoring).
# Shape: bytes are stored from BOTH ends first, so every small-n and
# unaligned head/tail case is already covered by the time the aligned
# word fills and the 32-byte bulk loop run.
beqz a2, .LBB0_9memset          # n == 0: nothing to do
sb a1, 0(a0)                    # first and last byte
add a3, a2, a0                  # a3 = dest + n (one past the end)
li a4, 3
sb a1, -1(a3)
bltu a2, a4, .LBB0_9memset      # n < 3: fully covered already
sb a1, 1(a0)                    # bytes 1..2 and n-3..n-2
sb a1, 2(a0)
sb a1, -2(a3)
li a4, 7
sb a1, -3(a3)
bltu a2, a4, .LBB0_9memset      # n < 7: done
sb a1, 3(a0)                    # byte 3 and n-4
li a5, 9
sb a1, -4(a3)
bltu a2, a5, .LBB0_9memset      # n < 9: done
# Word phase: round dest up to 4-byte alignment and splat the fill byte.
neg a3, a0
andi a4, a3, 3                  # a4 = (-dest) & 3 = bytes until aligned
add a3, a0, a4                  # a3 = first word-aligned address
sub a2, a2, a4
andi a2, a2, -4                 # a2 = aligned byte count (multiple of 4)
andi a1, a1, 255
lui a4, 4112
addi a4, a4, 257                # a4 = 0x01010101
mul a1, a1, a4                  # replicate fill byte into all 4 lanes
sw a1, 0(a3)                    # first and last aligned word
add a4, a3, a2                  # a4 = one past the aligned region
sw a1, -4(a4)
bltu a2, a5, .LBB0_9memset      # < 9 aligned bytes: done
sw a1, 4(a3)                    # words at offsets 4,8 and -12,-8
sw a1, 8(a3)
sw a1, -12(a4)
li a5, 25
sw a1, -8(a4)
bltu a2, a5, .LBB0_9memset      # < 25 aligned bytes: done
sw a1, 12(a3)                   # extend coverage to 28 bytes from each end
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, -28(a4)
sw a1, -24(a4)
sw a1, -20(a4)
andi a5, a3, 4
ori a5, a5, 24                  # step = 24 or 28, keeps loop pointer 8-aligned
sub a2, a2, a5
li a6, 32
sw a1, -16(a4)
bltu a2, a6, .LBB0_9memset      # < 32 bytes remain: tail already written
add a3, a3, a5
li a4, 31
.LBB0_8memset:                  # bulk loop: 32 bytes per iteration
sw a1, 0(a3)
sw a1, 4(a3)
sw a1, 8(a3)
sw a1, 12(a3)
sw a1, 16(a3)
sw a1, 20(a3)
sw a1, 24(a3)
sw a1, 28(a3)
addi a2, a2, -32
addi a3, a3, 32
bltu a4, a2, .LBB0_8memset      # while more than 31 bytes remain
.LBB0_9memset:
ret
.Lfunc_end0memset:
.size memset, .Lfunc_end0memset-memset
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig |
lambdaclass/sp1_poc_forger | 11,854 | crates/zkvm/entrypoint/src/memcpy.s | // This is musl-libc commit 37e18b7bf307fa4a8c745feebfcba54a0ba74f30:
//
// src/string/memcpy.c
//
// This was compiled into assembly with:
//
// clang-14 -target riscv32 -march=rv32im -O3 -S memcpy.c -nostdlib -fno-builtin -funroll-loops
//
// and labels manually updated to not conflict.
//
// musl as a whole is licensed under the following standard MIT license:
//
// ----------------------------------------------------------------------
// Copyright © 2005-2020 Rich Felker, et al.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// ----------------------------------------------------------------------
//
// Authors/contributors include:
//
// A. Wilcox
// Ada Worcester
// Alex Dowad
// Alex Suykov
// Alexander Monakov
// Andre McCurdy
// Andrew Kelley
// Anthony G. Basile
// Aric Belsito
// Arvid Picciani
// Bartosz Brachaczek
// Benjamin Peterson
// Bobby Bingham
// Boris Brezillon
// Brent Cook
// Chris Spiegel
// Clément Vasseur
// Daniel Micay
// Daniel Sabogal
// Daurnimator
// David Carlier
// David Edelsohn
// Denys Vlasenko
// Dmitry Ivanov
// Dmitry V. Levin
// Drew DeVault
// Emil Renner Berthing
// Fangrui Song
// Felix Fietkau
// Felix Janda
// Gianluca Anzolin
// Hauke Mehrtens
// He X
// Hiltjo Posthuma
// Isaac Dunham
// Jaydeep Patil
// Jens Gustedt
// Jeremy Huntwork
// Jo-Philipp Wich
// Joakim Sindholt
// John Spencer
// Julien Ramseier
// Justin Cormack
// Kaarle Ritvanen
// Khem Raj
// Kylie McClain
// Leah Neukirchen
// Luca Barbato
// Luka Perkov
// M Farkas-Dyck (Strake)
// Mahesh Bodapati
// Markus Wichmann
// Masanori Ogino
// Michael Clark
// Michael Forney
// Mikhail Kremnyov
// Natanael Copa
// Nicholas J. Kain
// orc
// Pascal Cuoq
// Patrick Oppenlander
// Petr Hosek
// Petr Skocik
// Pierre Carrier
// Reini Urban
// Rich Felker
// Richard Pennington
// Ryan Fairfax
// Samuel Holland
// Segev Finer
// Shiz
// sin
// Solar Designer
// Stefan Kristiansson
// Stefan O'Rear
// Szabolcs Nagy
// Timo Teräs
// Trutz Behn
// Valentin Ochs
// Will Dietz
// William Haddon
// William Pitcock
//
// Portions of this software are derived from third-party works licensed
// under terms compatible with the above MIT license:
//
// The TRE regular expression implementation (src/regex/reg* and
// src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
// under a 2-clause BSD license (license text in the source files). The
// included version has been heavily modified by Rich Felker in 2012, in
// the interests of size, simplicity, and namespace cleanliness.
//
// Much of the math library code (src/math/* and src/complex/*) is
// Copyright © 1993,2004 Sun Microsystems or
// Copyright © 2003-2011 David Schultz or
// Copyright © 2003-2009 Steven G. Kargl or
// Copyright © 2003-2009 Bruce D. Evans or
// Copyright © 2008 Stephen L. Moshier or
// Copyright © 2017-2018 Arm Limited
// and labelled as such in comments in the individual source files. All
// have been licensed under extremely permissive terms.
//
// The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
// The Android Open Source Project and is licensed under a two-clause BSD
// license. It was taken from Bionic libc, used on Android.
//
// The AArch64 memcpy and memset code (src/string/aarch64/*) are
// Copyright © 1999-2019, Arm Limited.
//
// The implementation of DES for crypt (src/crypt/crypt_des.c) is
// Copyright © 1994 David Burren. It is licensed under a BSD license.
//
// The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
// originally written by Solar Designer and placed into the public
// domain. The code also comes with a fallback permissive license for use
// in jurisdictions that may not recognize the public domain.
//
// The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
// Valentin Ochs and is licensed under an MIT-style license.
//
// The x86_64 port was written by Nicholas J. Kain and is licensed under
// the standard MIT terms.
//
// The mips and microblaze ports were originally written by Richard
// Pennington for use in the ellcc project. The original code was adapted
// by Rich Felker for build system and code conventions during upstream
// integration. It is licensed under the standard MIT terms.
//
// The mips64 port was contributed by Imagination Technologies and is
// licensed under the standard MIT terms.
//
// The powerpc port was also originally written by Richard Pennington,
// and later supplemented and integrated by John Spencer. It is licensed
// under the standard MIT terms.
//
// All other files which have no copyright comments are original works
// produced specifically for use as part of this library, written either
// by Rich Felker, the main author of the library, or by one or more
// contributors listed above. Details on authorship of individual files
// can be found in the git version control history of the project. The
// omission of copyright and license comments in each file is in the
// interest of source tree size.
//
// In addition, permission is hereby granted for all public header files
// (include/* and arch/* /bits/* ) and crt files intended to be linked into
// applications (crt/*, ldso/dlstart.c, and arch/* /crt_arch.h) to omit
// the copyright notice and permission notice otherwise required by the
// license, and to use these files without any requirement of
// attribution. These files include substantial contributions from:
//
// Bobby Bingham
// John Spencer
// Nicholas J. Kain
// Rich Felker
// Richard Pennington
// Stefan Kristiansson
// Szabolcs Nagy
//
// all of whom have explicitly granted such permission.
//
// This file previously contained text expressing a belief that most of
// the files covered by the above exception were sufficiently trivial not
// to be subject to copyright, resulting in confusion over whether it
// negated the permissions granted in the license. In the spirit of
// permissive licensing, and of not having licensing issues being an
// obstacle to adoption, that text has been removed.
.text
.attribute 4, 16
.attribute 5, "rv32im"
.file "musl_memcpy.c"
.globl memcpy
.p2align 2
.type memcpy,@function
memcpy:
# void *memcpy(void *dest, const void *src, size_t n) -- musl memcpy, rv32im.
# In:  a0 = dest, a1 = src, a2 = n.  Out: a0 = dest (a0 is never written).
# Plan: byte-copy until src is word-aligned; then either copy whole words
# (dest aligned too) or load aligned src words and re-split them across
# dest words with shift/or merges (dest misaligned by 1, 2 or 3); finish
# with byte-wise tail copies driven by the low bits of the remaining count.
andi a3, a1, 3
seqz a3, a3                     # a3 = src already word-aligned?
seqz a4, a2                     # a4 = (n == 0)?
or a3, a3, a4
bnez a3, .LBBmemcpy0_11         # skip head loop if aligned or empty
addi a5, a1, 1
mv a6, a0
.LBBmemcpy0_2:                  # head: copy bytes until src aligned or n == 0
lb a7, 0(a1)
addi a4, a1, 1
addi a3, a6, 1
sb a7, 0(a6)
addi a2, a2, -1
andi a1, a5, 3
snez a1, a1                     # continue while next src address unaligned...
snez a6, a2                     # ...and bytes remain
and a7, a1, a6
addi a5, a5, 1
mv a1, a4
mv a6, a3
bnez a7, .LBBmemcpy0_2
# Here a4 = src cursor (now word-aligned), a3 = dest cursor, a2 = remaining.
andi a1, a3, 3
beqz a1, .LBBmemcpy0_12         # dest aligned too: plain word copy
.LBBmemcpy0_4:                  # dest misaligned by a1 (1, 2 or 3)
li a5, 32
bltu a2, a5, .LBBmemcpy0_26     # small remainder: byte tail only
li a5, 3
beq a1, a5, .LBBmemcpy0_19
li a5, 2
beq a1, a5, .LBBmemcpy0_22
li a5, 1
bne a1, a5, .LBBmemcpy0_26
# dest == 1 (mod 4): emit 3 bytes, then merge word pairs with 8/24-bit shifts.
lw a5, 0(a4)
sb a5, 0(a3)
srli a1, a5, 8
sb a1, 1(a3)
srli a6, a5, 16
addi a1, a3, 3
sb a6, 2(a3)
addi a2, a2, -3
addi a3, a4, 16
li a4, 16
.LBBmemcpy0_9:                  # 16 output bytes per iteration
lw a6, -12(a3)
srli a5, a5, 24
slli a7, a6, 8
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 24
slli a6, t0, 8
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 24
slli t0, a7, 8
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 24
slli a7, a5, 8
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_9
addi a4, a3, -13                # step src cursor back to first uncopied byte
j .LBBmemcpy0_25
.LBBmemcpy0_11:                 # head loop skipped: set up the two cursors
mv a3, a0
mv a4, a1
andi a1, a3, 3
bnez a1, .LBBmemcpy0_4
.LBBmemcpy0_12:                 # both cursors word-aligned
li a1, 16
bltu a2, a1, .LBBmemcpy0_15
li a1, 15
.LBBmemcpy0_14:                 # aligned bulk: 16 bytes per iteration
lw a5, 0(a4)
lw a6, 4(a4)
lw a7, 8(a4)
lw t0, 12(a4)
sw a5, 0(a3)
sw a6, 4(a3)
sw a7, 8(a3)
sw t0, 12(a3)
addi a4, a4, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a1, a2, .LBBmemcpy0_14
.LBBmemcpy0_15:                 # aligned tail: optional 8- then 4-byte chunk
andi a1, a2, 8
beqz a1, .LBBmemcpy0_17
lw a1, 0(a4)
lw a5, 4(a4)
sw a1, 0(a3)
sw a5, 4(a3)
addi a3, a3, 8
addi a4, a4, 8
.LBBmemcpy0_17:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
lw a1, 0(a4)
sw a1, 0(a3)
addi a3, a3, 4
addi a4, a4, 4
j .LBBmemcpy0_30
.LBBmemcpy0_19:                 # dest == 3 (mod 4): 1 byte, then 24/8 merges
lw a5, 0(a4)
addi a1, a3, 1
sb a5, 0(a3)
addi a2, a2, -1
addi a3, a4, 16
li a4, 18
.LBBmemcpy0_20:
lw a6, -12(a3)
srli a5, a5, 8
slli a7, a6, 24
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 8
slli a6, t0, 24
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 8
slli t0, a7, 24
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 8
slli a7, a5, 24
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_20
addi a4, a3, -15                # step src cursor back to first uncopied byte
j .LBBmemcpy0_25
.LBBmemcpy0_22:                 # dest == 2 (mod 4): 2 bytes, then 16/16 merges
lw a5, 0(a4)
sb a5, 0(a3)
srli a6, a5, 8
addi a1, a3, 2
sb a6, 1(a3)
addi a2, a2, -2
addi a3, a4, 16
li a4, 17
.LBBmemcpy0_23:
lw a6, -12(a3)
srli a5, a5, 16
slli a7, a6, 16
lw t0, -8(a3)
or a5, a7, a5
sw a5, 0(a1)
srli a5, a6, 16
slli a6, t0, 16
lw a7, -4(a3)
or a5, a6, a5
sw a5, 4(a1)
srli a6, t0, 16
slli t0, a7, 16
lw a5, 0(a3)
or a6, t0, a6
sw a6, 8(a1)
srli a6, a7, 16
slli a7, a5, 16
or a6, a7, a6
sw a6, 12(a1)
addi a1, a1, 16
addi a2, a2, -16
addi a3, a3, 16
bltu a4, a2, .LBBmemcpy0_23
addi a4, a3, -14                # step src cursor back to first uncopied byte
.LBBmemcpy0_25:
mv a3, a1
.LBBmemcpy0_26:                 # byte tail: low bits of a2 select 16/8/4/2/1
andi a1, a2, 16
bnez a1, .LBBmemcpy0_35
andi a1, a2, 8
bnez a1, .LBBmemcpy0_36
.LBBmemcpy0_28:
andi a1, a2, 4
beqz a1, .LBBmemcpy0_30
.LBBmemcpy0_29:                 # copy 4 bytes, bytewise
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
addi a4, a4, 4
addi a5, a3, 4
sb a1, 3(a3)
mv a3, a5
.LBBmemcpy0_30:
andi a1, a2, 2
bnez a1, .LBBmemcpy0_33
andi a1, a2, 1
bnez a1, .LBBmemcpy0_34
.LBBmemcpy0_32:
ret
.LBBmemcpy0_33:                 # copy 2 bytes
lb a1, 0(a4)
lb a5, 1(a4)
sb a1, 0(a3)
addi a4, a4, 2
addi a1, a3, 2
sb a5, 1(a3)
mv a3, a1
andi a1, a2, 1
beqz a1, .LBBmemcpy0_32
.LBBmemcpy0_34:                 # copy the final byte
lb a1, 0(a4)
sb a1, 0(a3)
ret
.LBBmemcpy0_35:                 # copy 16 bytes, bytewise
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
lb a1, 8(a4)
lb a6, 9(a4)
sb a5, 7(a3)
lb a5, 10(a4)
sb a1, 8(a3)
sb a6, 9(a3)
lb a1, 11(a4)
sb a5, 10(a3)
lb a5, 12(a4)
lb a6, 13(a4)
sb a1, 11(a3)
lb a1, 14(a4)
sb a5, 12(a3)
sb a6, 13(a3)
lb a5, 15(a4)
sb a1, 14(a3)
addi a4, a4, 16
addi a1, a3, 16
sb a5, 15(a3)
mv a3, a1
andi a1, a2, 8
beqz a1, .LBBmemcpy0_28
.LBBmemcpy0_36:                 # copy 8 bytes, bytewise
lb a1, 0(a4)
lb a5, 1(a4)
lb a6, 2(a4)
sb a1, 0(a3)
sb a5, 1(a3)
lb a1, 3(a4)
sb a6, 2(a3)
lb a5, 4(a4)
lb a6, 5(a4)
sb a1, 3(a3)
lb a1, 6(a4)
sb a5, 4(a3)
sb a6, 5(a3)
lb a5, 7(a4)
sb a1, 6(a3)
addi a4, a4, 8
addi a1, a3, 8
sb a5, 7(a3)
mv a3, a1
andi a1, a2, 4
bnez a1, .LBBmemcpy0_29
j .LBBmemcpy0_30
.Lfuncmemcpy_end0:
.size memcpy, .Lfuncmemcpy_end0-memcpy
.ident "Ubuntu clang version 14.0.6-++20220622053131+f28c006a5895-1~exp1~20220622173215.157"
.section ".note.GNU-stack","",@progbits
.addrsig |
Lanthanum1/jyy-os | 622 | minimal/minimal.S | #include <sys/syscall.h>
// The x86-64 system call Application Binary Interface (ABI):
// System call number: RAX
// Arguments: RDI, RSI, RDX, RCX, R8, R9
// Return value: RAX
// See also: syscall(2) syscalls(2)
// syscall3 expands to a raw 3-argument Linux syscall: number in %rax,
// arguments in %rdi/%rsi/%rdx, then the syscall instruction.
#define syscall3(id, a1, a2, a3) \
movq $SYS_##id, %rax; \
movq $a1, %rdi; \
movq $a2, %rsi; \
movq $a3, %rdx; \
syscall
// Fewer-argument variants pad the unused registers with 0.
#define syscall2(id, a1, a2) syscall3(id, a1, a2, 0)
#define syscall1(id, a1) syscall2(id, a1, 0)
.globl _start
_start:
// write(1, addr1, addr2 - addr1): print the message between the two labels.
syscall3(write, 1, addr1, addr2 - addr1)
// exit(1) -- NOTE(review): exits with status 1, not 0; confirm intended.
syscall1(exit, 1)
addr1:
// Message wrapped in ANSI SGR codes: \033[01;31m = bold red, \033[0m = reset.
.ascii "\033[01;31mHello, OS World\033[0m\n"
addr2:
|
lantos1618/asm_test | 101 | program.s | .section __DATA,__data
L0:
/* Fixed: the literal previously contained a raw newline inside the quotes,
   which GAS rejects as an unterminated string; use the \n escape instead. */
.asciz "Hello, World!\n"
.section __TEXT,__text
.global _start
_start:
/* NOTE(review): _start currently contains no instructions, so execution
   would fall off the end of the text section. This looks like a generator
   stub; add code (including an exit path) before running it. */
Lanxas1018/Lab_sdram | 6,209 | firmware/start_pico.S | /*
* Copyright 2018, Serge Bazanski <serge@bazanski.pl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted.
*/
#include "../extraops.S"
/*
* Interrupt vector.
*/
.global _start
_start:
.org 0x00000000 # Reset
j _crt0
.org 0x00000010 # IRQ (PicoRV32's IRQ vector is hardwired at offset 0x10)
_irq_vector:
/* Thin trampoline: spill t0/ra, fetch the real handler address out of q2,
   and call it -- keeps the hardwired vector tiny and retargetable. */
addi sp, sp, -16
sw t0, 4(sp)
sw ra, 8(sp)
/* By convention, q2 holds true IRQ vector, but remains caller-save.
We rely on the assumption that compiler-generated code will never touch
the QREGs. q3 is truly scratch/caller-save. */
picorv32_getq_insn(t0, q2)
sw t0, 12(sp)
jalr t0 // Call the true IRQ vector.
lw t0, 12(sp)
picorv32_setq_insn(q2, t0) // Restore the true IRQ vector.
lw ra, 8(sp)
lw t0, 4(sp)
addi sp, sp, 16
picorv32_retirq_insn() // return from interrupt
/*
 * IRQ handler, branched to from the vector.
 * Saves the entire integer register file into irq_regs, snapshots the
 * pending-IRQ bitmask into _irq_pending, runs the C-level isr() on a
 * dedicated stack, then restores everything and returns to _irq_vector.
 */
_irq:
/* stash x1/x2 in q2/q3 so both are free as scratch below (q2's own value,
   the vector address, is saved/restored around this call by _irq_vector) */
picorv32_setq_insn(q2, x1)
picorv32_setq_insn(q3, x2)
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* use x2 as scratch space for saving registers:
   slot 0 <- q0, slot 1 <- q2 (the interrupted code's x1),
   slot 2 <- q3 (its x2) */
picorv32_getq_insn(x2, q0)
sw x2, 0*4(x1)
picorv32_getq_insn(x2, q2)
sw x2, 1*4(x1)
picorv32_getq_insn(x2, q3)
sw x2, 2*4(x1)
/* save x3 - x31 */
sw x3, 3*4(x1)
sw x4, 4*4(x1)
sw x5, 5*4(x1)
sw x6, 6*4(x1)
sw x7, 7*4(x1)
sw x8, 8*4(x1)
sw x9, 9*4(x1)
sw x10, 10*4(x1)
sw x11, 11*4(x1)
sw x12, 12*4(x1)
sw x13, 13*4(x1)
sw x14, 14*4(x1)
sw x15, 15*4(x1)
sw x16, 16*4(x1)
sw x17, 17*4(x1)
sw x18, 18*4(x1)
sw x19, 19*4(x1)
sw x20, 20*4(x1)
sw x21, 21*4(x1)
sw x22, 22*4(x1)
sw x23, 23*4(x1)
sw x24, 24*4(x1)
sw x25, 25*4(x1)
sw x26, 26*4(x1)
sw x27, 27*4(x1)
sw x28, 28*4(x1)
sw x29, 29*4(x1)
sw x30, 30*4(x1)
sw x31, 31*4(x1)
/* update _irq_pending to the currently pending interrupts (read from q1) */
picorv32_getq_insn(t0, q1)
la t1, (_irq_pending)
sw t0, 0(t1)
/* prepare C handler stack (a dedicated one, so any stack is usable) */
lui sp, %hi(_irq_stack)
addi sp, sp, %lo(_irq_stack)
/* call C handler */
jal ra, isr
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* reload the three spilled slots into q0..q2; q1/q2 now briefly hold the
   interrupted code's x1/x2 so they can be recovered after x1 is reused */
lw x2, 0*4(x1)
picorv32_setq_insn(q0, x2)
lw x2, 1*4(x1)
picorv32_setq_insn(q1, x2)
lw x2, 2*4(x1)
picorv32_setq_insn(q2, x2)
/* restore x3 - x31 */
lw x3, 3*4(x1)
lw x4, 4*4(x1)
lw x5, 5*4(x1)
lw x6, 6*4(x1)
lw x7, 7*4(x1)
lw x8, 8*4(x1)
lw x9, 9*4(x1)
lw x10, 10*4(x1)
lw x11, 11*4(x1)
lw x12, 12*4(x1)
lw x13, 13*4(x1)
lw x14, 14*4(x1)
lw x15, 15*4(x1)
lw x16, 16*4(x1)
lw x17, 17*4(x1)
lw x18, 18*4(x1)
lw x19, 19*4(x1)
lw x20, 20*4(x1)
lw x21, 21*4(x1)
lw x22, 22*4(x1)
lw x23, 23*4(x1)
lw x24, 24*4(x1)
lw x25, 25*4(x1)
lw x26, 26*4(x1)
lw x27, 27*4(x1)
lw x28, 28*4(x1)
lw x29, 29*4(x1)
lw x30, 30*4(x1)
lw x31, 31*4(x1)
/* finally recover x1/x2; ret targets the restored ra, which still points
   back into _irq_vector (set there by the jalr that called us) */
picorv32_getq_insn(x1, q1)
picorv32_getq_insn(x2, q2)
ret
/*
 * Reset handler, branched to from the vector.
 * Zeroes the register file, masks all IRQs, copies .data out of ROM,
 * sets up the stack and the relocatable IRQ vector (q2), then enters
 * main(). main() is not expected to return.
 */
_crt0:
/* zero-initialize all registers */
addi x1, zero, 0
addi x2, zero, 0
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
/* mask all interrupts (1 bit = masked) */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* reflect that in _irq_mask */
la t1, _irq_mask
sw t0, 0(t1)
/* Load DATA: copy [_fdata,_edata) from its ROM image at _fdata_rom */
la t0, _fdata_rom
la t1, _fdata
la t2, _edata
3:
lw t3, 0(t0)
sw t3, 0(t1)
/* _edata is aligned to 16 bytes. Use word-xfers. */
addi t0, t0, 4
addi t1, t1, 4
bltu t1, t2, 3b
/* Clear BSS
   NOTE(review): the loop below is commented out -- presumably memory is
   assumed to come up zeroed; confirm, otherwise .bss starts with garbage. */
#la t0, _fbss
#la t1, _ebss
2:
#sw zero, 0(t0)
#addi t0, t0, 4
#bltu t0, t1, 2b
/* set main stack */
la sp, _fstack
/* Set up address to IRQ handler since vector is hardcoded.
By convention, q2 keeps the pointer to the true IRQ handler,
to emulate relocatable interrupts. */
la t0, _irq
picorv32_setq_insn(q2, t0)
/* jump to main */
jal ra, main
1:
/* loop forever */
j 1b
/*
* Enable interrupts by copying the software mask to the hardware mask
*/
.global _irq_enable
_irq_enable:
/* Globally enable interrupts. First record the enabled state
   (_irq_enabled = 1)... */
la t1, _irq_enabled
li t2, 1
sw t2, 0(t1)
/* ...then program the software mask from _irq_mask into the
   PicoRV32 hardware mask register. */
la t2, _irq_mask
lw t2, 0(t2)
picorv32_maskirq_insn(zero, t2)
ret
/*
* Disable interrupts by masking all interrupts (the mask should already be
* up to date)
*/
.global _irq_disable
_irq_disable:
/* Globally disable interrupts by masking every IRQ line in hardware.
   The software copy in _irq_mask is left untouched so a later
   _irq_enable can restore it. */
li t1, -1
picorv32_maskirq_insn(zero, t1)
/* Record the disabled state (_irq_enabled = 0). */
la t2, _irq_enabled
sw zero, 0(t2)
ret
/*
 * Set interrupt mask (a0 = new mask; 1 means interrupt masked/disabled).
 * Always updates the software copy in _irq_mask; the hardware mask is
 * only reprogrammed while interrupts are globally enabled (otherwise
 * _irq_enable pushes the stored mask to hardware later).
 */
.global _irq_setmask
_irq_setmask:
/* Update _irq_mask */
la t0, _irq_mask
sw a0, (t0)
/* Are interrupts enabled? */
la t0, _irq_enabled
lw t0, 0(t0)
beq t0, zero, 1f
/* If so, update the HW IRQ mask */
picorv32_maskirq_insn(zero, a0)
1:
ret
.section .bss
irq_regs:
/* saved interrupt registers, x0 - x31 (32 words, indexed by reg number) */
.fill 32,4
/* interrupt stack: 256 words (1 KiB); the label sits after the storage
   because the stack grows downwards */
.fill 256,4
_irq_stack:
/*
 * Bitfield of pending interrupts, updated on ISR entry.
 */
.global _irq_pending
_irq_pending:
.word 0
/*
 * Software copy of enabled interrupts. Do not write directly, use
 * _irq_setmask instead.
 */
.global _irq_mask
_irq_mask:
.word 0
/*
 * Software state of global interrupts being enabled or disabled. Do not write
 * directly, use _irq_disable / _irq_enable instead.
 */
.global _irq_enabled
_irq_enabled:
.word 0
|
Lanxas1018/Lab_sdram | 2,655 | firmware/extraops.S | // This is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or
// distribute this software, either in source code form or as a compiled
// binary, for any purpose, commercial or non-commercial, and by any
// means.
// 5-bit register-field encodings for the PicoRV32 q-registers
// (IRQ scratch registers addressed by the custom getq/setq instructions).
#define regnum_q0 0
#define regnum_q1 1
#define regnum_q2 2
#define regnum_q3 3
// Encodings for the architectural registers by x-number...
#define regnum_x0 0
#define regnum_x1 1
#define regnum_x2 2
#define regnum_x3 3
#define regnum_x4 4
#define regnum_x5 5
#define regnum_x6 6
#define regnum_x7 7
#define regnum_x8 8
#define regnum_x9 9
#define regnum_x10 10
#define regnum_x11 11
#define regnum_x12 12
#define regnum_x13 13
#define regnum_x14 14
#define regnum_x15 15
#define regnum_x16 16
#define regnum_x17 17
#define regnum_x18 18
#define regnum_x19 19
#define regnum_x20 20
#define regnum_x21 21
#define regnum_x22 22
#define regnum_x23 23
#define regnum_x24 24
#define regnum_x25 25
#define regnum_x26 26
#define regnum_x27 27
#define regnum_x28 28
#define regnum_x29 29
#define regnum_x30 30
#define regnum_x31 31
// ...and by ABI name, so macros accept either spelling via token pasting.
#define regnum_zero 0
#define regnum_ra 1
#define regnum_sp 2
#define regnum_gp 3
#define regnum_tp 4
#define regnum_t0 5
#define regnum_t1 6
#define regnum_t2 7
#define regnum_s0 8
#define regnum_s1 9
#define regnum_a0 10
#define regnum_a1 11
#define regnum_a2 12
#define regnum_a3 13
#define regnum_a4 14
#define regnum_a5 15
#define regnum_a6 16
#define regnum_a7 17
#define regnum_s2 18
#define regnum_s3 19
#define regnum_s4 20
#define regnum_s5 21
#define regnum_s6 22
#define regnum_s7 23
#define regnum_s8 24
#define regnum_s9 25
#define regnum_s10 26
#define regnum_s11 27
#define regnum_t3 28
#define regnum_t4 29
#define regnum_t5 30
#define regnum_t6 31
// x8 is s0 and also fp
#define regnum_fp 8
// Emit a raw R-type instruction word from its bit fields (funct7, rs2,
// rs1, funct3, rd, opcode) -- used because the assembler does not know
// the PicoRV32 custom opcodes.
#define r_type_insn(_f7, _rs2, _rs1, _f3, _rd, _opc) \
.word (((_f7) << 25) | ((_rs2) << 20) | ((_rs1) << 15) | ((_f3) << 12) | ((_rd) << 7) | ((_opc) << 0))
// PicoRV32 custom instructions, all encoded on the reserved 0001011
// opcode: getq/setq move values between x- and q-registers; retirq
// returns from an interrupt; maskirq writes the hardware IRQ mask;
// waitirq and timer are the remaining IRQ/timer primitives.
#define picorv32_getq_insn(_rd, _qs) \
r_type_insn(0b0000000, 0, regnum_ ## _qs, 0b100, regnum_ ## _rd, 0b0001011)
#define picorv32_setq_insn(_qd, _rs) \
r_type_insn(0b0000001, 0, regnum_ ## _rs, 0b010, regnum_ ## _qd, 0b0001011)
#define picorv32_retirq_insn() \
r_type_insn(0b0000010, 0, 0, 0b000, 0, 0b0001011)
#define picorv32_maskirq_insn(_rd, _rs) \
r_type_insn(0b0000011, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
#define picorv32_waitirq_insn(_rd) \
r_type_insn(0b0000100, 0, 0, 0b100, regnum_ ## _rd, 0b0001011)
#define picorv32_timer_insn(_rd, _rs) \
r_type_insn(0b0000101, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
|
Lanxas1018/Lab_sdram | 1,582 | firmware/crt0_vex.S | .global main
.global isr
.global _start
_start:
/* Reset entry: jump straight to crt_init. The nops only pad the space
   before trap_entry (crt_init installs trap_entry into mtvec itself). */
j crt_init
nop
nop
nop
nop
nop
nop
nop
.global trap_entry
/*
 * Machine trap/interrupt entry (installed into mtvec by crt_init).
 * Saves exactly the 16 caller-saved integer registers (ra, t0-t2, a0-a7,
 * t3-t6); callee-saved registers are preserved by the C isr() itself per
 * the RISC-V calling convention. Returns to the interrupted code via mret.
 * NOTE(review): the stores land below sp *before* sp is decremented --
 * safe only if nothing can preempt this handler in between; confirm.
 */
trap_entry:
sw x1, - 1*4(sp)
sw x5, - 2*4(sp)
sw x6, - 3*4(sp)
sw x7, - 4*4(sp)
sw x10, - 5*4(sp)
sw x11, - 6*4(sp)
sw x12, - 7*4(sp)
sw x13, - 8*4(sp)
sw x14, - 9*4(sp)
sw x15, -10*4(sp)
sw x16, -11*4(sp)
sw x17, -12*4(sp)
sw x28, -13*4(sp)
sw x29, -14*4(sp)
sw x30, -15*4(sp)
sw x31, -16*4(sp)
addi sp,sp,-16*4
call isr
/* restore from the adjusted sp: offset 15*4 here == old sp - 4 above */
lw x1 , 15*4(sp)
lw x5, 14*4(sp)
lw x6, 13*4(sp)
lw x7, 12*4(sp)
lw x10, 11*4(sp)
lw x11, 10*4(sp)
lw x12, 9*4(sp)
lw x13, 8*4(sp)
lw x14, 7*4(sp)
lw x15, 6*4(sp)
lw x16, 5*4(sp)
lw x17, 4*4(sp)
lw x28, 3*4(sp)
lw x29, 2*4(sp)
lw x30, 1*4(sp)
lw x31, 0*4(sp)
addi sp,sp,16*4
mret
.text
/*
 * C runtime init: set the stack and trap vector, copy the SRAM and .data
 * images out of ROM, zero .bss, unmask the timer + external interrupt
 * sources, then enter main(). main() is not expected to return.
 */
crt_init:
la sp, _fstack
la a0, trap_entry
csrw mtvec, a0
sram_init:
/* copy [_fsram,_esram) from its ROM image starting at _esram_rom */
la a0, _fsram
la a1, _esram
la a2, _esram_rom
sram_loop:
beq a0,a1,sram_done
lw a3,0(a2)
sw a3,0(a0)
add a0,a0,4
add a2,a2,4
j sram_loop
sram_done:
data_init:
/* copy initialised data: [_fdata,_edata) from _fdata_rom */
la a0, _fdata
la a1, _edata
la a2, _fdata_rom
data_loop:
beq a0,a1,data_done
lw a3,0(a2)
sw a3,0(a0)
add a0,a0,4
add a2,a2,4
j data_loop
data_done:
bss_init:
/* zero [_fbss,_ebss) */
la a0, _fbss
la a1, _ebss
bss_loop:
beq a0,a1,bss_done
sw zero,0(a0)
add a0,a0,4
#ifndef SIM
j bss_loop
#endif
/* NOTE(review): with -DSIM the back-branch above is compiled out, so only
   the first .bss word gets zeroed -- presumably the simulator pre-zeroes
   memory; confirm. */
bss_done:
li a0, 0x880 //880 enable timer + external interrupt sources (until mstatus.MIE is set, they will never trigger an interrupt)
csrw mie,a0
call main
infinit_loop:
j infinit_loop
|
Lanxas1018/Lab_sdram | 1,803 | firmware/crt0_ibex.S | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
#include "simple_system_regs.h"
.section .text
/* Any unimplemented exception/interrupt funnels into the C-side reporter. */
default_exc_handler:
j simple_exc_handler        // j == jal x0, ...: plain jump, no link
/* Machine timer interrupt stub. */
timer_handler:
j simple_timer_handler
/* Reset path: zero the register file, set the stack pointer, then fall
   through into _start, which clears .bss and calls main(). */
reset_handler:
/* set all registers to zero */
mv x1, x0
mv x2, x1
mv x3, x1
mv x4, x1
mv x5, x1
mv x6, x1
mv x7, x1
mv x8, x1
mv x9, x1
mv x10, x1
mv x11, x1
mv x12, x1
mv x13, x1
mv x14, x1
mv x15, x1
mv x16, x1
mv x17, x1
mv x18, x1
mv x19, x1
mv x20, x1
mv x21, x1
mv x22, x1
mv x23, x1
mv x24, x1
mv x25, x1
mv x26, x1
mv x27, x1
mv x28, x1
mv x29, x1
mv x30, x1
mv x31, x1
/* stack initialization
   NOTE(review): stack top is hard-coded (the symbolic _stack_start line is
   commented out below) -- keep this constant in sync with the linker script. */
# la x2, _stack_start
la x2, 0x01000800
_start:
.global _start
/* clear BSS */
la x26, _bss_start
la x27, _bss_end
bge x26, x27, zero_loop_end
zero_loop:
sw x0, 0(x26)
addi x26, x26, 4
ble x26, x27, zero_loop
/* NOTE(review): ble re-enters while x26 <= x27, so the word at _bss_end
   itself is also zeroed -- confirm _bss_end is meant to be inclusive here
   (blt would make it exclusive). */
zero_loop_end:
main_entry:
/* jump to main program entry point (argc = argv = 0) */
addi x10, x0, 0
addi x11, x0, 0
jal x1, main
/* Halt simulation */
#li x5, SIM_CTRL_BASE + SIM_CTRL_CTRL
#li x6, 1
#sw x6, 0(x5)
/* If execution ends up here just put the core to sleep */
sleep_loop:
wfi
j sleep_loop
/* =================================================== [ exceptions ] === */
/* This section has to be down here, since we have to disable rvc for it */
/* 31 four-byte vector slots (norvc keeps each entry exactly one jal wide):
   slots 0-6 and 8-30 -> default_exc_handler, slot 7 -> timer_handler,
   and the reset vector at fixed offset 0x80. */
.section .vectors, "ax"
.option norvc;
// All unimplemented interrupts/exceptions go to the default_exc_handler.
.org 0x00
.rept 7
jal x0, default_exc_handler
.endr
// slot 7: machine timer interrupt
jal x0, timer_handler
.rept 23
jal x0, default_exc_handler
.endr
// reset vector
.org 0x80
jal x0, reset_handler
|
Lanxas1018/Lab_sdram | 3,215 | firmware/start_caravel_vexriscv.s | # SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
.section .text
start:
# Boot entry: clear the register file, copy .data from its load image,
# zero .bss, set the stack pointer, then call main() (never returns).
# zero-initialize register file
addi x1, zero, 0
# x2 (sp) is initialized by reset
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
# zero initialize scratchpad memory
# setmemloop:
# sw zero, 0(x1)
# addi x1, x1, 4
# blt x1, sp, setmemloop
# copy data section: [_sdata,_edata) <- load image at _sidata
la a0, _sidata
la a1, _sdata
la a2, _edata
bge a1, a2, end_init_data
loop_init_data:
lw a3, 0(a0)
sw a3, 0(a1)
addi a0, a0, 4
addi a1, a1, 4
blt a1, a2, loop_init_data
end_init_data:
# zero-init bss section: [_sbss,_ebss)
la a0, _sbss
la a1, _ebss
bge a0, a1, end_init_bss
loop_init_bss:
sw zero, 0(a0)
addi a0, a0, 4
blt a0, a1, loop_init_bss
end_init_bss:
la sp, _fstack
# call main
call main
loop:
j loop
.global flashio_worker_begin
.global flashio_worker_end
.balign 4
# Bit-banged SPI flash transfer. The buffer at a0 is clocked out MSB-first
# in 32-bit words and simultaneously overwritten with the bits read back.
# NOTE(review): the begin/end labels suggest this blob is copied elsewhere
# before running (it disables memory-mapped flash access) -- confirm callers.
flashio_worker_begin:
# a0 ... data pointer
# a1 ... data length
# a2 ... optional WREN cmd (0 = disable)
# address of SPI ctrl reg
li t0, 0x28000000
# Set CS high, IO0 is output
li t1, 0x120
sh t1, 0(t0)
# Enable Manual SPI Ctrl
sb zero, 3(t0)
# Send optional WREN cmd
beqz a2, flashio_worker_L1
li t5, 8
andi t2, a2, 0xff
flashio_worker_L4:
# shift the 8 command bits out MSB-first, toggling the clock bit (0x10)
srli t4, t2, 7
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
slli t2, t2, 1
andi t2, t2, 0xff
addi t5, t5, -1
bnez t5, flashio_worker_L4
sb t1, 0(t0)
# SPI transfer
flashio_worker_L1:
# If byte count is zero, we're done
beqz a1, flashio_worker_L3
# Set t5 to count down 32 bits
li t5, 32
# Load t2 from address a0 (4 bytes)
lw t2, 0(a0)
flashio_worker_LY:
# Set t6 to count down 8 bits
li t6, 8
flashio_worker_L2:
# Clock out the bit (msb first) on IO0 and read bit in from IO1
srli t4, t2, 31
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
lbu t4, 0(t0)
andi t4, t4, 2
srli t4, t4, 1
slli t2, t2, 1
or t2, t2, t4
# Decrement 32 bit count
addi t5, t5, -1
bnez t5, flashio_worker_LX
# word fully shifted: write back the received word, advance, reload
sw t2, 0(a0)
addi a0, a0, 4
lw t2, 0(a0)
flashio_worker_LX:
addi t6, t6, -1
bnez t6, flashio_worker_L2
addi a1, a1, -1
bnez a1, flashio_worker_LY
# flush a partially-shifted word, if any
beqz t5, flashio_worker_L3
sw t2, 0(a0)
flashio_worker_L3:
# Back to MEMIO mode
li t1, 0x80
sb t1, 3(t0)
ret
.balign 4
flashio_worker_end:
|
Lanxas1018/Lab_sdram | 3,199 | firmware/start_caravel_ibex.s | # SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
.section .text
start:
# Boot entry: clear the register file, copy .data from its load image,
# zero .bss, then call main() (never returns).
# NOTE(review): sp is not set here; the comment below says x2 is
# initialized by reset -- confirm the reset value matches the linker script.
# zero-initialize register file
addi x1, zero, 0
# x2 (sp) is initialized by reset
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
# zero initialize scratchpad memory
# setmemloop:
# sw zero, 0(x1)
# addi x1, x1, 4
# blt x1, sp, setmemloop
# copy data section: [_sdata,_edata) <- load image at _sidata
la a0, _sidata
la a1, _sdata
la a2, _edata
bge a1, a2, end_init_data
loop_init_data:
lw a3, 0(a0)
sw a3, 0(a1)
addi a0, a0, 4
addi a1, a1, 4
blt a1, a2, loop_init_data
end_init_data:
# zero-init bss section: [_sbss,_ebss)
la a0, _sbss
la a1, _ebss
bge a0, a1, end_init_bss
loop_init_bss:
sw zero, 0(a0)
addi a0, a0, 4
blt a0, a1, loop_init_bss
end_init_bss:
# call main
call main
loop:
j loop
.global flashio_worker_begin
.global flashio_worker_end
.balign 4
# Bit-banged SPI flash transfer. The buffer at a0 is clocked out MSB-first
# in 32-bit words and simultaneously overwritten with the bits read back.
# NOTE(review): the begin/end labels suggest this blob is copied elsewhere
# before running (it disables memory-mapped flash access) -- confirm callers.
flashio_worker_begin:
# a0 ... data pointer
# a1 ... data length
# a2 ... optional WREN cmd (0 = disable)
# address of SPI ctrl reg
li t0, 0x28000000
# Set CS high, IO0 is output
li t1, 0x120
sh t1, 0(t0)
# Enable Manual SPI Ctrl
sb zero, 3(t0)
# Send optional WREN cmd
beqz a2, flashio_worker_L1
li t5, 8
andi t2, a2, 0xff
flashio_worker_L4:
# shift the 8 command bits out MSB-first, toggling the clock bit (0x10)
srli t4, t2, 7
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
slli t2, t2, 1
andi t2, t2, 0xff
addi t5, t5, -1
bnez t5, flashio_worker_L4
sb t1, 0(t0)
# SPI transfer
flashio_worker_L1:
# If byte count is zero, we're done
beqz a1, flashio_worker_L3
# Set t5 to count down 32 bits
li t5, 32
# Load t2 from address a0 (4 bytes)
lw t2, 0(a0)
flashio_worker_LY:
# Set t6 to count down 8 bits
li t6, 8
flashio_worker_L2:
# Clock out the bit (msb first) on IO0 and read bit in from IO1
srli t4, t2, 31
sb t4, 0(t0)
ori t4, t4, 0x10
sb t4, 0(t0)
lbu t4, 0(t0)
andi t4, t4, 2
srli t4, t4, 1
slli t2, t2, 1
or t2, t2, t4
# Decrement 32 bit count
addi t5, t5, -1
bnez t5, flashio_worker_LX
# word fully shifted: write back the received word, advance, reload
sw t2, 0(a0)
addi a0, a0, 4
lw t2, 0(a0)
flashio_worker_LX:
addi t6, t6, -1
bnez t6, flashio_worker_L2
addi a1, a1, -1
bnez a1, flashio_worker_LY
# flush a partially-shifted word, if any
beqz t5, flashio_worker_L3
sw t2, 0(a0)
flashio_worker_L3:
# Back to MEMIO mode
li t1, 0x80
sb t1, 3(t0)
ret
.balign 4
flashio_worker_end:
|
Lanxas1018/Lab_sdram | 6,209 | firmware/start.S | /*
* Copyright 2018, Serge Bazanski <serge@bazanski.pl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted.
*/
#include "../extraops.S"
/*
* Interrupt vector.
*/
.global _start
_start:
.org 0x00000000 # Reset
j _crt0
.org 0x00000010 # IRQ
/* Hard-wired PicoRV32 IRQ entry point (fixed offset 0x10). Saves minimal
   caller-save state, then calls through q2, which by this firmware's
   convention holds the address of the real IRQ handler (installed by
   _crt0). Returns to the interrupted code with retirq. */
_irq_vector:
addi sp, sp, -16
sw t0, 4(sp)
sw ra, 8(sp)
/* By convention, q2 holds true IRQ vector, but remains caller-save.
We rely on the assumption that compiler-generated code will never touch
the QREGs. q3 is truly scratch/caller-save. */
picorv32_getq_insn(t0, q2)
sw t0, 12(sp)
jalr t0 // Call the true IRQ vector.
lw t0, 12(sp)
picorv32_setq_insn(q2, t0) // Restore the true IRQ vector.
lw ra, 8(sp)
lw t0, 4(sp)
addi sp, sp, 16
picorv32_retirq_insn() // return from interrupt
/*
* IRQ handler, branched to from the vector.
*/
/* Full-context IRQ handler, reached via jalr from _irq_vector. Spills the
   entire register file to irq_regs, publishes the pending-IRQ bitmask,
   switches to a dedicated stack, calls the C handler isr(), then restores
   everything and returns (to _irq_vector, which executes retirq). */
_irq:
/* save x1/x2 to q2/q3 (q1 already holds the pending-IRQ bitmask) */
picorv32_setq_insn(q2, x1)
picorv32_setq_insn(q3, x2)
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* use x2 as scratch space for saving registers */
/* q0 (== x1), q2(== x2), q3 */
picorv32_getq_insn(x2, q0)
sw x2, 0*4(x1)
picorv32_getq_insn(x2, q2)
sw x2, 1*4(x1)
picorv32_getq_insn(x2, q3)
sw x2, 2*4(x1)
/* save x3 - x31 */
sw x3, 3*4(x1)
sw x4, 4*4(x1)
sw x5, 5*4(x1)
sw x6, 6*4(x1)
sw x7, 7*4(x1)
sw x8, 8*4(x1)
sw x9, 9*4(x1)
sw x10, 10*4(x1)
sw x11, 11*4(x1)
sw x12, 12*4(x1)
sw x13, 13*4(x1)
sw x14, 14*4(x1)
sw x15, 15*4(x1)
sw x16, 16*4(x1)
sw x17, 17*4(x1)
sw x18, 18*4(x1)
sw x19, 19*4(x1)
sw x20, 20*4(x1)
sw x21, 21*4(x1)
sw x22, 22*4(x1)
sw x23, 23*4(x1)
sw x24, 24*4(x1)
sw x25, 25*4(x1)
sw x26, 26*4(x1)
sw x27, 27*4(x1)
sw x28, 28*4(x1)
sw x29, 29*4(x1)
sw x30, 30*4(x1)
sw x31, 31*4(x1)
/* update _irq_pending to the currently pending interrupts */
/* (q1 is set by PicoRV32 hardware to the IRQ bitmask on interrupt entry) */
picorv32_getq_insn(t0, q1)
la t1, (_irq_pending)
sw t0, 0(t1)
/* prepare C handler stack */
lui sp, %hi(_irq_stack)
addi sp, sp, %lo(_irq_stack)
/* call C handler */
jal ra, isr
/* use x1 to index into irq_regs */
lui x1, %hi(irq_regs)
addi x1, x1, %lo(irq_regs)
/* restore saved slots 0..2 into q0 - q2: slot 1 (old x1) goes to q1 and
   slot 2 (old x2) to q2, so the getq pair at the end restores x1/x2 */
lw x2, 0*4(x1)
picorv32_setq_insn(q0, x2)
lw x2, 1*4(x1)
picorv32_setq_insn(q1, x2)
lw x2, 2*4(x1)
picorv32_setq_insn(q2, x2)
/* restore x3 - x31 */
lw x3, 3*4(x1)
lw x4, 4*4(x1)
lw x5, 5*4(x1)
lw x6, 6*4(x1)
lw x7, 7*4(x1)
lw x8, 8*4(x1)
lw x9, 9*4(x1)
lw x10, 10*4(x1)
lw x11, 11*4(x1)
lw x12, 12*4(x1)
lw x13, 13*4(x1)
lw x14, 14*4(x1)
lw x15, 15*4(x1)
lw x16, 16*4(x1)
lw x17, 17*4(x1)
lw x18, 18*4(x1)
lw x19, 19*4(x1)
lw x20, 20*4(x1)
lw x21, 21*4(x1)
lw x22, 22*4(x1)
lw x23, 23*4(x1)
lw x24, 24*4(x1)
lw x25, 25*4(x1)
lw x26, 26*4(x1)
lw x27, 27*4(x1)
lw x28, 28*4(x1)
lw x29, 29*4(x1)
lw x30, 30*4(x1)
lw x31, 31*4(x1)
/* restore x1 - x2 from q registers */
picorv32_getq_insn(x1, q1)
picorv32_getq_insn(x2, q2)
ret
/*
* Reset handler, branched to from the vector.
*/
/* Reset handler: clears the register file, masks IRQs, copies .data from
   ROM to RAM, sets up the stack and the soft IRQ vector (q2), then calls
   main(). Never returns. */
_crt0:
/* zero-initialize all registers */
addi x1, zero, 0
addi x2, zero, 0
addi x3, zero, 0
addi x4, zero, 0
addi x5, zero, 0
addi x6, zero, 0
addi x7, zero, 0
addi x8, zero, 0
addi x9, zero, 0
addi x10, zero, 0
addi x11, zero, 0
addi x12, zero, 0
addi x13, zero, 0
addi x14, zero, 0
addi x15, zero, 0
addi x16, zero, 0
addi x17, zero, 0
addi x18, zero, 0
addi x19, zero, 0
addi x20, zero, 0
addi x21, zero, 0
addi x22, zero, 0
addi x23, zero, 0
addi x24, zero, 0
addi x25, zero, 0
addi x26, zero, 0
addi x27, zero, 0
addi x28, zero, 0
addi x29, zero, 0
addi x30, zero, 0
addi x31, zero, 0
/* mask all interrupts */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* reflect that in _irq_mask */
la t1, _irq_mask
sw t0, 0(t1)
/* Load DATA */
/* NOTE(review): the loop body runs before the bounds check, so it copies
   one word even if _fdata == _edata -- presumably .data is never empty;
   confirm against the linker script. */
la t0, _fdata_rom
la t1, _fdata
la t2, _edata
3:
lw t3, 0(t0)
sw t3, 0(t1)
/* _edata is aligned to 16 bytes. Use word-xfers. */
addi t0, t0, 4
addi t1, t1, 4
bltu t1, t2, 3b
/* Clear BSS */
/* NOTE(review): the BSS clear is intentionally(?) commented out here --
   presumably something else guarantees zeroed RAM; verify. */
#la t0, _fbss
#la t1, _ebss
2:
#sw zero, 0(t0)
#addi t0, t0, 4
#bltu t0, t1, 2b
/* set main stack */
la sp, _fstack
/* Set up address to IRQ handler since vector is hardcoded.
By convention, q2 keeps the pointer to the true IRQ handler,
to emulate relocatable interrupts. */
la t0, _irq
picorv32_setq_insn(q2, t0)
/* jump to main */
jal ra, main
1:
/* loop forever */
j 1b
/*
* Enable interrupts by copying the software mask to the hardware mask
*/
.global _irq_enable
/* void _irq_enable(void) -- record "enabled" in _irq_enabled, then load
   the hardware IRQ mask from the software copy _irq_mask.
   Clobbers t0, t1. */
_irq_enable:
/* Set _irq_enabled to true */
la t0, _irq_enabled
addi t1, zero, 1
sw t1, 0(t0)
/* Set the HW IRQ mask to _irq_mask */
la t0, _irq_mask
lw t0, 0(t0)
picorv32_maskirq_insn(zero, t0)
ret
/*
* Disable interrupts by masking all interrupts (the mask should already be
* up to date)
*/
.global _irq_disable
/* void _irq_disable(void) -- mask every IRQ in hardware and record
   "disabled" in _irq_enabled. The software mask _irq_mask is left
   untouched so _irq_enable can restore it. Clobbers t0. */
_irq_disable:
/* Mask all IRQs */
li t0, 0xffffffff
picorv32_maskirq_insn(zero, t0)
/* Set _irq_enabled to false */
la t0, _irq_enabled
sw zero, (t0)
ret
/*
* Set interrupt mask.
* This updates the software mask (for readback and interrupt enable/disable)
* and the hardware mask.
* 1 means interrupt is masked (disabled).
*/
.global _irq_setmask
/* void _irq_setmask(uint32_t mask) -- a0 = new mask (1 bit = masked).
   Always updates the software copy; only touches the hardware mask when
   interrupts are globally enabled. Clobbers t0. */
_irq_setmask:
/* Update _irq_mask */
la t0, _irq_mask
sw a0, (t0)
/* Are interrupts enabled? */
la t0, _irq_enabled
lw t0, 0(t0)
beq t0, zero, 1f
/* If so, update the HW IRQ mask */
picorv32_maskirq_insn(zero, a0)
1:
ret
.section .bss
/* Spill area used by _irq: one word per architectural register. */
irq_regs:
/* saved interrupt registers, x0 - x31 */
.fill 32,4
/* interrupt stack */
/* 1 KiB dedicated ISR stack; label is after the fill because the stack
   grows downward. */
.fill 256,4
_irq_stack:
/*
* Bitfield of pending interrupts, updated on ISR entry.
*/
.global _irq_pending
_irq_pending:
.word 0
/*
* Software copy of enabled interrupts. Do not write directly, use
* _irq_setmask instead.
*/
.global _irq_mask
_irq_mask:
.word 0
/*
* Software state of global interrupts being enabled or disabled. Do not write
* directly, use _irq_disable / _irq_enable instead.
*/
.global _irq_enabled
_irq_enabled:
.word 0
|
LauraRosaMedina/Prueba1 | 853 | tests/syntax-tests/source/ARM Assembly/test.S | .data
.balign 4
/* Per-channel weights, written once at entry from stacked arguments. */
red: .word 0
green: .word 0
blue: .word 0
.text
.global grayscale
.func grayscale
/* grayscale -- weighted RGB -> gray conversion.
   r0 = pixel count, r1 = packed RGB source (3 bytes/pixel),
   r2 = destination, r3 + two stacked words = red/green/blue weights
   (fixed-point; sum is shifted right by 8).
   NOTE(review): this file is a syntax-highlighting test fixture (a
   rendered copy exists alongside it), so the code must stay byte-exact.
   The `str r6, [r2]` word store with r2 advancing by 1, and the final
   `stmfd r13!, {r0-r1}` push that is never popped before `bx lr`, look
   wrong for real use -- do not "fix" without updating the fixture. */
grayscale:
assign:
/* some comment */
ldr ip, addr_red
str r3, [ip]
ldr ip, addr_green
ldmfd r13!, {r3}
str r3, [ip]
ldr ip, addr_blue
ldmfd r13!, {r3}
str r3, [ip]
stmfd r13!, {r4-r8}
ldr ip, addr_red
ldr r3, [ip]
ldr ip, addr_green
ldr r4, [ip]
ldr ip, addr_blue
ldr r5, [ip] /* another comment */
grayscale_loop:
ldrb r6, [r1]
mul r6, r3, r6
add r1, r1, #1
ldrb r7, [r1]
mul r7, r4, r7
add r1, r1, #1
ldrb r8, [r1]
mul r8, r5, r8
add r1, r1, #1
add r6, r6, r7
add r6, r6, r8
asr r6, r6, #8 /* scale weighted sum back down */
str r6, [r2]
add r2, r2, #1
sub r0, r0, #1
cmp r0, #0
bne grayscale_loop
ldmfd r13!, {r4-r8}
stmfd r13!, {r0-r1}
bx lr
/* Literal-pool addresses of the weight variables. */
addr_red: .word red
addr_green: .word green
addr_blue: .word blue
|
LauraRosaMedina/Prueba1 | 7,962 | tests/syntax-tests/highlighted/ARM Assembly/test.S | [38;2;248;248;242m.[0m[38;2;248;248;242mdata[0m
[38;2;249;38;114m.balign[0m[38;2;190;132;255m 4[0m
[38;2;248;248;242mred[0m[38;2;248;248;242m: [0m[38;2;249;38;114m.word[0m[38;2;190;132;255m 0[0m
[38;2;248;248;242mgreen[0m[38;2;248;248;242m: [0m[38;2;249;38;114m.word[0m[38;2;190;132;255m 0[0m
[38;2;248;248;242mblue[0m[38;2;248;248;242m: [0m[38;2;249;38;114m.word[0m[38;2;190;132;255m 0[0m
[38;2;249;38;114m.text[0m
[38;2;249;38;114m.global[0m[38;2;248;248;242m [0m[38;2;248;248;242mgrayscale[0m
[38;2;249;38;114m.func[0m[38;2;248;248;242m [0m[38;2;248;248;242mgrayscale[0m
[38;2;248;248;242mgrayscale[0m[38;2;248;248;242m:[0m
[38;2;248;248;242massign[0m[38;2;248;248;242m:[0m
[38;2;248;248;242m [0m[38;2;117;113;94m/* some comment */[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mip[0m[38;2;248;248;242m, [0m[38;2;248;248;242maddr_red[0m
[38;2;248;248;242m [0m[38;2;102;217;239mstr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr3[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mip[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mip[0m[38;2;248;248;242m, [0m[38;2;248;248;242maddr_green[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldmfd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr13[0m[38;2;248;248;242m!, {[0m[38;2;248;248;242mr3[0m[38;2;248;248;242m}[0m
[38;2;248;248;242m [0m[38;2;102;217;239mstr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr3[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mip[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mip[0m[38;2;248;248;242m, [0m[38;2;248;248;242maddr_blue[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldmfd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr13[0m[38;2;248;248;242m!, {[0m[38;2;248;248;242mr3[0m[38;2;248;248;242m}[0m
[38;2;248;248;242m [0m[38;2;102;217;239mstr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr3[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mip[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mstmfd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr13[0m[38;2;248;248;242m!, {[0m[38;2;248;248;242mr4[0m[38;2;248;248;242m-[0m[38;2;248;248;242mr8[0m[38;2;248;248;242m}[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mip[0m[38;2;248;248;242m, [0m[38;2;248;248;242maddr_red[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr3[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mip[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mip[0m[38;2;248;248;242m, [0m[38;2;248;248;242maddr_green[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr4[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mip[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mip[0m[38;2;248;248;242m, [0m[38;2;248;248;242maddr_blue[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr5[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mip[0m[3;38;2;102;217;239m][0m[38;2;248;248;242m [0m[38;2;117;113;94m/* another comment */[0m
[38;2;248;248;242mgrayscale_loop[0m[38;2;248;248;242m:[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldrb[0m[38;2;248;248;242m [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mr1[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mmul[0m[38;2;248;248;242m [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr3[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr6[0m
[38;2;248;248;242m [0m[38;2;102;217;239madd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr1[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr1[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #1[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldrb[0m[38;2;248;248;242m [0m[38;2;248;248;242mr7[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mr1[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mmul[0m[38;2;248;248;242m [0m[38;2;248;248;242mr7[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr4[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr7[0m
[38;2;248;248;242m [0m[38;2;102;217;239madd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr1[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr1[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #1[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldrb[0m[38;2;248;248;242m [0m[38;2;248;248;242mr8[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mr1[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239mmul[0m[38;2;248;248;242m [0m[38;2;248;248;242mr8[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr5[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr8[0m
[38;2;248;248;242m [0m[38;2;102;217;239madd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr1[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr1[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #1[0m
[38;2;248;248;242m [0m[38;2;102;217;239madd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr7[0m
[38;2;248;248;242m [0m[38;2;102;217;239madd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr8[0m
[38;2;248;248;242m [0m[38;2;102;217;239masr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #8[0m
[38;2;248;248;242m [0m[38;2;102;217;239mstr[0m[38;2;248;248;242m [0m[38;2;248;248;242mr6[0m[38;2;248;248;242m,[0m[3;38;2;102;217;239m [[0m[3;38;2;102;217;239mr2[0m[3;38;2;102;217;239m][0m
[38;2;248;248;242m [0m[38;2;102;217;239madd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr2[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr2[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #1[0m
[38;2;248;248;242m [0m[38;2;102;217;239msub[0m[38;2;248;248;242m [0m[38;2;248;248;242mr0[0m[38;2;248;248;242m, [0m[38;2;248;248;242mr0[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #1[0m
[38;2;248;248;242m [0m[38;2;102;217;239mcmp[0m[38;2;248;248;242m [0m[38;2;248;248;242mr0[0m[38;2;248;248;242m,[0m[38;2;190;132;255m #0[0m
[38;2;248;248;242m [0m[38;2;102;217;239mbne[0m[38;2;248;248;242m [0m[38;2;248;248;242mgrayscale_loop[0m
[38;2;248;248;242m [0m[38;2;102;217;239mldmfd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr13[0m[38;2;248;248;242m!, {[0m[38;2;248;248;242mr4[0m[38;2;248;248;242m-[0m[38;2;248;248;242mr8[0m[38;2;248;248;242m}[0m
[38;2;248;248;242m [0m[38;2;102;217;239mstmfd[0m[38;2;248;248;242m [0m[38;2;248;248;242mr13[0m[38;2;248;248;242m!, {[0m[38;2;248;248;242mr0[0m[38;2;248;248;242m-[0m[38;2;248;248;242mr1[0m[38;2;248;248;242m}[0m
[38;2;248;248;242m [0m[38;2;102;217;239mbx[0m[38;2;248;248;242m [0m[38;2;248;248;242mlr[0m
[38;2;248;248;242maddr_red[0m[38;2;248;248;242m: [0m[38;2;249;38;114m.word[0m[38;2;248;248;242m [0m[38;2;248;248;242mred[0m
[38;2;248;248;242maddr_green[0m[38;2;248;248;242m: [0m[38;2;249;38;114m.word[0m[38;2;248;248;242m [0m[38;2;248;248;242mgreen[0m
[38;2;248;248;242maddr_blue[0m[38;2;248;248;242m: [0m[38;2;249;38;114m.word[0m[38;2;248;248;242m [0m[38;2;248;248;242mblue[0m
|
lciga/TyumenCTF-2025-Writeups | 1,268 | reverse/E4syP01nts/E4syP01nts.s | # E4syP01nts BY
#▄▄▄ ▪ ▐▄• ▄ ▄• ▄▌ ▐ ▄ ▐ ▄ ▪ ▪▪▪▪▪ ▄▄▌ ▄▄·
#▀▄ █·██ █▌█▌▪█▪██▌•█▌▐█•█▌▐███ ▪▪▪▪ ██• ▐█ ▌▪
#▐▀▀▄ ▐█· ·██· █▌▐█▌▐█▐▐▌▐█▐▐▌▐█· ▪▪▪▪ ██▪ ██ ▄
#▐█•█▌▐█▌▪▐█·█▌▐█▄█▌██▐█▌██▐█▌▐█▌ ▪▪▪▪ ▐█▌▐▌▐███▌
#.▀ ▀▀▀▀•▀▀ ▀▀ ▀▀▀ ▀▀ █▪▀▀ █▪▀▀▀ ▪▪▪▪ .▀▀▀ ·▀▀▀
# CTF reversing target (x86-64 Linux, AT&T syntax).
# _start prints a banner and exits; the flag decryptor below it is
# deliberately unreachable dead code -- the solver is expected to apply
# the transformation statically.
.section .text
.globl _start
_start:
mov $1, %rax                    # sys_write
mov $1, %rdi                    # fd = stdout
lea msg(%rip), %rsi
mov $msglen, %rdx
syscall
mov $60, %rax                   # sys_exit(0) -- decrfl below never runs
xor %rdi, %rdi
syscall
# Dead-code decryptor: for each byte i of encfl,
#   plain[i] = ((enc[i] - 26) ^ key2[i%4]) ^ key1[i%4]
# (NOTE(review): it also writes the result back into .rodata, which would
# fault if it ever executed -- consistent with it being decoration.)
decrfl:
lea encfl(%rip), %rsi
mov $flaglen, %rcx
xor %rdx, %rdx                  # rdx = byte index
.decrlp:
movb (%rsi,%rdx), %al
sub $26, %al
mov %edx, %r8d
and $3, %r8d                    # key index = i % 4
lea key2(%rip), %r9
movb (%r9,%r8), %r10b
xor %r10b, %al
mov %edx, %r8d
and $3, %r8d
lea key1(%rip), %r9
movb (%r9,%r8), %r10b
xor %r10b, %al
mov %al, (%rsi,%rdx)
inc %rdx
loop .decrlp                    # rcx counts remaining bytes
ret
.section .rodata
msg:
.ascii "who wants some E4syP01nts?\n\0"
msglen = . - msg
encfl:
.byte 0x61, 0x37, 0x40, 0x93, 0x90, 0x24, 0x2a, 0x5a, 0x6f, 0x39, 0x4f, 0x3e, 0x7b, 0x55, 0x41, 0x80, 0x80, 0x71, 0x26, 0x8a, 0x94, 0x31, 0x81, 0x81, 0x98, 0x55, 0x59, 0x3e, 0x7f, 0x71, 0x3b, 0x81, 0x88
flaglen = . - encfl
key1:
.byte 0xAD, 0xDE, 0xAD, 0xDE
key2:
.byte 0xBE, 0xBA, 0xFE, 0xCA
|
LearningOS/osbiglab-2024s-npudrv | 1,598 | modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%; /* %...% placeholders are substituted by the build script */
ENTRY(_start)

SECTIONS
{
    . = BASE_ADDRESS;
    _skernel = .;

    /* Code: boot stub first so _start sits at the image base. */
    .text : ALIGN(4K) {
        _stext = .;
        *(.text.boot)
        *(.text .text.*)
        . = ALIGN(4K);
        _etext = .;
    }

    .rodata : ALIGN(4K) {
        _srodata = .;
        *(.rodata .rodata.*)
        *(.srodata .srodata.*)
        *(.sdata2 .sdata2.*)
        . = ALIGN(4K);
        _erodata = .;
    }

    .data : ALIGN(4K) {
        _sdata = .;
        *(.data.boot_page_table)
        . = ALIGN(4K);
        *(.data .data.*)
        *(.sdata .sdata.*)
        *(.got .got.*)
    }

    /* Thread-local initialization image and zero-init template. */
    .tdata : ALIGN(0x10) {
        _stdata = .;
        *(.tdata .tdata.*)
        _etdata = .;
    }

    .tbss : ALIGN(0x10) {
        _stbss = .;
        *(.tbss .tbss.*)
        *(.tcommon)
        _etbss = .;
    }

    /* Per-CPU area: one 64-byte-aligned copy of the load image per CPU
       (%SMP% copies); the section's VMA starts at 0 so per-CPU symbols
       are offsets into each copy. */
    . = ALIGN(4K);
    _percpu_start = .;
    .percpu 0x0 : AT(_percpu_start) {
        _percpu_load_start = .;
        *(.percpu .percpu.*)
        _percpu_load_end = .;
        . = ALIGN(64);
        _percpu_size_aligned = .;

        . = _percpu_load_start + _percpu_size_aligned * %SMP%;
    }
    . = _percpu_start + SIZEOF(.percpu);
    _percpu_end = .;

    . = ALIGN(4K);
    _edata = .;

    .bss : ALIGN(4K) {
        boot_stack = .;
        *(.bss.stack)
        . = ALIGN(4K);
        boot_stack_top = .;

        _sbss = .;
        *(.bss .bss.*)
        *(.sbss .sbss.*)
        *(COMMON)
        . = ALIGN(4K);
        _ebss = .;
    }

    _ekernel = .;

    /DISCARD/ : {
        *(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
    }
}
|
LearningOS/osbiglab-2024s-npudrv | 1,672 | modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.if \from_user == 1
LDR t0, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv tp, t0
.endif
.endm
// Restore a TrapFrame built by SAVE_REGS. \from_user == 1 additionally
// swaps user/supervisor gp+tp back and parks the supervisor sp (just above
// the frame) in sscratch for the next user-mode trap.
.macro RESTORE_REGS, from_user
.if \from_user == 1
    LDR     gp, sp, 2                   // load user gp and tp
    LDR     t0, sp, 3
    STR     tp, sp, 3                   // save supervisor tp
    mv      tp, t0
    addi    t0, sp, {trapframe_size}    // put supervisor sp to scratch
    csrw    sscratch, t0
.endif
    LDR     t0, sp, 31
    LDR     t1, sp, 32
    csrw    sepc, t0
    csrw    sstatus, t1
    POP_GENERAL_REGS
    LDR     sp, sp, 1                   // load sp from tf.regs.sp
.endm

.section .text
.balign 4
.global trap_vector_base
// Common S-mode trap entry (installed in stvec). The sscratch convention
// distinguishes the trap origin; riscv_trap_handler(tf, from_user) does
// the actual dispatch in Rust.
trap_vector_base:
    // sscratch == 0: trap from S mode
    // sscratch != 0: trap from U mode
    csrrw   sp, sscratch, sp            // switch sscratch and sp
    bnez    sp, .Ltrap_entry_u
    csrr    sp, sscratch                // put supervisor sp back
    j       .Ltrap_entry_s

.Ltrap_entry_s:
    SAVE_REGS 0
    mv      a0, sp
    li      a1, 0
    call    riscv_trap_handler
    RESTORE_REGS 0
    sret

.Ltrap_entry_u:
    SAVE_REGS 1
    mv      a0, sp
    li      a1, 1
    call    riscv_trap_handler
    RESTORE_REGS 1
    sret
|
LearningOS/osbiglab-2024s-npudrv | 2,415 | modules/axhal/src/arch/aarch64/trap.S | .macro SAVE_REGS
sub sp, sp, 34 * 8
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
mrs x9, sp_el0
mrs x10, elr_el1
mrs x11, spsr_el1
stp x30, x9, [sp, 30 * 8]
stp x10, x11, [sp, 32 * 8]
.endm
// Undo SAVE_REGS: reload sp_el0/elr_el1/spsr_el1 and x0-x30 from the
// 34-slot frame, then release the frame.
.macro RESTORE_REGS
    ldp     x10, x11, [sp, 32 * 8]
    ldp     x30, x9, [sp, 30 * 8]
    msr     sp_el0, x9
    msr     elr_el1, x10
    msr     spsr_el1, x11
    ldp     x28, x29, [sp, 28 * 8]
    ldp     x26, x27, [sp, 26 * 8]
    ldp     x24, x25, [sp, 24 * 8]
    ldp     x22, x23, [sp, 22 * 8]
    ldp     x20, x21, [sp, 20 * 8]
    ldp     x18, x19, [sp, 18 * 8]
    ldp     x16, x17, [sp, 16 * 8]
    ldp     x14, x15, [sp, 14 * 8]
    ldp     x12, x13, [sp, 12 * 8]
    ldp     x10, x11, [sp, 10 * 8]
    ldp     x8, x9, [sp, 8 * 8]
    ldp     x6, x7, [sp, 6 * 8]
    ldp     x4, x5, [sp, 4 * 8]
    ldp     x2, x3, [sp, 2 * 8]
    ldp     x0, x1, [sp]
    add     sp, sp, 34 * 8
.endm

// Vector stub for exception kinds this kernel does not expect:
// reports (kind, source) to Rust and still returns cleanly.
// .p2align 7 keeps each stub in its own 128-byte vector slot.
.macro INVALID_EXCP, kind, source
.p2align 7
    SAVE_REGS
    mov     x0, sp
    mov     x1, \kind
    mov     x2, \source
    bl      invalid_exception
    b       .Lexception_return
.endm

// Vector stub for synchronous exceptions -> handle_sync_exception(tf).
.macro HANDLE_SYNC
.p2align 7
    SAVE_REGS
    mov     x0, sp
    bl      handle_sync_exception
    b       .Lexception_return
.endm

// Vector stub for IRQs -> handle_irq_exception(tf).
.macro HANDLE_IRQ
.p2align 7
    SAVE_REGS
    mov     x0, sp
    bl      handle_irq_exception
    b       .Lexception_return
.endm

.section .text
.p2align 11
.global exception_vector_base
// EL1 vector table (VBAR_EL1): 4 groups x 4 slots, 128 bytes each.
// FIQ and all aarch32 lower-EL entries are treated as invalid.
exception_vector_base:
    // current EL, with SP_EL0
    INVALID_EXCP 0 0
    INVALID_EXCP 1 0
    INVALID_EXCP 2 0
    INVALID_EXCP 3 0

    // current EL, with SP_ELx
    HANDLE_SYNC
    HANDLE_IRQ
    INVALID_EXCP 2 1
    INVALID_EXCP 3 1

    // lower EL, aarch64
    HANDLE_SYNC
    HANDLE_IRQ
    INVALID_EXCP 2 2
    INVALID_EXCP 3 2

    // lower EL, aarch32
    INVALID_EXCP 0 3
    INVALID_EXCP 1 3
    INVALID_EXCP 2 3
    INVALID_EXCP 3 3

.Lexception_return:
    RESTORE_REGS
    eret
|
LearningOS/osbiglab-2024s-npudrv | 1,505 | modules/axhal/src/arch/x86_64/trap.S | .equ NUM_INT, 256
.altmacro
// Generate one tiny entry stub per interrupt vector. Vectors 8, 10-14 and
// 17 already have a CPU-pushed error code; the rest push a dummy 0 so the
// TrapFrame layout is uniform.
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
    # error code pushed by CPU
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.else
    push    0           # fill in error code in TrapFrame
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.endif
.endm

.macro DEF_TABLE_ENTRY, i
    .quad .Ltrap_handler_\i
.endm

.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
    DEF_HANDLER %i
    .set i, i + 1
.endr

// Common tail: save volatile GPRs to build a TrapFrame, swap GS if the
// trap came from ring 3 (CS.RPL bits of the stacked CS), call the Rust
// handler, then unwind and iretq.
.Ltrap_common:
    test    byte ptr [rsp + 3 * 8], 3   # swap GS if it comes from user space
    jz      1f
    swapgs
1:
    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax
    mov     rdi, rsp                    # arg1 = &TrapFrame
    call    x86_trap_handler
    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    test    byte ptr [rsp + 3 * 8], 3   # swap GS back if return to user space
    jz      2f
    swapgs
2:
    add     rsp, 16                     # pop vector, error_code
    iretq

.section .rodata
.global trap_handler_table
// 256-entry pointer table consumed by the IDT setup code.
trap_handler_table:
.set i, 0
.rept NUM_INT
    DEF_TABLE_ENTRY %i
    .set i, i + 1
.endr
|
LearningOS/osbiglab-2024s-npudrv | 1,965 | modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
// Physical addresses of the labels below once this page is copied to
// {start_page_paddr}; the code is assembled position-dependent, so all
// real-mode references go through these.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}

// Mailbox slots near the end of the start page: the BSP writes the AP's
// stack pointer and 32-bit entry address here before sending the SIPI.
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8

# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
// AP entry in real mode (SIPI lands here): load a GDT, flip CR0.PE, and
// far-jump into 32-bit protected mode.
ap_start:
    cli
    wbinvd

    # zero DS ES SS FS GS
    xor     ax, ax
    mov     ds, ax
    mov     es, ax
    mov     ss, ax
    mov     fs, ax
    mov     gs, ax

    # load the 64-bit GDT
    lgdt    [pa_ap_gdt_desc]

    # switch to protected-mode
    mov     eax, cr0
    or      eax, (1 << 0)
    mov     cr0, eax

    # far jump to 32-bit code. 0x8 is code32 segment selector
    ljmp    0x8, offset pa_ap_start32

.code32
// 32-bit continuation: pick up the stack and entry point published by the
// BSP and jump there (the common 32->64-bit path in multiboot.S).
ap_start32:
    mov     esp, [stack_ptr]
    mov     eax, [entry_ptr]
    jmp     eax

.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
    .short  .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
    .long   pa_ap_gdt                           # base

.balign 16
.Lap_tmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:

# 0x7000
.p2align 12
.global ap_end
ap_end:
|
LearningOS/osbiglab-2024s-npudrv | 4,307 | modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
// Multiboot entry (BSP): bootloader leaves magic in eax and the info
// pointer in ebx; forward them as SysV args and fall into the shared
// 32->64-bit bring-up path.
_start:
    mov     edi, eax        # arg1: magic: 0x2BADB002
    mov     esi, ebx        # arg2: multiboot info
    jmp     bsp_entry32

.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
    .int    {mb_hdr_magic}                      # magic: 0x1BADB002
    .int    {mb_hdr_flags}                      # flags
    .int    -({mb_hdr_magic} + {mb_hdr_flags})  # checksum
    .int    multiboot_header - {offset}         # header_addr
    .int    _skernel - {offset}                 # load_addr
    .int    _edata - {offset}                   # load_end
    .int    _ebss - {offset}                    # bss_end_addr
    .int    _start - {offset}                   # entry_addr

# Common code in 32-bit, prepare states to enter 64-bit.
# (Sets up segments, PAE paging via the temporary page table below, long
# mode in EFER, then enables paging -- the following far jump completes
# the switch to 64-bit mode.)
.macro ENTRY32_COMMON
    # set data segment selectors
    mov     ax, 0x18
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax

    # set PAE, PGE bit in CR4
    mov     eax, {cr4}
    mov     cr4, eax

    # load the temporary page table
    lea     eax, [.Ltmp_pml4 - {offset}]
    mov     cr3, eax

    # set LME, NXE bit in IA32_EFER
    mov     ecx, {efer_msr}
    mov     edx, 0
    mov     eax, {efer}
    wrmsr

    # set protected mode, write protect, paging bit in CR0
    mov     eax, {cr0}
    mov     cr0, eax
.endm

# Common code in 64-bit
.macro ENTRY64_COMMON
    # clear segment selectors
    xor     ax, ax
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
.endm

.code32
bsp_entry32:
    lgdt    [.Ltmp_gdt_desc - {offset}]         # load the temporary GDT
    ENTRY32_COMMON
    ljmp    0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment

.code32
.global ap_entry32
# APs arrive here from ap_start.S (GDT already loaded there).
ap_entry32:
    ENTRY32_COMMON
    ljmp    0x10, offset ap_entry64 - {offset}  # 0x10 is code64 segment

.code64
bsp_entry64:
    ENTRY64_COMMON

    # set RSP to boot stack
    movabs  rsp, offset {boot_stack}
    add     rsp, {boot_stack_size}

    # call rust_entry(magic, mbi)
    movabs  rax, offset {entry}
    call    rax
    jmp     .Lhlt

.code64
ap_entry64:
    ENTRY64_COMMON

    # set RSP to high address (already set in ap_start.S)
    mov     rax, {offset}
    add     rsp, rax

    # call rust_entry_secondary(magic)
    mov     rdi, {mb_magic}
    movabs  rax, offset {entry_secondary}
    call    rax
    jmp     .Lhlt

.Lhlt:
    hlt
    jmp     .Lhlt

.section .rodata
.balign 8
.Ltmp_gdt_desc:
    .short  .Ltmp_gdt_end - .Ltmp_gdt - 1   # limit
    .long   .Ltmp_gdt - {offset}            # base

.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:

.balign 4096
# Temporary identity + high-half mapping: first 4 GiB mapped twice with
# 1 GiB pages, at VA 0 and at VA 0xffff_ff80_0000_0000.
.Ltmp_pml4:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad .Ltmp_pdpt_low - {offset} + 0x3   # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 510
    # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
    .quad .Ltmp_pdpt_high - {offset} + 0x3  # PRESENT | WRITABLE | paddr(tmp_pdpt)

# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508

.Ltmp_pdpt_high:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
|
LearningOS/rust_shyper | 4,068 | src/arch/riscv64/exception.S | .equ RA, 8
// Byte offsets of each field inside the trap context structure addressed
// by sp in the SAVE_REGS/RESTORE_REGS macros below (8 bytes per slot;
// the RA slot at offset 8 is defined just above this view).
.equ SP, 16
.equ GP, 24
.equ TP, 32
.equ T0, 40
.equ T1, 48
.equ T2, 56
.equ S0, 64
.equ S1, 72
.equ A0, 80
.equ A1, 88
.equ A2, 96
.equ A3, 104
.equ A4, 112
.equ A5, 120
.equ A6, 128
.equ A7, 136
.equ S2, 144
.equ S3, 152
.equ S4, 160
.equ S5, 168
.equ S6, 176
.equ S7, 184
.equ S8, 192
.equ S9, 200
.equ S10, 208
.equ S11, 216
.equ T3, 224
.equ T4, 232
.equ T5, 240
.equ T6, 248
// Privileged-state and hypervisor bookkeeping fields follow the GPRs.
.equ SEPC, 256
.equ SCAUSE, 264
.equ STVAL, 272
.equ SSTATUS, 280
.equ SSCRATCH, 288
.equ HART_INFO, 296
.equ HYPER_SP, 304
.equ FRAME_SIZE, (HYPER_SP + 8)
// Save all registers except sp and privileged registers to the data structure formed by sp
.macro SAVE_REGS
    sd   ra, RA(sp)
    sd   gp, GP(sp)
    sd   tp, TP(sp)
    sd   t0, T0(sp)
    sd   t1, T1(sp)
    sd   t2, T2(sp)
    sd   s0, S0(sp)
    sd   s1, S1(sp)
    sd   a0, A0(sp)
    sd   a1, A1(sp)
    sd   a2, A2(sp)
    sd   a3, A3(sp)
    sd   a4, A4(sp)
    sd   a5, A5(sp)
    sd   a6, A6(sp)
    sd   a7, A7(sp)
    sd   s2, S2(sp)
    sd   s3, S3(sp)
    sd   s4, S4(sp)
    sd   s5, S5(sp)
    sd   s6, S6(sp)
    sd   s7, S7(sp)
    sd   s8, S8(sp)
    sd   s9, S9(sp)
    sd   s10, S10(sp)
    sd   s11, S11(sp)
    sd   t3, T3(sp)
    sd   t4, T4(sp)
    sd   t5, T5(sp)
    sd   t6, T6(sp)
.endm

// Recover all registers except sp and privileged registers from the data structure formed by sp
.macro RESTORE_REGS
    ld   ra, RA(sp)
    ld   gp, GP(sp)
    ld   tp, TP(sp)
    ld   t0, T0(sp)
    ld   t1, T1(sp)
    ld   t2, T2(sp)
    ld   s0, S0(sp)
    ld   s1, S1(sp)
    ld   a0, A0(sp)
    ld   a1, A1(sp)
    ld   a2, A2(sp)
    ld   a3, A3(sp)
    ld   a4, A4(sp)
    ld   a5, A5(sp)
    ld   a6, A6(sp)
    ld   a7, A7(sp)
    ld   s2, S2(sp)
    ld   s3, S3(sp)
    ld   s4, S4(sp)
    ld   s5, S5(sp)
    ld   s6, S6(sp)
    ld   s7, S7(sp)
    ld   s8, S8(sp)
    ld   s9, S9(sp)
    ld   s10, S10(sp)
    ld   s11, S11(sp)
    ld   t3, T3(sp)
    ld   t4, T4(sp)
    ld   t5, T5(sp)
    ld   t6, T6(sp)
.endm
// HS-mode trap entry. sscratch carries the context-frame pointer while a
// guest runs (non-zero), and zero while the hypervisor itself runs -- the
// first csrrw below uses that to tell the two trap origins apart.
.macro VECTOR handler
    // Like FarmOS (xv6): save the hypervisor trapframe's address in the
    // sscratch register, including hypervisor_sp, tp (pointer to this
    // cpu's per-hart information structure) and so on.
    // swap sscratch and sp
    csrrw   sp, sscratch, sp
    bne     sp, zero, virt_entry

// from HS-mode (sscratch was 0): undo the swap and build a frame on the
// hypervisor's own stack
hs_mode_entry:
    csrrw   sp, sscratch, sp
    // sp: hypervisor's sp, sscratch: 0
    addi    sp, sp, -FRAME_SIZE
    SAVE_REGS

    csrr    s0, sepc
    csrr    s1, scause
    csrr    s2, stval
    csrr    s3, sstatus
    sd      s0, SEPC(sp)
    sd      s1, SCAUSE(sp)
    sd      s2, STVAL(sp)
    sd      s3, SSTATUS(sp)

    csrr    s0, sscratch
    // pass ctx
    mv      a0, sp
    call    \handler
    j       context_pop

// trap arrived from a guest: sp now points at the VM context frame
virt_entry:
    // store general regs
    SAVE_REGS

    // save VM's sp to Trapframe
    csrr    t0, sscratch
    sd      t0, SP(sp)

    // clear sscratch, indicating that we're trapped into kernel mode
    csrw    sscratch, zero

    // save Trapframe(sscratch)
    sd      sp, SSCRATCH(sp)

    csrr    s0, sepc
    csrr    s1, scause
    csrr    s2, stval
    csrr    s3, sstatus
    sd      s0, SEPC(sp)
    sd      s1, SCAUSE(sp)
    sd      s2, STVAL(sp)
    sd      s3, SSTATUS(sp)

    # Load the tp saved on the stack
    ld      tp, HART_INFO(sp)

    // pass ctx; s0 remembers the frame so context_pop knows the origin
    mv      a0, sp
    mv      s0, sp
    ld      sp, HYPER_SP(sp)
    call    \handler
    j       context_pop
.endm

.global context_vm_entry
// First entry into a VM: a0 = prepared context frame.
context_vm_entry:
    // TODO: Before entry,we need to write sstatus,sscratch,sepc into ctx pointed by a0
    mv      sp, a0
    j       return_to_vm

// Common epilogue: s0 != 0 means the trap came from a guest.
context_pop:
    // after 5
    bne     s0, zero, return_to_vm_pre

return_to_hypervisor:
    ld      s1, SEPC(sp)
    csrw    sepc, s1        // set jumping destination
    RESTORE_REGS
    addi    sp, sp, FRAME_SIZE
    sret

return_to_vm_pre:
    mv      sp, s0          // s0 = old sscratch

return_to_vm:
    ld      s1, SEPC(sp)
    ld      s2, SSTATUS(sp)
    ld      s3, SSCRATCH(sp)
    csrw    sepc, s1        // set jumping destination
    csrw    sstatus, s2     // Set sstatus, configure the next phase to jump to the S-mode
    csrw    sscratch, s3
    // restore general regs
    RESTORE_REGS
    ld      sp, SP(sp)
    // Currently, sp is vm's sp, pointing to vm's address space
    sret

.global exception_entry
exception_entry:
    VECTOR exception_rust_handler
|
LearningOS/rust_shyper | 668 | src/arch/aarch64/cache.S | // void cache_invalidate_d(u64 start, u64 length);
.global cache_invalidate_d
// void cache_invalidate_d(u64 start, u64 length)
// Invalidate (discard, without writeback) the data cache to PoC for the
// byte range [x0, x0+x1), line by line.
// NOTE(review): assumes a 64-byte D-cache line (CTR_EL0.DminLine) --
// confirm for the target cores. Returns 0 in x0.
cache_invalidate_d:
    add     x2, x0, x1      /* calculate the end address */
    bic     x0, x0, #(64 - 1)   /* align the start with a cache line */
1:
    dc      ivac, x0        /* invalidate cache to PoC by VA */
    add     x0, x0, #64
    cmp     x0, x2
    blt     1b
    mov     x0, xzr
    dsb     sy              /* complete all maintenance before returning */
    ret
// void cache_clean_invalidate_d(u64 start, u64 length)
// Clean (write back dirty lines) and invalidate the data cache to PoC for
// the byte range [x0, x0+x1), line by line.
// NOTE(review): assumes a 64-byte D-cache line (CTR_EL0.DminLine) --
// confirm for the target cores. Returns 0 in x0.
.global cache_clean_invalidate_d
cache_clean_invalidate_d:
    add     x2, x0, x1      /* calculate the end address */
    bic     x0, x0, #(64 - 1)   /* align the start with a cache line */
1:
    dc      civac, x0       /* clean+invalidate cache to PoC by VA */
    add     x0, x0, #64
    cmp     x0, x2
    blt     1b
    mov     x0, xzr
    dsb     sy              /* complete all maintenance before returning */
    ret
|
LearningOS/rust_shyper | 2,026 | src/arch/aarch64/fpsimd.S | /* SPDX-License-Identifier: GPL-2.0-only */
/*
* FP/SIMD state saving and restoring macros
*
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
*/
.macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2]
stp q4, q5, [\state, #16 * 4]
stp q6, q7, [\state, #16 * 6]
stp q8, q9, [\state, #16 * 8]
stp q10, q11, [\state, #16 * 10]
stp q12, q13, [\state, #16 * 12]
stp q14, q15, [\state, #16 * 14]
stp q16, q17, [\state, #16 * 16]
stp q18, q19, [\state, #16 * 18]
stp q20, q21, [\state, #16 * 20]
stp q22, q23, [\state, #16 * 22]
stp q24, q25, [\state, #16 * 24]
stp q26, q27, [\state, #16 * 26]
stp q28, q29, [\state, #16 * 28]
stp q30, q31, [\state, #16 * 30]!
mrs x\tmpnr, fpsr
str w\tmpnr, [\state, #16 * 2]
mrs x\tmpnr, fpcr
str w\tmpnr, [\state, #16 * 2 + 4]
.endm
.macro fpsimd_restore_fpcr state, tmp
/*
* Writes to fpcr may be self-synchronising, so avoid restoring
* the register if it hasn't changed.
*/
mrs \tmp, fpcr
cmp \tmp, \state
b.eq 9999f
msr fpcr, \state
9999:
.endm
/* Clobbers \state */
.macro fpsimd_restore state, tmpnr
ldp q0, q1, [\state, #16 * 0]
ldp q2, q3, [\state, #16 * 2]
ldp q4, q5, [\state, #16 * 4]
ldp q6, q7, [\state, #16 * 6]
ldp q8, q9, [\state, #16 * 8]
ldp q10, q11, [\state, #16 * 10]
ldp q12, q13, [\state, #16 * 12]
ldp q14, q15, [\state, #16 * 14]
ldp q16, q17, [\state, #16 * 16]
ldp q18, q19, [\state, #16 * 18]
ldp q20, q21, [\state, #16 * 20]
ldp q22, q23, [\state, #16 * 22]
ldp q24, q25, [\state, #16 * 24]
ldp q26, q27, [\state, #16 * 26]
ldp q28, q29, [\state, #16 * 28]
ldp q30, q31, [\state, #16 * 30]!
ldr w\tmpnr, [\state, #16 * 2]
msr fpsr, x\tmpnr
ldr w\tmpnr, [\state, #16 * 2 + 4]
fpsimd_restore_fpcr x\tmpnr, \state
.endm
// void fpsimd_save_ctx(u64 *ctx)
// Save the current FP/SIMD state into ctx. Clobbers x9 (scratch) and x0
// (advanced by the writeback inside fpsimd_save).
.global fpsimd_save_ctx
fpsimd_save_ctx:
    fpsimd_save x0 9
    ret

// void fpsimd_restore_ctx(u64 *ctx)
// Reload FP/SIMD state previously written by fpsimd_save_ctx.
// Clobbers x9 and x0.
.global fpsimd_restore_ctx
fpsimd_restore_ctx:
    fpsimd_restore x0 9
    ret
|
LearningOS/rust_shyper | 2,657 | src/arch/aarch64/exception.S | .macro VECTOR handler
    // Allocate a 0x110-byte (34 * 8) trap frame, save x0-x30, SPSR_EL2,
    // ELR_EL2 and the pre-trap stack pointer into it, then call \handler
    // with x0 = frame base. \handler returns into context_pop, which
    // unwinds the same layout.
    sub sp, sp, #(0x110)
    stp x0, x1, [sp, #(0 * 16)]
    stp x2, x3, [sp, #(1 * 16)]
    stp x4, x5, [sp, #(2 * 16)]
    stp x6, x7, [sp, #(3 * 16)]
    stp x8, x9, [sp, #(4 * 16)]
    stp x10,x11, [sp, #(5 * 16)]
    stp x12,x13, [sp, #(6 * 16)]
    stp x14,x15, [sp, #(7 * 16)]
    stp x16,x17, [sp, #(8 * 16)]
    stp x18,x19, [sp, #(9 * 16)]
    stp x20,x21, [sp, #(10 * 16)]
    stp x22,x23, [sp, #(11 * 16)]
    stp x24,x25, [sp, #(12 * 16)]
    stp x26,x27, [sp, #(13 * 16)]
    stp x28,x29, [sp, #(14 * 16)]
    mrs x1, spsr_el2                 // x1 was already saved above
    stp x30, x1, [sp, #(15 * 16)]    // x30 at slot 30, SPSR at slot 31
    mrs x0, elr_el2
    mov x1, sp
    add x1, x1, #(0x110)             // reconstruct the pre-trap sp
    stp x0, x1, [sp, #(16 * 16)]     // ELR at slot 32, old sp at slot 33
    mov x0, sp                       // arg0 = frame base
    bl \handler
    b context_pop
.endm
// Filler for vector slots that must never be taken: sleep forever.
.macro VECTOR_DISABLED
1:  wfe
    b 1b
.endm
// EL2 exception vector table. VBAR requires 2 KiB alignment (.align 11);
// each entry is pinned to its architectural 0x80-byte offset with .org,
// so every VECTOR expansion must fit within 0x80 bytes.
.section .text.vectors
.align 11
.global vectors
vectors:
    // Current exception level with SP_EL0.
    .org 0x000
    VECTOR current_el_sp0_synchronous
    .org 0x080
    VECTOR current_el_sp0_irq
    .org 0x100
    VECTOR_DISABLED                  // FIQ
    .org 0x180
    VECTOR current_el_sp0_serror

    // Current exception level with SP_ELx, x > 0.
    .org 0x200
    VECTOR current_el_spx_synchronous
    .org 0x280
    VECTOR current_el_spx_irq
    .org 0x300
    VECTOR_DISABLED // FIQ
    .org 0x380
    VECTOR current_el_spx_serror

    // Lower exception level, aarch64
    .org 0x400
    VECTOR lower_aarch64_synchronous
    .org 0x480
    VECTOR lower_aarch64_irq
    .org 0x500
    VECTOR_DISABLED // FIQ
    .org 0x580
    VECTOR lower_aarch64_serror

    // Lower exception level, aarch32 (unsupported)
    .org 0x600
    VECTOR_DISABLED
    .org 0x680
    VECTOR_DISABLED
    .org 0x700
    VECTOR_DISABLED
    .org 0x780
    VECTOR_DISABLED
    .org 0x800
// Enter a VM context: x0 points at a trap frame laid out exactly as the
// VECTOR macro saves it (GPRs, SPSR at slot 31, ELR at slot 32).
.global context_vm_entry
context_vm_entry:
    mov sp, x0                       // adopt the frame as the stack
context_pop:
    ldr x0, [sp, #(31 * 8)] // spsr
    ldr x1, [sp, #(32 * 8)] // elr
    msr spsr_el2, x0
    msr elr_el2, x1

    ldp x0, x1, [sp, #(0 * 16)]
    ldp x2, x3, [sp, #(1 * 16)]
    ldp x4, x5, [sp, #(2 * 16)]
    ldp x6, x7, [sp, #(3 * 16)]
    ldp x8, x9, [sp, #(4 * 16)]
    ldp x10,x11, [sp, #(5 * 16)]
    ldp x12,x13, [sp, #(6 * 16)]
    ldp x14,x15, [sp, #(7 * 16)]
    ldp x16,x17, [sp, #(8 * 16)]
    ldp x18,x19, [sp, #(9 * 16)]
    ldp x20,x21, [sp, #(10 * 16)]
    ldp x22,x23, [sp, #(11 * 16)]
    ldp x24,x25, [sp, #(12 * 16)]
    ldp x26,x27, [sp, #(13 * 16)]
    ldp x28,x29, [sp, #(14 * 16)]
    ldr x30, [sp, #(15 * 16)]
    add sp, sp, #(0x110)             // drop the frame
    eret
// Install the EL2 exception vector table on this CPU.
// Preserves x0 (spilled to the stack around the ldr/msr pair).
// Fix: the spill slot was 8 bytes, leaving sp 8-byte aligned while it is
// used as a load/store base; AArch64 requires sp to be 16-byte aligned
// for memory accesses when SCTLR_ELx.SA is set, so reserve 16 bytes.
.global fresh_cpu
fresh_cpu:
    sub sp, sp, #16                  // 16-byte slot keeps sp aligned
    str x0, [sp]
    ldr x0, =vectors
    msr vbar_el2, x0
    ldr x0, [sp]
    add sp, sp, #16
    ret
// Set up the vector table, then enter the VM context described by x0.
// fresh_cpu preserves x0, and context_vm_entry never returns (eret),
// so clobbering x30 with bl here is harmless.
.global fresh_hyper_asm
fresh_hyper_asm:
    bl fresh_cpu
    b context_vm_entry
|
Leinnan/ulang | 260 | samples/return_2.s | .globl _main
_main:
    # Prologue: standard frame, 8 bytes of locals at -4/-8(%rbp).
    push %rbp
    mov %rsp, %rbp
    subq $8, %rsp
    movl $2, -4(%rbp)        # a = 2
    movl -4(%rbp), %ecx      # a *= 3
    imull $3, %ecx
    movl %ecx, -4(%rbp)
    movl $5, -8(%rbp)        # b = 5
    movl -4(%rbp), %edx      # b += a
    addl %edx, -8(%rbp)
    movl -8(%rbp), %eax      # return b  (2 * 3 + 5 = 11)
    leave                    # mov %rbp,%rsp ; pop %rbp
    ret
|
LemonFan-maker/PulsarOS | 2,493 | src/exceptions.s | /* src/exceptions.s - CORRECT VERSION */
.section ".text._start"

.global _start
_start:
    // 1. Set up the stack pointer: load the stack-top address
    //    provided by the linker script.
    ldr x0, =_stack_top
    mov sp, x0

    // 2. Clear the .bss section (optional but recommended; currently
    //    disabled).
    // ldr x0, =_bss_start
    // ldr x1, =_bss_end
    // b clear_bss_loop_check
    // clear_bss_loop:
    //   str xzr, [x0], #8
    // clear_bss_loop_check:
    //   cmp x0, x1
    //   b.lt clear_bss_loop

    // 3. Jump to the Rust entry point.
    .extern kernel_main
    bl kernel_main

    // 4. If kernel_main ever returns (it should not), spin forever.
hang:
    b hang
.section ".text.boot"

// EL exception vector table. VBAR requires 2 KiB alignment, and each of
// the 16 entries is architecturally 0x80 bytes apart. The previous
// version emitted the branches back-to-back, so every vector except the
// first sat at the wrong offset — an IRQ taken while running on SP_ELx
// (the normal kernel state, vector offset 0x280) landed in the .rept
// padding instead of current_elx_irq. Pin each entry with .org.
// (This table must be the first thing in .text.boot for .org to give
// the correct offsets.)
.global _exception_vectors
.align 11 // 2^11 = 2048-byte alignment
_exception_vectors:
    // Current EL with SP_EL0
    .org 0x000
    b current_elx_sync          // Synchronous
    .org 0x080
    b current_elx_irq           // IRQ
    .org 0x100
    b unhandled_exception       // FIQ
    .org 0x180
    b unhandled_exception       // SError
    // Current EL with SP_ELx
    .org 0x200
    b current_elx_sync          // Synchronous
    .org 0x280
    b current_elx_irq           // IRQ (our target)
    .org 0x300
    b unhandled_exception       // FIQ
    .org 0x380
    b unhandled_exception       // SError
    // Lower EL, AArch64
    .org 0x400
    b unhandled_exception
    .org 0x480
    b unhandled_exception
    .org 0x500
    b unhandled_exception
    .org 0x580
    b unhandled_exception
    // Lower EL, AArch32
    .org 0x600
    b unhandled_exception
    .org 0x680
    b unhandled_exception
    .org 0x700
    b unhandled_exception
    .org 0x780
    b unhandled_exception
// Our main IRQ handler entry point.
// Saves x0-x30 into a 256-byte frame, calls the Rust handler, restores
// the registers and returns with eret.
// NOTE(review): ELR/SPSR and the FP/SIMD registers are NOT saved here,
// so this is only safe if handle_irq neither re-enables exceptions nor
// touches SIMD state — confirm against the Rust side.
.global current_elx_irq
current_elx_irq:
    // 1. Save context (x0-x30; x30 alone in the last slot)
    sub sp, sp, #256
    stp x0, x1, [sp, #16 * 0]
    stp x2, x3, [sp, #16 * 1]
    stp x4, x5, [sp, #16 * 2]
    stp x6, x7, [sp, #16 * 3]
    stp x8, x9, [sp, #16 * 4]
    stp x10, x11, [sp, #16 * 5]
    stp x12, x13, [sp, #16 * 6]
    stp x14, x15, [sp, #16 * 7]
    stp x16, x17, [sp, #16 * 8]
    stp x18, x19, [sp, #16 * 9]
    stp x20, x21, [sp, #16 * 10]
    stp x22, x23, [sp, #16 * 11]
    stp x24, x25, [sp, #16 * 12]
    stp x26, x27, [sp, #16 * 13]
    stp x28, x29, [sp, #16 * 14]
    str x30, [sp, #16 * 15]

    // 2. Call the high-level Rust handler
    .extern handle_irq
    bl handle_irq

    // 3. Restore context (exact mirror of the save sequence)
    ldr x30, [sp, #16 * 15]
    ldp x28, x29, [sp, #16 * 14]
    ldp x26, x27, [sp, #16 * 13]
    ldp x24, x25, [sp, #16 * 12]
    ldp x22, x23, [sp, #16 * 11]
    ldp x20, x21, [sp, #16 * 10]
    ldp x18, x19, [sp, #16 * 9]
    ldp x16, x17, [sp, #16 * 8]
    ldp x14, x15, [sp, #16 * 7]
    ldp x12, x13, [sp, #16 * 6]
    ldp x10, x11, [sp, #16 * 5]
    ldp x8, x9, [sp, #16 * 4]
    ldp x6, x7, [sp, #16 * 3]
    ldp x4, x5, [sp, #16 * 2]
    ldp x2, x3, [sp, #16 * 1]
    ldp x0, x1, [sp, #16 * 0]
    add sp, sp, #256

    // 4. Return from exception
    eret
// A catch-all handler for unexpected exceptions: both labels alias the
// same spin loop (the `b .` on the following line).
.global unhandled_exception
.global current_elx_sync
unhandled_exception:
current_elx_sync:
b . |
lemonsuqing/arceos_ftp | 2,001 | modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
/* Kernel image layout. %ARCH%, %KERNEL_BASE% and %SMP% are substituted
 * by the build system before the script is fed to the linker. */
BASE_ADDRESS = %KERNEL_BASE%;

ENTRY(_start)
SECTIONS
{
    . = BASE_ADDRESS;
    _skernel = .;

    /* Code: boot stub first, then everything else. */
    .text : ALIGN(4K) {
        _stext = .;
        *(.text.boot)
        *(.text .text.*)
        . = ALIGN(4K);
        _etext = .;
    }

    _srodata = .;
    .rodata : ALIGN(4K) {
        *(.rodata .rodata.*)
        *(.srodata .srodata.*)
        *(.sdata2 .sdata2.*)
    }

    /* C++-style static constructors, bracketed for runtime iteration. */
    .init_array : ALIGN(0x10) {
        __init_array_start = .;
        *(.init_array .init_array.*)
        __init_array_end = .;
    }

    . = ALIGN(4K);
    _erodata = .;

    .data : ALIGN(4K) {
        _sdata = .;
        *(.data.boot_page_table)    /* page-aligned boot page tables */
        . = ALIGN(4K);
        *(.data .data.*)
        *(.sdata .sdata.*)
        *(.got .got.*)
    }

    /* Thread-local storage template (initialized + zeroed). */
    .tdata : ALIGN(0x10) {
        _stdata = .;
        *(.tdata .tdata.*)
        _etdata = .;
    }

    .tbss : ALIGN(0x10) {
        _stbss = .;
        *(.tbss .tbss.*)
        *(.tcommon)
        _etbss = .;
    }

    /* Per-CPU data: one 64-byte-aligned copy of the load image per CPU.
     * The section's VMA is 0 so symbols are offsets into each copy. */
    . = ALIGN(4K);
    _percpu_start = .;
    _percpu_end = _percpu_start + SIZEOF(.percpu);
    .percpu 0x0 : AT(_percpu_start) {
        _percpu_load_start = .;
        *(.percpu .percpu.*)
        _percpu_load_end = .;
        . = _percpu_load_start + ALIGN(64) * %SMP%;
    }
    . = _percpu_end;

    . = ALIGN(4K);
    _edata = .;

    .bss : AT(.) ALIGN(4K) {
        boot_stack = .;
        *(.bss.stack)               /* boot stack lives at the start of .bss */
        . = ALIGN(4K);
        boot_stack_top = .;

        _sbss = .;
        *(.bss .bss.*)
        *(.sbss .sbss.*)
        *(COMMON)
        . = ALIGN(4K);
        _ebss = .;
    }

    _ekernel = .;

    /DISCARD/ : {
        *(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
    }
}

/* Collected slices for the `linkme` crate and axns resources; placed
 * after .tbss via INSERT so the main layout above is unchanged. */
SECTIONS {
    linkme_IRQ : { *(linkme_IRQ) }
    linkm2_IRQ : { *(linkm2_IRQ) }
    linkme_PAGE_FAULT : { *(linkme_PAGE_FAULT) }
    linkm2_PAGE_FAULT : { *(linkm2_PAGE_FAULT) }
    linkme_SYSCALL : { *(linkme_SYSCALL) }
    linkm2_SYSCALL : { *(linkm2_SYSCALL) }
    axns_resource : { *(axns_resource) }
}
INSERT AFTER .tbss;
|
lemonsuqing/arceos_ftp | 1,965 | modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
# Physical addresses of the relocated copies of the symbols below: this
# page is copied to {start_page_paddr}, so each symbol's run-time address
# is its offset from ap_start plus that base.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
# The BSP deposits the AP's stack pointer and entry point at the end of
# the start page before sending the STARTUP IPI.
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8

# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
    cli
    wbinvd
    # Real mode: zero all data segment registers.
    xor     ax, ax
    mov     ds, ax
    mov     es, ax
    mov     ss, ax
    mov     fs, ax
    mov     gs, ax

    # load the 64-bit GDT
    lgdt    [pa_ap_gdt_desc]

    # switch to protected-mode (set CR0.PE)
    mov     eax, cr0
    or      eax, (1 << 0)
    mov     cr0, eax

    # far jump to 32-bit code. 0x8 is code32 segment selector
    ljmp    0x8, offset pa_ap_start32

.code32
ap_start32:
    # Pick up the stack and entry point prepared by the BSP, then jump
    # into the common 32-bit AP entry (ap_entry32 in multiboot.S).
    mov     esp, [stack_ptr]
    mov     eax, [entry_ptr]
    jmp     eax

.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
    .short  .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
    .long   pa_ap_gdt                           # base

.balign 16
.Lap_tmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:

# 0x7000 -- the start page ends here; ap_end marks its size.
.p2align 12
.global ap_end
ap_end:
|
lemonsuqing/arceos_ftp | 4,325 | modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
# Expects a GDT (with selector 0x18 = 32-bit data) to be loaded already.
.macro ENTRY32_COMMON
    # set data segment selectors
    mov     ax, 0x18
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax

    # set PAE, PGE bit in CR4
    mov     eax, {cr4}
    mov     cr4, eax

    # load the temporary page table (physical address of .Ltmp_pml4)
    lea     eax, [.Ltmp_pml4 - {offset}]
    mov     cr3, eax

    # set LME, NXE bit in IA32_EFER
    mov     ecx, {efer_msr}
    mov     edx, 0
    mov     eax, {efer}
    wrmsr

    # set protected mode, write protect, paging bit in CR0
    # (enabling paging with LME set activates long mode)
    mov     eax, {cr0}
    mov     cr0, eax
.endm

# Common code in 64-bit: flat 64-bit mode ignores the data segment
# bases, so simply null the selectors.
.macro ENTRY64_COMMON
    # clear segment selectors
    xor     ax, ax
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
.endm
.code32
bsp_entry32:
    # Boot CPU: load the temporary GDT, then switch to long mode.
    lgdt    [.Ltmp_gdt_desc - {offset}]         # load the temporary GDT
    ENTRY32_COMMON
    ljmp    0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment

.code32
.global ap_entry32
ap_entry32:
    # Application CPUs arrive here from ap_start.S with a GDT loaded.
    ENTRY32_COMMON
    ljmp    0x10, offset ap_entry64 - {offset}  # 0x10 is code64 segment

.code64
bsp_entry64:
    ENTRY64_COMMON

    # set RSP to boot stack (high virtual address)
    movabs  rsp, offset {boot_stack}
    add     rsp, {boot_stack_size}

    # call rust_entry(magic, mbi)
    movabs  rax, offset {entry}
    call    rax
    jmp     .Lhlt

.code64
ap_entry64:
    ENTRY64_COMMON

    # set RSP to high address (low-address stack already set in ap_start.S)
    mov     rax, {offset}
    add     rsp, rax

    # call rust_entry_secondary(magic)
    mov     rdi, {mb_magic}
    movabs  rax, offset {entry_secondary}
    call    rax
    jmp     .Lhlt

.Lhlt:
    # Entry functions should never return; halt forever if they do.
    hlt
    jmp     .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
    .short  .Ltmp_gdt_end - .Ltmp_gdt - 1   # limit
    .long   .Ltmp_gdt - {offset}            # base (physical)

.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:

# Temporary identity + high mapping: PML4 entry 0 and entry 256
# (0xffff_8000_0000_0000) both point at a PDPT of four 1 GiB pages
# covering the first 4 GiB of physical memory.
.balign 4096
.Ltmp_pml4:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad .Ltmp_pdpt_low - {offset} + 0x3   # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 255
    # 0xffff_8000_0000_0000 ~ 0xffff_8000_ffff_ffff
    .quad .Ltmp_pdpt_high - {offset} + 0x3  # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 255

# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508

.Ltmp_pdpt_high:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
|
lemonsuqing/arceos_ftp | 2,544 | tools/raspi4/chainloader/src/_arch/aarch64/cpu/boot.s | // SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
// Load the address of a symbol into a register, absolute.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
movz \register, #:abs_g2:\symbol
movk \register, #:abs_g1_nc:\symbol
movk \register, #:abs_g0_nc:\symbol
.endm
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start
//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
// Only proceed on the boot core. Park it otherwise.
mrs x0, MPIDR_EL1
and x0, x0, {CONST_CORE_ID_MASK}
ldr x1, BOOT_CORE_ID // provided by bsp/__board_name__/cpu.rs
cmp x0, x1
b.ne .L_parking_loop
// If execution reaches here, it is the boot core.
// Initialize DRAM.
ADR_ABS x0, __bss_start
ADR_ABS x1, __bss_end_exclusive
.L_bss_init_loop:
cmp x0, x1
b.eq .L_relocate_binary
stp xzr, xzr, [x0], #16
b .L_bss_init_loop
// Next, relocate the binary.
.L_relocate_binary:
ADR_REL x0, __binary_nonzero_start // The address the binary got loaded to.
ADR_ABS x1, __binary_nonzero_start // The address the binary was linked to.
ADR_ABS x2, __binary_nonzero_end_exclusive
.L_copy_loop:
ldr x3, [x0], #8
str x3, [x1], #8
cmp x1, x2
b.lo .L_copy_loop
// Prepare the jump to Rust code.
// Set the stack pointer.
ADR_ABS x0, __boot_core_stack_end_exclusive
mov sp, x0
// Jump to the relocated Rust code.
ADR_ABS x1, _start_rust
br x1
// Infinitely wait for events (aka "park the core").
.L_parking_loop:
wfe
b .L_parking_loop
.size _start, . - _start
.type _start, function
.global _start
|
LeonRust/StarryE1000Driver | 1,771 | modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
. = ALIGN(4K);
*(.text.signal_trampoline)
. = ALIGN(4K);
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
.rodata : ALIGN(4K) {
_srodata = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
. = ALIGN(4K);
_erodata = .;
}
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
_img_start = .;
. = ALIGN(4K);
_img_end = .;
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = ALIGN(64);
_percpu_size_aligned = .;
. = _percpu_load_start + _percpu_size_aligned * %SMP%;
}
. = _percpu_start + SIZEOF(.percpu);
_percpu_end = .;
. = ALIGN(4K);
_edata = .;
.bss : ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
|
LeonRust/StarryE1000Driver | 121 | modules/axdriver/image.S |
/* Embed the disk image built at ./disk.img directly into the kernel's
 * data segment, bracketed by the img_start/img_end symbols so Rust code
 * can locate it. */
.section .data
.global img_start
.global img_end
.align 16
img_start:
    .incbin "./disk.img"
img_end:
|
LeonRust/StarryE1000Driver | 210 | modules/axhal/src/arch/riscv/signal.S | # To create the sigreturn trampoline
.equ __NR_sigreturn, 139

# Signal-return trampoline mapped into user space: loads the sigreturn
# syscall number into a7 and a zero argument into a0; the trap
# instruction (ecall) follows.
.section .text.signal_trampoline
.balign 4
.global start_signal_trampoline
start_signal_trampoline:
    li a7, __NR_sigreturn
    li a0, 0
ecall |
LeonRust/StarryE1000Driver | 2,325 | modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
    // Reserve the TrapFrame and save all state. PUSH_GENERAL_REGS,
    // STR and LDR are width-generic macros defined elsewhere (not
    // visible in this file).
    addi    sp, sp, -{trapframe_size}
    PUSH_GENERAL_REGS
    csrr    t0, sepc
    csrr    t1, sstatus
    csrrw   t2, sscratch, zero      // save sscratch (sp) and zero it
    STR     t0, sp, 31              // tf.sepc
    STR     t1, sp, 32              // tf.sstatus
    STR     t2, sp, 1               // tf.regs.sp
    // Hand-assembled compressed stores for fs0/fs1 (per the encodings
    // noted below).
    .short  0xa622                  // fsd fs0,264(sp)
    .short  0xaa26                  // fsd fs1,272(sp)

    .if \from_user == 1
    // Swap user gp/tp with the supervisor values stashed in the frame.
    LDR     t1, sp, 2               // load user gp with CPU ID
    LDR     t0, sp, 3               // load supervisor tp
    STR     gp, sp, 2               // save user gp and tp
    STR     tp, sp, 3
    mv      gp, t1
    mv      tp, t0
    .endif
.endm
// Mirror of SAVE_REGS: restore CSRs, FP callee regs and GPRs, then
// drop the TrapFrame by reloading sp from the frame itself.
.macro RESTORE_REGS, from_user
    .if \from_user == 1
    // Swap supervisor gp/tp back out, saving user values in the frame.
    LDR     t1, sp, 2
    LDR     t0, sp, 3
    STR     gp, sp, 2               // load user gp and tp
    STR     tp, sp, 3               // save supervisor tp
    mv      gp, t1
    mv      tp, t0
    addi    t0, sp, {trapframe_size}    // put supervisor sp to scratch
    csrw    sscratch, t0
    .endif

    LDR     t0, sp, 31
    LDR     t1, sp, 32
    csrw    sepc, t0
    csrw    sstatus, t1
    // Hand-assembled compressed loads for fs0/fs1.
    .short  0x2432                  // fld fs0,264(sp)
    .short  0x24d2                  // fld fs1,272(sp)
    POP_GENERAL_REGS
    LDR     sp, sp, 1               // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
    // sscratch protocol:
    //   sscratch == 0: trap from S mode (sp already valid)
    //   sscratch != 0: trap from U mode (sscratch holds the kernel sp)
    csrrw   sp, sscratch, sp        // switch sscratch and sp
    bnez    sp, .Ltrap_entry_u
    // From S mode: sscratch now holds the original sp; reload it.
    // (SAVE_REGS re-zeroes sscratch via its csrrw.)
    csrr    sp, sscratch            // put supervisor sp back
    j       .Ltrap_entry_s

.Ltrap_entry_s:
    SAVE_REGS 0
    mv      a0, sp                  // arg0 = &TrapFrame
    li      a1, 0                   // arg1 = from_user flag
    call    riscv_trap_handler
    RESTORE_REGS 0
    sret

.Ltrap_entry_u:
    SAVE_REGS 1
    mv      a0, sp
    li      a1, 1
    call    riscv_trap_handler
    RESTORE_REGS 1
    sret
.altmacro
// Copy one 8-byte TrapFrame slot from a0-based to a1-based memory.
.macro COPY n
    ld t2, (\n)*8(a0)
    sd t2, (\n)*8(a1)
.endm

.section .text
.globl __copy
__copy:
    # __copy(
    #   frame_address: *const TrapFrame,
    #   kernel_base: *mut T
    # )
    # Unrolled copy of all 33 8-byte TrapFrame slots; clobbers t2.
    .set    n, 0
    .rept   33
        COPY %n
        .set n, n + 1
    .endr
    ret
|
LeonRust/StarryE1000Driver | 223 | modules/axhal/src/arch/aarch64/signal.S | # To create the sigreturn trampoline
.equ __NR_sigreturn, 139

// Signal-return trampoline mapped into user space: issues the sigreturn
// syscall. Previously the syscall number was a bare magic immediate;
// use the named constant defined above instead.
.section .text.signal_trampoline
.balign 4
.global start_signal_trampoline
start_signal_trampoline:
    mov x8, #__NR_sigreturn     // syscall number = 139 (__NR_sigreturn)
    svc #0                      // trap into the kernel
LeonRust/StarryE1000Driver | 4,138 | modules/axhal/src/arch/aarch64/trap.S | .macro clear_gp_regs
    // Zero x0-x29 so no kernel values leak to (or linger from) user mode.
    .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
    mov x\n, xzr
    .endr
.endm
// Fill the 35*8-byte TrapFrame already reserved at sp (see HANDLE):
// slots 0-30 = x0-x30, 31 = sp, 32 = ELR, 33 = SPSR, 34 = TPIDR_EL0.
// \el = 0 when the trap came from EL0 (user), 1 when from EL1.
.macro SAVE_REGS, el
    stp x0, x1, [sp]
    stp x2, x3, [sp, 2 * 8]
    stp x4, x5, [sp, 4 * 8]
    stp x6, x7, [sp, 6 * 8]
    stp x8, x9, [sp, 8 * 8]
    stp x10, x11, [sp, 10 * 8]
    stp x12, x13, [sp, 12 * 8]
    stp x14, x15, [sp, 14 * 8]
    stp x16, x17, [sp, 16 * 8]
    stp x18, x19, [sp, 18 * 8]
    stp x20, x21, [sp, 20 * 8]
    stp x22, x23, [sp, 22 * 8]
    stp x24, x25, [sp, 24 * 8]
    stp x26, x27, [sp, 26 * 8]
    stp x28, x29, [sp, 28 * 8]
    str x30, [sp, 30 * 8]
    mrs x10, elr_el1
    mrs x11, spsr_el1
    stp x10, x11, [sp, 32 * 8]      // slots 32/33: ELR, SPSR
    .if \el == 0
    // From EL0: scrub GPRs and swap user/kernel per-thread state.
    clear_gp_regs
    mrs x12, tpidr_el0              // save user tls pointer
    ldr x13, [sp, 31 * 8]           // restore current ktask ptr
    mrs x9, sp_el0                  // save user stack pointer
    msr sp_el0, x13                 // restore kernel task ptr
    .else
    mov x9, sp
    mov x12, xzr
    .endif
    str x12, [sp, 34 * 8]           // save tpidr_el0
    str x9, [sp, 31 * 8]            // save user sp
.endm
// Mirror of SAVE_REGS: reload ELR/SPSR and GPRs from the TrapFrame and
// drop it (sp += 35 * 8). For EL0 returns, also swaps the kernel task
// pointer back out of sp_el0 and restores the user sp and TLS pointer.
.macro RESTORE_REGS, el
    ldp x30, x9, [sp, 30 * 8]       // x30 + saved user sp (slot 31)
    ldp x10, x11, [sp, 32 * 8]      // load ELR, SPSR
    msr elr_el1, x10
    msr spsr_el1, x11
    ldr x12, [sp, 34 * 8]
    .if \el == 0
    msr tpidr_el0, x12              // restore user tls pointer
    mrs x13, sp_el0                 // save current ktask ptr
    str x13, [sp, 31 * 8]
    msr sp_el0, x9                  // restore user sp
    .endif
    ldp x28, x29, [sp, 28 * 8]
    ldp x26, x27, [sp, 26 * 8]
    ldp x24, x25, [sp, 24 * 8]
    ldp x22, x23, [sp, 22 * 8]
    ldp x20, x21, [sp, 20 * 8]
    ldp x18, x19, [sp, 18 * 8]
    ldp x16, x17, [sp, 16 * 8]
    ldp x14, x15, [sp, 14 * 8]
    ldp x12, x13, [sp, 12 * 8]
    ldp x10, x11, [sp, 10 * 8]
    ldp x8, x9, [sp, 8 * 8]
    ldp x6, x7, [sp, 6 * 8]
    ldp x4, x5, [sp, 4 * 8]
    ldp x2, x3, [sp, 2 * 8]
    ldp x0, x1, [sp]
    add sp, sp, 35 * 8              // pop the TrapFrame
.endm
// One vector-table slot: .p2align 7 keeps each entry 0x80 bytes apart
// as the architecture requires; the slot just branches to the handler.
.macro HANDLE_TRAP, el, ht, regsize, label
    .p2align 7
    b   handle_el\el\ht\()_\regsize\()_\label
.endm

// Out-of-line handler body: reserve the 35*8-byte TrapFrame, save
// state, call the Rust handler with x0 = &TrapFrame, then return via
// the kernel or user restore path.
// NOTE(review): 35 * 8 = 280 is not a multiple of 16, so sp is 8-byte
// aligned inside the handler — relies on SCTLR_EL1.SA being clear.
.macro HANDLE, el, ht, regsize, label
.section .text
handle_el\el\ht\()_\regsize\()_\label:
    sub sp, sp, 35 * 8
    SAVE_REGS \el
    mov x0, sp
    bl  handle_el\el\ht\()_\regsize\()_\label\()_exception
    .if \el == 1
    b   ret_to_kernel
    .else
    b   ret_to_user
    .endif
.endm
.section .text
// EL1 vector table: 2 KiB aligned; each HANDLE_TRAP expands to an
// aligned 0x80-byte slot, giving the required 16-entry layout.
.p2align 11
.global exception_vector_base
exception_vector_base:
    // current EL, with SP_EL0
    HANDLE_TRAP 1, t, 64, sync
    HANDLE_TRAP 1, t, 64, irq
    HANDLE_TRAP 1, t, 64, fiq
    HANDLE_TRAP 1, t, 64, error

    // current EL, with SP_ELx
    HANDLE_TRAP 1, h, 64, sync
    HANDLE_TRAP 1, h, 64, irq
    HANDLE_TRAP 1, h, 64, fiq
    HANDLE_TRAP 1, h, 64, error

    // lower EL, aarch64 with SP_EL0
    HANDLE_TRAP 0, t, 64, sync
    HANDLE_TRAP 0, t, 64, irq
    HANDLE_TRAP 0, t, 64, fiq
    HANDLE_TRAP 0, t, 64, error

    // lower EL, aarch32
    HANDLE_TRAP 0, t, 32, sync
    HANDLE_TRAP 0, t, 32, irq
    HANDLE_TRAP 0, t, 32, fiq
    HANDLE_TRAP 0, t, 32, error

/*
 * used to create handle_el<el><ht>_<regsize>_<label> bodies for every
 * slot above
 */
// current EL, with SP_EL0
HANDLE 1, t, 64, sync
HANDLE 1, t, 64, irq
HANDLE 1, t, 64, fiq
HANDLE 1, t, 64, error
// current EL, with SP_ELx
HANDLE 1, h, 64, sync
HANDLE 1, h, 64, irq
HANDLE 1, h, 64, fiq
HANDLE 1, h, 64, error
// lower EL, aarch64 with SP_EL0
HANDLE 0, t, 64, sync
HANDLE 0, t, 64, irq
HANDLE 0, t, 64, fiq
HANDLE 0, t, 64, error
// lower EL, aarch32
HANDLE 0, t, 32, sync
HANDLE 0, t, 32, irq
HANDLE 0, t, 32, fiq
HANDLE 0, t, 32, error
.section .text
// Return to EL1 code from a kernel-mode trap frame at sp.
.global ret_to_kernel
ret_to_kernel:
    RESTORE_REGS 1
    eret

.section .text
// Return to EL0 from a user-mode trap frame at sp.
.global ret_to_user
ret_to_user:
    RESTORE_REGS 0
    eret

.section .text
// First entry into user space: x0 = pointer to a freshly built
// TrapFrame; adopt it as sp and take the normal user return path.
.global ret_to_first_user
ret_to_first_user:
    mov sp, x0
    b ret_to_user
|
LeonRust/StarryE1000Driver | 190 | modules/axhal/src/arch/x86_64/signal.S | # To create the sigreturn trampoline
// Signal-return trampoline: issues the sigreturn syscall; the syscall
// instruction follows on the next line.
.section .text.signal_trampoline
.code64
.global start_signal_trampoline
start_signal_trampoline:
    # sigreturn: syscall number 15 goes in rax (the earlier comment
    # incorrectly said rdi)
    mov rax, 0xf
syscall |
LeonRust/StarryE1000Driver | 1,143 | modules/axhal/src/arch/x86_64/syscall.S | .section .text
// Fast-path entry for the `syscall` instruction. On entry: rcx = user
// rip, r11 = user rflags (set by the CPU), user rsp still live.
// Builds a trap-frame-shaped record on the per-CPU kernel stack, calls
// the Rust handler with rdi = frame pointer, then unwinds via sysretq.
syscall_entry:
    swapgs                                      // switch to kernel GS base
    mov gs:[offset __PERCPU_USER_RSP_OFFSET], rsp   // stash user rsp per-CPU
    mov rsp, gs:[offset __PERCPU_KERNEL_RSP_OFFSET] // switch to kernel stack

    sub rsp, 8                      // skip user_ss
    push gs:[offset __PERCPU_USER_RSP_OFFSET]   // user_rsp
    push r11                        // rflags
    mov [rsp - 2 * 8], rcx          // rip (into its frame slot below)
    sub rsp, 4 * 8                  // skip until general registers
    push r15
    push r14
    push r13
    push r12
    push r11
    push r10
    push r9
    push r8
    push rdi
    push rsi
    push rbp
    push rbx
    push rdx
    push rcx
    push rax

    mov rdi, rsp                    // arg0 = frame base
    call x86_syscall_handler

    // Restore GPRs (rax now carries the syscall return value).
    pop rax
    pop rcx
    pop rdx
    pop rbx
    pop rbp
    pop rsi
    pop rdi
    pop r8
    pop r9
    pop r10
    pop r11
    pop r12
    pop r13
    pop r14
    pop r15
    add rsp, 7 * 8
    // Reload the sysret state from the (now-popped) frame slots.
    mov rcx, [rsp - 5 * 8]          // rip
    mov r11, [rsp - 3 * 8]          // rflags
    mov rsp, [rsp - 2 * 8]          // user_rsp
    swapgs
sysretq |
LeonRust/StarryE1000Driver | 1,505 | modules/axhal/src/arch/x86_64/trap.S | .equ NUM_INT, 256
.altmacro
// Per-vector stub: push a dummy error code when the CPU does not push
// one itself (vectors 8, 10-14 and 17 get a CPU-pushed error code),
// then the vector number, and jump to the shared tail.
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
    # error code pushed by CPU
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.else
    push    0           # fill in error code in TrapFrame
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.endif
.endm

// One 8-byte table slot pointing at the stub for vector \i.
.macro DEF_TABLE_ENTRY, i
    .quad .Ltrap_handler_\i
.endm
.section .text
.code64
// 256 per-vector stubs, generated by DEF_HANDLER.
_trap_handlers:
.set i, 0
.rept NUM_INT
    DEF_HANDLER %i
    .set i, i + 1
.endr

// Shared tail: at this point the stack holds vector, error code, then
// the CPU-pushed rip/cs/rflags(/rsp/ss) frame, so [rsp + 3 * 8] is CS.
.Ltrap_common:
    test    byte ptr [rsp + 3 * 8], 3   # swap GS if it comes from user space
    jz      1f
    swapgs
1:  push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax
    mov     rdi, rsp                    # arg0 = &TrapFrame
    call    x86_trap_handler
    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    test    byte ptr [rsp + 3 * 8], 3   # swap GS back if return to user space
    jz      2f
    swapgs
2:  add     rsp, 16                     # pop vector, error_code
    iretq

.section .rodata
// IDT-population table: trap_handler_table[i] = address of stub i.
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
    DEF_TABLE_ENTRY %i
    .set i, i + 1
.endr
|
LeonRust/StarryE1000Driver | 1,965 | modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8
# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
cli
wbinvd
xor ax, ax
mov ds, ax
mov es, ax
mov ss, ax
mov fs, ax
mov gs, ax
# load the 64-bit GDT
lgdt [pa_ap_gdt_desc]
# switch to protected-mode
mov eax, cr0
or eax, (1 << 0)
mov cr0, eax
# far jump to 32-bit code. 0x8 is code32 segment selector
ljmp 0x8, offset pa_ap_start32
.code32
ap_start32:
mov esp, [stack_ptr]
mov eax, [entry_ptr]
jmp eax
.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
.short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
.long pa_ap_gdt # base
.balign 16
.Lap_tmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:
# 0x7000
.p2align 12
.global ap_end
ap_end:
|
LeonRust/StarryE1000Driver | 4,307 | modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
.section .text.boot
.code32
.global _start
_start:
mov edi, eax # arg1: magic: 0x2BADB002
mov esi, ebx # arg2: multiboot info
jmp bsp_entry32
.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
.int {mb_hdr_magic} # magic: 0x1BADB002
.int {mb_hdr_flags} # flags
.int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum
.int multiboot_header - {offset} # header_addr
.int _skernel - {offset} # load_addr
.int _edata - {offset} # load_end
.int _ebss - {offset} # bss_end_addr
.int _start - {offset} # entry_addr
# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
# set data segment selectors
mov ax, 0x18
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
# set PAE, PGE bit in CR4
mov eax, {cr4}
mov cr4, eax
# load the temporary page table
lea eax, [.Ltmp_pml4 - {offset}]
mov cr3, eax
# set LME, NXE bit in IA32_EFER
mov ecx, {efer_msr}
mov edx, 0
mov eax, {efer}
wrmsr
# set protected mode, write protect, paging bit in CR0
mov eax, {cr0}
mov cr0, eax
.endm
# Common code in 64-bit
.macro ENTRY64_COMMON
# clear segment selectors
xor ax, ax
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
.endm
.code32
bsp_entry32:
lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT
ENTRY32_COMMON
ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment
.code32
.global ap_entry32
ap_entry32:
ENTRY32_COMMON
ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment
.code64
bsp_entry64:
ENTRY64_COMMON
# set RSP to boot stack
movabs rsp, offset {boot_stack}
add rsp, {boot_stack_size}
# call rust_entry(magic, mbi)
movabs rax, offset {entry}
call rax
jmp .Lhlt
.code64
ap_entry64:
ENTRY64_COMMON
# set RSP to high address (already set in ap_start.S)
mov rax, {offset}
add rsp, rax
# call rust_entry_secondary(magic)
mov rdi, {mb_magic}
movabs rax, offset {entry_secondary}
call rax
jmp .Lhlt
.Lhlt:
hlt
jmp .Lhlt
.section .rodata
.balign 8
.Ltmp_gdt_desc:
.short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit
.long .Ltmp_gdt - {offset} # base
.section .data
.balign 16
.Ltmp_gdt:
.quad 0x0000000000000000 # 0x00: null
.quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
.quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
.quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:
.balign 4096
.Ltmp_pml4:
# 0x0000_0000 ~ 0xffff_ffff
.quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
.zero 8 * 510
# 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
.quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt)
# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
.Ltmp_pdpt_high:
.quad 0x0000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
.quad 0x40000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
.quad 0x80000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
.quad 0xc0000000 | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
.zero 8 * 508
|
LeonRust/Starry-FastDDS | 1,771 | modules/axhal/linker.lds.S | OUTPUT_ARCH(%ARCH%)
BASE_ADDRESS = %KERNEL_BASE%;
ENTRY(_start)
SECTIONS
{
. = BASE_ADDRESS;
_skernel = .;
.text : ALIGN(4K) {
_stext = .;
*(.text.boot)
. = ALIGN(4K);
*(.text.signal_trampoline)
. = ALIGN(4K);
*(.text .text.*)
. = ALIGN(4K);
_etext = .;
}
.rodata : ALIGN(4K) {
_srodata = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
*(.sdata2 .sdata2.*)
. = ALIGN(4K);
_erodata = .;
}
.data : ALIGN(4K) {
_sdata = .;
*(.data.boot_page_table)
. = ALIGN(4K);
_img_start = .;
. = ALIGN(4K);
_img_end = .;
. = ALIGN(4K);
*(.data .data.*)
*(.sdata .sdata.*)
*(.got .got.*)
}
.tdata : ALIGN(0x10) {
_stdata = .;
*(.tdata .tdata.*)
_etdata = .;
}
.tbss : ALIGN(0x10) {
_stbss = .;
*(.tbss .tbss.*)
*(.tcommon)
_etbss = .;
}
. = ALIGN(4K);
_percpu_start = .;
.percpu 0x0 : AT(_percpu_start) {
_percpu_load_start = .;
*(.percpu .percpu.*)
_percpu_load_end = .;
. = ALIGN(64);
_percpu_size_aligned = .;
. = _percpu_load_start + _percpu_size_aligned * %SMP%;
}
. = _percpu_start + SIZEOF(.percpu);
_percpu_end = .;
. = ALIGN(4K);
_edata = .;
.bss : ALIGN(4K) {
boot_stack = .;
*(.bss.stack)
. = ALIGN(4K);
boot_stack_top = .;
_sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
_ekernel = .;
/DISCARD/ : {
*(.comment) *(.gnu*) *(.note*) *(.eh_frame*)
}
}
|
LeonRust/Starry-FastDDS | 121 | modules/axdriver/image.S |
.section .data
.global img_start
.global img_end
.align 16
img_start:
.incbin "./disk.img"
img_end:
|
LeonRust/Starry-FastDDS | 210 | modules/axhal/src/arch/riscv/signal.S | # To create the sigreturn trampoline
.equ __NR_sigreturn, 139
.section .text.signal_trampoline
.balign 4
.global start_signal_trampoline
start_signal_trampoline:
li a7, __NR_sigreturn
li a0, 0
ecall |
LeonRust/Starry-FastDDS | 2,325 | modules/axhal/src/arch/riscv/trap.S | .macro SAVE_REGS, from_user
addi sp, sp, -{trapframe_size}
PUSH_GENERAL_REGS
csrr t0, sepc
csrr t1, sstatus
csrrw t2, sscratch, zero // save sscratch (sp) and zero it
STR t0, sp, 31 // tf.sepc
STR t1, sp, 32 // tf.sstatus
STR t2, sp, 1 // tf.regs.sp
.short 0xa622 // fsd fs0,264(sp)
.short 0xaa26 // fsd fs1,272(sp)
.if \from_user == 1
LDR t1, sp, 2 // load user gp with CPU ID
LDR t0, sp, 3 // load supervisor tp
STR gp, sp, 2 // save user gp and tp
STR tp, sp, 3
mv gp, t1
mv tp, t0
.endif
.endm
.macro RESTORE_REGS, from_user
.if \from_user == 1
LDR t1, sp, 2
LDR t0, sp, 3
STR gp, sp, 2 // load user gp and tp
STR tp, sp, 3 // save supervisor tp
mv gp, t1
mv tp, t0
addi t0, sp, {trapframe_size} // put supervisor sp to scratch
csrw sscratch, t0
.endif
LDR t0, sp, 31
LDR t1, sp, 32
csrw sepc, t0
csrw sstatus, t1
.short 0x2432 // fld fs0,264(sp)
.short 0x24d2 // fld fs1,272(sp)
POP_GENERAL_REGS
LDR sp, sp, 1 // load sp from tf.regs.sp
.endm
.section .text
.balign 4
.global trap_vector_base
trap_vector_base:
// sscratch == 0: trap from S mode
// sscratch != 0: trap from U mode
csrrw sp, sscratch, sp // switch sscratch and sp
bnez sp, .Ltrap_entry_u
csrr sp, sscratch // put supervisor sp back
j .Ltrap_entry_s
.Ltrap_entry_s:
SAVE_REGS 0
mv a0, sp
li a1, 0
call riscv_trap_handler
RESTORE_REGS 0
sret
.Ltrap_entry_u:
SAVE_REGS 1
mv a0, sp
li a1, 1
call riscv_trap_handler
RESTORE_REGS 1
sret
.altmacro
.macro COPY n
ld t2, (\n)*8(a0)
sd t2, (\n)*8(a1)
.endm
.section .text
.globl __copy
__copy:
# __copy(
# frame_address: *const TrapFrame,
# kernel_base: *mut T
# )
.set n, 0
.rept 33
COPY %n
.set n, n + 1
.endr
ret
|
LeonRust/Starry-FastDDS | 223 | modules/axhal/src/arch/aarch64/signal.S | # To create the sigreturn trampoline
# AArch64 sigreturn trampoline: user signal handlers return here, and this
# stub issues the sigreturn syscall to hand control back to the kernel.
.equ __NR_sigreturn, 139
.section .text.signal_trampoline
.balign 4
.global start_signal_trampoline
start_signal_trampoline:
    mov x8, #__NR_sigreturn // x8 = syscall number (sigreturn, 139)
    svc #0                  // trap into the kernel
LeonRust/Starry-FastDDS | 4,138 | modules/axhal/src/arch/aarch64/trap.S | .macro clear_gp_regs
.irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
mov x\n, xzr
.endr
.endm
.macro SAVE_REGS, el
stp x0, x1, [sp]
stp x2, x3, [sp, 2 * 8]
stp x4, x5, [sp, 4 * 8]
stp x6, x7, [sp, 6 * 8]
stp x8, x9, [sp, 8 * 8]
stp x10, x11, [sp, 10 * 8]
stp x12, x13, [sp, 12 * 8]
stp x14, x15, [sp, 14 * 8]
stp x16, x17, [sp, 16 * 8]
stp x18, x19, [sp, 18 * 8]
stp x20, x21, [sp, 20 * 8]
stp x22, x23, [sp, 22 * 8]
stp x24, x25, [sp, 24 * 8]
stp x26, x27, [sp, 26 * 8]
stp x28, x29, [sp, 28 * 8]
str x30, [sp, 30 * 8]
mrs x10, elr_el1
mrs x11, spsr_el1
stp x10, x11, [sp, 32 * 8]
.if \el == 0
clear_gp_regs
mrs x12, tpidr_el0 // save user tls pointer
ldr x13, [sp, 31 * 8] // restore current ktask ptr
mrs x9, sp_el0 // save user stack pointer */
msr sp_el0, x13 // restore kernel task ptr
.else
mov x9, sp
mov x12, xzr
.endif
str x12, [sp, 34 * 8] // save tpidr_el0
str x9, [sp, 31 * 8] // save user sp
.endm
.macro RESTORE_REGS, el
ldp x30, x9, [sp, 30 * 8] // load user sp_el0
ldp x10, x11, [sp, 32 * 8] // load ELR, SPSR
msr elr_el1, x10
msr spsr_el1, x11
ldr x12, [sp, 34 * 8]
.if \el == 0
msr tpidr_el0, x12 // restore user tls pointer
mrs x13, sp_el0 // save current ktask ptr
str x13, [sp, 31 * 8]
msr sp_el0, x9 // restore user sp
.endif
ldp x28, x29, [sp, 28 * 8]
ldp x26, x27, [sp, 26 * 8]
ldp x24, x25, [sp, 24 * 8]
ldp x22, x23, [sp, 22 * 8]
ldp x20, x21, [sp, 20 * 8]
ldp x18, x19, [sp, 18 * 8]
ldp x16, x17, [sp, 16 * 8]
ldp x14, x15, [sp, 14 * 8]
ldp x12, x13, [sp, 12 * 8]
ldp x10, x11, [sp, 10 * 8]
ldp x8, x9, [sp, 8 * 8]
ldp x6, x7, [sp, 6 * 8]
ldp x4, x5, [sp, 4 * 8]
ldp x2, x3, [sp, 2 * 8]
ldp x0, x1, [sp]
add sp, sp, 35 * 8
.endm
.macro HANDLE_TRAP, el, ht, regsize, label
.p2align 7
b handle_el\el\ht\()_\regsize\()_\label
.endm
.macro HANDLE, el, ht, regsize, label
.section .text
handle_el\el\ht\()_\regsize\()_\label:
sub sp, sp, 35 * 8
SAVE_REGS \el
mov x0, sp
bl handle_el\el\ht\()_\regsize\()_\label\()_exception
.if \el == 1
b ret_to_kernel
.else
b ret_to_user
.endif
.endm
.section .text
.p2align 11
.global exception_vector_base
exception_vector_base:
// current EL, with SP_EL0
HANDLE_TRAP 1, t, 64, sync
HANDLE_TRAP 1, t, 64, irq
HANDLE_TRAP 1, t, 64, fiq
HANDLE_TRAP 1, t, 64, error
// current EL, with SP_ELx
HANDLE_TRAP 1, h, 64, sync
HANDLE_TRAP 1, h, 64, irq
HANDLE_TRAP 1, h, 64, fiq
HANDLE_TRAP 1, h, 64, error
// lower EL, aarch64 with SP_EL0
HANDLE_TRAP 0, t, 64, sync
HANDLE_TRAP 0, t, 64, irq
HANDLE_TRAP 0, t, 64, fiq
HANDLE_TRAP 0, t, 64, error
// lower EL, aarch32
HANDLE_TRAP 0, t, 32, sync
HANDLE_TRAP 0, t, 32, irq
HANDLE_TRAP 0, t, 32, fiq
HANDLE_TRAP 0, t, 32, error
/*
* used to create handle_el_label_trap
*/
// current EL, with SP_EL0
HANDLE 1, t, 64, sync
HANDLE 1, t, 64, irq
HANDLE 1, t, 64, fiq
HANDLE 1, t, 64, error
// current EL, with SP_ELx
HANDLE 1, h, 64, sync
HANDLE 1, h, 64, irq
HANDLE 1, h, 64, fiq
HANDLE 1, h, 64, error
// lower EL, aarch64 with SP_EL0
HANDLE 0, t, 64, sync
HANDLE 0, t, 64, irq
HANDLE 0, t, 64, fiq
HANDLE 0, t, 64, error
// lower EL, aarch32
HANDLE 0, t, 32, sync
HANDLE 0, t, 32, irq
HANDLE 0, t, 32, fiq
HANDLE 0, t, 32, error
.section .text
.global ret_to_kernel
ret_to_kernel:
RESTORE_REGS 1
eret
.section .text
.global ret_to_user
ret_to_user:
RESTORE_REGS 0
eret
.section .text
.global ret_to_first_user
ret_to_first_user:
mov sp, x0
b ret_to_user
|
LeonRust/Starry-FastDDS | 190 | modules/axhal/src/arch/x86_64/signal.S | # To create the sigreturn trampoline
# x86_64 sigreturn trampoline: user signal handlers return here and invoke
# the sigreturn syscall to hand control back to the kernel.
.section .text.signal_trampoline
.code64
.global start_signal_trampoline
start_signal_trampoline:
    # syscall id: rax = 15 (sigreturn)
    mov rax, 0xf
    syscall
LeonRust/Starry-FastDDS | 1,143 | modules/axhal/src/arch/x86_64/syscall.S | .section .text
# Fast `syscall` entry (Intel syntax). On entry: rcx = user RIP,
# r11 = user RFLAGS, GS base = user GS (swapped immediately below).
# Builds a full register frame on the per-CPU kernel stack, calls the Rust
# handler with rdi = frame pointer, then returns via sysretq.
syscall_entry:
    swapgs
    mov     gs:[offset __PERCPU_USER_RSP_OFFSET], rsp
    mov     rsp, gs:[offset __PERCPU_KERNEL_RSP_OFFSET]

    sub     rsp, 8                                  // skip user_ss
    push    gs:[offset __PERCPU_USER_RSP_OFFSET]    // user_rsp
    push    r11                                     // rflags
    mov     [rsp - 2 * 8], rcx                      // rip
    sub     rsp, 4 * 8                              // skip until general registers

    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax

    mov     rdi, rsp
    call    x86_syscall_handler

    // unwind the frame in reverse; rax now carries the syscall return value
    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    add     rsp, 7 * 8

    // reload the sysret state saved above, then drop back to user mode
    mov     rcx, [rsp - 5 * 8]  // rip
    mov     r11, [rsp - 3 * 8]  // rflags
    mov     rsp, [rsp - 2 * 8]  // user_rsp

    swapgs
    sysretq
LeonRust/Starry-FastDDS | 1,505 | modules/axhal/src/arch/x86_64/trap.S | .equ NUM_INT, 256
.altmacro
# DEF_HANDLER: one stub per interrupt vector. Vectors 8, 10-14 and 17 get a
# CPU-pushed error code; the rest push a dummy 0 so the TrapFrame layout is
# uniform for all vectors.
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
    # error code pushed by CPU
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.else
    push    0           # fill in error code in TrapFrame
    push    \i          # interrupt vector
    jmp     .Ltrap_common
.endif
.endm

.macro DEF_TABLE_ENTRY, i
    .quad .Ltrap_handler_\i
.endm

.section .text
.code64
_trap_handlers:
.set i, 0
.rept NUM_INT
    DEF_HANDLER %i
    .set i, i + 1
.endr

# Common trap tail: after [vector, error_code] the CPU frame's CS sits at
# rsp+3*8; its low 2 bits select user (ring 3) vs kernel origin for swapgs.
# Pushes the 15 GPRs, calls the Rust handler with rdi = &TrapFrame.
.Ltrap_common:
    test    byte ptr [rsp + 3 * 8], 3   # swap GS if it comes from user space
    jz      1f
    swapgs
1:
    push    r15
    push    r14
    push    r13
    push    r12
    push    r11
    push    r10
    push    r9
    push    r8
    push    rdi
    push    rsi
    push    rbp
    push    rbx
    push    rdx
    push    rcx
    push    rax

    mov     rdi, rsp
    call    x86_trap_handler

    pop     rax
    pop     rcx
    pop     rdx
    pop     rbx
    pop     rbp
    pop     rsi
    pop     rdi
    pop     r8
    pop     r9
    pop     r10
    pop     r11
    pop     r12
    pop     r13
    pop     r14
    pop     r15

    test    byte ptr [rsp + 3 * 8], 3   # swap GS back if return to user space
    jz      2f
    swapgs
2:
    add     rsp, 16                     # pop vector, error_code
    iretq

# Table of all 256 stub addresses, consumed by the IDT setup code.
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
    DEF_TABLE_ENTRY %i
    .set i, i + 1
.endr
|
LeonRust/Starry-FastDDS | 1,965 | modules/axhal/src/platform/x86_pc/ap_start.S | # Boot application processors into the protected mode.
# Each non-boot CPU ("AP") is started up in response to a STARTUP
# IPI from the boot CPU. Section B.4.2 of the Multi-Processor
# Specification says that the AP will start in real mode with CS:IP
# set to XY00:0000, where XY is an 8-bit value sent with the
# STARTUP. Thus this code must start at a 4096-byte boundary.
#
# Because this code sets DS to zero, it must sit
# at an address in the low 2^16 bytes.
.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr}
.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr}
.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr}
# fixed slots near the end of the start page; presumably filled in by the
# BSP with the AP's stack pointer and entry point — TODO confirm in caller
.equ stack_ptr, {start_page_paddr} + 0xff0
.equ entry_ptr, {start_page_paddr} + 0xff8

# 0x6000
.section .text
.code16
.p2align 12
.global ap_start
ap_start:
    cli
    wbinvd                          # flush caches before mode switch
    xor     ax, ax
    mov     ds, ax
    mov     es, ax
    mov     ss, ax
    mov     fs, ax
    mov     gs, ax

    # load the 64-bit GDT
    lgdt    [pa_ap_gdt_desc]

    # switch to protected-mode
    mov     eax, cr0
    or      eax, (1 << 0)           # CR0.PE
    mov     cr0, eax

    # far jump to 32-bit code. 0x8 is code32 segment selector
    ljmp    0x8, offset pa_ap_start32

.code32
ap_start32:
    # pick up the stack and entry point published by the BSP, then jump
    mov     esp, [stack_ptr]
    mov     eax, [entry_ptr]
    jmp     eax

.balign 8
# .type multiboot_header, STT_OBJECT
.Lap_tmp_gdt_desc:
    .short  .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit
    .long   pa_ap_gdt                           # base

.balign 16
.Lap_tmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Lap_tmp_gdt_end:

# 0x7000
.p2align 12
.global ap_end
ap_end:
|
LeonRust/Starry-FastDDS | 4,307 | modules/axhal/src/platform/x86_pc/multiboot.S | # Bootstrapping from 32-bit with the Multiboot specification.
# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
# BSP boot path: Multiboot loader drops us here in 32-bit protected mode
# with eax = magic and ebx = multiboot info pointer.
.section .text.boot
.code32
.global _start
_start:
    mov     edi, eax        # arg1: magic: 0x2BADB002
    mov     esi, ebx        # arg2: multiboot info
    jmp     bsp_entry32

.balign 4
.type multiboot_header, STT_OBJECT
multiboot_header:
    .int    {mb_hdr_magic}                      # magic: 0x1BADB002
    .int    {mb_hdr_flags}                      # flags
    .int    -({mb_hdr_magic} + {mb_hdr_flags})  # checksum
    .int    multiboot_header - {offset}         # header_addr
    .int    _skernel - {offset}                 # load_addr
    .int    _edata - {offset}                   # load_end
    .int    _ebss - {offset}                    # bss_end_addr
    .int    _start - {offset}                   # entry_addr

# Common code in 32-bit, prepare states to enter 64-bit.
.macro ENTRY32_COMMON
    # set data segment selectors
    mov     ax, 0x18
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax

    # set PAE, PGE bit in CR4
    mov     eax, {cr4}
    mov     cr4, eax

    # load the temporary page table
    lea     eax, [.Ltmp_pml4 - {offset}]
    mov     cr3, eax

    # set LME, NXE bit in IA32_EFER
    mov     ecx, {efer_msr}
    mov     edx, 0
    mov     eax, {efer}
    wrmsr

    # set protected mode, write protect, paging bit in CR0
    mov     eax, {cr0}
    mov     cr0, eax
.endm

# Common code in 64-bit
.macro ENTRY64_COMMON
    # clear segment selectors
    xor     ax, ax
    mov     ss, ax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
.endm

.code32
bsp_entry32:
    lgdt    [.Ltmp_gdt_desc - {offset}]         # load the temporary GDT
    ENTRY32_COMMON
    ljmp    0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment

.code32
.global ap_entry32
ap_entry32:
    ENTRY32_COMMON
    ljmp    0x10, offset ap_entry64 - {offset}  # 0x10 is code64 segment

.code64
bsp_entry64:
    ENTRY64_COMMON

    # set RSP to boot stack
    movabs  rsp, offset {boot_stack}
    add     rsp, {boot_stack_size}

    # call rust_entry(magic, mbi)
    movabs  rax, offset {entry}
    call    rax
    jmp     .Lhlt

.code64
ap_entry64:
    ENTRY64_COMMON

    # set RSP to high address (already set in ap_start.S)
    mov     rax, {offset}
    add     rsp, rax

    # call rust_entry_secondary(magic)
    mov     rdi, {mb_magic}
    movabs  rax, offset {entry_secondary}
    call    rax
    jmp     .Lhlt

# Fallthrough/halt loop if the Rust entry ever returns.
.Lhlt:
    hlt
    jmp     .Lhlt

.section .rodata
.balign 8
.Ltmp_gdt_desc:
    .short  .Ltmp_gdt_end - .Ltmp_gdt - 1   # limit
    .long   .Ltmp_gdt - {offset}            # base

.section .data
.balign 16
.Ltmp_gdt:
    .quad 0x0000000000000000    # 0x00: null
    .quad 0x00cf9b000000ffff    # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k)
    .quad 0x00af9b000000ffff    # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k)
    .quad 0x00cf93000000ffff    # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k)
.Ltmp_gdt_end:

.balign 4096
.Ltmp_pml4:
    # 0x0000_0000 ~ 0xffff_ffff
    .quad .Ltmp_pdpt_low - {offset} + 0x3   # PRESENT | WRITABLE | paddr(tmp_pdpt)
    .zero 8 * 510
    # 0xffff_ff80_0000_0000 ~ 0xffff_ff80_ffff_ffff
    .quad .Ltmp_pdpt_high - {offset} + 0x3  # PRESENT | WRITABLE | paddr(tmp_pdpt)

# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb)
.Ltmp_pdpt_low:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508

.Ltmp_pdpt_high:
    .quad 0x0000 | 0x83         # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x0)
    .quad 0x40000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000)
    .quad 0x80000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x8000_0000)
    .quad 0xc0000000 | 0x83     # PRESENT | WRITABLE | HUGE_PAGE | paddr(0xc000_0000)
    .zero 8 * 508
|
leowang000/ACore-2025 | 722 | os/src/timer/timer_trap.S | .section .text
# M-mode timer interrupt stub. mscratch points to a scratch frame laid out
# as: [0..2] = spill slots for t0-t2, [3] = &MTIMECMP, [4] = interval.
# Reprograms MTIMECMP for the next tick and raises SSIP so the actual timer
# work is handled by the S-mode software-interrupt handler.
    .globl __timer_trap
    .align 2
__timer_trap:
    csrrw sp, mscratch, sp
    # Store t0, t1, t2.
    sd t0, 0 * 8(sp)
    sd t1, 1 * 8(sp)
    sd t2, 2 * 8(sp)
    # Load the address of MTIMERCMP into t0.
    ld t0, 3 * 8(sp)
    # Load the time interval into t1.
    ld t1, 4 * 8(sp)
    # Load the current time to t2.
    ld t2, 0(t0)
    # Get the next trigger time.
    add t2, t2, t1
    # Store the next trigger time to MTIMECMP
    sd t2, 0(t0)
    # Set SSIP bit (value 2) in SIP register to trigger supervisor-mode software interrupt.
    li t0, 2
    csrw sip, t0
    # Restore t0, t1, t2.
    ld t0, 0 * 8(sp)
    ld t1, 1 * 8(sp)
    ld t2, 2 * 8(sp)
    csrrw sp, mscratch, sp
    mret
leowang000/ACore-2025 | 1,638 | os/src/trap/trap.S | .altmacro
# Trampoline trap entry/exit (mapped at the same VA in user and kernel
# address spaces). TrapContext slots: 0-31 GPRs, 32 sstatus, 33 sepc,
# 34 kernel_satp, 35 kernel_sp, 36 trap_handler.
.macro SAVE_GPR id
    sd x\id, \id * 8(sp)
.endm

.macro LOAD_GPR id
    ld x\id, \id * 8(sp)
.endm

    .section .text.trampoline
    .globl __alltraps
    .align 2
__alltraps:
    # now in user address space
    # sp->user_sp, sscratch->TRAP_CONTEXT
    csrrw sp, sscratch, sp
    # now sp->TRAP_CONTEXT, sscratch->user_sp
    # save GPRs to the TrapContext page, except x0, sp(x2) and tp(x4)
    SAVE_GPR 1
    SAVE_GPR 3
    .set n, 5
    .rept 27
        SAVE_GPR %n
        .set n, n + 1
    .endr
    # save CSR sstatus and sepc to the TrapContext page
    csrr t0, sstatus
    sd t0, 32 * 8(sp)
    csrr t0, sepc
    sd t0, 33 * 8(sp)
    # save the sp before entering the trap (i.e. the user_sp)
    csrr t0, sscratch
    sd t0, 2 * 8(sp)
    # load kernel_satp into t0
    ld t0, 34 * 8(sp)
    # load trap_handler into t1
    ld t1, 36 * 8(sp)
    # switch to kernel stack
    ld sp, 35 * 8(sp)
    # switch to kernel address space
    csrw satp, t0
    sfence.vma
    # jump to trap_handler
    jr t1

    .globl __restore
__restore:
    # a0: the va of the TrapContext page (i.e. TRAP_CONTEXT)
    # a1: user address space satp
    # switch to user address space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0
    mv sp, a0
    # now sp->TRAP_CONTEXT, sscratch->TRAP_CONTEXT
    # restore CSR sstatus and sepc
    ld t0, 32 * 8(sp)
    csrw sstatus, t0
    ld t0, 33 * 8(sp)
    csrw sepc, t0
    # restore GPRs, except x0, sp(x2) and tp(x4)
    LOAD_GPR 1
    LOAD_GPR 3
    .set n, 5
    .rept 27
        LOAD_GPR %n
        .set n, n + 1
    .endr
    # back to user stack
    ld sp, 2 * 8(sp)
    sret
leowang000/ACore-2025 | 412 | os/src/task/scheduler/switch.S | .altmacro
# __switch(current: *mut TaskContext (a0), next: *const TaskContext (a1)):
# saves sp/ra/s0-s11 of the current task and restores the next task's.
# TaskContext slots: 0 = sp, 1 = ra, 2..13 = s0..s11.
.macro SAVE_SREG id
    sd s\id, (\id + 2) * 8(a0)
.endm

.macro LOAD_SREG id
    ld s\id, (\id + 2) * 8(a1)
.endm

    .section .text
    .globl __switch
__switch:
    sd sp, 0(a0)
    sd ra, 8(a0)
    .set n, 0
    .rept 12
        SAVE_SREG %n
        .set n, n + 1
    .endr
    ld sp, 0(a1)
    ld ra, 8(a1)
    .set n, 0
    .rept 12
        LOAD_SREG %n
        .set n, n + 1
    .endr
    # ra was restored from the next context, so this returns into the next task
    ret
|
li1553770945/gyp-mirror | 296 | test/assembly/src/lib1.S | #if PLATFORM_WINDOWS || PLATFORM_MAC
# define IDENTIFIER(n) _##n
#else /* Linux */
# define IDENTIFIER(n) n
#endif

/* lib1_function: returns 42. The symbol gets a leading underscore on
   Windows/Mac (see IDENTIFIER above); x86 vs ARM body selected below. */
.globl IDENTIFIER(lib1_function)
IDENTIFIER(lib1_function):
#if !defined(PLATFORM_ANDROID)
  movl $42, %eax
  ret
#else /* Android (assuming ARM) */
  mov r0, #42
  bx lr
#endif
|
li1553770945/gyp-mirror | 38 | test/ninja/s-needs-no-depfiles/empty.s | # This file intentionally left blank.
|
li1553770945/gyp-mirror | 18 | test/mac/lto/asmfile.S | .globl _asfun
ret
|
li041/RocketOS-mirror | 1,209 | os/src/arch/riscv64/entry.S | .section .text.entry
    .globl _start
_start:
    # a0 = hart id
    # pc = 0x8020_0000
    # carve out a per-hart 64 KiB (1 << 16) stack below boot_stack_top
    slli t0, a0, 16
    la sp, boot_stack_top
    sub sp, sp, t0

    # since the kernel base addr is 0xffff_ffc0_8020_0000
    # we need to activate pagetable here
    # satp: 8 << 60 | boot_pagetable
    la t0, boot_pagetable
    li t1, 8 << 60
    srli t0, t0, 12
    or t0, t0, t1
    csrw satp, t0
    sfence.vma

    la t0, fake_main    # load the virtual-address symbol
    jr t0               # indirect jump into fake_main

    .section .bss.stack
    .globl boot_stack_lower_bound
boot_stack_lower_bound:
    .space 4096 * 16 * 4 # 4 CPUS at most

    .globl boot_stack_top
boot_stack_top:

    .section .data
    .align 12
boot_pagetable:
    # identity + high-half gigapage mappings:
    # 0x0000_0000_4000_0000 -> 0x0000_0000_4000_0000
    # 0x0000_0000_8000_0000 -> 0x0000_0000_8000_0000
    # 0xffff_fc00_4000_0000 -> 0x0000_0000_4000_0000
    # 0xffff_fc00_8000_0000 -> 0x0000_0000_8000_0000
    .quad 0
    .quad (0x40000 << 10) | 0xcf
    .quad (0x80000 << 10) | 0xcf # VRWXAD
    .quad (0xc0000 << 10) | 0xcf # VRWXAD
    .zero 8 * 253
    .quad (0x40000 << 10) | 0xcf # VRWXAD
    .quad (0x80000 << 10) | 0xcf # VRWXAD
    .zero 8 * 253
li041/RocketOS-mirror | 1,912 | os/src/arch/la64/entry.S | .section .text.entry
    .global _start
    .equ CSR_CRMD,  0x0
    .equ CSR_CPUID, 0x20
    .equ CSR_DMW0,  0x180
    .equ CSR_DMW1,  0x181
# LoongArch boot entry: set up a per-core stack and the direct-mapping
# windows (DMW0/DMW1), then jump into rust_main.
_start:
0:
    # read the current CPU id into a0
    csrrd   $a0, CSR_CPUID
    # set up a per-core stack
    li.d    $t0, 4096 * 16
    mul.d   $t1, $a0, $t0
    la.global $sp, boot_stack_top
    sub.d   $sp, $sp, $t1

    # (debug) read CSR.CRMD to check the current memory-mapping mode
    # CRMD's csr_num is 0x0
    # csrrd $t1, 0x0
    # CRMD is 0x8: direct address translation mode
    # li.d  $t0, 0x800000001fe20000  # load uart_addr into scratch $t0
    # li.w  $t2, 0x48                # load the character 'H' into $t2
    # st.b  $t2, $t0, 0              # write 'H' to the UART
    # this variant is broken:
    # li.d $t0, 0x90000000
    # li.w $t2, 0x48
    # st.b $t2, $t0, 0
    # this variant works

    # set up the direct-mapping windows
    # addi.d  $t0, $zero,0x11
    # csrwr $t0, 0x180  # set LOONGARCH_CSR_DMWIN0
    # copy from Impact
    pcaddi  $t0, 0x0
    srli.d  $t0, $t0, 0x30
    slli.d  $t0, $t0, 0x30
    addi.d  $t0, $t0, 0x11
    csrwr   $t0, 0x181 # Make sure the window remains the same after the switch.
    sub.d   $t0, $t0, $t0
    addi.d  $t1, $t0, 0x11
    # li.d  $t0, 0x800000001fe20000  # load uart_addr into scratch $t0
    # li.w  $t2, 0x48                # load the character 'H' into $t2
    # st.b  $t2, $t0, 0              # write 'H' to the UART
    csrwr   $t1, 0x180 #00->00
    pcaddi  $t0, 0x0
    slli.d  $t0, $t0, 0x10
    srli.d  $t0, $t0, 0x10
    jirl    $t0, $t0, 0x10 # jump to the next instruction via the segment-0 window
    # The barrier
    sub.d   $t2, $t1, $t1
    addi.d  $t2, $t2, 0x11
    csrwr   $t2, 0x181 #00->00
    csrwr   $t1, 0x180 #recover uart
    #can use uart
    # li.d  $t0, 0x800000001fe20000  # load uart_addr into scratch $t0
    # li.w  $t2, 0x48                # load the character 'H' into $t2
    # st.b  $t2, $t0, 0              # write 'H' to the UART
    la.global $sp, boot_stack_top
    bl rust_main

    .section .bss.stack
    .globl boot_stack
boot_stack:
    .space 4096 * 16 * 4
    .globl boot_stack_top
boot_stack_top:
li041/RocketOS-mirror | 1,271 | os/src/arch/la64/tlb_refill.S | .section .text.__rfill
    .globl __rfill
    .equ TLBRSAVE, 0x8b
# PGD: global-directory base for the faulting VA in the current context
    .equ PGD, 0x1b
    .equ TLBRELO0, 0x8c
    .equ TLBRELO1, 0x8d
    .equ TLBREHI, 0x8e
    .equ CRMD, 0x0
# faulting virtual address that triggered the TLB refill exception
    .equ TLBRBADV, 0x89
    .equ TLBIDX, 0x10
    .equ TLBEHI, 0x11
    .equ TLBELO0, 0x12
    .equ TLBELO1, 0x13
# __rfill checks page-table-entry validity while walking; on an invalid entry
# it falls through to the invalid-PTE construction path
__rfill:
    # stash t0 in the TLB-refill exception save register
    csrwr   $t0, TLBRSAVE
    # load the page-table base of the faulting access
    csrrd   $t0, PGD
    # first lddir: fetch the level-2 directory
    lddir   $t0, $t0, 2
    # if lddir failed, go construct an invalid PTE
    beqz    $t0, construct_invalid
    # past misconception: the low bit comes back set and must be subtracted
    # addi.d  $t0, $t0, -1
    # 7.13: the looked-up value carries flags that must be removed
    # shift right then left by 12 to clear the flag bits
    srli.d  $t0, $t0, 12
    slli.d  $t0, $t0, 12
    # second lddir: fetch the level-1 directory
    lddir   $t0, $t0, 1
    # if lddir failed, go construct an invalid PTE
    beqz    $t0, construct_invalid
    # addi.d  $t0, $t0, -1
    srli.d  $t0, $t0, 12
    slli.d  $t0, $t0, 12
    ldpte   $t0, 0
    ldpte   $t0, 1
    tlbfill
    b       restore_t0
construct_invalid:
    csrrd   $t0, TLBREHI
    ori     $t0, $t0, 0xC
    csrwr   $t0, TLBEHI
    rotri.d $t0, $t0, 61
    ori     $t0, $t0, 3
    rotri.d $t0, $t0, 3
    csrwr   $t0, TLBRELO0
    csrrd   $t0, TLBRELO0
    csrwr   $t0, TLBRELO1
    tlbfill
restore_t0:
    # restore t0
    csrrd   $t0, TLBRSAVE
    # exception return (resumes at the faulting address, TLBRBADV)
    ertn
li041/RocketOS-mirror | 142 | os/src/arch/la64/load_img.S | .section .data
# Embed the root filesystem image in the data section, page-aligned
# (.align 12 = 4 KiB); `simg`/`eimg` bound the blob for kernel code.
.global simg
.global eimg
.align 12
simg:
    .incbin "../easy-fs-fuse/rootfs-ubifs-ze.img"
eimg:
    .align 12
li041/RocketOS-mirror | 1,032 | os/src/arch/riscv64/switch/switch.S | .altmacro
.altmacro
.macro SAVE_CALLEE n
    sd s\n, (\n+2)*8(sp)
.endm
.macro LOAD_CALLEE n
    ld s\n, (\n+2)*8(a0)
.endm
    .section .text
    .globl __switch
__switch:
    # __swtich(
    #     next_task_kernel_stack: *const usize,
    # )
    # a0 -> next_task_kernel_stack
    # hard-coded: reserve a `TaskContext`-sized area on the current task's kernel stack
    addi sp, sp, -16*8
    # save ra, tp and s0~s11
    sd ra, 0(sp)
    sd tp, 8(sp)
    .set n, 0
    .rept 12
        SAVE_CALLEE %n
        .set n, n+1
    .endr
    # save satp
    csrr t0, satp
    sd t0, 14*8(sp)
    # re-save the current task's kernel stack pointer
    sd sp, 0(tp)
    # a0 points at the next task's kernel stack
    # restore ra, tp and s0~s11 of next execution
    ld ra, 0(a0)
    ld tp, 8(a0)
    # from here on tp points at the next task's TCB
    .set n, 0
    .rept 12
        LOAD_CALLEE %n
        .set n, n+1
    .endr
    # restore satp
    ld t0, 14*8(a0)
    csrw satp, t0
    # flush the TLB
    sfence.vma
    # return to next execution, hard-coded
    addi a0, a0, 16*8
    # Todo: verify — apparently no need to update sp in the next task's TCB
    # sd a0, 0(tp)
    mv sp, a0
    # # sp now points at the next task's kernel stack
    ret
|
li041/RocketOS-mirror | 4,146 | os/src/arch/riscv64/trap/trap.S | # sscratch 寄存器 在用户态时保存用户的内核栈指针, 在内核态时保存内核的用户栈指针
# Inside a trap we do not need to update sp in the TCB: in __switch the
# current task's kernel stack is derived from the live sp, which already has
# the TrapContext size subtracted.
# __switch, however, must update the current TCB's sp, because the next
# task's kernel-stack position is located through its TCB sp.
# 2025-03-31 change
# user mode updates the tp register, so the kernel tp used to be saved at -8(sp)
# 2025-04-11 change
# clone creates new children whose tp cannot be kept at -8(sp), so the
# TrapContext layout was extended instead
# Note: last-a0 preserves the original user syscall argument (for SA_RESTART
# signal handling); it is only saved in __trap_from_user and is restored by
# handle_signal
#
# 0  ----------------------
#    |      kernel_tp      |
#    |---------------------|
#    |       last-a0       |
#    |---------------------|
#    |        sepc         |
#    |---------------------|
#    |       sstatus       |
#    |---------------------|
#    |      x0 ~ x31       |   x[4](tp) holds the user tp
# [sp] -> 36*8 ------------
.altmacro
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    .section .text
    .globl __trap_from_user
    .globl __return_to_user
    .globl __trap_from_kernel
    .align 2
# On a trap from user mode, save the user context on the kernel stack, then
# call trap_handler
__trap_from_user:
    # swap sp and sscratch
    csrrw sp, sscratch, sp
    # save the user context on the kernel stack
    addi sp, sp, -36*8
    # save general-purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # save the user tp first
    sd x4, 4*8(sp)
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they were saved on kernel stack
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    # read user stack from sscratch and save it on the kernel stack
    csrr t2, sscratch
    sd t2, 2*8(sp)
    # save the user a0 argument into last_a0
    sd a0, 34*8(sp)
    # the first argument is the TrapContext address on the kernel stack, passed via a0
    mv a0, sp
    # load the kernel tp from kernel_tp
    ld tp, 35*8(sp)
    # note: stvec must be set only after the context has been saved
    # set stvec Mode (bits 0~1) to Direct, base address = __trap_from_kernel
    la t0, __trap_from_kernel
    csrw stvec, t0
    # set sstatus.SPP to 1 (kernel mode); has no real effect here
    csrs sstatus, 8
    call trap_handler
    j __return_to_user
# When trap_handler returns, restore the user context from the kernel stack
# and return to user mode.
# Note: __return_to_user is tightly coupled to the TrapContext struct layout
# (hard-coded offsets).
# Expectation: on entry, sp points at the TrapContext on this kernel stack
__return_to_user:
    # set stvec Mode (bits 0~1) to Direct, base address = __trap_from_user
    # must happen before restoring the context, which relies on stvec
    la t0, __trap_from_user
    csrw stvec, t0
    # now sp->kernel stack(after allocated)
    ld t0, 32*8(sp)
    # restore sstatus/sepc for `__return_to_user`
    ld t1, 33*8(sp)
    ld t2, 2*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    csrw sscratch, t2
    # sscratch now points at the user stack
    # restore general-purpose registers except sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    # skip the kernel tp
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # save the kernel tp into kernel_tp
    sd x4, 35*8(sp)
    # restore the user tp
    ld x4, 4*8(sp)
    # release TrapContext on kernel stack
    addi sp, sp, 36*8
    csrrw sp, sscratch, sp
    # sp now points at the user stack, sscratch at the kernel stack; return to user
    # clear sstatus.SPP to 0 so sret returns to user mode
    csrc sstatus, 8
    sret
# Todo: with contexts kept on the kernel stack this is feasible; details TBD
__trap_from_kernel:
    # # swap sp and sscratch
    # csrrw sp, sscratch, sp
    # unlike __trap_from_user, no stack switch: we are already in the kernel
    # allocate another TrapContext on kernel stack
    addi sp, sp, -34*8
    # save general-purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    # save x3~x31
    .set n, 3
    .rept 29
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they were saved on kernel stack
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)
    sd t1, 33*8(sp)
    sd sp, 2*8(sp)
    # the first argument is the TrapContext address on the kernel stack, passed via a0
    mv a0, sp
    call kernel_trap_handler
    # the kernel trap handler has returned; restore the kernel context
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    # Todo: saving/restoring sstatus here may be unnecessary
    csrw sstatus, t0
    csrw sepc, t1
    # sp
    ld x1, 1*8(sp)
    # restore x3~x31
    .set n, 3
    .rept 29
        LOAD_GP %n
        .set n, n+1
    .endr
    # release TrapContext on kernel stack
    addi sp, sp, 34*8
    sret
li041/RocketOS-mirror | 177 | os/src/arch/riscv64/trampoline/trampoline.S | .section .text.trampoline
    .align 12
    .globl sigreturn_trampoline
# After a user-registered signal handler finishes, it returns here; this stub
# issues sigreturn (syscall 139) to get back into the kernel
sigreturn_trampoline:
    li a7,139
    ecall
li041/RocketOS-mirror | 1,067 | os/src/arch/la64/switch/switch.S | .altmacro
.altmacro
.macro SAVE_SN n
    st.d $s\n, $sp, (\n+2)*8
.endm
.macro LOAD_SN n
    ld.d $s\n, $a0, (\n+2)*8
.endm
    .section .text
    .globl __switch
    .equ CSR_PGDL, 0x19
__switch:
    # __swtich(
    #     next_task_kernel_stack: *const usize,
    #)
    # a0 -> next_task_kernel_stack
    # hard-coded: reserve a `TaskContext`-sized area on the current task's kernel stack
    addi.d  $sp, $sp, -16*8
    # save ra, tp and s0~s8 (fp is saved separately below)
    st.d  $ra, $sp, 0
    st.d  $tp, $sp, 8
    .set n, 0
    .rept 9
        SAVE_SN %n
        .set n, n+1
    .endr
    # save fp into the s[9] slot
    st.d  $fp, $sp, (9+2)*8
    # save pgdl
    csrrd $t0, CSR_PGDL
    st.d  $t0, $sp, 14*8
    # re-save the current task's kernel stack pointer
    st.d  $sp, $tp, 0
    # a0 points at the next task's kernel stack
    # restore ra, tp and s0~s8, fp
    ld.d  $ra, $a0, 0
    ld.d  $tp, $a0, 8
    .set n, 0
    .rept 9
        LOAD_SN %n
        .set n, n+1
    .endr
    ld.d  $fp, $a0, (9+2)*8
    # restore pgdl
    ld.d  $t0, $a0, 14*8
    csrwr $t0, CSR_PGDL
    # flush the TLB
    invtlb 0x3, $zero, $zero
    # return to next execution, hard-coded
    addi.d  $a0, $a0, 16*8
    addi.d  $sp, $a0, 0
    # sp now points at the next task's kernel stack
    jr $ra
li041/RocketOS-mirror | 3,601 | os/src/arch/la64/trap/trap.S | # SAVE数据保存寄存器在用户态时保存用户的内核栈指针
# 2025-04-06 change
# user mode updates the tp register, so the kernel tp used to be saved at -8(sp)
# 2025-04-11 change
# clone creates new children whose tp cannot be kept at -8(sp), so the
# TrapContext layout was extended instead
# Note: last-a0 preserves the original user syscall argument (for SA_RESTART
# signal handling); it is only saved in __trap_from_user and is restored by
# handle_signal
#
# 0  ----------------------
#    |      kernel_tp      |
#    |---------------------|
#    |       last-a0       |
#    |---------------------|
#    |        sepc         |
#    |---------------------|
#    |       sstatus       |
#    |---------------------|
#    |      r0 ~ r31       |   r[2](tp) holds the user tp
# [sp] -> 36*8 ------------
.altmacro
.macro SAVE_GP n
    st.d $r\n, $sp, \n*8
.endm
.macro LOAD_GP n
    ld.d $r\n, $sp, \n*8
.endm
    .section .text
    .globl __trap_from_user
    .globl __return_to_user
    .globl __trap_from_kernel
    .equ CSR_SAVE0, 0x30
    .equ CSR_PRMD, 0x1
    .equ CSR_ERA, 0x6
    .equ CSR_EENTRY, 0xc
    # keep page alignment, as EENTRY semantics require
    .align 12
__trap_from_user:
    # swap sp with SAVE0
    csrwr   $sp, CSR_SAVE0
    # now sp -> kernel stack, SAVE0 -> user stack
    # save the user context on the kernel stack
    # todo: TrapContext not finalized yet; 32 GRs + sstatus + sepc for now
    addi.d  $sp, $sp, -36*8
    # save general-purpose registers
    st.d    $r1, $sp, 1*8
    # save the user tp first
    st.d    $r2, $sp, 2*8
    # skip sp(r3), we will save it later
    .set n, 4
    .rept 28
        SAVE_GP %n
        .set n, n+1
    .endr
    # Todo: save floating-point registers
    # we can use t0/t1/t2 freely: they are already saved on the kernel stack
    csrrd   $t0, CSR_PRMD
    csrrd   $t1, CSR_ERA
    st.d    $t0, $sp, 32*8
    st.d    $t1, $sp, 33*8
    # read the user stack from SAVE0 and save it on the kernel stack
    csrrd   $t2, CSR_SAVE0
    st.d    $t2, $sp, 3*8
    # save the user a0 argument into last_a0
    st.d    $a0, $sp, 34*8
    # trap_handler's first argument is the TrapContext address on the kernel stack, passed via a0
    add.d   $a0, $sp, $zero
    # load the kernel tp from kernel_tp
    ld.d    $r2, $sp, 35*8
    # set the exception entry address
    la      $t0, __trap_from_kernel
    csrwr   $t0, CSR_EENTRY
    bl trap_handler
    b __return_to_user
# When trap_handler returns, restore the user context from the kernel stack
# and return to user mode.
# Note: __return_to_user is tightly coupled to the TrapContext struct layout
# (hard-coded offsets).
# Expectation: on entry, sp points at the TrapContext on this kernel stack
__return_to_user:
    # set EENTRY to __trap_from_user
    la      $t0, __trap_from_user
    csrwr   $t0, CSR_EENTRY
    # restore the context
    ld.d    $t0, $sp, 32*8
    ld.d    $t1, $sp, 33*8
    ld.d    $t2, $sp, 3*8
    csrwr   $t0, CSR_PRMD
    csrwr   $t1, CSR_ERA
    csrwr   $t2, CSR_SAVE0
    # SAVE0 now points at the user stack
    ld.d    $r1, $sp, 1*8
    # skip the kernel tp
    # skip sp(r3), we will load it later
    .set n, 4
    .rept 28
        LOAD_GP %n
        .set n, n+1
    .endr
    # save the kernel tp into kernel_tp
    st.d    $r2, $sp, 35*8
    # restore the user tp
    ld.d    $r2, $sp, 2*8
    # release TrapContext on kernel stack
    addi.d  $sp, $sp, 36*8
    # swap sp with SAVE0
    csrwr   $sp, CSR_SAVE0
    # sp now points at the user stack, SAVE0 at the kernel stack; return to user
    ertn
    .align 12
__trap_from_kernel:
    # tp does not change here, so 34*8 bytes suffice
    addi.d  $sp, $sp, -34*8
    # save general-purpose registers
    st.d    $r1, $sp, 1*8
    st.d    $r2, $sp, 2*8
    # skip sp(r3), save it later
    .set n, 4
    .rept 28
        SAVE_GP %n
        .set n, n+1
    .endr
    # Todo: save floating-point registers
    # we can use t0/t1/t2 freely: they are already saved on the kernel stack
    csrrd   $t0, CSR_PRMD
    csrrd   $t1, CSR_ERA
    st.d    $t0, $sp, 32*8
    st.d    $t1, $sp, 33*8
    # save sp
    st.d    $r3, $sp, 3*8
    # pass the context pointer via a0
    add.d   $a0, $sp, $zero
    bl kernel_trap_handler
    ld.d    $r3, $sp, 3*8
    # the kernel trap handler has returned; restore the kernel context
    ld.d    $t0, $sp, 32*8
    ld.d    $t1, $sp, 33*8
    # Todo: saving/restoring PRMD here may be unnecessary
    csrwr   $t0, CSR_PRMD
    csrwr   $t1, CSR_ERA
    # sp
    ld.d    $r1, $sp, 1*8
    ld.d    $r2, $sp, 2*8
    .set n, 4
    .rept 28
        LOAD_GP %n
        .set n, n+1
    .endr
    # release the TrapContext on the kernel stack
    addi.d  $sp, $sp, 34*8
    ertn
|
li041/RocketOS-mirror | 199 | os/src/arch/la64/trampoline/trampoline.S | .section .text.trampoline
    .align 12
    .globl sigreturn_trampoline
# After a user-registered signal handler finishes, it returns here; this stub
# issues sigreturn (syscall 139) to get back into the kernel
sigreturn_trampoline:
    li.w  $a7, 139
    syscall 0
li1553770945/gyp-mirror | 192 | test/rules/src/an_asm.S | // Copyright (c) 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Fake asm file.
// Deliberately C source with a .S extension: exercises build-rule mapping.
int main() {}
|
LiamStanDev/mini-os | 1,309 | os/src/task/switch.S | # -----------------------------------------------------------------------------
# switch.S - Task context switch routines for RISC-V
#
# This file implements the low-level context switch routine (__switch) for RISC-V.
# It saves the current task's context (registers) to memory and restores the next
# task's context from memory, enabling preemptive multitasking.
#
# - __switch: Saves the current task's context (ra, sp, s0-s11) to the current
# task's TaskContext structure, then loads the next task's context from its
# TaskContext structure and returns to the next task.
#
# TaskContext layout in memory (offsets in 8-byte words):
# 0: ra (return address)
# 1: sp (stack pointer)
# 2-13: s0-s11 (callee-saved registers)
# -----------------------------------------------------------------------------
.altmacro
# SAVE_SN/LOAD_SN: store/load callee-saved register s<n> at TaskContext
# slot n+2 (slots 0/1 hold ra/sp; see layout above).
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm

    .section .text
    .globl __switch
__switch:
    # __switch(
    #     current_task_ctx_ptr: *mut TaskContext,    (a0)
    #     next_task_ctx_ptr: *const TaskContext      (a1)
    # )
    # save current task
    sd ra, 0*8(a0)
    sd sp, 1*8(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n+1
    .endr
    # load next task
    ld ra, 0*8(a1)
    ld sp, 1*8(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n+1
    .endr
    # ra came from the next context, so this returns into the next task
    ret
|
LiamStanDev/mini-os | 4,500 | os/src/trap/trap.S | # -----------------------------------------------------------------------------
# trap.S - Trap entry and context switch routines for RISC-V
#
# This file implements the low-level trap entry (__alltraps) and context restore
# (__restore) routines for RISC-V, handling the saving and restoring of all
# general-purpose registers and control/status registers required for trap
# handling and context switching between user and kernel mode.
#
# - __alltraps: Entry point for all traps (interrupts, exceptions, syscalls).
# Saves user context into TrapContext, switches to kernel address space,
# and jumps to the kernel trap handler.
#
# - __restore: Restores user context from TrapContext and returns to user mode.
#
# TrapContext layout in memory (offsets in 8-byte words):
# 0-31: x[0]..x[31] (general-purpose registers, x[2]=sp is handled specially)
# 32: sstatus
# 33: sepc
# 34: kernel_satp
# 35: kernel_sp
# 36: trap_handler
# -----------------------------------------------------------------------------
#.altmacro
#.macro SAVE_GP n
# sd x\n, \n*8(sp)
#.endm
#.macro LOAD_GP n
# ld x\n, \n*8(sp)
#.endm
# .section .text.trampoline
# .globl __alltraps
# .globl __restore
# .align 2 # riscv specification
#__alltraps:
# # Save user stack pointer and swap with sscratch (kernel/user stack exchange)
# # sp -> user stack, sscratch -> *TrapContext in user stack
# csrrw sp, sscratch, sp
# # sp -> *TrapContext in user space, sscratch -> user stack
#
# # Save general-purpose registers except sp(x2)/tp(x4)
# sd x1, 1*8(sp) # save x1(ra)
# # skip x2(sp)
# sd x3, 3*8(sp) # save x3(gp)
# # Save x5-x31 using macro
# .set n, 5
# .rept 27
# SAVE_GP %n
# .set n, n+1
# .endr
#
# # Save control/status registers to TrapContext
# csrr t0, sstatus
# csrr t1, sepc
# csrr t2, sscratch # t2 -> user stack
# sd t0, 32*8(sp) # TrapContext.sstatus
# sd t1, 33*8(sp) # TrapContext.sepc
# sd t2, 2*8(sp) # TrapContext.x[2]
#
# # Load kernel_satp, trap_handler, and kernel_sp from TrapContext
# ld t0, 34*8(sp) # kernel_satp
# ld sp, 35*8(sp) # kernel_sp
# ld t1, 36*8(sp) # trap_handler
#
# # Switch to kernel address space and flush TLB
# csrw satp, t0
# sfence.vma
#
# # Jump to kernel trap handler
# jr t1
#__restore:
# # a0: pointer to TrapContext, a1: user satp
# csrw satp, a1
# sfence.vma # flush TLB
#
# csrw sscratch, a0
# mv sp, a0
#
# # Restore control/status registers
# ld t0, 32*8(sp) # TrapContext.sstatus
# ld t1, 33*8(sp) # TrapContext.sepc
# csrw sstatus, t0
# csrw sepc, t1
#
# # Restore general-purpose registers except sp(x2)/tp(x4)
# ld x1, 1*8(sp)
# ld x3, 3*8(sp)
# .set n, 5
# .rept 27
# LOAD_GP %n
# .set n, n+1
# .endr
#
# ld sp, 2*8(sp) # Restore user sp
# sret # Return to user mode
.altmacro
# SAVE_GP n: store general-purpose register x<n> into TrapContext slot n
# (8-byte slots; sp holds the TrapContext base address).
.macro SAVE_GP n
    sd x\n, \n*8(sp)
.endm
# LOAD_GP n: reload general-purpose register x<n> from TrapContext slot n.
.macro LOAD_GP n
    ld x\n, \n*8(sp)
.endm
    # Own section so the linker script can map this code at the trampoline
    # page, which is mapped at the same virtual address in both user and
    # kernel address spaces -- required because satp is switched mid-routine.
    .section .text.trampoline
    .globl __alltraps
    .globl __restore
    .align 2                        # stvec requires a 4-byte-aligned target
__alltraps:
    # Swap sp <-> sscratch: sp held the user stack pointer, sscratch held
    # the user-space address of this task's TrapContext.
    csrrw sp, sscratch, sp
    # now sp->*TrapContext in user space, sscratch->user stack
    # save other general purpose registers
    sd x1, 1*8(sp)
    # skip sp(x2), we will save it later
    sd x3, 3*8(sp)
    # skip tp(x4), application does not use it
    # save x5~x31
    .set n, 5
    .rept 27
        SAVE_GP %n
        .set n, n+1
    .endr
    # we can use t0/t1/t2 freely, because they have been saved in TrapContext
    csrr t0, sstatus
    csrr t1, sepc
    sd t0, 32*8(sp)                 # TrapContext.sstatus
    sd t1, 33*8(sp)                 # TrapContext.sepc
    # read user stack from sscratch and save it in TrapContext
    csrr t2, sscratch
    sd t2, 2*8(sp)                  # TrapContext.x[2] = user sp
    # load kernel_satp into t0
    ld t0, 34*8(sp)
    # load trap_handler into t1
    ld t1, 36*8(sp)
    # move to kernel_sp
    ld sp, 35*8(sp)
    # switch to kernel space
    csrw satp, t0
    sfence.vma                      # flush TLB after the address-space switch
    # jump to trap_handler (jr, not call: the handler never returns here)
    jr t1
__restore:
    # a0: *TrapContext in user space(Constant); a1: user space token
    # switch to user space
    csrw satp, a1
    sfence.vma
    csrw sscratch, a0               # sscratch holds *TrapContext for the next trap
    mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
    # restore sstatus/sepc
    ld t0, 32*8(sp)
    ld t1, 33*8(sp)
    csrw sstatus, t0
    csrw sepc, t1
    # restore general purpose registers except x0/sp/tp
    ld x1, 1*8(sp)
    ld x3, 3*8(sp)
    .set n, 5
    .rept 27
        LOAD_GP %n
        .set n, n+1
    .endr
    # back to user stack
    ld sp, 2*8(sp)
    sret                            # return to U-mode at sepc, sstatus restored
|
Liber1917/INCOME_smartcar | 12,541 | ref/STM32F103_Encoder_demo/CORE/startup_stm32f10x_md.s | ;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1_2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
; Reset handler: register-style startup that jumps straight to the C library
; entry point; SystemInit (clock configuration) is intentionally not called.
Reset_Handler   PROC
                EXPORT  Reset_Handler             [WEAK]
                IMPORT  __main
                ; Register-style template: SystemInit is unused, so the call
                ; below is commented out. Library-style projects should supply
                ; SystemInit (STM32 clock init) externally and uncomment it.
                ;IMPORT  SystemInit
                ;LDR     R0, =SystemInit
                ;BLX     R0
                LDR     R0, =__main               ; C library init, then main()
                BX      R0                        ; does not return
                ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB          ; MicroLIB: only export the symbols
                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit
                 ELSE                             ; standard C library: two-region model
                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap
; ARM C library callback reporting stack/heap bounds.
; Contract: R0 = heap base, R1 = stack base (top of stack),
;           R2 = heap limit, R3 = stack limit; return via LR.
__user_initial_stackheap
                 LDR     R0, = Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem + Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR
                 ALIGN
                 ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
Liber1917/INCOME_smartcar | 12,160 | ref/STM32F103_Encoder_demo/CORE/startup_stm32f10x_ld.s | ;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_ld.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Low Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1_2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD 0 ; Reserved
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SPI1_IRQHandler ; SPI1
DCD 0 ; Reserved
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD 0 ; Reserved
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Reset handler routine: register-style startup that branches directly to the
; C library entry point; SystemInit (clock setup) is intentionally skipped.
Reset_Handler   PROC
                EXPORT  Reset_Handler             [WEAK]
                IMPORT  __main
                ; Register-style template: SystemInit is unused, so the call
                ; below is commented out. Library-style projects should supply
                ; SystemInit (STM32 clock init) externally and uncomment it.
                ;IMPORT  SystemInit
                ;LDR     R0, =SystemInit
                ;BLX     R0
                LDR     R0, =__main               ; C library init, then main()
                BX      R0                        ; does not return
                ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
SPI1_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB          ; MicroLIB: only export the symbols
                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit
                 ELSE                             ; standard C library: two-region model
                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap
; ARM C library callback reporting stack/heap bounds.
; Contract: R0 = heap base, R1 = stack base (top of stack),
;           R2 = heap limit, R3 = stack limit; return via LR.
__user_initial_stackheap
                 LDR     R0, = Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem + Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR
                 ALIGN
                 ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
Liber1917/INCOME_smartcar | 15,145 | ref/STM32F103_Encoder_demo/CORE/startup_stm32f10x_hd.s | ;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_hd.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x High Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system and also configure the external
;* SRAM mounted on STM3210E-EVAL board to be used as data
;* memory (optional, to be enabled by user)
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1 & ADC2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
DCD TIM8_BRK_IRQHandler ; TIM8 Break
DCD TIM8_UP_IRQHandler ; TIM8 Update
DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare
DCD ADC3_IRQHandler ; ADC3
DCD FSMC_IRQHandler ; FSMC
DCD SDIO_IRQHandler ; SDIO
DCD TIM5_IRQHandler ; TIM5
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_IRQHandler ; TIM6
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3
DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
; Reset handler: calls SystemInit (system clock configuration) first, then
; branches to the C library entry point, which eventually calls main().
Reset_Handler   PROC
                EXPORT  Reset_Handler             [WEAK]
                IMPORT  __main
                IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0                        ; configure clocks before C runtime init
                LDR     R0, =__main
                BX      R0                        ; does not return
                ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
EXPORT TIM8_BRK_IRQHandler [WEAK]
EXPORT TIM8_UP_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT ADC3_IRQHandler [WEAK]
EXPORT FSMC_IRQHandler [WEAK]
EXPORT SDIO_IRQHandler [WEAK]
EXPORT TIM5_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_5_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
FSMC_IRQHandler
SDIO_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_5_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
                 IF      :DEF:__MICROLIB          ; MicroLIB: only export the symbols
                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit
                 ELSE                             ; standard C library: two-region model
                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap
; ARM C library callback reporting stack/heap bounds.
; Contract: R0 = heap base, R1 = stack base (top of stack),
;           R2 = heap limit, R3 = stack limit; return via LR.
__user_initial_stackheap
                 LDR     R0, = Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem + Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR
                 ALIGN
                 ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
Lind-Project/wasmtime | 4,165 | crates/fiber/src/unix/s390x.S | // A WORD OF CAUTION
//
// This entire file basically needs to be kept in sync with itself. It's not
// really possible to modify just one bit of this file without understanding
// all the other bits. Documentation tries to reference various bits here and
// there but try to make sure to read over everything before tweaking things!
//
// Also at this time this file is heavily based off the x86_64 file, so you'll
// probably want to read that one as well.
.text
#define CONCAT2(a, b) a ## b
#define CONCAT(a, b) CONCAT2(a , b)
#define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
#define GLOBL(fnname) .globl VERSIONED_SYMBOL(fnname)
#define HIDDEN(fnname) .hidden VERSIONED_SYMBOL(fnname)
#define TYPE(fnname) .type VERSIONED_SYMBOL(fnname),@function
#define FUNCTION(fnname) VERSIONED_SYMBOL(fnname)
#define SIZE(fnname) .size VERSIONED_SYMBOL(fnname),.-VERSIONED_SYMBOL(fnname)
// fn(top_of_stack(%r2): *mut u8)
//
// NOTE(review): the comment previously said `%x0`, an AArch64 register name
// left over from the file this port was based on; on s390x the first (and
// only) argument arrives in %r2, as the code below uses.
HIDDEN(wasmtime_fiber_switch)
GLOBL(wasmtime_fiber_switch)
.p2align 2
TYPE(wasmtime_fiber_switch)
FUNCTION(wasmtime_fiber_switch):
    // Save all callee-saved registers on the stack since we're assuming
    // they're clobbered as a result of the stack switch.
    stmg %r6, %r15, 48(%r15)        // GPRs %r6-%r15 into the register save area
    aghi %r15, -64                  // room for the eight callee-saved FPRs
    std %f8, 0(%r15)
    std %f9, 8(%r15)
    std %f10, 16(%r15)
    std %f11, 24(%r15)
    std %f12, 32(%r15)
    std %f13, 40(%r15)
    std %f14, 48(%r15)
    std %f15, 56(%r15)
    // Load our previously saved stack pointer to resume to, and save off our
    // current stack pointer on where to come back to eventually.
    lg %r1, -16(%r2)
    stg %r15, -16(%r2)
    // Switch to the new stack and restore all our callee-saved registers after
    // the switch and return to our new stack.
    ld %f8, 0(%r1)
    ld %f9, 8(%r1)
    ld %f10, 16(%r1)
    ld %f11, 24(%r1)
    ld %f12, 32(%r1)
    ld %f13, 40(%r1)
    ld %f14, 48(%r1)
    ld %f15, 56(%r1)
    lmg %r6, %r15, 112(%r1)         // 64-byte FPR block + 48 = saved GPR area
    br %r14                         // return on the resumed fiber's stack
SIZE(wasmtime_fiber_switch)
// fn(
//     top_of_stack(%r2): *mut u8,
//     entry_point(%r3): extern fn(*mut u8, *mut u8),
//     entry_arg0(%r4): *mut u8,
// )
//
// NOTE(review): the comment previously used AArch64 names %x0-%x2; s390x
// passes these three arguments in %r2-%r4, as the stores below show.
HIDDEN(wasmtime_fiber_init)
GLOBL(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_init)
FUNCTION(wasmtime_fiber_init):
    larl %r1, FUNCTION(wasmtime_fiber_start)
    stg %r1, -48(%r2)   // wasmtime_fiber_start - restored into %r14
    stg %r2, -112(%r2)  // top_of_stack - restored into %r6
    stg %r3, -104(%r2)  // entry_point - restored into %r7
    stg %r4, -96(%r2)   // entry_arg0 - restored into %r8
    aghi %r2, -160      // 160 bytes register save area
    stg %r2, 120(%r2)   // bottom of register save area - restored into %r15
    // `wasmtime_fiber_switch` has a 64 byte stack.
    aghi %r2, -64
    stg %r2, 208(%r2)   // seeds the resume-sp slot at -16(top_of_stack)
    br %r14
SIZE(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_start)
FUNCTION(wasmtime_fiber_start):
.cfi_startproc simple
.cfi_def_cfa_offset 0
    // See the x86_64 file for more commentary on what these CFI directives are
    // doing. Like over there note that the relative offsets to registers here
    // match the frame layout in `wasmtime_fiber_switch`.
    .cfi_escape 0x0f, /* DW_CFA_def_cfa_expression */ \
        7, /* the byte length of this expression */ \
        0x7f, 0x90, 0x1, /* DW_OP_breg15 0x90 */ \
        0x06, /* DW_OP_deref */ \
        0x23, 0xe0, 0x1 /* DW_OP_plus_uconst 0xe0 */
    .cfi_rel_offset 6, -112
    .cfi_rel_offset 7, -104
    .cfi_rel_offset 8, -96
    .cfi_rel_offset 9, -88
    .cfi_rel_offset 10, -80
    .cfi_rel_offset 11, -72
    .cfi_rel_offset 12, -64
    .cfi_rel_offset 13, -56
    .cfi_rel_offset 14, -48
    .cfi_rel_offset 15, -40
    // Load our two arguments prepared by `wasmtime_fiber_init`
    // (%r8 = entry_arg0, %r6 = top_of_stack were restored by the first
    // `wasmtime_fiber_switch` onto this fiber).
    lgr %r2, %r8 // entry_arg0
    lgr %r3, %r6 // top_of_stack
    // ... and then we call the function! Note that this is a function call so
    // our frame stays on the stack to backtrace through.
    basr %r14, %r7 // entry_point
    // .. technically we shouldn't get here, so just trap.
    .word 0x0000
    .cfi_endproc
SIZE(wasmtime_fiber_start)
// Mark that we don't need executable stack.
.section .note.GNU-stack,"",%progbits
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.