repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,815 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_flush_dcache()
*
* Flush the L1 DCache
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
/*
 * microblaze_flush_dcache() - write back (flush) every line of the L1 data
 * cache by walking the whole configured cacheable region one line at a time.
 * Compiles down to a bare return when the D-cache is absent or wdc is not
 * permitted (XPAR_MICROBLAZE_ALLOW_DCACHE_WR == 0).
 *
 * Clobbers: r5 (current line address), r6 (end address), r18 (compare temp).
 * Does not touch r1 (callers rely on this — see microblaze_disable_dcache).
 */
	.text
	.globl	microblaze_flush_dcache
	.ent	microblaze_flush_dcache
	.align	2
microblaze_flush_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
	ADDIK	r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN))   /* Align start to cache line */
	ADDIK	r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN))  /* Compute end address */
L_start:
	wdc.flush	r5, r0                  /* Flush (write back) the cache line at r5 */
	CMPU	r18, r5, r6                     /* Are we at the end? */
	BLEI	r18, L_done
#if defined (__arch64__ )
	addlik	r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)   /* Advance one cache line (LINE_LEN words * 4 bytes) */
	BRI	L_start
#else
	brid	L_start                         /* Branch to the beginning of the loop */
	addik	r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)   /* Advance one cache line (delay slot) */
#endif
L_done:
#endif
	rtsd	r15, 8                          /* Return */
	nop
	.end	microblaze_flush_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,676 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_disable_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* File : microblaze_disable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable the L1 dcache on the microblaze.
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
/*
 * microblaze_disable_dcache() - clear MSR[DCE] (bit 0x80) to turn off the
 * L1 data cache. For a write-back cache the contents are flushed first via
 * microblaze_flush_dcache, so dirty lines are not lost.
 *
 * Two variants, selected at build time:
 *  - MSR-instruction variant: msrclr clears the bit in the delay slot of
 *    the return, so the cache is disabled exactly as we leave.
 *  - Fallback variant: classic mfs/andi/mts read-modify-write of rmsr.
 * r15 (the return address) is spilled to the stack around the flush call
 * because BRLID overwrites it; microblaze_flush_dcache itself leaves r1 alone.
 */
	.text
	.globl	microblaze_disable_dcache
	.ent	microblaze_disable_dcache
	.align	2

microblaze_disable_dcache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
        ADDIK	r1, r1, -8
        SI	r15, r1, 0                              /* Save return address across the call */
	BRLID	r15, microblaze_flush_dcache            /* microblaze_flush_dcache does not use r1*/
	nop
        LI	r15, r1, 0
        ADDIK	r1, r1, 8
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
	rtsd	r15, 8
	msrclr	r0, 0x80                                /* Clear MSR[DCE] in the delay slot */
#else /* XPAR_MICROBLAZE_USE_MSR_INSTR == 1 */
	ADDIK	r1, r1, -8
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
        SI	r15, r1, 0                              /* Save return address across the call */
	BRLID	r15, microblaze_flush_dcache
	nop
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
	mfs	r11, rmsr                               /* Read-modify-write MSR: clear DCE */
	andi	r11, r11, ~(0x80)
	mts	rmsr, r11
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
        LI	r15, r1, 0
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
        ADDIK	r1, r1, 8
	rtsd	r15, 8
	nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
	.end	microblaze_disable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,555 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_update_icache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_update_icache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update icache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
/*
 * microblaze_update_icache(r5 = cache tag line, r6 = cache data,
 *                          r7 = lock/valid bits, see table in file header)
 * Writes one line into the I-cache with the given tag and lock/valid info.
 * Only implemented for a 1-word cache line; for longer lines the only legal
 * use is invalidation, which is tail-called to microblaze_init_icache_range.
 */
	.text
	.globl	microblaze_update_icache
	.ent	microblaze_update_icache
	.align	2

microblaze_update_icache:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#if XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1
        /* Read the MSR register into a temp register */
        mfs     r18, rmsr

        /* Clear the icache enable bit (MSR bit 0x20) to disable the cache
           Register r10,r18 are volatile registers and hence do not need to be saved before use */
        andi    r10, r18, ~32
        mts     rmsr, r10

        /* Update the lock and valid info: clear the low 2 tag bits, OR in r7 */
        andi    r5, r5, 0xfffffffc
        or      r5, r5, r7

        /* Update icache */
        wic     r5, r6

        /* Return; the saved MSR (icache re-enabled) is restored in the delay slot */
        rtsd    r15, 8
        mts     rmsr, r18
#else
        /* The only valid usage of this routine for larger cache line lengths is to invalidate an instruction cache line
           So call microblaze_init_icache_range appropriately to do the job */
        brid    microblaze_init_icache_range
        addik   r6, r0, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)   /* r6 = one cache line worth of bytes (delay slot) */

        /* We don't have a return instruction here. This is tail call optimization :) */
#endif  /* XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1 */
#else
        rtsd    r15, 8
        nop
#endif
	.end	microblaze_update_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 26,672 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/hw_exception_handler.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* Microblaze HW Exception Handler
* - Non self-modifying exception handler for the following exception conditions
* - Unalignment
* - Instruction bus error
* - Data bus error
* - Illegal instruction opcode
* - Divide-by-zero
* - Stack protection violation
*******************************************************************************/
#include "microblaze_exceptions_g.h"
#include "xparameters.h"
#include "microblaze_instructions.h"
/* 64-bit definitions */
#if defined (__arch64__)
#define INTPTR_DATAITEM .quad
#define REGSIZE 8
#define DATAALIGN 4
#else
#define INTPTR_DATAITEM .long
#define REGSIZE 4
#define DATAALIGN 2
#endif /* 64-bit definitions */
/* Helpful Macros */
#define EX_HANDLER_STACK_SIZ (REGSIZE*21)
#define RMSR_OFFSET (20 * REGSIZE)
#define R17_OFFSET (0)
#define REG_OFFSET(regnum) (REGSIZE * (regnum + 1))
#define NUM_TO_REG(num) r ## num
#define R3_TO_STACK(regnum) SI r3, r1, REG_OFFSET(regnum)
#define R3_FROM_STACK(regnum) LI r3, r1, REG_OFFSET(regnum)
#define PUSH_REG(regnum) SI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
#define POP_REG(regnum) LI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
/* Uses r5 */
#define PUSH_MSR \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET;
#define PUSH_MSR_AND_ENABLE_EXC \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET; \
ori r5, r5, 0x100; /* Turn ON the EE bit*/ \
mts rmsr, r5;
/* Uses r5 */
#define POP_MSR \
lwi r5, r1, RMSR_OFFSET; \
mts rmsr, r5;
/* Push r17 */
#define PUSH_R17 SI r17, r1, R17_OFFSET
/* Pop r17 */
#define POP_R17 LI r17, r1, R17_OFFSET
#define LWREG_NOP \
BRI ex_handler_unhandled; \
nop;
#define SWREG_NOP \
BRI ex_handler_unhandled; \
nop;
/* r3 is the source */
#define R3_TO_LWREG_V(regnum) \
R3_TO_STACK (regnum); \
BRI ex_handler_done;
/* r3 is the source */
#define R3_TO_LWREG(regnum) \
OR NUM_TO_REG (regnum), r0, r3; \
BRI ex_handler_done;
/* r3 is the target */
#define SWREG_TO_R3_V(regnum) \
R3_FROM_STACK (regnum); \
BRI ex_sw_tail;
/* r3 is the target */
#define SWREG_TO_R3(regnum) \
OR r3, r0, NUM_TO_REG (regnum); \
BRI ex_sw_tail;
/* regnum is the source */
#define FP_EX_OPB_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_b; \
nop; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPB_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_b; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPA_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_a; \
nop; \
BRI handle_fp_ex_done;
/* regnum is the source */
#define FP_EX_OPA_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_a; \
BRI handle_fp_ex_done;
#define FP_EX_UNHANDLED \
BRI fp_ex_unhandled; \
nop; \
nop;
/* ESR masks */
#define ESR_EXC_MASK 0x0000001F
#define ESR_REG_MASK 0x000003E0
#define ESR_LW_SW_MASK 0x00000400
#define ESR_WORD_MASK 0x00000800
#define ESR_DS_MASK 0x00001000
#define ESR_LONG_MASK 0x00002000
/* Extern declarations */
.extern XNullHandler
#ifdef MICROBLAZE_EXCEPTIONS_ENABLED /* If exceptions are enabled in the processor */
/*
* hw_exception_handler - Handler for unaligned exceptions
* Exception handler notes:
* - Does not handle exceptions other than unaligned exceptions
* - Does not handle exceptions during load into r17, r1, r0.
* - Does not handle exceptions during store from r17 (cannot be done) and r1 (slows down common case)
*
* Relevant register structures
*
* EAR - |----|----|----|----|----|----|----|----|
* - < ## 32 or 64 bit faulting address ## >
*
* ESR - |----|----|----|----|----| - | - |-----|-----|
* - W S REG EXC
*
*
* STACK FRAME STRUCTURE
* ---------------------
*
* +-------------+ + 0
* | r17 |
* +-------------+ + 4 (32-bit) + 8 (64-bit)
* | Args for |
* | next func |
* +-------------+ + 8 (32-bit) + 16 (64-bit)
* | r1 |
* | . |
* | . |
* | . |
* | . |
* | r18 |
* +-------------+ + 80 (32-bit) + 160 (64-bit)
* | MSR |
* +-------------+ + 84 (32-bit) + 168 (64-bit)
* | . |
* | . |
*/
/*
 * _hw_exception_handler - entry point vectored to on any HW exception.
 * Decodes ESR[EXC] and dispatches:
 *  - stack-protection violation: halt immediately (no stack usage);
 *  - unaligned access (EXC==1) with no user handler registered: emulate
 *    the load/store byte-by-byte via the lw_table/sw_table snippets;
 *  - FPU exception (EXC==6) with decode enabled: capture operand values
 *    into mb_fpex_op_a/b via fp_table_opa/opb, then dispatch normally;
 *  - everything else: call the (handler, arg) pair registered in
 *    MB_ExceptionVectorTable (MMU exceptions 7..12 collapse to ordinal 7,
 *    codes >= 20 are unhandled and trap).
 * Saves r3-r6, r17 and MSR on a private stack frame; volatile regs
 * r7-r12, r15, r18 are additionally saved around the C handler call.
 */
	.global	_hw_exception_handler
	.section .text
	.align 2
	.ent	_hw_exception_handler
	.type	 _hw_exception_handler, @function
_hw_exception_handler:
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
	/* Immediately halt for stack protection violation exception without using any stack */
	SI	r3, r0, mb_sp_save_r3;          /* Save temporary register */
	mfs	r3, resr;                       /* Read ESR and extract ESR[EXC] */
	andi	r3, r3, ESR_EXC_MASK;
	xori	r3, r3, 0x7;                    /* Check for stack protection violation (EXC == 7) */
	BNEI	r3, ex_handler_not_sp_violation;
ex_handler_sp_violation:
	bri	0;                              /* Halt here if stack protection violation */
ex_handler_not_sp_violation:
	LI	r3, r0, mb_sp_save_r3;          /* Restore temporary register */
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
	ADDIK	r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
	PUSH_REG(3);
	PUSH_REG(4);
	PUSH_REG(5);
	PUSH_REG(6);
#ifdef MICROBLAZE_CAN_HANDLE_EXCEPTIONS_IN_DELAY_SLOTS
	/* If the exception hit a delay-slot instruction, the resume address is
	   in BTR, not r17 — pick it up so rted returns to the right place */
	mfs	r6, resr;
	andi	r6, r6, ESR_DS_MASK;
	BEQI	r6, ex_handler_no_ds;
	mfs	r17, rbtr;
ex_handler_no_ds:
#endif
	PUSH_R17;
	PUSH_MSR_AND_ENABLE_EXC;                /* Exceptions enabled here. This will allow nested exceptions */
	mfs	r3, resr;
	andi	r5, r3, ESR_EXC_MASK;           /* Extract ESR[EXC] */
#ifndef NO_UNALIGNED_EXCEPTIONS
	xori	r6, r5, 1;                      /* 00001 = Unaligned Exception */
	BNEI	r6, handle_ex_regular;
	ADDIK	r4, r0, MB_ExceptionVectorTable; /* Check if user has registered an unaligned exception handler */
#if defined (__arch64__)
	LI	r4, r4, 16;
#else
	LI	r4, r4, 8;
#endif
	ADDIK	r6, r0, XNullHandler;           /* If exceptionvectortable entry is still XNullHandler, use */
	XOR	r6, r4, r6;                     /* the default exception handler */
	BEQI	r6, handle_unaligned_ex ;
handle_ex_regular:
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
	xori	r6, r5, 6;                      /* 00110 = FPU exception */
	BEQI	r6, handle_fp_ex;               /* Go and decode the FP exception */
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
handle_other_ex:                                /* Handle Other exceptions here */
	ori	r6, r0, 20;
	cmp	r6, r5, r6;                     /* >= 20 are exceptions we do not handle. */
	BLEI	r6, ex_handler_unhandled;

	ori	r6, r0, 7;
	cmp	r6, r5, r6;                     /* Convert MMU exception indices into an ordinal of 7 */
	BGTI	r6, handle_other_ex_tail;
	ori	r5, r0, 0x7;

handle_other_ex_tail:
	PUSH_REG(7);                            /* Save other volatiles before we make procedure calls below */
	PUSH_REG(8);
	PUSH_REG(9);
	PUSH_REG(10);
	PUSH_REG(11);
	PUSH_REG(12);
	PUSH_REG(15);
	PUSH_REG(18);

	ADDIK	r4, r0, MB_ExceptionVectorTable; /* Load the Exception vector table base address */
	ADDK	r7, r5, r5;                     /* Calculate exception vector offset = r5 * 8 (32-bit) */
	ADDK	r7, r7, r7;
	ADDK	r7, r7, r7;
#if defined (__arch64__)
	ADDK	r7, r7, r7;                     /* or r5 * 16 (64-bit) */
#endif
	ADDK	r7, r7, r4;                     /* Get pointer to exception vector */
	LI	r5, r7, REGSIZE;                /* Load argument to exception handler from table */
	LOAD	r7, r7, r0;                     /* Load vector itself here */

	brald	r15, r7;                        /* Branch to handler */
	nop;

	POP_REG(7);                             /* Restore other volatiles */
	POP_REG(8);
	POP_REG(9);
	POP_REG(10);
	POP_REG(11);
	POP_REG(12);
	POP_REG(15);
	POP_REG(18);

	BRI	ex_handler_done;                /* Complete exception handling */
#ifndef NO_UNALIGNED_EXCEPTIONS
handle_unaligned_ex:
	/* ESR[REG] (bits 5..9) = destination/source register of the faulting op */
	andi	r6, r3, ESR_REG_MASK;           /* Mask and extract the register operand */
	srl	r6, r6;                         /* r6 >> 5 */
	srl	r6, r6;
	srl	r6, r6;
	srl	r6, r6;
	srl	r6, r6;
	sbi	r6, r0, ex_reg_op;              /* Store the register operand in a temporary location */
	mfs	r4, rear;
	andi	r6, r3, ESR_LW_SW_MASK;         /* Extract ESR[S] */
	BNEI	r6, ex_sw;
#if defined (__arch64__)
ex_ll:
	andi	r6, r3, ESR_LONG_MASK;          /* Extract ESR[L] */
	BEQI	r6, ex_lw;
	lbui	r5, r4, 0;                      /* Exception address in r4 */
	sbi	r5, r0, ex_tmp_data_loc_0;      /* Load a long, byte-by-byte from destination address and save it in tmp space */
	lbui	r5, r4, 1;
	sbi	r5, r0, ex_tmp_data_loc_1;
	lbui	r5, r4, 2;
	sbi	r5, r0, ex_tmp_data_loc_2;
	lbui	r5, r4, 3;
	sbi	r5, r0, ex_tmp_data_loc_3;
	lbui	r5, r4, 4;
	sbi	r5, r0, ex_tmp_data_loc_4;
	lbui	r5, r4, 5;
	sbi	r5, r0, ex_tmp_data_loc_5;
	lbui	r5, r4, 6;
	sbi	r5, r0, ex_tmp_data_loc_6;
	lbui	r5, r4, 7;
	sbi	r5, r0, ex_tmp_data_loc_7;
	lli	r3, r0, ex_tmp_data_loc_0;      /* Get the destination register value into r3 */
	BRI	ex_lw_tail;
#endif
ex_lw:
	andi	r6, r3, ESR_WORD_MASK;          /* Extract ESR[W] */
	BEQI	r6, ex_lhw;
	lbui	r5, r4, 0;                      /* Exception address in r4 */
	sbi	r5, r0, ex_tmp_data_loc_0;      /* Load a word, byte-by-byte from destination address and save it in tmp space */
	lbui	r5, r4, 1;
	sbi	r5, r0, ex_tmp_data_loc_1;
	lbui	r5, r4, 2;
	sbi	r5, r0, ex_tmp_data_loc_2;
	lbui	r5, r4, 3;
	sbi	r5, r0, ex_tmp_data_loc_3;
	lwi	r3, r0, ex_tmp_data_loc_0;      /* Get the destination register value into r3 */
	BRI	ex_lw_tail;
ex_lhw:
	lbui	r5, r4, 0;                      /* Exception address in r4 */
	sbi	r5, r0, ex_tmp_data_loc_0;      /* Load a half-word, byte-by-byte from destination address and save it in tmp space */
	lbui	r5, r4, 1;
	sbi	r5, r0, ex_tmp_data_loc_1;
	lhui	r3, r0, ex_tmp_data_loc_0;      /* Get the destination register value into r3 */
ex_lw_tail:
	/* Jump into the per-register snippet that moves r3 into the real target */
	lbui	r5, r0, ex_reg_op;              /* Get the destination register number into r5 */
	ADDIK	r6, r0, lw_table;               /* Form load_word jump table offset (lw_table + (8 * regnum)) */
	ADDK	r5, r5, r5;
	ADDK	r5, r5, r5;
	ADDK	r5, r5, r5;
	ADDK	r5, r5, r6;
	bra	r5;
ex_lw_end:                                      /* Exception handling of load word, ends */
ex_sw:
	/* Jump into the per-register snippet that copies the source reg into r3 */
	lbui	r5, r0, ex_reg_op;              /* Get the destination register number into r5 */
	ADDIK	r6, r0, sw_table;               /* Form store_word jump table offset (sw_table + (8 * regnum)) */
	ADDK	r5, r5, r5;
	ADDK	r5, r5, r5;
	ADDK	r5, r5, r5;
	ADDK	r5, r5, r6;
	bra	r5;
ex_sw_tail:
#if defined (__arch64__)
ex_sl:
	mfs	r6, resr;
	andi	r6, r6, ESR_LONG_MASK;          /* Extract ESR[L] */
	BEQI	r6, ex_not_sl;
	sli	r3, r0, ex_tmp_data_loc_0;
	lbui	r3, r0, ex_tmp_data_loc_0;      /* Store the long, byte-by-byte into destination address */
	sbi	r3, r4, 0;
	lbui	r3, r0, ex_tmp_data_loc_1;
	sbi	r3, r4, 1;
	lbui	r3, r0, ex_tmp_data_loc_2;
	sbi	r3, r4, 2;
	lbui	r3, r0, ex_tmp_data_loc_3;
	sbi	r3, r4, 3;
	lbui	r3, r0, ex_tmp_data_loc_4;
	sbi	r3, r4, 4;
	lbui	r3, r0, ex_tmp_data_loc_5;
	sbi	r3, r4, 5;
	lbui	r3, r0, ex_tmp_data_loc_6;
	sbi	r3, r4, 6;
	lbui	r3, r0, ex_tmp_data_loc_7;
	sbi	r3, r4, 7;
	BRI	ex_handler_done;
ex_not_sl:
#endif
	mfs	r6, resr;
	andi	r6, r6, ESR_WORD_MASK;          /* Extract ESR[W] */
	BEQI	r6, ex_shw;
	swi	r3, r0, ex_tmp_data_loc_0;
	lbui	r3, r0, ex_tmp_data_loc_0;      /* Store the word, byte-by-byte into destination address */
	sbi	r3, r4, 0;
	lbui	r3, r0, ex_tmp_data_loc_1;
	sbi	r3, r4, 1;
	lbui	r3, r0, ex_tmp_data_loc_2;
	sbi	r3, r4, 2;
	lbui	r3, r0, ex_tmp_data_loc_3;
	sbi	r3, r4, 3;
	BRI	ex_handler_done;
ex_shw:
	swi	r3, r0, ex_tmp_data_loc_0;      /* Store the lower half-word, byte-by-byte into destination address */
#ifdef __LITTLE_ENDIAN__
	lbui	r3, r0, ex_tmp_data_loc_0;
#else
	lbui	r3, r0, ex_tmp_data_loc_2;
#endif
	sbi	r3, r4, 0;
#ifdef __LITTLE_ENDIAN__
	lbui	r3, r0, ex_tmp_data_loc_1;
#else
	lbui	r3, r0, ex_tmp_data_loc_3;
#endif
	sbi	r3, r4, 1;
ex_sw_end:                                      /* Exception handling of store word, ends. */
	BRI	ex_handler_done;
#endif /* !NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
handle_fp_ex:
	ADDIK	r3, r17, -4;                    /* r17 contains (addr of exception causing FP instruction + 4) */
	lw	r4, r0, r3;                     /* We might find ourselves in a spot here. Unguaranteed load */
handle_fp_ex_opb:
	/* opB register number is in instruction bits 11..15; shift down by 11 */
	ADDIK	r6, r0, fp_table_opb;           /* Decode opB and store its value in mb_fpex_op_b */
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	andi	r3, r4, 0x1F;
	ADDK	r3, r3, r3;                     /* Calculate (fp_table_opb + (regno * 12)) in r5 */
	ADDK	r3, r3, r3;
	ADDK	r5, r3, r3;
	ADDK	r5, r5, r3;
	ADDK	r5, r5, r6;
	bra	r5;
handle_fp_ex_opa:
	/* opA register number is 5 more bits up; r4 already shifted by 11 */
	ADDIK	r6, r0, fp_table_opa;           /* Decode opA and store its value in mb_fpex_op_a */
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	srl	r4, r4;
	andi	r3, r4, 0x1F;
	ADDK	r3, r3, r3;                     /* Calculate (fp_table_opa + (regno * 12)) in r5 */
	ADDK	r3, r3, r3;
	ADDK	r5, r3, r3;
	ADDK	r5, r5, r3;
	ADDK	r5, r5, r6;
	bra	r5;
handle_fp_ex_done:
	ori	r5, r0, 6;                      /* Set exception number back to 6 */
	BRI	handle_other_ex_tail;
fp_ex_unhandled:
	bri	0;
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
ex_handler_done:
	/* Unwind: restore r17, MSR and saved regs, then return via rted */
	POP_R17;
	POP_MSR;
	POP_REG(3);
	POP_REG(4);
	POP_REG(5);
	POP_REG(6);

	ADDIK	r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
	rted	r17, 0
	nop
ex_handler_unhandled:
	bri	0                               /* UNHANDLED. TRAP HERE */
	.end	_hw_exception_handler
#ifndef NO_UNALIGNED_EXCEPTIONS
/*
* hw_exception_handler Jump Table
* - Contains code snippets for each register that caused the unaligned exception.
* - Hence exception handler is NOT self-modifying
* - Separate table for load exceptions and store exceptions.
* - Each table is of size: (8 * 32) = 256 bytes
*/
	.section .text
	.align	4
/* Load table: each 8-byte snippet moves emulated load result r3 into the
   real destination register (or its stack slot for r3-r6, which are live
   on the handler's frame). r1 and r17 loads are unsupported and trap. */
lw_table:
lw_r0:		R3_TO_LWREG	(0);
lw_r1:		LWREG_NOP;
lw_r2:		R3_TO_LWREG	(2);
lw_r3:		R3_TO_LWREG_V	(3);
lw_r4:		R3_TO_LWREG_V	(4);
lw_r5:		R3_TO_LWREG_V	(5);
lw_r6:		R3_TO_LWREG_V	(6);
lw_r7:		R3_TO_LWREG	(7);
lw_r8:		R3_TO_LWREG	(8);
lw_r9:		R3_TO_LWREG	(9);
lw_r10:		R3_TO_LWREG	(10);
lw_r11:		R3_TO_LWREG	(11);
lw_r12:		R3_TO_LWREG	(12);
lw_r13:		R3_TO_LWREG	(13);
lw_r14:		R3_TO_LWREG	(14);
lw_r15:		R3_TO_LWREG	(15);
lw_r16:		R3_TO_LWREG	(16);
lw_r17:		LWREG_NOP;
lw_r18:		R3_TO_LWREG	(18);
lw_r19:		R3_TO_LWREG	(19);
lw_r20:		R3_TO_LWREG	(20);
lw_r21:		R3_TO_LWREG	(21);
lw_r22:		R3_TO_LWREG	(22);
lw_r23:		R3_TO_LWREG	(23);
lw_r24:		R3_TO_LWREG	(24);
lw_r25:		R3_TO_LWREG	(25);
lw_r26:		R3_TO_LWREG	(26);
lw_r27:		R3_TO_LWREG	(27);
lw_r28:		R3_TO_LWREG	(28);
lw_r29:		R3_TO_LWREG	(29);
lw_r30:		R3_TO_LWREG	(30);
lw_r31:		R3_TO_LWREG	(31);

/* Store table: each snippet copies the source register (or its saved stack
   copy for r3-r6) into r3 and jumps to ex_sw_tail to emit it byte-by-byte. */
sw_table:
sw_r0:		SWREG_TO_R3	(0);
sw_r1:		SWREG_NOP;
sw_r2:		SWREG_TO_R3	(2);
sw_r3:		SWREG_TO_R3_V	(3);
sw_r4:		SWREG_TO_R3_V	(4);
sw_r5:		SWREG_TO_R3_V	(5);
sw_r6:		SWREG_TO_R3_V	(6);
sw_r7:		SWREG_TO_R3	(7);
sw_r8:		SWREG_TO_R3	(8);
sw_r9:		SWREG_TO_R3	(9);
sw_r10:		SWREG_TO_R3	(10);
sw_r11:		SWREG_TO_R3	(11);
sw_r12:		SWREG_TO_R3	(12);
sw_r13:		SWREG_TO_R3	(13);
sw_r14:		SWREG_TO_R3	(14);
sw_r15:		SWREG_TO_R3	(15);
sw_r16:		SWREG_TO_R3	(16);
sw_r17:		SWREG_NOP;
sw_r18:		SWREG_TO_R3	(18);
sw_r19:		SWREG_TO_R3	(19);
sw_r20:		SWREG_TO_R3	(20);
sw_r21:		SWREG_TO_R3	(21);
sw_r22:		SWREG_TO_R3	(22);
sw_r23:		SWREG_TO_R3	(23);
sw_r24:		SWREG_TO_R3	(24);
sw_r25:		SWREG_TO_R3	(25);
sw_r26:		SWREG_TO_R3	(26);
sw_r27:		SWREG_TO_R3	(27);
sw_r28:		SWREG_TO_R3	(28);
sw_r29:		SWREG_TO_R3	(29);
sw_r30:		SWREG_TO_R3	(30);
sw_r31:		SWREG_TO_R3	(31);
/* Temporary data structures used in the handler */
.section .data
.align DATAALIGN
ex_tmp_data_loc_0:
.byte 0
ex_tmp_data_loc_1:
.byte 0
ex_tmp_data_loc_2:
.byte 0
ex_tmp_data_loc_3:
.byte 0
#if defined (__arch64__)
ex_tmp_data_loc_4:
.byte 0
ex_tmp_data_loc_5:
.byte 0
ex_tmp_data_loc_6:
.byte 0
ex_tmp_data_loc_7:
.byte 0
#endif
ex_reg_op:
.byte 0
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
/*
* FP exception decode jump table.
* - Contains code snippets for each register that could have been a source operand for an excepting FP instruction
* - Hence exception handler is NOT self-modifying
* - Separate table for opA and opB
* - Each table is of size: (12 * 32) = 384 bytes
*/
	.section .text
	.align	4
/* FP-operand capture tables: one 12-byte snippet per possible source
   register of the excepting FP instruction. opA snippets save the value to
   mb_fpex_op_a; opB snippets save to mb_fpex_op_b and fall through to the
   opA decode. Registers that can never be FP operands trap (FP_EX_UNHANDLED). */
fp_table_opa:
opa_r0:		FP_EX_OPA_SAVE (0);
opa_r1:		FP_EX_UNHANDLED;
opa_r2:		FP_EX_OPA_SAVE (2);
opa_r3:		FP_EX_OPA_SAVE_V (3);
opa_r4:		FP_EX_OPA_SAVE_V (4);
opa_r5:		FP_EX_OPA_SAVE_V (5);
opa_r6:		FP_EX_OPA_SAVE_V (6);
opa_r7:		FP_EX_OPA_SAVE (7);
opa_r8:		FP_EX_OPA_SAVE (8);
opa_r9:		FP_EX_OPA_SAVE (9);
opa_r10:	FP_EX_OPA_SAVE (10);
opa_r11:	FP_EX_OPA_SAVE (11);
opa_r12:	FP_EX_OPA_SAVE (12);
opa_r13:	FP_EX_OPA_SAVE (13);
opa_r14:	FP_EX_UNHANDLED;
opa_r15:	FP_EX_UNHANDLED;
opa_r16:	FP_EX_UNHANDLED;
opa_r17:	FP_EX_UNHANDLED;
opa_r18:	FP_EX_OPA_SAVE (18);
opa_r19:	FP_EX_OPA_SAVE (19);
opa_r20:	FP_EX_OPA_SAVE (20);
opa_r21:	FP_EX_OPA_SAVE (21);
opa_r22:	FP_EX_OPA_SAVE (22);
opa_r23:	FP_EX_OPA_SAVE (23);
opa_r24:	FP_EX_OPA_SAVE (24);
opa_r25:	FP_EX_OPA_SAVE (25);
opa_r26:	FP_EX_OPA_SAVE (26);
opa_r27:	FP_EX_OPA_SAVE (27);
opa_r28:	FP_EX_OPA_SAVE (28);
opa_r29:	FP_EX_OPA_SAVE (29);
opa_r30:	FP_EX_OPA_SAVE (30);
opa_r31:	FP_EX_OPA_SAVE (31);

fp_table_opb:
opb_r0:		FP_EX_OPB_SAVE (0);
opb_r1:		FP_EX_UNHANDLED;
opb_r2:		FP_EX_OPB_SAVE (2);
opb_r3:		FP_EX_OPB_SAVE_V (3);
opb_r4:		FP_EX_OPB_SAVE_V (4);
opb_r5:		FP_EX_OPB_SAVE_V (5);
opb_r6:		FP_EX_OPB_SAVE_V (6);
opb_r7:		FP_EX_OPB_SAVE (7);
opb_r8:		FP_EX_OPB_SAVE (8);
opb_r9:		FP_EX_OPB_SAVE (9);
opb_r10:	FP_EX_OPB_SAVE (10);
opb_r11:	FP_EX_OPB_SAVE (11);
opb_r12:	FP_EX_OPB_SAVE (12);
opb_r13:	FP_EX_OPB_SAVE (13);
opb_r14:	FP_EX_UNHANDLED;
opb_r15:	FP_EX_UNHANDLED;
opb_r16:	FP_EX_UNHANDLED;
opb_r17:	FP_EX_UNHANDLED;
opb_r18:	FP_EX_OPB_SAVE (18);
opb_r19:	FP_EX_OPB_SAVE (19);
opb_r20:	FP_EX_OPB_SAVE (20);
opb_r21:	FP_EX_OPB_SAVE (21);
opb_r22:	FP_EX_OPB_SAVE (22);
opb_r23:	FP_EX_OPB_SAVE (23);
opb_r24:	FP_EX_OPB_SAVE (24);
opb_r25:	FP_EX_OPB_SAVE (25);
opb_r26:	FP_EX_OPB_SAVE (26);
opb_r27:	FP_EX_OPB_SAVE (27);
opb_r28:	FP_EX_OPB_SAVE (28);
opb_r29:	FP_EX_OPB_SAVE (29);
opb_r30:	FP_EX_OPB_SAVE (30);
opb_r31:	FP_EX_OPB_SAVE (31);
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(MICROBLAZE_FP_EXCEPTION_ENABLED) && defined(MICROBLAZE_FP_EXCEPTION_DECODE)
/* This is where we store the opA and opB of the last excepting FP instruction.
   Written by the fp_table_opa/opb snippets; readable by C exception handlers. */
	.section .data
	.align DATAALIGN
	.global mb_fpex_op_a
	.global mb_fpex_op_b
mb_fpex_op_a:
	INTPTR_DATAITEM	0
mb_fpex_op_b:
	INTPTR_DATAITEM	0
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* Scratch slot used to preserve r3 while the handler checks for a
   stack-protection violation before any stack frame exists. */
	.section .data
	.align DATAALIGN
mb_sp_save_r3:
	INTPTR_DATAITEM	0
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
/* The exception vector table: one (handler pointer, callback argument) pair
   per exception ordinal. Entries default to XNullHandler with the exception
   code as the argument; applications overwrite them at registration time. */
	.section .data
	.align DATAALIGN
	.global MB_ExceptionVectorTable
MB_ExceptionVectorTable:
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	0                       /* -- FSL Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	1                       /* -- Unaligned Access Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	2                       /* -- Illegal Opcode Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	3                       /* -- Instruction Bus Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	4                       /* -- Data Bus Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	5                       /* -- Div-by-0 Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	6                       /* -- FPU Exception -- */
	INTPTR_DATAITEM	XNullHandler
	INTPTR_DATAITEM	7                       /* -- MMU Exceptions -- */
#else /* Dummy exception handler, in case exceptions are not present in the processor */
	.global	_hw_exception_handler
	.section .text
	.align 2
	.ent	_hw_exception_handler
_hw_exception_handler:
	bri	0;                              /* Spin forever: exceptions should never occur on this config */
	.end	_hw_exception_handler
#endif  /* MICROBLAZE_EXCEPTIONS_ENABLED */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,879 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_icache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache_range(unsigned int cacheaddr, unsigned int len)
*
* Invalidate an ICache range
*
* Parameters:
* 'cacheaddr' - address in the Icache where invalidation begins
* 'len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
/*
 * microblaze_invalidate_icache_range(r5 = cacheaddr, r6 = len)
 * Invalidate every I-cache line overlapping [cacheaddr, cacheaddr+len).
 * On cores older than v7.20 the I-cache and interrupts are disabled
 * around the wic loop and the original MSR is restored in the return
 * delay slot.
 *
 * Clobbers: r5, r6, r18 (and r9, r10 on pre-v7.20 cores).
 */
	.text
	.globl	microblaze_invalidate_icache_range
	.ent	microblaze_invalidate_icache_range
	.align	2
microblaze_invalidate_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720                       /* Disable Icache and interrupts before invalidating */
	mfs	r9, rmsr
	andi	r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
	mts	rmsr, r10
#endif
	BEQI	r6, L_done                      /* Skip loop if size is zero */
	ADD	r6, r5, r6                      /* Compute end address */
	ADDIK	r6, r6, -1
	ANDI	r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)  /* Align end down to cache line */
	ANDI	r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)  /* Align start down to cache line */
L_start:
	CMPU	r18, r5, r6                     /* Are we at the end? */
	BLTI	r18, L_done
	wic	r5, r0                          /* Invalidate the cache line */
#if defined (__arch64__ )
	addlik	r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)   /* Advance one cache line (LINE_LEN words * 4 bytes) */
	breai	L_start                         /* Branch to the beginning of the loop */
#else
	brid	L_start                         /* Branch to the beginning of the loop */
	addik	r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)   /* Advance one cache line (delay slot) */
#endif
L_done:
	rtsd	r15, 8                          /* Return */
#ifdef MB_VERSION_LT_v720                       /* restore MSR only for MB version < v7.20 */
	mts	rmsr, r9
#else
	nop
#endif
#else
	rtsd	r15, 8                          /* Return */
	nop
#endif
	.end	microblaze_invalidate_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,482 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
 * Flush an L2 cache range
 *
 * Parameters:
 *	'cacheaddr' - address in the L2 cache where the flush begins
 *	'len'       - length (in bytes) worth of L2 cache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
/*
 * microblaze_flush_cache_ext_range(r5 = cacheaddr, r6 = len)
 * Flush the L2 (external) cache lines overlapping [cacheaddr, cacheaddr+len).
 * Only compiled in on ACE-interconnect (INTERCONNECT==3) configs with a
 * D-cache; otherwise it is a bare return.
 *
 * The loop counts r6 down as the byte offset of the current line relative
 * to the aligned start address r5, so wdc.ext.flush sees (base, offset).
 */
	.text
	.globl	microblaze_flush_cache_ext_range
	.ent	microblaze_flush_cache_ext_range
	.align	2

microblaze_flush_cache_ext_range:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
	beqi	r6, Loop_done                   /* Nothing to do for zero length */
	ADDIK	r6, r6, -1
	ADD	r6, r5, r6                      /* r6 = last byte address in range */
	ANDI	r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)       /* Align end down to an ext cache line */
	ANDI	r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)       /* Align start down to an ext cache line */
	RSUBK	r6, r5, r6                      /* r6 = offset of last line relative to r5 */
Loop_start:
	wdc.ext.flush	r5, r6                  /* Flush the L2 line at r5 + r6 */
#if defined (__arch64__ )
	addlik	r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
	beagei	r6, Loop_start
#else
	bneid	r6, Loop_start                  /* Loop until offset reaches zero */
	addik	r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)       /* Decrement offset (delay slot) */
#endif
Loop_done:
#endif
	rtsd	r15, 8
	nop
	.end	microblaze_flush_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,396 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext()
*
* Flush the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
/*
 * microblaze_flush_cache_ext() - flush the entire L2 (external) cache by
 * issuing wdc.ext.flush over the whole D-cacheable region, counting the
 * line offset r6 down from the top of the region to zero.
 * Compiled to a bare return unless INTERCONNECT==3 (ACE) with a D-cache.
 */
	.text
	.globl	microblaze_flush_cache_ext
	.ent	microblaze_flush_cache_ext
	.align	2

microblaze_flush_cache_ext:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
	ADDIK	r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN))      /* r5 = line-aligned cache base */
	ADDIK	r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)  /* r6 = offset of last ext line */
	ANDI	r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
Loop_start:
	wdc.ext.flush	r5, r6                  /* Flush the L2 line at r5 + r6 */
#if defined (__arch64__)
	addlik	r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
	beagei	r6, Loop_start
#else
	bgtid	r6,Loop_start                   /* Loop while offset > 0 */
	addik	r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)        /* Decrement offset (delay slot) */
#endif
#endif
	rtsd	r15, 8
	nop
	.end	microblaze_flush_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,365 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_dcache()
*
* Invalidate the entire L1 DCache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_dcache
.ent microblaze_invalidate_dcache
.align 2
/*
 * void microblaze_invalidate_dcache(void)
 *
 * Invalidate the entire L1 data cache with wdc, walking from the cache
 * base address to base + cache size, one cache line per iteration.
 * For cores older than v7.20 (no write-back support macro), the data cache
 * and interrupts are disabled around the loop and the MSR is restored on
 * return. Compiles to a bare return if the dcache is absent or not writable.
 *
 * In:      none
 * Out:     none
 * Clobbers: r5, r6, r18 (and r9, r10 on pre-v7.20 cores)
 */
microblaze_invalidate_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR, restored in the epilogue */
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* r5 = line-aligned cache base */
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (line length * 4 bytes) */
breai L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9 /* restore saved MSR (delay slot) */
#else
nop /* delay slot */
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,070 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_enable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_icache
.ent microblaze_enable_icache
.align 2
/*
 * void microblaze_enable_icache(void)
 *
 * Enable the L1 instruction cache by setting MSR bit 0x20 (ICE).
 * Uses the single msrset instruction when the core supports it
 * (executed in the rtsd delay slot); otherwise falls back to a
 * read-modify-write of the MSR.
 *
 * Clobbers: r8 (fallback path only)
 */
microblaze_enable_icache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x20 /* set ICache-enable bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the instruction cache enable bit (0x20)
ori r8, r8, 0x20
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,519 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Invalidate an L2 cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_invalidate_cache_ext_range
.ent microblaze_invalidate_cache_ext_range
.align 2
/*
 * void microblaze_invalidate_cache_ext_range(unsigned int cacheaddr,
 *                                            unsigned int len)
 *
 * Invalidate an external (L2) cache range with wdc.ext.clear.
 * r5 = start address, r6 = length in bytes. Start and end are aligned
 * down to the external cache line size; r6 is then converted to an
 * offset from r5 and walked down to zero. A zero length returns
 * immediately. Compiles to a bare return unless INTERCONNECT==3 and a
 * data cache are configured.
 *
 * Clobbers: r5, r6
 */
microblaze_invalidate_cache_ext_range:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done /* nothing to do for len == 0 */
ADD r6, r5, r6 /* r6 = end address (start + len) */
ADDIK r6, r6, -1 /* make end inclusive before aligning */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align end down to ext cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align start down to ext cache line */
RSUBK r6, r5, r6 /* r6 = byte offset of last line relative to r5 */
Loop_start:
wdc.ext.clear r5, r6 /* invalidate external cache line at r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one line */
beagei r6, Loop_start /* loop while offset >= 0 (64-bit variant) */
#else
bneid r6, Loop_start /* loop until offset reaches 0 */
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one line (delay slot) */
#endif
Loop_done:
#endif
rtsd r15, 8 /* return to caller */
nop /* delay slot */
.end microblaze_invalidate_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,044 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_disable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_disable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable L1 icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_disable_icache
.ent microblaze_disable_icache
.align 2
/*
 * void microblaze_disable_icache(void)
 *
 * Disable the L1 instruction cache by clearing MSR bit 0x20 (ICE).
 * Uses the single msrclr instruction when the core supports it
 * (executed in the rtsd delay slot); otherwise falls back to a
 * read-modify-write of the MSR.
 *
 * Clobbers: r8 (fallback path only)
 */
microblaze_disable_icache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrclr r0, 0x20 /* clear ICache-enable bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Clear the icache enable bit
andi r8, r8, ~(0x20)
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif
.end microblaze_disable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,198 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_init_dcache_range.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_init_dcache_range (unsigned int cache_start, unsigned int cache_len)
*
* Invalidate dcache on the microblaze
*
* Parameters:
* 'cache_start' - address in the Dcache where invalidation begins
* 'cache_len' - length (in bytes) worth of Dcache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_init_dcache_range
.ent microblaze_init_dcache_range
.align 2
/*
 * void microblaze_init_dcache_range(unsigned int cache_start,
 *                                   unsigned int cache_len)
 *
 * Invalidate an L1 data cache range with wdc. r5 = start address,
 * r6 = length in bytes. The data cache and interrupts are disabled
 * (via MSR) for the duration of the loop; the saved MSR is restored
 * in the return delay slot. Compiles to a bare return if the dcache
 * is absent or not writable.
 *
 * NOTE(review): end = align(start) + len aligned down; a len of 0
 * still invalidates one line — presumably callers pass len > 0.
 *
 * Clobbers: r5, r6, r9, r10, r18
 */
microblaze_init_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
mfs r9, rmsr /* Disable Dcache and interrupts before invalidating */
andi r10, r9, (~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE))
mts rmsr, r10
andi r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align to cache line */
add r6, r5, r6 /* Compute end */
andi r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align to cache line */
L_start:
wdc r5, r0 /* Invalidate the cache line at r5 */
cmpu r18, r5, r6 /* Are we at the end ? */
blei r18, L_done
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
L_done:
rtsd r15, 8 /* Return */
mts rmsr, r9 /* restore saved MSR (delay slot) */
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_init_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,428 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext()
*
*Invalidate the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
.text
.globl microblaze_invalidate_cache_ext
.ent microblaze_invalidate_cache_ext
.align 2
/*
 * void microblaze_invalidate_cache_ext(void)
 *
 * Invalidate the entire external (L2) cache by walking the cacheable
 * region with wdc.ext.clear, from the highest line offset down to zero.
 * Compiles to a bare return unless the core is configured with
 * XPAR_MICROBLAZE_INTERCONNECT==3 and a data cache.
 *
 * Clobbers: r5, r6
 */
microblaze_invalidate_cache_ext:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)) /* r5 = line-aligned cache base */
ADDIK r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* r6 = offset of last ext cache line */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align offset down to an ext cache line */
Loop_start:
wdc.ext.clear r5, r6 /* invalidate external cache line at r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one line */
beagei r6, Loop_start /* loop while offset >= 0 (64-bit variant) */
#else
bgtid r6,Loop_start /* loop while offset > 0 */
addik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one line (delay slot) */
#endif
#endif
rtsd r15, 8 /* return to caller */
nop /* delay slot */
.end microblaze_invalidate_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,488 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_dcache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_dcache_range (unsigned int cacheaddr, unsigned int len)
*
* Invalidate a Dcache range
*
* Parameters:
* 'cacheaddr' - address in the Dcache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#define MB_HAS_WRITEBACK_SET 0
#else
#define MB_HAS_WRITEBACK_SET XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#endif
.text
.globl microblaze_invalidate_dcache_range
.ent microblaze_invalidate_dcache_range
.align 2
/*
 * void microblaze_invalidate_dcache_range(unsigned int cacheaddr,
 *                                         unsigned int len)
 *
 * Invalidate an L1 data cache range. r5 = start address, r6 = length
 * in bytes; a zero length returns immediately.
 *
 * Two loop shapes are used:
 *  - write-through caches / pre-v7.20 cores: plain wdc over ascending
 *    addresses, with dcache and interrupts disabled around the loop
 *    (MSR saved in r9, restored in the return delay slot);
 *  - write-back caches: wdc.clear, which only invalidates a line when
 *    its tag matches the given address, walking a descending offset.
 *
 * Compiles to a bare return if the dcache is absent or not writable.
 *
 * Clobbers: r5, r6, r18 (and r9, r10 on the pre-v7.20 path)
 */
microblaze_invalidate_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR, restored in the epilogue */
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
BEQI r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1 /* make end inclusive before aligning */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align start down to cache line */
#if MB_HAS_WRITEBACK_SET == 0 /* Use a different scheme for MB version < v7.20 or when caches are write-through */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wdc r5, r0 /* invalidate the cache line at r5 */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
#endif
#else
RSUBK r6, r5, r6 /* r6 = byte offset of last line relative to r5 */
/* r6 will now contain (count of bytes - (4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) */
L_start:
wdc.clear r5, r6 /* Invalidate the cache line only if the address matches */
#if defined (__arch64__ )
addlik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* step back one line */
beagei r6, L_start /* loop while offset >= 0 (64-bit variant) */
#else
bneid r6, L_start /* loop until offset reaches 0 */
addik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* step back one line (delay slot) */
#endif
#endif
L_done:
rtsd r15, 8 /* return to caller */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9 /* restore saved MSR (delay slot) */
#else
nop /* delay slot */
#endif
#else
rtsd r15, 8
nop
#endif
.end microblaze_invalidate_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,555 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_disable_interrupts.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* @file microblaze_disable_interrupts.S
*
* @addtogroup microblaze_pseudo_asm_macro
* @{
* <h2> microblaze_disable_interrupts.S </h2>
* - API to disable Interrupts: void microblaze_disable_interrupts(void)
*
* This API Disables interrupts on the MicroBlaze processor. It can be
* called when entering a critical section of code where a context switch is
* undesirable.
*
* <pre>
* File : microblaze_disable_interrupts.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable interrupts on the microblaze.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_disable_interrupts
.ent microblaze_disable_interrupts
.align 2
/*
 * void microblaze_disable_interrupts(void)
 *
 * Disable interrupts by clearing MSR bit 0x2 (IE). Uses the single
 * msrclr instruction when the core supports it (executed in the rtsd
 * delay slot); otherwise falls back to a read-modify-write of the MSR.
 *
 * Clobbers: r12 (fallback path only)
 */
microblaze_disable_interrupts:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrclr r0, 0x2 /* clear interrupt-enable bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r12, rmsr
#Clear the interrupt enable bit
andi r12, r12, ~(0x2)
#Save the MSR register
mts rmsr, r12
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_disable_interrupts
/**
* @} End of "addtogroup microblaze_pseudo_asm_macro".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 5,784 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_scrub.S | /******************************************************************************
* Copyright (c) 2012 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_scrub ()
*
* Scrub LMB memory and all internal BRAMs (data cache, instruction cache,
* MMU UTLB and branch target cache) in MicroBlaze to reduce the possibility
* of an uncorrectable error when fault tolerance support is enabled.
*
* This routine assumes that the processor is in privileged mode when it is
* called, if the MMU is enabled.
*
* Call this routine regularly from a timer interrupt.
*
* Parameters:
* None
*
*
*******************************************************************************/
#include "xparameters.h"
/* Define if fault tolerance is used */
#ifdef XPAR_MICROBLAZE_FAULT_TOLERANT
#if XPAR_MICROBLAZE_FAULT_TOLERANT > 0
#define FAULT_TOLERANT
#endif
#endif
/* Define if LMB is used and can be scrubbed */
#if defined(XPAR_MICROBLAZE_D_LMB) && \
defined(XPAR_DLMB_CNTLR_BASEADDR) && \
defined(XPAR_DLMB_CNTLR_HIGHADDR)
#if XPAR_MICROBLAZE_D_LMB == 1
#define HAS_SCRUBBABLE_LMB
#define DLMB_MASK (XPAR_DLMB_CNTLR_HIGHADDR - XPAR_DLMB_CNTLR_BASEADDR)
#endif
#endif
/* Set default cache line lengths */
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 4
#endif
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 4
#endif
/* Define if internal Data Cache BRAMs are used */
#if defined(XPAR_MICROBLAZE_USE_DCACHE) && defined(XPAR_MICROBLAZE_DCACHE_BYTE_SIZE)
#if XPAR_MICROBLAZE_USE_DCACHE == 1 && XPAR_MICROBLAZE_DCACHE_BYTE_SIZE > 1024
#define HAS_BRAM_DCACHE
#define DCACHE_INCREMENT (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
#define DCACHE_MASK (XPAR_MICROBLAZE_DCACHE_BYTE_SIZE - 1)
#endif
#endif
/* Define if internal Instruction Cache BRAMs are used */
#if defined(XPAR_MICROBLAZE_USE_ICACHE) && defined(XPAR_MICROBLAZE_CACHE_BYTE_SIZE)
#if XPAR_MICROBLAZE_USE_ICACHE == 1 && XPAR_MICROBLAZE_CACHE_BYTE_SIZE > 1024
#define HAS_BRAM_ICACHE
#define ICACHE_INCREMENT (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)
#define ICACHE_MASK (XPAR_MICROBLAZE_CACHE_BYTE_SIZE - 1)
#endif
#endif
/* Define if internal MMU UTLB BRAM is used */
#ifdef XPAR_MICROBLAZE_USE_MMU
#if XPAR_MICROBLAZE_USE_MMU > 1
#define HAS_BRAM_MMU_UTLB
#endif
#endif
/* Define if internal BTC BRAM is used, and match BTC clear to a complete cache scrub */
#if defined(XPAR_MICROBLAZE_USE_BRANCH_TARGET_CACHE) && \
defined(XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE)
#if XPAR_MICROBLAZE_USE_BRANCH_TARGET_CACHE == 1
#if XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE == 0 || \
XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE > 4
#define HAS_BRAM_BRANCH_TARGET_CACHE
#ifdef HAS_BRAM_DCACHE
#define BTC_MASK_D (XPAR_MICROBLAZE_DCACHE_BYTE_SIZE/DCACHE_INCREMENT-1)
#else
#define BTC_MASK_D 256
#endif
#ifdef HAS_BRAM_ICACHE
#define BTC_MASK_I (XPAR_MICROBLAZE_CACHE_BYTE_SIZE/ICACHE_INCREMENT-1)
#else
#define BTC_MASK_I 256
#endif
#if BTC_MASK_D > BTC_MASK_I
#define BTC_MASK BTC_MASK_D
#else
#define BTC_MASK BTC_MASK_I
#endif
#endif
#endif
#endif
/* Define index offsets to persistent data used by this routine */
#define DLMB_INDEX_OFFSET 0
#define DCACHE_INDEX_OFFSET 4
#define ICACHE_INDEX_OFFSET 8
#define MMU_INDEX_OFFSET 12
#define BTC_CALL_COUNT_OFFSET 16
.text
.globl microblaze_scrub
.ent microblaze_scrub
.align 2
/*
 * void microblaze_scrub(void)
 *
 * Incremental scrubber for fault-tolerant configurations: on each call,
 * touch one unit of each configured internal BRAM so that latent ECC
 * errors are corrected before they accumulate:
 *  - one LMB word (load + store back),
 *  - one data cache line (wdc),
 *  - one instruction cache line (wic),
 *  - one MMU UTLB entry (cleared via rtlbx/rtlbhi),
 *  - the branch target cache, once per BTC_MASK+1 calls (bri 4).
 * Progress indices persist across calls in L_persistent_data.
 * Compiles to a bare return when fault tolerance is not enabled.
 *
 * Assumes privileged mode if the MMU is enabled; intended to be called
 * periodically (e.g. from a timer interrupt).
 *
 * Clobbers: r5, r6, r7
 */
microblaze_scrub:
#ifdef FAULT_TOLERANT
la r6, r0, L_persistent_data /* r6 = pointer to persistent indices */
#ifdef HAS_SCRUBBABLE_LMB
L_dlmb:
lwi r5, r6, DLMB_INDEX_OFFSET /* Get dlmb index */
lw r7, r5, r0 /* Load and store one LMB word to rewrite its ECC */
sw r7, r5, r0
addik r5, r5, 4 /* Increment and save dlmb index */
andi r5, r5, DLMB_MASK /* wrap within the LMB region */
swi r5, r6, DLMB_INDEX_OFFSET
#endif /* HAS_SCRUBBABLE_LMB */
#ifdef HAS_BRAM_DCACHE
L_dcache:
lwi r5, r6, DCACHE_INDEX_OFFSET /* Get dcache line index */
wdc r5, r0 /* Invalidate data cache line */
addik r5, r5, DCACHE_INCREMENT /* Increment and save entry index */
andi r5, r5, DCACHE_MASK /* wrap within the dcache */
swi r5, r6, DCACHE_INDEX_OFFSET
#endif /* HAS_BRAM_DCACHE */
#ifdef HAS_BRAM_ICACHE
L_icache:
lwi r5, r6, ICACHE_INDEX_OFFSET /* Get icache line index */
wic r5, r0 /* Invalidate instruction cache line */
addik r5, r5, ICACHE_INCREMENT /* Increment and save entry index */
andi r5, r5, ICACHE_MASK /* wrap within the icache */
swi r5, r6, ICACHE_INDEX_OFFSET
#endif /* HAS_BRAM_ICACHE */
#ifdef HAS_BRAM_MMU_UTLB
L_mmu:
lwi r5, r6, MMU_INDEX_OFFSET /* Get UTLB entry index */
mts rtlbx, r5 /* Access next entry in UTLB */
mts rtlbhi, r0 /* Clear the UTLB entry */
addik r5, r5, 1 /* Increment and save entry index */
andi r5, r5, 0x3F /* wrap at 64 UTLB entries */
swi r5, r6, MMU_INDEX_OFFSET
#endif /* HAS_BRAM_MMU_UTLB */
#ifdef HAS_BRAM_BRANCH_TARGET_CACHE
L_btc:
lwi r5, r6, BTC_CALL_COUNT_OFFSET /* Get BTC call count offset */
addik r5, r5, 1 /* Increment and save call count */
andi r5, r5, BTC_MASK /* wrap so BTC scrub matches a full cache scrub */
swi r5, r6, BTC_CALL_COUNT_OFFSET
bnei r5, L_skip_btc_scrub /* Skip scrub unless count wrap */
bri 4 /* Clear branch target cache */
L_skip_btc_scrub:
#endif /* HAS_BRAM_BRANCH_TARGET_CACHE */
#endif /* FAULT_TOLERANT */
L_done:
rtsd r15, 8 /* Return */
nop /* delay slot */
.end microblaze_scrub
/* Persistent data used by this routine (indices survive between calls) */
.data
.align 2
L_persistent_data:
.long 0 /* dlmb index */
.long 0 /* dcache index */
.long 0 /* icache index */
.long 0 /* mmu entry index */
.long 0 /* btc call count */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,075 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_enable_dcache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable L1 dcache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_dcache
.ent microblaze_enable_dcache
.align 2
/*
 * void microblaze_enable_dcache(void)
 *
 * Enable the L1 data cache by setting MSR bit 0x80 (DCE). Uses the
 * single msrset instruction when the core supports it (executed in
 * the rtsd delay slot); otherwise falls back to a read-modify-write
 * of the MSR.
 *
 * Clobbers: r8 (fallback path only)
 */
microblaze_enable_dcache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x80 /* set DCache-enable bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the data cache enable bit (0x80)
ori r8, r8, 0x80
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,540 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_dcache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_dcache_range (unsigned int cacheaddr, unsigned int len)
*
* Flush a L1 DCache range
*
* Parameters:
* 'cacheaddr' - address in the Dcache where the flush begins
* 'len ' - length (in bytes) worth of Dcache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#define MB_HAS_WRITEBACK_SET 0
#else
#define MB_HAS_WRITEBACK_SET XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#endif
.text
.globl microblaze_flush_dcache_range
.ent microblaze_flush_dcache_range
.align 2
/*
 * void microblaze_flush_dcache_range(unsigned int cacheaddr,
 *                                    unsigned int len)
 *
 * Flush an L1 data cache range. r5 = start address, r6 = length in
 * bytes; a zero length returns immediately.
 *
 * Two loop shapes are used:
 *  - write-through caches / pre-v7.20 cores: flushing reduces to
 *    invalidation, so plain wdc over ascending addresses, with dcache
 *    and interrupts disabled around the loop (MSR saved in r9,
 *    restored in the return delay slot);
 *  - write-back caches: wdc.flush over a descending offset, which
 *    writes dirty data back before invalidating.
 *
 * Compiles to a bare return if the dcache is absent or not writable.
 *
 * Clobbers: r5, r6, r18 (and r9, r10 on the pre-v7.20 path)
 */
microblaze_flush_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR, restored in the epilogue */
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
beqi r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1 /* make end inclusive before aligning */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align start down to cache line */
#if MB_HAS_WRITEBACK_SET == 0 /* Use a different scheme for MB version < v7.20 or when caches are write-through */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wdc r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
#endif
#else
RSUBK r6, r5, r6 /* r6 = byte offset of last line relative to r5 */
/* r6 will now contain (count of bytes - (4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) */
L_start:
wdc.flush r5, r6 /* Flush the cache line */
#if defined (__arch64__ )
addlik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* step back one line */
beagei r6, L_start /* loop while offset >= 0 (64-bit variant) */
#else
bneid r6, L_start /* loop until offset reaches 0 */
addik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* step back one line (delay slot) */
#endif
#endif
L_done:
rtsd r15, 8 /* return to caller */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9 /* restore saved MSR (delay slot) */
#else
nop /* delay slot */
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_flush_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,200 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_init_icache_range.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_init_icache_range (unsigned int cache_start, unsigned int cache_len)
*
* Invalidate icache on the microblaze
*
* Parameters:
* 'cache_start' - address in the Icache where invalidation begins
* 'cache_len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
.text
.globl microblaze_init_icache_range
.ent microblaze_init_icache_range
.align 2
/*
 * void microblaze_init_icache_range(unsigned int cache_start,
 *                                   unsigned int cache_len)
 *
 * Invalidate an L1 instruction cache range with wic. r5 = start
 * address, r6 = length in bytes. The icache and interrupts are
 * disabled (via MSR) for the duration of the loop; the saved MSR is
 * restored in the return delay slot. Compiles to a bare return if the
 * icache is absent or not writable.
 *
 * NOTE(review): end = align(start) + len aligned down; a len of 0
 * still invalidates one line — presumably callers pass len > 0.
 *
 * Clobbers: r5, r6, r9, r10, r18
 */
microblaze_init_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
mfs r9, rmsr /* Disable Icache and interrupts before invalidating */
andi r10, r9, (~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE))
mts rmsr, r10
andi r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align to cache line */
add r6, r5, r6 /* Compute end */
andi r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align to cache line */
L_start:
wic r5, r0 /* Invalidate the cache line at r5 */
cmpu r18, r5, r6 /* Are we at the end ? */
blei r18, L_done
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
L_done:
rtsd r15, 8 /* Return */
mts rmsr, r9 /* restore saved MSR (delay slot) */
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_init_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,385 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_icache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache()
*
* Invalidate the entire ICache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_icache
.ent microblaze_invalidate_icache
.align 2
/*
 * void microblaze_invalidate_icache(void)
 *
 * Invalidate the entire L1 instruction cache with wic, walking from
 * the cache base address to base + cache size, one line per
 * iteration. On pre-v7.20 cores the icache and interrupts are
 * disabled around the loop and the MSR is restored on return.
 * Compiles to a bare return if the icache is absent or not writable.
 *
 * Clobbers: r5, r6, r18 (and r9, r10 on pre-v7.20 cores)
 */
microblaze_invalidate_icache:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Icache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR, restored in the epilogue */
andi r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_ICACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)) /* Align to cache line */
ADDIK r6, r5, XPAR_MICROBLAZE_CACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)) /* Compute end */
L_start:
wic r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Advance by one cache line */
breai L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9 /* restore saved MSR (delay slot) */
#else
nop /* delay slot */
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,575 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_enable_interrupts.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* @file microblaze_enable_interrupts.S
*
* @addtogroup microblaze_pseudo_asm_macro
* @{
* <h2> microblaze_enable_interrupts.S </h2>
* - API to Enable Interrupts: void microblaze_enable_interrupts(void)
*
* This API Enables interrupts on the MicroBlaze processor. When the MicroBlaze
* processor starts up, interrupts are disabled. Interrupts must be explicitly
* turned on using this function.
*
* <pre>
*
* File : microblaze_enable_interrupts.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable interrupts on the microblaze.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_interrupts
.ent microblaze_enable_interrupts
.align 2
/*
 * void microblaze_enable_interrupts(void)
 *
 * Enable interrupts by setting MSR bit 0x2 (IE). Uses the single
 * msrset instruction when the core supports it (executed in the rtsd
 * delay slot; the trailing nop after the delay slot is never reached);
 * otherwise falls back to a read-modify-write of the MSR.
 *
 * Clobbers: r12 (fallback path only)
 */
microblaze_enable_interrupts:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x2 /* set interrupt-enable bit in the return delay slot */
nop
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r12, rmsr
#Set the interrupt enable bit
ori r12, r12, 0x2
#Save the MSR register
mts rmsr, r12
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_interrupts
/**
* @} End of "addtogroup microblaze_pseudo_asm_macro".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,550 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_update_dcache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* File : microblaze_update_dcache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update dcache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_update_dcache
.ent microblaze_update_dcache
.align 2
/* void microblaze_update_dcache(tag, data, lockval)
 * In:  r5 = cache tag line, r6 = cache data,
 *      r7 = lock/valid bits (encoding described in the file header above).
 * Writes one D-cache line with wdc while the cache is temporarily disabled.
 * Compiles to a plain return when the D-cache is absent or not writable. */
microblaze_update_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#if XPAR_MICROBLAZE_DCACHE_LINE_LEN == 1
/* Read the MSR register into a temp register */
mfs r18, rmsr
/* Clear the dcache enable bit to disable the cache
Register r10,r18 are volatile registers and hence do not need to be saved before use */
andi r10, r18, ~128
mts rmsr, r10
/* Update the lock and valid info */
andi r5, r5, 0xfffffffc
or r5, r5, r7
/* Update dcache */
wdc r5, r6
/* Return; the original MSR (with the cache enable bit as it was on
   entry) is restored in the delay slot of rtsd. */
rtsd r15, 8
mts rmsr, r18
#else
/* The only valid usage of this routine for larger cache line lengths is to invalidate a data cache line
So call microblaze_init_dcache_range appropriately to do the job */
/* Tail-branch: brid's delay slot loads the length argument (one cache
   line, in bytes) into r6 before control reaches the callee. */
brid microblaze_init_dcache_range
addik r6, r0, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
/* We don't have a return instruction here. This is tail call optimization :) */
#endif /* XPAR_MICROBLAZE_DCACHE_LINE_LEN == 1 */
#else
rtsd r15, 8
nop
#endif
.end microblaze_update_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,815 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_flush_dcache()
*
* Flush the L1 DCache
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_flush_dcache
.ent microblaze_flush_dcache
.align 2
/* void microblaze_flush_dcache(void)
 * Flushes the whole L1 D-cache with wdc.flush, walking the configured
 * cacheable region one cache line (4 * LINE_LEN bytes) at a time:
 *   r5 = current line address, r6 = end address, r18 = compare scratch.
 * Compiles to a plain return when the D-cache is absent or not writable. */
microblaze_flush_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Align to cache line */
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc.flush r5, r0 /* Flush the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
BRI L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_flush_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,676 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_disable_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* File : microblaze_disable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable the L1 dcache on the microblaze.
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
.text
.globl microblaze_disable_dcache
.ent microblaze_disable_dcache
.align 2
/* void microblaze_disable_dcache(void)
 * Clears the D-cache enable bit (0x80) in the MSR.  When the cache is
 * configured as write-back, microblaze_flush_dcache is called first so
 * dirty lines reach memory before the cache is turned off; r15 (link
 * register) is saved on the stack across that call. */
microblaze_disable_dcache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
ADDIK r1, r1, -8
SI r15, r1, 0
BRLID r15, microblaze_flush_dcache /* microblaze_flush_dcache does not use r1*/
nop
LI r15, r1, 0
ADDIK r1, r1, 8
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
/* msrclr atomically clears MSR bit 0x80 in the return's delay slot */
rtsd r15, 8
msrclr r0, 0x80
#else /* XPAR_MICROBLAZE_USE_MSR_INSTR == 1 */
ADDIK r1, r1, -8
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
SI r15, r1, 0
BRLID r15, microblaze_flush_dcache
nop
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
/* No msrclr instruction: read-modify-write the MSR to clear bit 0x80 */
mfs r11, rmsr
andi r11, r11, ~(0x80)
mts rmsr, r11
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
LI r15, r1, 0
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
ADDIK r1, r1, 8
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_disable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,555 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_update_icache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_update_icache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update icache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
.text
.globl microblaze_update_icache
.ent microblaze_update_icache
.align 2
/* void microblaze_update_icache(tag, data, lockval)
 * In:  r5 = cache tag line, r6 = cache data,
 *      r7 = lock/valid bits (encoding described in the file header above).
 * Writes one I-cache line with wic while the cache is temporarily disabled.
 * Compiles to a plain return when the I-cache is absent or not writable. */
microblaze_update_icache:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#if XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1
/* Read the MSR register into a temp register */
mfs r18, rmsr
/* Clear the icache enable bit to disable the cache
Register r10,r18 are volatile registers and hence do not need to be saved before use */
andi r10, r18, ~32
mts rmsr, r10
/* Update the lock and valid info */
andi r5, r5, 0xfffffffc
or r5, r5, r7
/* Update icache */
wic r5, r6
/* Return; the original MSR (with the cache enable bit as it was on
   entry) is restored in the delay slot of rtsd. */
rtsd r15, 8
mts rmsr, r18
#else
/* The only valid usage of this routine for larger cache line lengths is to invalidate an instruction cache line
So call microblaze_init_icache_range appropriately to do the job */
/* Tail-branch: brid's delay slot loads the length argument (one cache
   line, in bytes) into r6 before control reaches the callee. */
brid microblaze_init_icache_range
addik r6, r0, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)
/* We don't have a return instruction here. This is tail call optimization :) */
#endif /* XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1 */
#else
rtsd r15, 8
nop
#endif
.end microblaze_update_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 26,672 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/hw_exception_handler.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* Microblaze HW Exception Handler
* - Non self-modifying exception handler for the following exception conditions
* - Unalignment
* - Instruction bus error
* - Data bus error
* - Illegal instruction opcode
* - Divide-by-zero
* - Stack protection violation
*******************************************************************************/
#include "microblaze_exceptions_g.h"
#include "xparameters.h"
#include "microblaze_instructions.h"
/* 64-bit definitions */
#if defined (__arch64__)
#define INTPTR_DATAITEM .quad
#define REGSIZE 8
#define DATAALIGN 4
#else
#define INTPTR_DATAITEM .long
#define REGSIZE 4
#define DATAALIGN 2
#endif /* 64-bit definitions */
/* Helpful Macros */
/* Stack-frame layout helpers: r17 lives at offset 0, registers r1..r18
   are saved at REG_OFFSET(n) = REGSIZE*(n+1), and the MSR at the top of
   the frame (see the stack diagram in the handler comment below). */
#define EX_HANDLER_STACK_SIZ (REGSIZE*21)
#define RMSR_OFFSET (20 * REGSIZE)
#define R17_OFFSET (0)
#define REG_OFFSET(regnum) (REGSIZE * (regnum + 1))
#define NUM_TO_REG(num) r ## num
/* Move a register value between r3 and that register's stack slot. */
#define R3_TO_STACK(regnum) SI r3, r1, REG_OFFSET(regnum)
#define R3_FROM_STACK(regnum) LI r3, r1, REG_OFFSET(regnum)
/* Save/restore a register to/from its own slot in the exception frame. */
#define PUSH_REG(regnum) SI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
#define POP_REG(regnum) LI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
/* Uses r5 */
#define PUSH_MSR \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET;
#define PUSH_MSR_AND_ENABLE_EXC \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET; \
ori r5, r5, 0x100; /* Turn ON the EE bit*/ \
mts rmsr, r5;
/* Uses r5 */
#define POP_MSR \
lwi r5, r1, RMSR_OFFSET; \
mts rmsr, r5;
/* Push r17 */
#define PUSH_R17 SI r17, r1, R17_OFFSET
/* Pop r17 */
#define POP_R17 LI r17, r1, R17_OFFSET
/* Jump-table entries for registers the handler refuses to emulate
   (r1, r17): trap via ex_handler_unhandled. */
#define LWREG_NOP \
BRI ex_handler_unhandled; \
nop;
#define SWREG_NOP \
BRI ex_handler_unhandled; \
nop;
/* r3 is the source */
/* "_V" variants write to the register's saved stack slot instead of the
   live register, because r3-r6 are restored from the frame on exit. */
#define R3_TO_LWREG_V(regnum) \
R3_TO_STACK (regnum); \
BRI ex_handler_done;
/* r3 is the source */
#define R3_TO_LWREG(regnum) \
OR NUM_TO_REG (regnum), r0, r3; \
BRI ex_handler_done;
/* r3 is the target */
#define SWREG_TO_R3_V(regnum) \
R3_FROM_STACK (regnum); \
BRI ex_sw_tail;
/* r3 is the target */
#define SWREG_TO_R3(regnum) \
OR r3, r0, NUM_TO_REG (regnum); \
BRI ex_sw_tail;
/* regnum is the source */
#define FP_EX_OPB_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_b; \
nop; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPB_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_b; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPA_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_a; \
nop; \
BRI handle_fp_ex_done;
/* regnum is the source */
#define FP_EX_OPA_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_a; \
BRI handle_fp_ex_done;
#define FP_EX_UNHANDLED \
BRI fp_ex_unhandled; \
nop; \
nop;
/* ESR masks */
/* Field selectors for the Exception Status Register (ESR). */
#define ESR_EXC_MASK 0x0000001F
#define ESR_REG_MASK 0x000003E0
#define ESR_LW_SW_MASK 0x00000400
#define ESR_WORD_MASK 0x00000800
#define ESR_DS_MASK 0x00001000
#define ESR_LONG_MASK 0x00002000
/* Extern declarations */
.extern XNullHandler
#ifdef MICROBLAZE_EXCEPTIONS_ENABLED /* If exceptions are enabled in the processor */
/*
* hw_exception_handler - Handler for unaligned exceptions
* Exception handler notes:
* - Does not handle exceptions other than unaligned exceptions
* - Does not handle exceptions during load into r17, r1, r0.
* - Does not handle exceptions during store from r17 (cannot be done) and r1 (slows down common case)
*
* Relevant register structures
*
* EAR - |----|----|----|----|----|----|----|----|
* - < ## 32 or 64 bit faulting address ## >
*
* ESR - |----|----|----|----|----| - | - |-----|-----|
* - W S REG EXC
*
*
* STACK FRAME STRUCTURE
* ---------------------
*
* +-------------+ + 0
* | r17 |
* +-------------+ + 4 (32-bit) + 8 (64-bit)
* | Args for |
* | next func |
* +-------------+ + 8 (32-bit) + 16 (64-bit)
* | r1 |
* | . |
* | . |
* | . |
* | . |
* | r18 |
* +-------------+ + 80 (32-bit) + 160 (64-bit)
* | MSR |
* +-------------+ + 84 (32-bit) + 168 (64-bit)
* | . |
* | . |
*/
/* _hw_exception_handler — hardware exception entry point.
 * Flow: (optionally) trap stack-protection violations without touching the
 * stack; build the exception frame; save r17 (or rbtr when the exception
 * hit a delay slot); re-enable EE to allow nesting; then either emulate an
 * unaligned access byte-by-byte, decode an FP exception's operands, or
 * dispatch through MB_ExceptionVectorTable ({handler, argument} pairs of
 * REGSIZE each).  Returns with rted r17, 0. */
.global _hw_exception_handler
.section .text
.align 2
.ent _hw_exception_handler
.type _hw_exception_handler, @function
_hw_exception_handler:
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* Immediately halt for stack protection violation exception without using any stack */
SI r3, r0, mb_sp_save_r3; /* Save temporary register */
mfs r3, resr; /* Extract ESR[DS] */
andi r3, r3, ESR_EXC_MASK;
xori r3, r3, 0x7; /* Check for stack protection violation */
BNEI r3, ex_handler_not_sp_violation;
ex_handler_sp_violation:
bri 0; /* Halt here if stack protection violation */
ex_handler_not_sp_violation:
LI r3, r0, mb_sp_save_r3; /* Restore temporary register */
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
ADDIK r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
PUSH_REG(3);
PUSH_REG(4);
PUSH_REG(5);
PUSH_REG(6);
#ifdef MICROBLAZE_CAN_HANDLE_EXCEPTIONS_IN_DELAY_SLOTS
/* If ESR[DS] is set the exception hit a branch delay slot: the resume
   address is in rbtr, so use it as the saved return address (r17). */
mfs r6, resr;
andi r6, r6, ESR_DS_MASK;
BEQI r6, ex_handler_no_ds;
mfs r17, rbtr;
ex_handler_no_ds:
#endif
PUSH_R17;
PUSH_MSR_AND_ENABLE_EXC; /* Exceptions enabled here. This will allow nested exceptions */
mfs r3, resr;
andi r5, r3, ESR_EXC_MASK; /* Extract ESR[EXC] */
#ifndef NO_UNALIGNED_EXCEPTIONS
xori r6, r5, 1; /* 00001 = Unaligned Exception */
BNEI r6, handle_ex_regular;
ADDIK r4, r0, MB_ExceptionVectorTable; /* Check if user has registered an unaligned exception handler */
#if defined (__arch64__)
LI r4, r4, 16;
#else
LI r4, r4, 8;
#endif
ADDIK r6, r0, XNullHandler; /* If exceptionvectortable entry is still XNullHandler, use */
XOR r6, r4, r6; /* the default exception handler */
BEQI r6, handle_unaligned_ex ;
handle_ex_regular:
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
xori r6, r5, 6; /* 00110 = FPU exception */
BEQI r6, handle_fp_ex; /* Go and decode the FP exception */
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
handle_other_ex: /* Handle Other exceptions here */
ori r6, r0, 20;
cmp r6, r5, r6; /* >= 20 are exceptions we do not handle. */
BLEI r6, ex_handler_unhandled;
ori r6, r0, 7;
cmp r6, r5, r6; /* Convert MMU exception indices into an ordinal of 7 */
BGTI r6, handle_other_ex_tail;
ori r5, r0, 0x7;
handle_other_ex_tail:
PUSH_REG(7); /* Save other volatiles before we make procedure calls below */
PUSH_REG(8);
PUSH_REG(9);
PUSH_REG(10);
PUSH_REG(11);
PUSH_REG(12);
PUSH_REG(15);
PUSH_REG(18);
/* Dispatch: each table entry is {handler pointer, argument}, REGSIZE
   bytes each, so the entry offset is exception_number * 2 * REGSIZE. */
ADDIK r4, r0, MB_ExceptionVectorTable; /* Load the Exception vector table base address */
ADDK r7, r5, r5; /* Calculate exception vector offset = r5 * 8 (32-bit) */
ADDK r7, r7, r7;
ADDK r7, r7, r7;
#if defined (__arch64__)
ADDK r7, r7, r7; /* or r5 * 16 (64-bit) */
#endif
ADDK r7, r7, r4; /* Get pointer to exception vector */
LI r5, r7, REGSIZE; /* Load argument to exception handler from table */
LOAD r7, r7, r0; /* Load vector itself here */
brald r15, r7; /* Branch to handler */
nop;
POP_REG(7); /* Restore other volatiles */
POP_REG(8);
POP_REG(9);
POP_REG(10);
POP_REG(11);
POP_REG(12);
POP_REG(15);
POP_REG(18);
BRI ex_handler_done; /* Complete exception handling */
#ifndef NO_UNALIGNED_EXCEPTIONS
/* Software emulation of the misaligned access: perform the load/store
   one byte at a time through a temp buffer, then route the value to or
   from the register named in ESR[REG] via the lw_table/sw_table pads. */
handle_unaligned_ex:
andi r6, r3, ESR_REG_MASK; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
srl r6, r6;
srl r6, r6;
sbi r6, r0, ex_reg_op; /* Store the register operand in a temporary location */
mfs r4, rear;
andi r6, r3, ESR_LW_SW_MASK; /* Extract ESR[S] */
BNEI r6, ex_sw;
#if defined (__arch64__)
ex_ll:
andi r6, r3, ESR_LONG_MASK; /* Extract ESR[L] */
BEQI r6, ex_lw;
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a long, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lbui r5, r4, 2;
sbi r5, r0, ex_tmp_data_loc_2;
lbui r5, r4, 3;
sbi r5, r0, ex_tmp_data_loc_3;
lbui r5, r4, 4;
sbi r5, r0, ex_tmp_data_loc_4;
lbui r5, r4, 5;
sbi r5, r0, ex_tmp_data_loc_5;
lbui r5, r4, 6;
sbi r5, r0, ex_tmp_data_loc_6;
lbui r5, r4, 7;
sbi r5, r0, ex_tmp_data_loc_7;
lli r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
BRI ex_lw_tail;
#endif
ex_lw:
andi r6, r3, ESR_WORD_MASK; /* Extract ESR[W] */
BEQI r6, ex_lhw;
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a word, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lbui r5, r4, 2;
sbi r5, r0, ex_tmp_data_loc_2;
lbui r5, r4, 3;
sbi r5, r0, ex_tmp_data_loc_3;
lwi r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
BRI ex_lw_tail;
ex_lhw:
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a half-word, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lhui r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
ex_lw_tail:
/* Computed branch into lw_table: each pad is 8 bytes (two insns). */
lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
ADDIK r6, r0, lw_table; /* Form load_word jump table offset (lw_table + (8 * regnum)) */
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r6;
bra r5;
ex_lw_end: /* Exception handling of load word, ends */
ex_sw:
lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
ADDIK r6, r0, sw_table; /* Form store_word jump table offset (sw_table + (8 * regnum)) */
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r6;
bra r5;
ex_sw_tail:
/* sw_table pads land here with the value to store in r3. */
#if defined (__arch64__)
ex_sl:
mfs r6, resr;
andi r6, r6, ESR_LONG_MASK; /* Extract ESR[L] */
BEQI r6, ex_not_sl;
sli r3, r0, ex_tmp_data_loc_0;
lbui r3, r0, ex_tmp_data_loc_0; /* Store the long, byte-by-byte into destination address */
sbi r3, r4, 0;
lbui r3, r0, ex_tmp_data_loc_1;
sbi r3, r4, 1;
lbui r3, r0, ex_tmp_data_loc_2;
sbi r3, r4, 2;
lbui r3, r0, ex_tmp_data_loc_3;
sbi r3, r4, 3;
lbui r3, r0, ex_tmp_data_loc_4;
sbi r3, r4, 4;
lbui r3, r0, ex_tmp_data_loc_5;
sbi r3, r4, 5;
lbui r3, r0, ex_tmp_data_loc_6;
sbi r3, r4, 6;
lbui r3, r0, ex_tmp_data_loc_7;
sbi r3, r4, 7;
BRI ex_handler_done;
ex_not_sl:
#endif
mfs r6, resr;
andi r6, r6, ESR_WORD_MASK; /* Extract ESR[W] */
BEQI r6, ex_shw;
swi r3, r0, ex_tmp_data_loc_0;
lbui r3, r0, ex_tmp_data_loc_0; /* Store the word, byte-by-byte into destination address */
sbi r3, r4, 0;
lbui r3, r0, ex_tmp_data_loc_1;
sbi r3, r4, 1;
lbui r3, r0, ex_tmp_data_loc_2;
sbi r3, r4, 2;
lbui r3, r0, ex_tmp_data_loc_3;
sbi r3, r4, 3;
BRI ex_handler_done;
ex_shw:
swi r3, r0, ex_tmp_data_loc_0; /* Store the lower half-word, byte-by-byte into destination address */
#ifdef __LITTLE_ENDIAN__
lbui r3, r0, ex_tmp_data_loc_0;
#else
lbui r3, r0, ex_tmp_data_loc_2;
#endif
sbi r3, r4, 0;
#ifdef __LITTLE_ENDIAN__
lbui r3, r0, ex_tmp_data_loc_1;
#else
lbui r3, r0, ex_tmp_data_loc_3;
#endif
sbi r3, r4, 1;
ex_sw_end: /* Exception handling of store word, ends. */
BRI ex_handler_done;
#endif /* !NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
/* FP exception decode: re-fetch the excepting instruction, extract its
   opB then opA register fields, and save those register values in
   mb_fpex_op_b / mb_fpex_op_a via the fp_table_* pads (12 bytes each). */
handle_fp_ex:
ADDIK r3, r17, -4; /* r17 contains (addr of exception causing FP instruction + 4) */
lw r4, r0, r3; /* We might find ourselves in a spot here. Unguaranteed load */
handle_fp_ex_opb:
ADDIK r6, r0, fp_table_opb; /* Decode opB and store its value in mb_fpex_op_b */
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
andi r3, r4, 0x1F;
ADDK r3, r3, r3; /* Calculate (fp_table_opb + (regno * 12)) in r5 */
ADDK r3, r3, r3;
ADDK r5, r3, r3;
ADDK r5, r5, r3;
ADDK r5, r5, r6;
bra r5;
handle_fp_ex_opa:
ADDIK r6, r0, fp_table_opa; /* Decode opA and store its value in mb_fpex_op_a */
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
andi r3, r4, 0x1F;
ADDK r3, r3, r3; /* Calculate (fp_table_opb + (regno * 12)) in r5 */
ADDK r3, r3, r3;
ADDK r5, r3, r3;
ADDK r5, r5, r3;
ADDK r5, r5, r6;
bra r5;
handle_fp_ex_done:
ori r5, r0, 6; /* Set exception number back to 6 */
BRI handle_other_ex_tail;
fp_ex_unhandled:
bri 0;
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
ex_handler_done:
/* Unwind: restore r17, the saved MSR, and r3-r6, drop the frame, and
   return from exception (rted re-enables EE and resumes at r17). */
POP_R17;
POP_MSR;
POP_REG(3);
POP_REG(4);
POP_REG(5);
POP_REG(6);
ADDIK r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
rted r17, 0
nop
ex_handler_unhandled:
bri 0 /* UNHANDLED. TRAP HERE */
.end _hw_exception_handler
#ifndef NO_UNALIGNED_EXCEPTIONS
/*
* hw_exception_handler Jump Table
* - Contains code snippets for each register that caused the unaligned exception.
* - Hence exception handler is NOT self-modifying
* - Separate table for load exceptions and store exceptions.
* - Each table is of size: (8 * 32) = 256 bytes
*/
.section .text
.align 4
/* Landing pads for the computed branches in ex_lw_tail / ex_sw_tail.
 * Entry N handles register rN; each pad is exactly 8 bytes (two
 * instructions), which the dispatch arithmetic (regnum * 8) relies on.
 * r1 and r17 entries trap via ex_handler_unhandled; the "_V" entries for
 * r3-r6 go through the stack slots because those registers are restored
 * from the exception frame on exit. */
lw_table:
lw_r0: R3_TO_LWREG (0);
lw_r1: LWREG_NOP;
lw_r2: R3_TO_LWREG (2);
lw_r3: R3_TO_LWREG_V (3);
lw_r4: R3_TO_LWREG_V (4);
lw_r5: R3_TO_LWREG_V (5);
lw_r6: R3_TO_LWREG_V (6);
lw_r7: R3_TO_LWREG (7);
lw_r8: R3_TO_LWREG (8);
lw_r9: R3_TO_LWREG (9);
lw_r10: R3_TO_LWREG (10);
lw_r11: R3_TO_LWREG (11);
lw_r12: R3_TO_LWREG (12);
lw_r13: R3_TO_LWREG (13);
lw_r14: R3_TO_LWREG (14);
lw_r15: R3_TO_LWREG (15);
lw_r16: R3_TO_LWREG (16);
lw_r17: LWREG_NOP;
lw_r18: R3_TO_LWREG (18);
lw_r19: R3_TO_LWREG (19);
lw_r20: R3_TO_LWREG (20);
lw_r21: R3_TO_LWREG (21);
lw_r22: R3_TO_LWREG (22);
lw_r23: R3_TO_LWREG (23);
lw_r24: R3_TO_LWREG (24);
lw_r25: R3_TO_LWREG (25);
lw_r26: R3_TO_LWREG (26);
lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
lw_r31: R3_TO_LWREG (31);
/* Store table: fetch the value to be stored from register rN (or its
 * stack slot) into r3, then continue at ex_sw_tail. */
sw_table:
sw_r0: SWREG_TO_R3 (0);
sw_r1: SWREG_NOP;
sw_r2: SWREG_TO_R3 (2);
sw_r3: SWREG_TO_R3_V (3);
sw_r4: SWREG_TO_R3_V (4);
sw_r5: SWREG_TO_R3_V (5);
sw_r6: SWREG_TO_R3_V (6);
sw_r7: SWREG_TO_R3 (7);
sw_r8: SWREG_TO_R3 (8);
sw_r9: SWREG_TO_R3 (9);
sw_r10: SWREG_TO_R3 (10);
sw_r11: SWREG_TO_R3 (11);
sw_r12: SWREG_TO_R3 (12);
sw_r13: SWREG_TO_R3 (13);
sw_r14: SWREG_TO_R3 (14);
sw_r15: SWREG_TO_R3 (15);
sw_r16: SWREG_TO_R3 (16);
sw_r17: SWREG_NOP;
sw_r18: SWREG_TO_R3 (18);
sw_r19: SWREG_TO_R3 (19);
sw_r20: SWREG_TO_R3 (20);
sw_r21: SWREG_TO_R3 (21);
sw_r22: SWREG_TO_R3 (22);
sw_r23: SWREG_TO_R3 (23);
sw_r24: SWREG_TO_R3 (24);
sw_r25: SWREG_TO_R3 (25);
sw_r26: SWREG_TO_R3 (26);
sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
sw_r31: SWREG_TO_R3 (31);
/* Temporary data structures used in the handler */
/* Byte buffer used to reassemble the misaligned datum, plus the decoded
 * register-operand number; the byte-by-byte emulation code addresses the
 * buffer cells individually, hence the per-byte labels. */
.section .data
.align DATAALIGN
ex_tmp_data_loc_0:
.byte 0
ex_tmp_data_loc_1:
.byte 0
ex_tmp_data_loc_2:
.byte 0
ex_tmp_data_loc_3:
.byte 0
#if defined (__arch64__)
ex_tmp_data_loc_4:
.byte 0
ex_tmp_data_loc_5:
.byte 0
ex_tmp_data_loc_6:
.byte 0
ex_tmp_data_loc_7:
.byte 0
#endif
ex_reg_op:
.byte 0
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
/*
 * FP exception decode jump table.
 * - Contains code snippets for each register that could have been a source operand for an excepting FP instruction
 * - Hence exception handler is NOT self-modifying
 * - Separate table for opA and opB
 * - Each table is of size: (12 * 32) = 384 bytes
 */
.section .text
.align 4
/* Each pad is exactly 12 bytes (three instructions); the dispatch code
 * computes regno * 12 into these tables, so pad size must not change.
 * Entry N saves rN's value (from the live register, or from the frame's
 * stack slot for r3-r6) into mb_fpex_op_a / mb_fpex_op_b. */
fp_table_opa:
opa_r0: FP_EX_OPA_SAVE (0);
opa_r1: FP_EX_UNHANDLED;
opa_r2: FP_EX_OPA_SAVE (2);
opa_r3: FP_EX_OPA_SAVE_V (3);
opa_r4: FP_EX_OPA_SAVE_V (4);
opa_r5: FP_EX_OPA_SAVE_V (5);
opa_r6: FP_EX_OPA_SAVE_V (6);
opa_r7: FP_EX_OPA_SAVE (7);
opa_r8: FP_EX_OPA_SAVE (8);
opa_r9: FP_EX_OPA_SAVE (9);
opa_r10: FP_EX_OPA_SAVE (10);
opa_r11: FP_EX_OPA_SAVE (11);
opa_r12: FP_EX_OPA_SAVE (12);
opa_r13: FP_EX_OPA_SAVE (13);
opa_r14: FP_EX_UNHANDLED;
opa_r15: FP_EX_UNHANDLED;
opa_r16: FP_EX_UNHANDLED;
opa_r17: FP_EX_UNHANDLED;
opa_r18: FP_EX_OPA_SAVE (18);
opa_r19: FP_EX_OPA_SAVE (19);
opa_r20: FP_EX_OPA_SAVE (20);
opa_r21: FP_EX_OPA_SAVE (21);
opa_r22: FP_EX_OPA_SAVE (22);
opa_r23: FP_EX_OPA_SAVE (23);
opa_r24: FP_EX_OPA_SAVE (24);
opa_r25: FP_EX_OPA_SAVE (25);
opa_r26: FP_EX_OPA_SAVE (26);
opa_r27: FP_EX_OPA_SAVE (27);
opa_r28: FP_EX_OPA_SAVE (28);
opa_r29: FP_EX_OPA_SAVE (29);
opa_r30: FP_EX_OPA_SAVE (30);
opa_r31: FP_EX_OPA_SAVE (31);
fp_table_opb:
opb_r0: FP_EX_OPB_SAVE (0);
opb_r1: FP_EX_UNHANDLED;
opb_r2: FP_EX_OPB_SAVE (2);
opb_r3: FP_EX_OPB_SAVE_V (3);
opb_r4: FP_EX_OPB_SAVE_V (4);
opb_r5: FP_EX_OPB_SAVE_V (5);
opb_r6: FP_EX_OPB_SAVE_V (6);
opb_r7: FP_EX_OPB_SAVE (7);
opb_r8: FP_EX_OPB_SAVE (8);
opb_r9: FP_EX_OPB_SAVE (9);
opb_r10: FP_EX_OPB_SAVE (10);
opb_r11: FP_EX_OPB_SAVE (11);
opb_r12: FP_EX_OPB_SAVE (12);
opb_r13: FP_EX_OPB_SAVE (13);
opb_r14: FP_EX_UNHANDLED;
opb_r15: FP_EX_UNHANDLED;
opb_r16: FP_EX_UNHANDLED;
opb_r17: FP_EX_UNHANDLED;
opb_r18: FP_EX_OPB_SAVE (18);
opb_r19: FP_EX_OPB_SAVE (19);
opb_r20: FP_EX_OPB_SAVE (20);
opb_r21: FP_EX_OPB_SAVE (21);
opb_r22: FP_EX_OPB_SAVE (22);
opb_r23: FP_EX_OPB_SAVE (23);
opb_r24: FP_EX_OPB_SAVE (24);
opb_r25: FP_EX_OPB_SAVE (25);
opb_r26: FP_EX_OPB_SAVE (26);
opb_r27: FP_EX_OPB_SAVE (27);
opb_r28: FP_EX_OPB_SAVE (28);
opb_r29: FP_EX_OPB_SAVE (29);
opb_r30: FP_EX_OPB_SAVE (30);
opb_r31: FP_EX_OPB_SAVE (31);
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(MICROBLAZE_FP_EXCEPTION_ENABLED) && defined(MICROBLAZE_FP_EXCEPTION_DECODE)
/* This is where we store the opA and opB of the last excepting FP instruction */
.section .data
.align DATAALIGN
.global mb_fpex_op_a
.global mb_fpex_op_b
mb_fpex_op_a:
INTPTR_DATAITEM 0
mb_fpex_op_b:
INTPTR_DATAITEM 0
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* This is where we store the register used to check which exception occurred */
/* Static save slot for r3, used by the stack-protection check before any
 * stack frame exists (the stack itself cannot be trusted at that point). */
.section .data
.align DATAALIGN
mb_sp_save_r3:
INTPTR_DATAITEM 0
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
/* The exception vector table */
/* One {handler, argument} pair per exception cause, each item REGSIZE
 * bytes; handle_other_ex_tail indexes this table by exception number.
 * Entries default to XNullHandler and are overwritten at runtime when a
 * user registers a handler (the unaligned path checks for exactly that). */
.section .data
.align DATAALIGN
.global MB_ExceptionVectorTable
MB_ExceptionVectorTable:
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 0 /* -- FSL Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 1 /* -- Unaligned Access Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 2 /* -- Illegal Opcode Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 3 /* -- Instruction Bus Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 4 /* -- Data Bus Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 5 /* -- Div-by-0 Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 6 /* -- FPU Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 7 /* -- MMU Exceptions -- */
#else /* Dummy exception handler, in case exceptions are not present in the processor */
.global _hw_exception_handler
.section .text
.align 2
.ent _hw_exception_handler
/* Stub used when the processor is configured without exceptions: any
 * stray entry here spins forever (bri 0 branches to itself). */
_hw_exception_handler:
bri 0;
.end _hw_exception_handler
#endif /* MICROBLAZE_EXCEPTIONS_ENABLED */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,879 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_icache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache_range(unsigned int cacheaddr, unsigned int len)
*
* Invalidate an ICache range
*
* Parameters:
* 'cacheaddr' - address in the Icache where invalidation begins
* 'len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_icache_range
.ent microblaze_invalidate_icache_range
.align 2
/* void microblaze_invalidate_icache_range(cacheaddr, len)
 * In:  r5 = start address, r6 = length in bytes.
 * Invalidates with wic every I-cache line touching [r5, r5+len).
 * When MB_VERSION_LT_v720 is defined (core older than v7.20, inferred
 * above from the absence of the write-back parameter), the I-cache and
 * interrupts are disabled around the loop; the saved MSR (r9) is
 * restored in the return's delay slot. */
microblaze_invalidate_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Icache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
BEQI r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align start down to cache line */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wic r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,482 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Flush a L2 Cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where the flush begins
* 'len ' - length (in bytes) worth of L2 cache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_flush_cache_ext_range
.ent microblaze_flush_cache_ext_range
.align 2
microblaze_flush_cache_ext_range:
/* Only built when an ACE/coherent interconnect (3) and a D-cache are present */
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done /* len == 0: nothing to flush */
ADDIK r6, r6, -1 /* r6 = len - 1 */
ADD r6, r5, r6 /* r6 = address of the last byte in the range */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align end address down to an L2 line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align start address down to an L2 line */
RSUBK r6, r5, r6 /* r6 = end - start = offset of last line from r5 */
Loop_start:
wdc.ext.flush r5, r6 /* flush the L2 line at address r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one L2 line */
beagei r6, Loop_start /* 64-bit: loop while offset >= 0 */
#else
bneid r6, Loop_start /* loop until offset 0 has been flushed */
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* delay slot: step back one L2 line */
#endif
Loop_done:
#endif
rtsd r15, 8 /* return to caller */
nop /* branch delay slot */
.end microblaze_flush_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,396 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext()
*
* Flush the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
.text
.globl microblaze_flush_cache_ext
.ent microblaze_flush_cache_ext
.align 2
microblaze_flush_cache_ext:
/* Only built when an ACE/coherent interconnect (3) and a D-cache are present */
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* r5 = cacheable region base, line-aligned */
ADDIK r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* r6 = offset of the last L2 line in the region */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align offset down to an L2 line boundary */
Loop_start:
wdc.ext.flush r5, r6 /* flush the L2 line at base + offset */
#if defined (__arch64__)
addlik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one L2 line */
beagei r6, Loop_start /* 64-bit: loop while offset >= 0 */
#else
bgtid r6,Loop_start /* loop while offset > 0 */
addik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* delay slot: step back one L2 line */
#endif
#endif
rtsd r15, 8 /* return to caller */
nop /* branch delay slot */
.end microblaze_flush_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,365 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_dcache()
*
* Invalidate the entire L1 DCache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_dcache
.ent microblaze_invalidate_dcache
.align 2
microblaze_invalidate_dcache:
/* Only meaningful when a writable D-cache is configured */
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR for restore on return */
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE) /* clear DCE and IE */
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* r5 = cache base, line-aligned */
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done /* unsigned compare result <= 0: past the end */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (line_len words) */
breai L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance by one cache line (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,070 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_enable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_icache
.ent microblaze_enable_icache
.align 2
microblaze_enable_icache:
/* Enable the instruction cache by setting the ICE bit (mask 0x20) in the MSR */
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x20 /* atomic MSR bit-set executed in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the instruction cache enable bit (ICE, mask 0x20)
ori r8, r8, 0x20
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,519 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Invalidate an L2 cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_invalidate_cache_ext_range
.ent microblaze_invalidate_cache_ext_range
.align 2
microblaze_invalidate_cache_ext_range:
/* Only built when an ACE/coherent interconnect (3) and a D-cache are present */
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done /* len == 0: nothing to invalidate */
ADD r6, r5, r6 /* r6 = cacheaddr + len */
ADDIK r6, r6, -1 /* r6 = address of the last byte in the range */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align end address down to an L2 line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* align start address down to an L2 line */
RSUBK r6, r5, r6 /* r6 = end - start = offset of last line from r5 */
Loop_start:
wdc.ext.clear r5, r6 /* invalidate (clear) the L2 line at r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step back one L2 line */
beagei r6, Loop_start /* 64-bit: loop while offset >= 0 */
#else
bneid r6, Loop_start /* loop until offset 0 has been cleared */
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* delay slot: step back one L2 line */
#endif
Loop_done:
#endif
rtsd r15, 8 /* return to caller */
nop /* branch delay slot */
.end microblaze_invalidate_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,765 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/iccarm/asm_vectors.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file asm_vectors.s
;
; This file contains the initial vector table for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a Initial version
; 4.2 pkp 06/27/14 Modified return addresses for interrupt
; handlers
; 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
; abort and prefetch abort into DataAbortAddr and
; PrefetchAbortAddr for further use to fix CR#854523
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
MODULE ?asm_vectors
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#include "xparameters.h"
;#include "xtime_l.h"
#define UART_BAUDRATE 115200
IMPORT _prestart
IMPORT __iar_program_start
SECTION .intvec:CODE:NOROOT(2)
PUBLIC _vector_table
IMPORT IRQInterrupt
IMPORT FIQInterrupt
IMPORT SWInterrupt
IMPORT DataAbortInterrupt
IMPORT PrefetchAbortInterrupt
IMPORT DataAbortAddr
IMPORT PrefetchAbortAddr
_vector_table
ARM
; Cortex-A9 exception vector table: one branch per architectural vector slot.
B __iar_program_start ; 0x00 Reset
B Undefined ; 0x04 Undefined instruction
B SVCHandler ; 0x08 Supervisor call (SWI/SVC)
B PrefetchAbortHandler ; 0x0C Prefetch abort
B DataAbortHandler ; 0x10 Data abort
NOP ; Placeholder for address exception vector
B IRQHandler ; 0x18 IRQ
B FIQHandler ; 0x1C FIQ
SECTION .text:CODE:NOROOT(2)
REQUIRE _vector_table
ARM
IRQHandler ; IRQ vector handler
; Saves the AAPCS caller-saved registers, dispatches to the C handler,
; then returns to the interrupted instruction (lr - 4 in IRQ mode).
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
bl IRQInterrupt ; IRQ vector
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
FIQHandler ; FIQ vector handler
; Same shape as the IRQ handler but dispatches to FIQInterrupt.
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
FIQLoop ; label is a dispatch point, not a loop: code falls straight through
bl FIQInterrupt ; FIQ vector
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
Undefined ; Undefined handler
; Restores the scratch registers and restarts the application from _prestart.
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
b _prestart ; restart the system on an undefined instruction
movs pc, lr ; NOTE(review): unreachable after the unconditional branch above
SVCHandler ; SWI handler
; Extracts the SVC immediate from the instruction that trapped (Thumb: 8-bit,
; ARM: 24-bit) and passes control to the C SWInterrupt handler.
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
tst r0, #0x20 ; check the T bit
; NOTE(review): r0 here is the caller's r0, not SPSR — the T-bit test above
; relies on prior state; verify against SPSR if Thumb callers must be supported.
ldrneh r0, [lr,#-2] ; Thumb mode
bicne r0, r0, #0xff00 ; Thumb mode
ldreq r0, [lr,#-4] ; ARM mode
biceq r0, r0, #0xff000000 ; ARM mode
bl SWInterrupt ; SWInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
movs pc, lr ; adjust return
DataAbortHandler ; Data Abort handler
; Records the address of the faulting instruction (lr - 8 in abort mode) in
; DataAbortAddr, calls the C handler, then retries the faulting instruction.
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =DataAbortAddr
sub r1, lr,#8
str r1, [r0] ;Address of instruction causing data abort
bl DataAbortInterrupt ;DataAbortInterrupt :call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #8 ; adjust return
PrefetchAbortHandler ; Prefetch Abort handler
; Records the address of the faulting instruction (lr - 4 in abort mode) in
; PrefetchAbortAddr, calls the C handler, then retries the faulting instruction.
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =PrefetchAbortAddr
sub r1, lr,#4
str r1, [r0] ;Address of instruction causing prefetch abort
bl PrefetchAbortInterrupt ; PrefetchAbortInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,764 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/iccarm/translation_table.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file translation_table.s
;
; This file contains the initialization for the MMU table in RAM
; needed by the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ---- -------- ---------------------------------------------------
; 1.00a ecm 10/20/09 Initial version
; 3.07a sgd 07/05/12 Configuring device address spaces as shareable device
; instead of strongly-ordered.
; 4.2 pkp 09/02/14 modified translation table entries according to address map
; 4.2 pkp 09/11/14 modified translation table entries to resolve compilation
; error for solving CR#822897
; 6.1 pkp 07/11/16 Corrected comments for memory attributes
; 6.8 mus 07/12/2018 Mark DDR memory as inner cacheable, if BSP is built
; with the USE_AMP flag.
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
EXPORT MMUTable
;ARMCC AREA |.mmu_tbl|,CODE,ALIGN=14
; RSEG mmu_tbl:CODE:ROOT (14)
SECTION .mmu_tbl:CODE:ROOT(14)
MMUTable
; Short-descriptor first-level translation table for the Zynq-7000 address map.
; Each table entry occupies one 32-bit word and there are
; 4096 entries, so the entire table takes up 16KB.
; Each entry covers a 1MB section.
; 0x00000000 - 0x3fffffff (DDR Cacheable)
count SETA 0
sect SETA 0
REPT 0x400
#ifndef USE_AMP
DCD sect + 0x15de6 ; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
#else
; AMP build: DDR is inner-cacheable only, so CPU1 (e.g. Linux) keeps coherency.
DCD sect + 0x14de6 ; S=1, TEX=b100 AP=b11, Domain=b1111, C=b0, B=b1
#endif
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0x40000000 - 0x7fffffff (GpAxi0)
count SETA 0
REPT 0x400
DCD sect + 0xc02 ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0x80000000 - 0xbfffffff (GpAxi1)
count SETA 0
REPT 0x400
DCD sect + 0xc02 ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xc0000000 - 0xdfffffff (undef)
count SETA 0
REPT 0x200
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe0000000 - 0xe02fffff (IOP dev)
count SETA 0
REPT 0x3
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe0300000 - 0xe0ffffff (undef/reserved)
count SETA 0
REPT 0xD
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe1000000 - 0xe1ffffff (NAND)
count SETA 0
REPT 0x10
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe2000000 - 0xe3ffffff (NOR)
count SETA 0
REPT 0x20
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe4000000 - 0xe5ffffff (SRAM)
count SETA 0
REPT 0x20
DCD sect + 0xc0e ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe6000000 - 0xf7ffffff (reserved)
count SETA 0
REPT 0x0120
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
; 0xf8f03000 to 0xf8ffffff are reserved but due to granual size of
; 1MB, it is not possible to define separate regions for them
; 0xf8000000 - 0xf8ffffff (APB device regs)
count SETA 0
REPT 0x10
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xf9000000 - 0xfbffffff (reserved)
count SETA 0
REPT 0x30
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xfc000000 - 0xfdffffff (QSPI)
count SETA 0
REPT 0x20
DCD sect + 0xc0a ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xfe000000 - 0xffefffff (reserved)
count SETA 0
REPT 0x1F
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xfff00000 to 0xfffb0000 is reserved but due to granual size of
; 1MB, it is not possible to define separate region for it
; 0xfff00000 to 0xfffb0000 (OCM)
count SETA 0
DCD sect + 0x4c0e ; S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1
sect SETA sect+0x100000
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,408 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/iccarm/boot.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file boot.s
;
; This file contains the initial vector table for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a Initial version
; 4.2 pkp 08/04/14 Removed PEEP board related code which contained
; initialization of uart smc nor and sram
; 5.0 pkp 16/12/14 Modified initialization code to enable scu after
; MMU is enabled and removed incorrect initialization
; of TLB lockdown register to fix CR#830580
; 5.1 pkp 05/13/15 Changed the initialization order so to first invalidate
; caches and TLB, enable MMU and caches, then enable SMP
; bit in ACTLR. L2Cache invalidation and enabling of L2Cache
; is done later.
; 6.0 mus 08/04/16 Added code to detect zynq-7000 base silicon configuration and
; attempt to enable dual core behavior on single cpu zynq-7000s devices
; is prevented from corrupting system behavior.
; 6.6 srm 10/25/17 Added timer configuration using XTime_StartTTCTimer API.
; Now the TTC instance as specified by the user will be
; started.
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
MODULE ?boot
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#include "xparameters.h"
;#include "xtime_l.h"
#define UART_BAUDRATE 115200
PUBLIC _prestart
PUBLIC __iar_program_start
IMPORT _vector_table
IMPORT MMUTable
IMPORT __cmain
IMPORT Xil_ExceptionInit
IMPORT XTime_SetTime
#if defined SLEEP_TIMER_BASEADDR
IMPORT XTime_StartTTCTimer
#endif
PSS_L2CC_BASE_ADDR EQU 0xF8F02000
PSS_SLCR_BASE_ADDR EQU 0xF8000000
RESERVED EQU 0x0fffff00
TblBase EQU MMUTable
LRemap EQU 0xFE00000F ; set the base address of the peripheral block as not shared
L2CCWay EQU (PSS_L2CC_BASE_ADDR + 0x077C) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_INVLD_WAY_OFFSET)
L2CCSync EQU (PSS_L2CC_BASE_ADDR + 0x0730) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_SYNC_OFFSET)
L2CCCrtl EQU (PSS_L2CC_BASE_ADDR + 0x0100) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CNTRL_OFFSET)
L2CCAuxCrtl EQU (PSS_L2CC_BASE_ADDR + 0x0104) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_AUX_CNTRL_OFFSET)
L2CCTAGLatReg EQU (PSS_L2CC_BASE_ADDR + 0x0108) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_TAG_RAM_CNTRL_OFFSET)
L2CCDataLatReg EQU (PSS_L2CC_BASE_ADDR + 0x010C) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_DATA_RAM_CNTRL_OFFSET)
L2CCIntClear EQU (PSS_L2CC_BASE_ADDR + 0x0220) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_IAR_OFFSET)
L2CCIntRaw EQU (PSS_L2CC_BASE_ADDR + 0x021C) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_ISR_OFFSET)
SLCRlockReg EQU (PSS_SLCR_BASE_ADDR + 0x04) ;(PSS_SLCR_BASE_ADDR + XPSS_SLCR_LOCK_OFFSET)
SLCRUnlockReg EQU (PSS_SLCR_BASE_ADDR + 0x08) ;(PSS_SLCR_BASE_ADDR + XPSS_SLCR_UNLOCK_OFFSET)
SLCRL2cRamReg EQU (PSS_SLCR_BASE_ADDR + 0xA1C) ;(PSS_SLCR_BASE_ADDR + XPSS_SLCR_L2C_RAM_OFFSET)
SLCRCPURSTReg EQU (0xF8000000 + 0x244) ;(XPS_SYS_CTRL_BASEADDR + A9_CPU_RST_CTRL_OFFSET)
EFUSEStaus EQU (0xF800D000 + 0x10) ;(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)
/* workaround for simulation not working when L1 D and I caches,MMU and L2 cache enabled - DT568997 */
#if SIM_MODE == 1
CRValMmuCac EQU 00000000000000b ; Disable IDC, and MMU
#else
CRValMmuCac EQU 01000000000101b ; Enable IDC, and MMU
#endif
CRValHiVectorAddr EQU 10000000000000b ; Set the Vector address to high, 0xFFFF0000
L2CCAuxControl EQU 0x72360000 ; Enable all prefetching, Way Size (16 KB) and High Priority for SO and Dev Reads Enable
L2CCControl EQU 0x01 ; Enable L2CC
L2CCTAGLatency EQU 0x0111 ; 7 Cycles of latency for TAG RAM
L2CCDataLatency EQU 0x0121 ; 7 Cycles of latency for DATA RAM
SLCRlockKey EQU 0x767B ; SLCR lock key
SLCRUnlockKey EQU 0xDF0D ; SLCR unlock key
SLCRL2cRamConfig EQU 0x00020202 ; SLCR L2C ram configuration
vector_base EQU _vector_table
FPEXC_EN EQU 0x40000000 ; FPU enable bit, (1 << 30)
SECTION .intvec:CODE:NOROOT(2)
; this initializes the various processor modes
_prestart
__iar_program_start
; Boot entry: parks the wrong CPU core, invalidates caches/TLBs, sets up the
; per-mode stacks, enables MMU + L1 caches, initializes the L2 cache (non-AMP),
; enables the VFP, and finally jumps to the IAR C startup (__cmain).
#if XPAR_CPU_ID==0
; only allow cp0 through
mrc p15,0,r1,c0,c0,5 ; MPIDR: CPU affinity
and r1, r1, #0xf
cmp r1, #0
beq OKToRun
EndlessLoop0 ; not CPU0: park this core in a wfe loop
wfe
b EndlessLoop0
#elif XPAR_CPU_ID==1
; only allow cp1 through
mrc p15,0,r1,c0,c0,5 ; MPIDR: CPU affinity
and r1, r1, #0xf
cmp r1, #1
beq OKToRun
EndlessLoop1 ; not CPU1: park this core in a wfe loop
wfe
b EndlessLoop1
#endif
OKToRun
ldr r0,=EFUSEStaus
ldr r1,[r0] ; Read eFuse to detect zynq silicon configuration
ands r1,r1,#0x80 ; Check whether cpu1 is disabled through eFuse
beq DualCPU
; cpu1 is disabled through eFuse,reset cpu1
ldr r0,=SLCRUnlockReg ; Load SLCR base address base + unlock register
ldr r1,=SLCRUnlockKey ; set unlock key
str r1, [r0] ; Unlock SLCR
ldr r0,=SLCRCPURSTReg
ldr r1,[r0] ; Read CPU Software Reset Control register
orr r1,r1,#0x22 ; set CPU1 reset + clock-stop bits
str r1,[r0] ; Reset CPU1
ldr r0,=SLCRlockReg ; Load SLCR base address base + lock register
ldr r1,=SLCRlockKey ; set lock key
str r1, [r0] ; lock SLCR
DualCPU
mrc p15, 0, r0, c0, c0, 0 ; Get the revision
and r5, r0, #0x00f00000 ; r5 = variant field
and r6, r0, #0x0000000f ; r6 = revision field
orr r6, r6, r5, lsr #20-4 ; r6 = rNpM encoded as 0xNM
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 ; only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 ; read diagnostic register
orrle r10, r10, #1 << 4 ; set bit #4
mcrle p15, 0, r10, c15, c0, 1 ; write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r5, #0x00200000 ; only present in r2p*
mrceq p15, 0, r10, c15, c0, 1 ; read diagnostic register
orreq r10, r10, #1 << 6 ; set bit #6
mcreq p15, 0, r10, c15, c0, 1 ; write diagnostic register
#endif
; set VBAR to the _vector_table address in linker script
ldr r0, =vector_base
mcr p15, 0, r0, c12, c0, 0
;invalidate scu
ldr r7, =0xf8f0000c ; SCU invalidate-all-registers-in-secure-state register
ldr r6, =0xffff
str r6, [r7]
;Invalidate caches and TLBs
mov r0,#0 ; r0 = 0
mcr p15, 0, r0, c8, c7, 0 ; invalidate TLBs
mcr p15, 0, r0, c7, c5, 0 ; invalidate icache
mcr p15, 0, r0, c7, c5, 6 ; Invalidate branch predictor array
bl invalidate_dcache ; invalidate dcache
; Disable MMU, if enabled
mrc p15, 0, r0, c1, c0, 0 ; read CP15 register 1
bic r0, r0, #0x1 ; clear bit 0
mcr p15, 0, r0, c1, c0, 0 ; write value back
#ifdef SHAREABLE_DDR
; Mark the entire DDR memory as shareable
ldr r3, =0x3ff ; 1024 entries to cover 1G DDR
ldr r0, =TblBase ; MMU Table address in memory
ldr r2, =0x15de6 ; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
shareable_loop
str r2, [r0] ; write the entry to MMU table
add r0, r0, #0x4 ; next entry in the table
add r2, r2, #0x100000 ; next section
subs r3, r3, #1
bge shareable_loop ; loop till 1G is covered
#endif
; Set up a dedicated stack pointer for each processor mode.
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the irq stack pointer
and r2, r1, r0 ; clear the mode bits
orr r2, r2, #0x12 ; IRQ mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(IRQ_STACK) ; IRQ stack pointer
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the supervisor stack pointer
and r2, r1, r0
orr r2, r2, #0x13 ; supervisor mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(SVC_STACK) ; Supervisor stack pointer
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Abort stack pointer
and r2, r1, r0
orr r2, r2, #0x17 ; Abort mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(ABT_STACK) ; Abort stack pointer
mrs r0, cpsr ; was cpsr, get the current PSR
mvn r1, #0x1f ; set up the FIQ stack pointer
and r2, r1, r0
orr r2, r2, #0x11 ; FIQ mode
msr cpsr, r2 ; was cpsr
ldr r13,=SFE(FIQ_STACK) ; FIQ stack pointer
mrs r0, cpsr ; was cpsr, get the current PSR
mvn r1, #0x1f ; set up the Undefine stack pointer
and r2, r1, r0
orr r2, r2, #0x1b ; Undefine mode
msr cpsr, r2 ; was cpsr
ldr r13,=SFE(UND_STACK) ; Undefine stack pointer
mrs r0, cpsr ; was cpsr, get the current PSR
mvn r1, #0x1f ; set up the system stack pointer
and r2, r1, r0
orr r2, r2, #0x1f ; SYS mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(CSTACK) ; SYS stack pointer
;set scu enable bit in scu
ldr r7, =0xf8f00000 ; SCU control register
ldr r0, [r7]
orr r0, r0, #0x1
str r0, [r7]
; enable MMU and cache
ldr r0,=TblBase ; Load MMU translation table base
orr r0, r0, #0x5B ; Outer-cacheable, WB
mcr p15, 0, r0, c2, c0, 0 ; TTB0
mvn r0,#0 ; Load MMU domains -- all ones=manager
mcr p15,0,r0,c3,c0,0 ; DACR: all domains manager (no permission checks)
; Enable mmu, icahce and dcache
ldr r0,=CRValMmuCac
mcr p15,0,r0,c1,c0,0 ; Enable cache and MMU
dsb ; dsb allow the MMU to start up
isb ; isb flush prefetch buffer
; Write to ACTLR
mrc p15, 0,r0, c1, c0, 1 ; Read ACTLR
orr r0, r0, #(0x01 << 6) ; SMP bit
orr r0, r0, #(0x01 ) ; Cache/TLB maintenance broadcast
mcr p15, 0,r0, c1, c0, 1 ; Write ACTLR
; Invalidate L2 Cache and initialize L2 Cache
; For AMP, assume running on CPU1. Don't initialize L2 Cache (up to Linux)
#if USE_AMP!=1
ldr r0,=L2CCCrtl ; Load L2CC base address base + control register
mov r1, #0 ; force the disable bit
str r1, [r0] ; disable the L2 Caches
ldr r0,=L2CCAuxCrtl ; Load L2CC base address base + Aux control register
ldr r1,[r0] ; read the register
ldr r2,=L2CCAuxControl ; set the default bits
orr r1,r1,r2
str r1, [r0] ; store the Aux Control Register
ldr r0,=L2CCTAGLatReg ; Load L2CC base address base + TAG Latency address
ldr r1,=L2CCTAGLatency ; set the latencies for the TAG
str r1, [r0] ; store the TAG Latency register Register
ldr r0,=L2CCDataLatReg ; Load L2CC base address base + Data Latency address
ldr r1,=L2CCDataLatency ; set the latencies for the Data
str r1, [r0] ; store the Data Latency register Register
ldr r0,=L2CCWay ; Load L2CC base address base + way register
ldr r2, =0xFFFF
str r2, [r0] ; force invalidate
ldr r0,=L2CCSync ; need to poll 0x730, PSS_L2CC_CACHE_SYNC_OFFSET
; Load L2CC base address base + sync register
; poll for completion
Sync
ldr r1, [r0]
cmp r1, #0
bne Sync
ldr r0,=L2CCIntRaw ; clear pending interrupts
ldr r1,[r0]
ldr r0,=L2CCIntClear
str r1,[r0]
ldr r0,=SLCRUnlockReg ; Load SLCR base address base + unlock register
ldr r1,=SLCRUnlockKey ; set unlock key
str r1, [r0] ; Unlock SLCR
ldr r0,=SLCRL2cRamReg ; Load SLCR base address base + l2c Ram Control register
str r1, [r0] ; store the L2c Ram Control Register
ldr r0,=SLCRlockReg ; Load SLCR base address base + lock register
ldr r1,=SLCRlockKey ; set lock key
str r1, [r0] ; lock SLCR
ldr r0,=L2CCCrtl ; Load L2CC base address base + control register
ldr r1,[r0] ; read the register
mov r2, #L2CCControl ; set the enable bit
orr r1,r1,r2
str r1, [r0] ; enable the L2 Caches
#endif
mov r0, r0
mrc p15, 0, r1, c1, c0, 2 ; read cp access control register (CACR) into r1
orr r1, r1, #(0xf << 20) ; enable full access for p10 & p11
mcr p15, 0, r1, c1, c0, 2 ; write back into CACR
; enable vfp
fmrx r1, FPEXC ; read the exception register
orr r1,r1, #FPEXC_EN ; set VFP enable bit, leave the others in orig state
fmxr FPEXC, r1 ; write back the exception register
mrc p15, 0, r0, c1, c0, 0 ; flow prediction enable
orr r0, r0, #(0x01 << 11) ; #0x8000
mcr p15,0,r0,c1,c0,0
mrc p15, 0, r0, c1, c0, 1 ; read Auxiliary Control Register
orr r0, r0, #(0x1 << 2) ; enable Dside prefetch
orr r0, r0, #(0x1 << 1) ; enable L2 prefetch
mcr p15, 0, r0, c1, c0, 1 ; write Auxiliary Control Register
; Initialize the vector table
;bl Xil_ExceptionInit
; Clear cp15 regs with unknown reset values
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 ; DFSR
mcr p15, 0, r0, c5, c0, 1 ; IFSR
mcr p15, 0, r0, c6, c0, 0 ; DFAR
mcr p15, 0, r0, c6, c0, 2 ; IFAR
mcr p15, 0, r0, c9, c13, 2 ; PMXEVCNTR
mcr p15, 0, r0, c13, c0, 2 ; TPIDRURW
mcr p15, 0, r0, c13, c0, 3 ; TPIDRURO
; Reset and start Cycle Counter
mov r2, #0x80000000 ; clear overflow
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd ; D, C, E
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 ; enable cycle counter
mcr p15, 0, r2, c9, c12, 1
; Reset and start Global Timer
mov r0, #0x0
mov r1, #0x0
bl XTime_SetTime
; Reset and start Triple Timer counter
#if defined SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
; make sure argc and argv are valid
mov r0, #0
mov r1, #0
b __cmain ; jump to C startup code
and r0, r0, r0 ; no op
Ldone b Ldone ; Paranoia: we should never get here
; *************************************************************************
; *
; * invalidate_dcache - invalidate the entire d-cache by set/way
; *
; * Note: for Cortex-A9, there is no cp instruction for invalidating
; * the whole D-cache. Need to invalidate each line.
; *
; *************************************************************************
invalidate_dcache
; Invalidate every data/unified cache level reported by CLIDR, line by line,
; using set/way operations (DCISW). Clobbers r0-r5, r7, r9-r11, flags.
mrc p15, 1, r0, c0, c0, 1 ; read CLIDR
ands r3, r0, #0x7000000 ; r3 = Level of Coherency field (bits 26:24)
mov r3, r3, lsr #23 ; cache level value (naturally aligned)
beq finished ; no caches to invalidate
mov r10, #0 ; start with level 0
loop1
add r2, r10, r10, lsr #1 ; work out 3xcachelevel
mov r1, r0, lsr r2 ; bottom 3 bits are the Cache type for this level
and r1, r1, #7 ; get those 3 bits alone
cmp r1, #2
blt skip ; no cache or only instruction cache at this level
mcr p15, 2, r10, c0, c0, 0 ; write the Cache Size selection register
isb ; isb to sync the change to the CacheSizeID reg
mrc p15, 1, r1, c0, c0, 0 ; reads current Cache Size ID register
and r2, r1, #7 ; extract the line length field
add r2, r2, #4 ; add 4 for the line length offset (log2 16 bytes)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 ; r4 is the max number on the way size (right aligned)
clz r5, r4 ; r5 is the bit position of the way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 ; r7 is the max number of the index size (right aligned)
loop2
mov r9, r4 ; r9 working copy of the max way size (right aligned)
loop3
orr r11, r10, r9, lsl r5 ; factor in the way number and cache number into r11
orr r11, r11, r7, lsl r2 ; factor in the index number
mcr p15, 0, r11, c7, c6, 2 ; invalidate by set/way
subs r9, r9, #1 ; decrement the way number
bge loop3
subs r7, r7, #1 ; decrement the index
bge loop2
skip
add r10, r10, #2 ; increment the cache number
cmp r3, r10
bgt loop1
finished
mov r10, #0 ; switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 ; select current cache level in cssr
dsb ; ensure all invalidations completed
isb ; flush the pipeline before returning
bx lr
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 17,391 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/boot.S | /******************************************************************************
* Copyright (c) 2010 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup a9_boot_code Cortex A9 Processor Boot Code
* @{
* <h2> boot.S </h2>
* The boot code performs minimum configuration which is required for an
* application to run starting from processor's reset state. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function.
*
* 1. Program vector table base for exception handling
* 2. Invalidate instruction cache, data cache and TLBs
* 3. Program stack pointer for various modes (IRQ, FIQ, supervisor, undefine,
* abort, system)
* 4. Configure MMU with short descriptor translation table format and program
* base address of translation table
* 5. Enable data cache, instruction cache and MMU
* 6. Enable Floating point unit
* 7. Transfer control to _start which clears BSS sections, initializes
* global timer and runs global constructor before jumping to main
* application
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.06a sgd 05/15/12 Updated L2CC Auxiliary and Tag RAM Latency control
* register settings.
* 3.06a asa 06/17/12 Modified the TTBR settings and L2 Cache auxiliary
* register settings.
* 3.07a asa 07/16/12 Modified the L2 Cache controller settings to improve
* performance. Changed the property of the ".boot"
* section.
* 3.07a sgd 08/21/12 Modified the L2 Cache controller and cp15 Aux Control
* Register settings
* 3.09a sgd 02/06/13 Updated SLCR l2c Ram Control register to a
* value of 0x00020202. Fix for CR 697094 (SI#687034).
* 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
* 'xil_errata.h' for errata description
* 4.2 pkp 06/19/14 Enabled asynchronous abort exception
* 5.0 pkp 16/15/14 Modified initialization code to enable scu after
* MMU is enabled
* 5.1 pkp 05/13/15 Changed the initialization order so to first invalidate
* caches and TLB, enable MMU and caches, then enable SMP
* bit in ACTLR. L2Cache invalidation and enabling of L2Cache
* is done later.
* 5.4 asa 12/6/15 Added code to initialize SPSR for all relevant modes.
* 6.0 mus 08/04/16 Added code to detect zynq-7000 base silicon configuration and
* attempt to enable dual core behavior on single cpu zynq-7000s
* devices is prevented from corrupting system behavior.
* 6.0 mus 08/24/16 Check CPU core before putting cpu1 to reset for single core
* zynq-7000s devices
*
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
#include "xil_errata.h"
.globl MMUTable
.global _prestart
.global _boot
.global __stack
.global __irq_stack
.global __supervisor_stack
.global __abort_stack
.global __fiq_stack
.global __undef_stack
.global _vector_table
.set PSS_L2CC_BASE_ADDR, 0xF8F02000 /* PL310 L2 cache controller register base */
.set PSS_SLCR_BASE_ADDR, 0xF8000000 /* System Level Control Registers (SLCR) base */
.set RESERVED, 0x0fffff00 /* NOTE(review): not referenced in this file */
.set TblBase , MMUTable /* first-level MMU translation table (MMUTable, 16KB) */
.set LRemap, 0xFE00000F /* set the base address of the peripheral block as not shared */
.set L2CCWay, (PSS_L2CC_BASE_ADDR + 0x077C) /*(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_INVLD_WAY_OFFSET)*/
.set L2CCSync, (PSS_L2CC_BASE_ADDR + 0x0730) /*(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_SYNC_OFFSET)*/
.set L2CCCrtl, (PSS_L2CC_BASE_ADDR + 0x0100) /*(PSS_L2CC_BASE_ADDR + PSS_L2CC_CNTRL_OFFSET)*/
.set L2CCAuxCrtl, (PSS_L2CC_BASE_ADDR + 0x0104) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_AUX_CNTRL_OFFSET)*/
.set L2CCTAGLatReg, (PSS_L2CC_BASE_ADDR + 0x0108) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_TAG_RAM_CNTRL_OFFSET)*/
.set L2CCDataLatReg, (PSS_L2CC_BASE_ADDR + 0x010C) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_DATA_RAM_CNTRL_OFFSET)*/
.set L2CCIntClear, (PSS_L2CC_BASE_ADDR + 0x0220) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_IAR_OFFSET)*/
.set L2CCIntRaw, (PSS_L2CC_BASE_ADDR + 0x021C) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_ISR_OFFSET)*/
.set SLCRlockReg, (PSS_SLCR_BASE_ADDR + 0x04) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_LOCK_OFFSET)*/
.set SLCRUnlockReg, (PSS_SLCR_BASE_ADDR + 0x08) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_UNLOCK_OFFSET)*/
.set SLCRL2cRamReg, (PSS_SLCR_BASE_ADDR + 0xA1C) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_L2C_RAM_OFFSET)*/
.set SLCRCPURSTReg, (0xF8000000 + 0x244) /*(XPS_SYS_CTRL_BASEADDR + A9_CPU_RST_CTRL_OFFSET)*/
.set EFUSEStaus, (0xF800D000 + 0x10) /*(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)*/
/* workaround for simulation not working when L1 D and I caches,MMU and L2 cache enabled - DT568997 */
.if SIM_MODE == 1
.set CRValMmuCac, 0b00000000000000 /* Disable IDC, and MMU */
.else
.set CRValMmuCac, 0b01000000000101 /* Enable IDC, and MMU */
.endif
.set CRValHiVectorAddr, 0b10000000000000 /* Set the Vector address to high, 0xFFFF0000 */
.set L2CCAuxControl, 0x72360000 /* Enable all prefetching, Cache replacement policy, Parity enable,
                                   Event monitor bus enable and Way Size (64 KB) */
.set L2CCControl, 0x01 /* Enable L2CC */
.set L2CCTAGLatency, 0x0111 /* latency for TAG RAM */
.set L2CCDataLatency, 0x0121 /* latency for DATA RAM */
.set SLCRlockKey, 0x767B /* SLCR lock key */
.set SLCRUnlockKey, 0xDF0D /* SLCR unlock key */
.set SLCRL2cRamConfig, 0x00020202 /* SLCR L2C ram configuration */
/* Stack Pointer locations for boot code (symbols defined in the linker script) */
.set Undef_stack, __undef_stack
.set FIQ_stack, __fiq_stack
.set Abort_stack, __abort_stack
.set SPV_stack, __supervisor_stack
.set IRQ_stack, __irq_stack
.set SYS_stack, __stack
.set vector_base, _vector_table
.set FPEXC_EN, 0x40000000 /* FPU enable bit, (1 << 30) */
.section .boot,"ax"
/* this initializes the various processor modes */
_prestart:
_boot:
/* Park the core whose MPIDR CPU number does not match XPAR_CPU_ID in a wfe
 * loop. On single-core zynq-7000s silicon (eFuse status bit 7 set), CPU1 is
 * additionally held in reset through the SLCR so it cannot corrupt the system.
 */
#if XPAR_CPU_ID==0
/* only allow cpu0 through */
	mrc	p15,0,r1,c0,c0,5		/* read MPIDR; low 4 bits = CPU number */
	and	r1, r1, #0xf
	cmp	r1, #0
	beq	CheckEFUSE
EndlessLoop0:
	wfe
	b	EndlessLoop0
CheckEFUSE:
	ldr	r0,=EFUSEStaus
	ldr	r1,[r0]				/* Read eFuse setting */
	ands	r1,r1,#0x80			/* Check whether device is having single core */
	beq	OKToRun
/* single core device, reset cpu1 */
	ldr	r0,=SLCRUnlockReg		/* Load SLCR base address base + unlock register */
	ldr	r1,=SLCRUnlockKey		/* set unlock key */
	str	r1, [r0]			/* Unlock SLCR */
	ldr	r0,=SLCRCPURSTReg
	ldr	r1,[r0]				/* Read CPU Software Reset Control register */
	orr	r1,r1,#0x22			/* assert CPU1 reset (bit 1) and CPU1 clock stop (bit 5) */
	str	r1,[r0]				/* Reset CPU1 */
	ldr	r0,=SLCRlockReg			/* Load SLCR base address base + lock register */
	ldr	r1,=SLCRlockKey			/* set lock key */
	str	r1, [r0]			/* lock SLCR */
#elif XPAR_CPU_ID==1
/* only allow cpu1 through */
	mrc	p15,0,r1,c0,c0,5		/* read MPIDR; low 4 bits = CPU number */
	and	r1, r1, #0xf
	cmp	r1, #1
	beq	CheckEFUSE1
	b	EndlessLoop1
CheckEFUSE1:
	ldr	r0,=EFUSEStaus
	ldr	r1,[r0]				/* Read eFuse setting */
	ands	r1,r1,#0x80			/* Check whether device is having single core */
	beq	OKToRun
EndlessLoop1:
	wfe
	b	EndlessLoop1
#endif
OKToRun:
	mrc	p15, 0, r0, c0, c0, 0		/* Get the revision */
	and	r5, r0, #0x00f00000		/* isolate the variant field */
	and	r6, r0, #0x0000000f		/* isolate the revision field */
	orr	r6, r6, r5, lsr #20-4		/* r6 = 0xVR (variant:revision) for errata checks */
#ifdef CONFIG_ARM_ERRATA_742230
	cmp	r6, #0x22			/* only present up to r2p2 */
	mrcle	p15, 0, r10, c15, c0, 1		/* read diagnostic register */
	orrle	r10, r10, #1 << 4		/* set bit #4 */
	mcrle	p15, 0, r10, c15, c0, 1		/* write diagnostic register */
#endif
#ifdef CONFIG_ARM_ERRATA_743622
	teq	r5, #0x00200000			/* only present in r2p* */
	mrceq	p15, 0, r10, c15, c0, 1		/* read diagnostic register */
	orreq	r10, r10, #1 << 6		/* set bit #6 */
	mcreq	p15, 0, r10, c15, c0, 1		/* write diagnostic register */
#endif
/* set VBAR to the _vector_table address in linker script */
	ldr	r0, =vector_base
	mcr	p15, 0, r0, c12, c0, 0
/*invalidate scu*/
	ldr	r7, =0xf8f0000c			/* SCU Invalidate All Registers in Secure State */
	ldr	r6, =0xffff			/* all ways, all CPUs */
	str	r6, [r7]
/* Invalidate caches and TLBs */
	mov	r0,#0				/* r0 = 0 */
	mcr	p15, 0, r0, c8, c7, 0		/* invalidate TLBs */
	mcr	p15, 0, r0, c7, c5, 0		/* invalidate icache */
	mcr	p15, 0, r0, c7, c5, 6		/* Invalidate branch predictor array */
	bl	invalidate_dcache		/* invalidate dcache */
/* Disable MMU, if enabled */
	mrc	p15, 0, r0, c1, c0, 0		/* read CP15 register 1 */
	bic	r0, r0, #0x1			/* clear bit 0 */
	mcr	p15, 0, r0, c1, c0, 0		/* write value back */
#ifdef SHAREABLE_DDR
/* Mark the entire DDR memory as shareable */
	ldr	r3, =0x3ff			/* 1024 entries to cover 1G DDR */
	ldr	r0, =TblBase			/* MMU Table address in memory */
	ldr	r2, =0x15de6			/* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
shareable_loop:
	str	r2, [r0]			/* write the entry to MMU table */
	add	r0, r0, #0x4			/* next entry in the table */
	add	r2, r2, #0x100000		/* next section */
	subs	r3, r3, #1
	bge	shareable_loop			/* loop till 1G is covered */
#endif
/* Initialize a banked stack pointer and a little-endian SPSR for each
 * processor mode; ends in SYS mode, which runs the rest of the startup. */
	mrs	r0, cpsr			/* get the current PSR */
	mvn	r1, #0x1f			/* set up the irq stack pointer */
	and	r2, r1, r0
	orr	r2, r2, #0x12			/* IRQ mode */
	msr	cpsr, r2
	ldr	r13,=IRQ_stack			/* IRQ stack pointer */
	bic	r2, r2, #(0x1 << 9)		/* Set EE bit to little-endian */
	msr	spsr_fsxc,r2
	mrs	r0, cpsr			/* get the current PSR */
	mvn	r1, #0x1f			/* set up the supervisor stack pointer */
	and	r2, r1, r0
	orr	r2, r2, #0x13			/* supervisor mode */
	msr	cpsr, r2
	ldr	r13,=SPV_stack			/* Supervisor stack pointer */
	bic	r2, r2, #(0x1 << 9)		/* Set EE bit to little-endian */
	msr	spsr_fsxc,r2
	mrs	r0, cpsr			/* get the current PSR */
	mvn	r1, #0x1f			/* set up the Abort  stack pointer */
	and	r2, r1, r0
	orr	r2, r2, #0x17			/* Abort mode */
	msr	cpsr, r2
	ldr	r13,=Abort_stack		/* Abort stack pointer */
	bic	r2, r2, #(0x1 << 9)		/* Set EE bit to little-endian */
	msr	spsr_fsxc,r2
	mrs	r0, cpsr			/* get the current PSR */
	mvn	r1, #0x1f			/* set up the FIQ stack pointer */
	and	r2, r1, r0
	orr	r2, r2, #0x11			/* FIQ mode */
	msr	cpsr, r2
	ldr	r13,=FIQ_stack			/* FIQ stack pointer */
	bic	r2, r2, #(0x1 << 9)		/* Set EE bit to little-endian */
	msr	spsr_fsxc,r2
	mrs	r0, cpsr			/* get the current PSR */
	mvn	r1, #0x1f			/* set up the Undefine stack pointer */
	and	r2, r1, r0
	orr	r2, r2, #0x1b			/* Undefine mode */
	msr	cpsr, r2
	ldr	r13,=Undef_stack		/* Undefine stack pointer */
	bic	r2, r2, #(0x1 << 9)		/* Set EE bit to little-endian */
	msr	spsr_fsxc,r2
	mrs	r0, cpsr			/* get the current PSR */
	mvn	r1, #0x1f			/* set up the system stack pointer */
	and	r2, r1, r0
	orr	r2, r2, #0x1F			/* SYS mode */
	msr	cpsr, r2
	ldr	r13,=SYS_stack			/* SYS stack pointer */
/*set scu enable bit in scu*/
	ldr	r7, =0xf8f00000			/* SCU Control Register */
	ldr	r0, [r7]
	orr	r0, r0, #0x1
	str	r0, [r7]
/* enable MMU and cache */
	ldr	r0,=TblBase			/* Load MMU translation table base */
	orr	r0, r0, #0x5B			/* Outer-cacheable, WB */
	mcr	15, 0, r0, c2, c0, 0		/* TTB0 */
	mvn	r0,#0				/* Load MMU domains -- all ones=manager */
	mcr	p15,0,r0,c3,c0,0
/* Enable mmu, icahce and dcache */
	ldr	r0,=CRValMmuCac
	mcr	p15,0,r0,c1,c0,0		/* Enable cache and MMU */
	dsb					/* dsb	allow the MMU to start up */
	isb					/* isb	flush prefetch buffer */
/* Write to ACTLR (done after MMU/caches are on; see rev-history note for 5.0/5.1) */
	mrc	p15, 0, r0, c1, c0, 1		/* Read ACTLR*/
	orr	r0, r0, #(0x01 << 6)		/* set SMP bit */
	orr	r0, r0, #(0x01 )		/* Cache/TLB maintenance broadcast */
	mcr	p15, 0, r0, c1, c0, 1		/* Write ACTLR*/
/* Invalidate L2 Cache and enable L2 Cache*/
/* For AMP, assume running on CPU1. Don't initialize L2 Cache (up to Linux) */
#if USE_AMP!=1
	ldr	r0,=L2CCCrtl			/* Load L2CC base address base + control register */
	mov	r1, #0				/* force the disable bit */
	str	r1, [r0]			/* disable the L2 Caches */
	ldr	r0,=L2CCAuxCrtl			/* Load L2CC base address base + Aux control register */
	ldr	r1,[r0]				/* read the register */
	ldr	r2,=L2CCAuxControl		/* set the default bits */
	orr	r1,r1,r2
	str	r1, [r0]			/* store the Aux Control Register */
	ldr	r0,=L2CCTAGLatReg		/* Load L2CC base address base + TAG Latency address */
	ldr	r1,=L2CCTAGLatency		/* set the latencies for the TAG*/
	str	r1, [r0]			/* store the TAG Latency register Register */
	ldr	r0,=L2CCDataLatReg		/* Load L2CC base address base + Data Latency address */
	ldr	r1,=L2CCDataLatency		/* set the latencies for the Data*/
	str	r1, [r0]			/* store the Data Latency register Register */
	ldr	r0,=L2CCWay			/* Load L2CC base address base + way register*/
	ldr	r2, =0xFFFF
	str	r2, [r0]			/* force invalidate */
	ldr	r0,=L2CCSync			/* need to poll 0x730, PSS_L2CC_CACHE_SYNC_OFFSET */
						/* Load L2CC base address base + sync register*/
/* poll for completion */
Sync:	ldr	r1, [r0]
	cmp	r1, #0
	bne	Sync
	ldr	r0,=L2CCIntRaw			/* clear pending interrupts */
	ldr	r1,[r0]
	ldr	r0,=L2CCIntClear
	str	r1,[r0]
	ldr	r0,=SLCRUnlockReg		/* Load SLCR base address base + unlock register */
	ldr	r1,=SLCRUnlockKey		/* set unlock key */
	str	r1, [r0]			/* Unlock SLCR */
	ldr	r0,=SLCRL2cRamReg		/* Load SLCR base address base + l2c Ram Control register */
	ldr	r1,=SLCRL2cRamConfig		/* set the configuration value */
	str	r1, [r0]			/* store the L2c Ram Control Register */
	ldr	r0,=SLCRlockReg			/* Load SLCR base address base + lock register */
	ldr	r1,=SLCRlockKey			/* set lock key */
	str	r1, [r0]			/* lock SLCR */
	ldr	r0,=L2CCCrtl			/* Load L2CC base address base + control register */
	ldr	r1,[r0]				/* read the register */
	mov	r2, #L2CCControl		/* set the enable bit */
	orr	r1,r1,r2
	str	r1, [r0]			/* enable the L2 Caches */
#endif
	mov	r0, r0				/* no-op */
	mrc	p15, 0, r1, c1, c0, 2		/* read cp access control register (CACR) into r1 */
	orr	r1, r1, #(0xf << 20)		/* enable full access for p10 & p11 */
	mcr	p15, 0, r1, c1, c0, 2		/* write back into CACR */
/* enable vfp */
	fmrx	r1, FPEXC			/* read the exception register */
	orr	r1,r1, #FPEXC_EN		/* set VFP enable bit, leave the others in orig state */
	fmxr	FPEXC, r1			/* write back the exception register */
	mrc	p15,0,r0,c1,c0,0		/* flow prediction enable */
	orr	r0, r0, #(0x01 << 11)		/* SCTLR.Z (bit 11) = 0x800: program flow prediction */
	mcr	p15,0,r0,c1,c0,0
	mrc	p15,0,r0,c1,c0,1		/* read Auxiliary Control Register */
	orr	r0, r0, #(0x1 << 2)		/* enable Dside prefetch */
	orr	r0, r0, #(0x1 << 1)		/* enable L2 Prefetch hint */
	mcr	p15,0,r0,c1,c0,1		/* write Auxiliary Control Register */
	mrs	r0, cpsr			/* get the current PSR */
	bic	r0, r0, #0x100			/* enable asynchronous abort exception (clear the A bit) */
	msr	cpsr_xsf, r0
	b	_start				/* jump to C startup code */
	and	r0, r0, r0			/* no op */
.Ldone:	b	.Ldone				/* Paranoia: we should never get here */
/*
*************************************************************************
*
* invalidate_dcache - invalidate the entire d-cache by set/way
*
* Note: for Cortex-A9, there is no cp instruction for invalidating
* the whole D-cache. Need to invalidate each line.
*
*************************************************************************
*/
/* Walks every data/unified cache level up to the Level of Coherency and
 * invalidates each line by set/way. Clobbers r0-r5, r7, r9-r11 and flags. */
invalidate_dcache:
	mrc	p15, 1, r0, c0, c0, 1		/* read CLIDR */
	ands	r3, r0, #0x7000000		/* isolate the Level of Coherency field */
	mov	r3, r3, lsr #23			/* cache level value (naturally aligned) */
	beq	finished			/* LoC == 0: nothing to invalidate */
	mov	r10, #0				/* start with level 0 */
loop1:
	add	r2, r10, r10, lsr #1		/* work out 3xcachelevel */
	mov	r1, r0, lsr r2			/* bottom 3 bits are the Cache type for this level */
	and	r1, r1, #7			/* get those 3 bits alone */
	cmp	r1, #2
	blt	skip				/* no cache or only instruction cache at this level */
	mcr	p15, 2, r10, c0, c0, 0		/* write the Cache Size selection register */
	isb					/* isb to sync the change to the CacheSizeID reg */
	mrc	p15, 1, r1, c0, c0, 0		/* reads current Cache Size ID register */
	and	r2, r1, #7			/* extract the line length field */
	add	r2, r2, #4			/* add 4 for the line length offset (log2 16 bytes) */
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		/* r4 is the max number on the way size (right aligned) */
	clz	r5, r4				/* r5 is the bit position of the way size increment */
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		/* r7 is the max number of the index size (right aligned) */
loop2:
	mov	r9, r4				/* r9 working copy of the max way size (right aligned) */
loop3:
	orr	r11, r10, r9, lsl r5		/* factor in the way number and cache number into r11 */
	orr	r11, r11, r7, lsl r2		/* factor in the index number */
	mcr	p15, 0, r11, c7, c6, 2		/* invalidate by set/way */
	subs	r9, r9, #1			/* decrement the way number */
	bge	loop3
	subs	r7, r7, #1			/* decrement the index */
	bge	loop2
skip:
	add	r10, r10, #2			/* increment the cache number */
	cmp	r3, r10
	bgt	loop1
finished:
	mov	r10, #0				/* switch back to cache level 0 */
	mcr	p15, 2, r10, c0, c0, 0		/* select current cache level in cssr */
	dsb
	isb
	bx	lr
.end
/**
* @} End of "addtogroup a9_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,892 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A9 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Save lr when profiling is enabled
* 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
* 'xil_errata.h' for errata description
* 4.00a pkp 22/01/14 Modified return addresses for interrupt
* handlers (DataAbortHandler and SVCHandler)
* to fix CR#767251
* 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
* abort and prefetch abort into DataAbortAddr and
* PrefetchAbortAddr for further use to fix CR#854523
* 5.4 pkp 12/03/15 Added handler for undefined exception
* 6.8 mus 04/27/18 Removed __ARM_NEON__ flag definition. Now,
* saving/restoring of of HW floating point register
* would be done through newly introduced flag
* FPU_HARD_FLOAT_ABI_ENABLED. This new flag will be
* configured based on the -mfpu-abi option in extra
* compiler flags.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xil_errata.h"
#include "bspconfig.h"
.org 0
.text
.globl _vector_table
.section .vectors
/* ARMv7-A exception vector table; VBAR is pointed here by boot code. */
_vector_table:
	B	_boot				/* 0x00: reset */
	B	Undefined			/* 0x04: undefined instruction */
	B	SVCHandler			/* 0x08: software interrupt (SVC/SWI) */
	B	PrefetchAbortHandler		/* 0x0C: prefetch abort */
	B	DataAbortHandler		/* 0x10: data abort */
	NOP					/* Placeholder for address exception vector*/
	B	IRQHandler			/* 0x18: IRQ */
	B	FIQHandler			/* 0x1C: FIQ */
IRQHandler:					/* IRQ vector handler */
	stmdb	sp!,{r0-r3,r12,lr}		/* state save from compiled code*/
#if FPU_HARD_FLOAT_ABI_ENABLED
/* hard-float ABI: also save caller-saved VFP state (d16-d31 needs a D32 FPU) */
	vpush	{d0-d7}
	vpush	{d16-d31}
	vmrs	r1, FPSCR
	push	{r1}
	vmrs	r1, FPEXC
	push	{r1}
#endif
#ifdef PROFILING
	ldr	r2, =prof_pc			/* record the interrupted PC for the profiler */
	subs	r3, lr, #0
	str	r3, [r2]
#endif
	bl	IRQInterrupt			/* IRQ vector */
#if FPU_HARD_FLOAT_ABI_ENABLED
	pop	{r1}
	vmsr	FPEXC, r1
	pop	{r1}
	vmsr	FPSCR, r1
	vpop	{d16-d31}
	vpop	{d0-d7}
#endif
	ldmia	sp!,{r0-r3,r12,lr}		/* state restore from compiled code */
	subs	pc, lr, #4			/* adjust return (IRQ lr points past the interrupted insn) */
FIQHandler:					/* FIQ vector handler */
	stmdb	sp!,{r0-r3,r12,lr}		/* state save from compiled code */
#if FPU_HARD_FLOAT_ABI_ENABLED
/* hard-float ABI: also save caller-saved VFP state (d16-d31 needs a D32 FPU) */
	vpush	{d0-d7}
	vpush	{d16-d31}
	vmrs	r1, FPSCR
	push	{r1}
	vmrs	r1, FPEXC
	push	{r1}
#endif
FIQLoop:					/* NOTE(review): label appears vestigial; nothing branches to it in this file */
	bl	FIQInterrupt			/* FIQ vector */
#if FPU_HARD_FLOAT_ABI_ENABLED
	pop	{r1}
	vmsr	FPEXC, r1
	pop	{r1}
	vmsr	FPSCR, r1
	vpop	{d16-d31}
	vpop	{d0-d7}
#endif
	ldmia	sp!,{r0-r3,r12,lr}		/* state restore from compiled code */
	subs	pc, lr, #4			/* adjust return */
Undefined:					/* Undefined handler */
	stmdb	sp!,{r0-r3,r12,lr}		/* state save from compiled code */
	ldr	r0, =UndefinedExceptionAddr
	sub	r1, lr, #4			/* faulting instruction is at lr-4 (ARM state) */
	str	r1, [r0]			/* Store address of instruction causing undefined exception */
	bl	UndefinedException		/* UndefinedException: call C function here */
	ldmia	sp!,{r0-r3,r12,lr}		/* state restore from compiled code */
	movs	pc, lr				/* return past the undefined instruction, restoring CPSR */
/* SVC/SWI handler: extracts the SWI immediate into r0 and calls SWInterrupt.
 * The caller's ARM/Thumb state is read from SPSR_svc (the banked copy of the
 * caller's CPSR); the original code tested r0 here, which at this point still
 * held the caller's arbitrary r0 value, so the ARM/Thumb extraction path was
 * chosen at random. */
SVCHandler:					/* SWI handler */
	stmdb	sp!,{r0-r3,r12,lr}		/* state save from compiled code */
	mrs	r0, spsr			/* caller's PSR; bit 5 (T) = Thumb state */
	tst	r0, #0x20			/* check the T bit */
	ldrneh	r0, [lr,#-2]			/* Thumb mode: 16-bit SVC encoding */
	bicne	r0, r0, #0xff00			/* Thumb mode: keep 8-bit immediate */
	ldreq	r0, [lr,#-4]			/* ARM mode: 32-bit SVC encoding */
	biceq	r0, r0, #0xff000000		/* ARM mode: keep 24-bit immediate */
	bl	SWInterrupt			/* SWInterrupt: call C function here */
	ldmia	sp!,{r0-r3,r12,lr}		/* state restore from compiled code */
	movs	pc, lr				/*return to the next instruction after the SWI instruction */
DataAbortHandler:				/* Data Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
	dsb					/* errata 775420 workaround */
#endif
	stmdb	sp!,{r0-r3,r12,lr}		/* state save from compiled code */
	ldr	r0, =DataAbortAddr
	sub	r1, lr, #8			/* aborted instruction is at lr-8 */
	str	r1, [r0]			/* Stores instruction causing data abort */
	bl	DataAbortInterrupt		/*DataAbortInterrupt :call C function here */
	ldmia	sp!,{r0-r3,r12,lr}		/* state restore from compiled code */
	subs	pc, lr, #8			/* points to the instruction that caused the Data Abort exception */
PrefetchAbortHandler:				/* Prefetch Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
	dsb					/* errata 775420 workaround */
#endif
	stmdb	sp!,{r0-r3,r12,lr}		/* state save from compiled code */
	ldr	r0, =PrefetchAbortAddr
	sub	r1, lr, #4			/* aborted instruction is at lr-4 */
	str	r1, [r0]			/* Stores instruction causing prefetch abort */
	bl	PrefetchAbortInterrupt		/* PrefetchAbortInterrupt: call C function here */
	ldmia	sp!,{r0-r3,r12,lr}		/* state restore from compiled code */
	subs	pc, lr, #4			/* points to the instruction that caused the Prefetch Abort exception */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,023 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/translation_table.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a9_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A9. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq architecture. It
* utilizes short descriptor translation table format with each section defining
* 1MB of memory.
*
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------------|-------------------------|-----------------------------------|
*| DDR | 0x00000000 - 0x3FFFFFFF | Normal write-back Cacheable |
*| PL | 0x40000000 - 0xBFFFFFFF | Strongly Ordered |
*| Reserved | 0xC0000000 - 0xDFFFFFFF | Unassigned |
*| Memory mapped devices | 0xE0000000 - 0xE02FFFFF | Device Memory |
*| Reserved | 0xE0300000 - 0xE0FFFFFF | Unassigned |
*| NAND, NOR | 0xE1000000 - 0xE3FFFFFF | Device memory |
*| SRAM | 0xE4000000 - 0xE5FFFFFF | Normal write-back Cacheable |
*| Reserved | 0xE6000000 - 0xF7FFFFFF | Unassigned |
*| AMBA APB Peripherals | 0xF8000000 - 0xF8FFFFFF | Device Memory |
*| Reserved | 0xF9000000 - 0xFBFFFFFF | Unassigned |
*| Linear QSPI - XIP | 0xFC000000 - 0xFDFFFFFF | Normal write-through cacheable |
*| Reserved | 0xFE000000 - 0xFFEFFFFF | Unassigned |
*| OCM | 0xFFF00000 - 0xFFFFFFFF | Normal inner write-back cacheable |
*
* @note
*
* For region 0x00000000 - 0x3FFFFFFF, a system where DDR is less than 1GB,
* region after DDR and before PL is marked as undefined/reserved in translation
* table. In 0xF8000000 - 0xF8FFFFFF, 0xF8000C00 - 0xF8000FFF, 0xF8010000 -
* 0xF88FFFFF and 0xF8F03000 to 0xF8FFFFFF are reserved but due to granual size
* of 1MB, it is not possible to define separate regions for them. For region
* 0xFFF00000 - 0xFFFFFFFF, 0xFFF00000 to 0xFFFB0000 is reserved but due to 1MB
* granual size, it is not possible to define separate region for it
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 1.00a ecm 10/20/09 Initial version
* 3.04a sdm 01/13/12 Updated MMU table to mark DDR memory as Shareable
* 3.07a sgd 07/05/2012 Configuring device address spaces as shareable device
* instead of strongly-ordered.
* 3.07a asa 07/17/2012 Changed the property of the ".mmu_tbl" section.
* 4.2 pkp 09/02/2014 added entries for 0xfe000000 to 0xffefffff as reserved
* and 0xe0000000 - 0xe1ffffff is broken down into
* 0xe0000000 - 0xe02fffff (memory mapped divides)
* 0xe0300000 - 0xe0ffffff (reserved) and
* 0xe1000000 - 0xe1ffffff (NAND)
* 5.2 pkp 06/08/2015 put a check for XPAR_PS7_DDR_0_S_AXI_BASEADDR to confirm
* if DDR is present or not and accordingly generate the
* translation table
* 6.1 pkp 07/11/2016 Corrected comments for memory attributes
* 6.8 mus 07/12/2018 Mark DDR memory as inner cacheable, if BSP is built
* with the USE_AMP flag.
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
.globl MMUTable
.section .mmu_tbl,"a"
MMUTable:
	/* Each table entry occupies one 32-bit word and there are
	 * 4096 entries, so the entire table takes up 16KB.
	 * Each entry covers a 1MB section.
	 *
	 * Entry encoding (short-descriptor section): bits[1:0]=b10,
	 * B=bit2, C=bit3, Domain=bits[8:5], AP[1:0]=bits[11:10],
	 * TEX=bits[14:12], S=bit16. A zero word is a fault entry.
	 */
.set SECT, 0
#ifdef XPAR_PS7_DDR_0_S_AXI_BASEADDR
.set DDR_START, XPAR_PS7_DDR_0_S_AXI_BASEADDR
.set DDR_END, XPAR_PS7_DDR_0_S_AXI_HIGHADDR
.set DDR_SIZE, (DDR_END - DDR_START)+1
.set DDR_REG, DDR_SIZE/0x100000			/* number of 1MB DDR sections */
#else
.set DDR_REG, 0					/* no DDR present */
#endif
.set UNDEF_REG, 0x3FF - DDR_REG			/* sections below 0x40000000 left unmapped */
#ifndef USE_AMP
/*0x00000000 - 0x00100000 (inner and outer cacheable )*/
.word SECT + 0x15de6		/* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
#else
/*0x00000000 - 0x00100000 (inner cacheable )*/
.word SECT + 0x14de6		/* S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b1 */
#endif
.set SECT, SECT+0x100000
.rept	DDR_REG			/* (DDR Cacheable) */
.word	SECT + 0x15de6		/* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set	SECT, SECT+0x100000
.endr
.rept	UNDEF_REG		/* (unassigned/reserved).
				 * Generates a translation fault if accessed */
.word	SECT + 0x0		/* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0400			/* 0x40000000 - 0x7fffffff (FPGA slave0) */
.word	SECT + 0xc02		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0400			/* 0x80000000 - 0xbfffffff (FPGA slave1) */
.word	SECT + 0xc02		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0200			/* 0xc0000000 - 0xdfffffff (unassigned/reserved).
				 * Generates a translation fault if accessed */
.word	SECT + 0x0		/* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x003			/* 0xe0000000 - 0xe02fffff (Memory mapped devices)
				 * UART/USB/IIC/SPI/CAN/GEM/GPIO/QSPI/SD/NAND */
.word	SECT + 0xc06		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0D			/* 0xe0300000 - 0xe0ffffff (unassigned/reserved).
				 * Generates a translation fault if accessed */
.word	SECT + 0x0		/* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0010			/* 0xe1000000 - 0xe1ffffff (NAND) */
.word	SECT + 0xc06		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0020			/* 0xe2000000 - 0xe3ffffff (NOR) */
.word	SECT + 0xc06		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0020			/* 0xe4000000 - 0xe5ffffff (SRAM) */
.word	SECT + 0xc0e		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0120			/* 0xe6000000 - 0xf7ffffff (unassigned/reserved).
				 * Generates a translation fault if accessed */
.word	SECT + 0x0		/* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
/* 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
   0xf8f03000 to 0xf8ffffff are reserved but due to granual size of
   1MB, it is not possible to define separate regions for them */
.rept	0x0010			/* 0xf8000000 - 0xf8ffffff (AMBA APB Peripherals) */
.word	SECT + 0xc06		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0030			/* 0xf9000000 - 0xfbffffff (unassigned/reserved).
				 * Generates a translation fault if accessed */
.word	SECT + 0x0		/* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x0020			/* 0xfc000000 - 0xfdffffff (Linear QSPI - XIP) */
.word	SECT + 0xc0a		/* S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0 */
.set	SECT, SECT+0x100000
.endr
.rept	0x001F			/* 0xfe000000 - 0xffefffff (unassigned/reserved).
				 * Generates a translation fault if accessed */
.word	SECT + 0x0		/* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set	SECT, SECT+0x100000
.endr
/* 0xfff00000 to 0xfffb0000 is reserved but due to granual size of
   1MB, it is not possible to define separate region for it
   0xfff00000 - 0xffffffff
   256K OCM when mapped to high address space
   inner-cacheable */
.word	SECT + 0x4c0e		/* S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1 */
.set	SECT, SECT+0x100000
.end
/**
* @} End of "addtogroup a9_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,033 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/xil-crt0.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 1.00a ecm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Added code for profiling
* 3.06a sgd 05/16/12 Added global constructors and cleanup code
* Uart initialization based on compiler flag
* 3.07a sgd 07/05/12 Updated with reset and start Global Timer
* 3.07a sgd 10/19/12 SMC NOR and SRAM initialization with build option
* 4.2 pkp 08/04/14 Removed PEEP board related code which contained
* initialization of uart smc nor and sram
* 5.3 pkp 10/07/15 Added support for OpenAMP by not initializing global
* timer when USE_AMP flag is defined
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "bspconfig.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
/* Literal pool holding linker-script section boundaries used below. */
.Lsbss_start:
	.long	__sbss_start
.Lsbss_end:
	.long	__sbss_end
.Lbss_start:
	.long	__bss_start
.Lbss_end:
	.long	__bss_end
.Lstack:
	.long	__stack
.globl	_start
/* C runtime startup: init CPU, zero .sbss/.bss, set sp, start timers,
 * run global constructors, call main, then destructors and exit. */
_start:
	bl	__cpu_init		/* Initialize the CPU first (BSP provides this) */
	mov	r0, #0
/* clear sbss */
	ldr	r1,.Lsbss_start		/* calculate beginning of the SBSS */
	ldr	r2,.Lsbss_end		/* calculate end of the SBSS */
.Lloop_sbss:
	cmp	r1,r2
	bge	.Lenclsbss		/* If no SBSS, no clearing required */
	str	r0, [r1], #4		/* zero one word and advance */
	b	.Lloop_sbss
.Lenclsbss:
/* clear bss */
	ldr	r1,.Lbss_start		/* calculate beginning of the BSS */
	ldr	r2,.Lbss_end		/* calculate end of the BSS */
.Lloop_bss:
	cmp	r1,r2
	bge	.Lenclbss		/* If no BSS, no clearing required */
	str	r0, [r1], #4		/* zero one word and advance */
	b	.Lloop_bss
.Lenclbss:
/* set stack pointer */
	ldr	r13,.Lstack		/* stack address */
/* Reset and start Global Timer */
	mov	r0, #0x0		/* r1:r0 = 64-bit initial time value (zero) for XTime_SetTime */
	mov	r1, #0x0
/* Reset and start Triple Timer Counter */
#if defined SLEEP_TIMER_BASEADDR
	bl	XTime_StartTTCTimer
#endif
#if USE_AMP != 1
	bl	XTime_SetTime		/* skipped for AMP: timer owned by the other OS */
#endif
#ifdef PROFILING			/* defined in Makefile */
/* Setup profiling stuff */
	bl	_profile_init
#endif /* PROFILING */
/* run global constructors */
	bl	__libc_init_array
/* make sure argc and argv are valid */
	mov	r0, #0
	mov	r1, #0
/* Let her rip */
	bl	main
/* Cleanup global constructors */
	bl	__libc_fini_array
#ifdef PROFILING
/* Cleanup profiling stuff */
	bl	_profile_clean
#endif /* PROFILING */
/* All done */
	bl	exit
.Lexit:					/* should never get here */
	b	.Lexit
.Lstart:
	.size	_start,.Lstart-_start
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,555 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/cpu_init.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file cpu_init.s
*
* This file contains CPU specific initialization. Invoked from main CRT
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.04a sdm 01/02/12 Updated to clear cp15 regs with unknown reset values
* 5.0 pkp 12/16/14 removed incorrect initialization of TLB lockdown
* register to fix CR#830580
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.text
.global __cpu_init
.align 2

/*
 * __cpu_init - CPU-specific initialization, invoked from _start before
 * any C code runs.
 *
 * Zero-writes the cp15 registers whose reset values are architecturally
 * unknown (DFSR/IFSR/DFAR/IFAR/PMXEVCNTR/TPIDRURW/TPIDRURO), then clears
 * the cycle-counter overflow flag, configures the PMU control register
 * and enables the cycle counter.
 * Clobbers: r0, r2. Returns with bx lr.
 */
__cpu_init:
	/* Clear cp15 regs with unknown reset values */
	mov	r0, #0x0
	mcr	p15, 0, r0, c5, c0, 0	/* DFSR */
	mcr	p15, 0, r0, c5, c0, 1	/* IFSR */
	mcr	p15, 0, r0, c6, c0, 0	/* DFAR */
	mcr	p15, 0, r0, c6, c0, 2	/* IFAR */
	mcr	p15, 0, r0, c9, c13, 2	/* PMXEVCNTR */
	mcr	p15, 0, r0, c13, c0, 2	/* TPIDRURW */
	mcr	p15, 0, r0, c13, c0, 3	/* TPIDRURO */

	/* Reset and start Cycle Counter */
	mov	r2, #0x80000000		/* clear overflow */
	mcr	p15, 0, r2, c9, c12, 3	/* PMOVSR: clear cycle-counter overflow flag */
	mov	r2, #0xd		/* D, C, E */
	mcr	p15, 0, r2, c9, c12, 0	/* PMCR: cycle divider, reset counter, enable */
	mov	r2, #0x80000000		/* enable cycle counter */
	mcr	p15, 0, r2, c9, c12, 1	/* PMCNTENSET */

	bx	lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,043 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/armcc/asm_vectors.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file asm_vectors.s
;
; This file contains the initial vector table for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a ecm/sdm 10/20/09 Initial version
; 3.11a asa 9/17/13 Added support for neon.
; 4.00 pkp 01/22/14 Modified return addresses for interrupt
; handlers
; 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
; abort and prefetch abort into DataAbortAddr and
; PrefetchAbortAddr for further use to fix CR#854523
; 5.4 pkp 12/03/15 Added handler for undefined exception
;</pre>
;
; @note
;
; None.
;
;****************************************************************************
	EXPORT _vector_table
	EXPORT IRQHandler

	IMPORT _boot
	IMPORT _prestart
	IMPORT IRQInterrupt
	IMPORT FIQInterrupt
	IMPORT SWInterrupt
	IMPORT DataAbortInterrupt
	IMPORT PrefetchAbortInterrupt
	IMPORT UndefinedException
	IMPORT DataAbortAddr
	IMPORT PrefetchAbortAddr
	IMPORT UndefinedExceptionAddr

	AREA |.vectors|, CODE
	REQUIRE8 {TRUE}
	PRESERVE8 {TRUE}
	ENTRY				; define this as an entry point

;------------------------------------------------------------------------------
; Cortex-A9 exception vector table (armcc tool flow). Each slot branches to
; its handler; reset goes to _boot. The unused "address exception" slot is a
; NOP placeholder.
;------------------------------------------------------------------------------
_vector_table
	B	_boot			; reset
	B	Undefined
	B	SVCHandler
	B	PrefetchAbortHandler
	B	DataAbortHandler
	NOP				; Placeholder for address exception vector
	B	IRQHandler
	B	FIQHandler

;------------------------------------------------------------------------------
; IRQHandler: saves the caller-saved integer registers plus the full VFP/NEON
; context (d0-d7, d16-d31, FPSCR, FPEXC) so the C handler may use NEON,
; calls IRQInterrupt, restores everything, and returns with
; 'subs pc, lr, #4', which also restores CPSR from SPSR_irq.
; NOTE(review): vpush {d16-d31} assumes a 32-double-register VFP/NEON unit --
; confirm for the configured core.
;------------------------------------------------------------------------------
IRQHandler				; IRQ vector handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	vpush	{d0-d7}
	vpush	{d16-d31}
	vmrs	r1, FPSCR
	push	{r1}
	vmrs	r1, FPEXC
	push	{r1}
	bl	IRQInterrupt		; IRQ vector
	pop	{r1}
	vmsr	FPEXC, r1
	pop	{r1}
	vmsr	FPSCR, r1
	vpop	{d16-d31}
	vpop	{d0-d7}
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #4		; adjust return

; FIQHandler: same save/restore pattern as IRQHandler, around FIQInterrupt.
; The FIQLoop label is vestigial -- nothing branches back to it.
FIQHandler				; FIQ vector handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	vpush	{d0-d7}
	vpush	{d16-d31}
	vmrs	r1, FPSCR
	push	{r1}
	vmrs	r1, FPEXC
	push	{r1}
FIQLoop
	bl	FIQInterrupt		; FIQ vector
	pop	{r1}
	vmsr	FPEXC, r1
	pop	{r1}
	vmsr	FPSCR, r1
	vpop	{d16-d31}
	vpop	{d0-d7}
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #4		; adjust return

; Undefined: records the faulting instruction address (lr - 4) in
; UndefinedExceptionAddr, then calls the C handler.
Undefined				; Undefined handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	ldr	r0, =UndefinedExceptionAddr
	sub	r1, lr,#4
	str	r1, [r0]		; Address of instruction causing undefined exception
	bl	UndefinedException	; UndefinedException: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	movs	pc, lr

; SVCHandler: extracts the SVC/SWI immediate from the instruction at lr
; (halfword load in Thumb state, word load in ARM state) before calling
; the C handler.
; NOTE(review): 'tst r0, #0x20' tests the caller's r0, not the SPSR T bit --
; conventional code does 'mrs r0, spsr' first; confirm intended behaviour.
SVCHandler				; SWI handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	tst	r0, #0x20		; check the T bit
	ldrneh	r0, [lr,#-2]		; Thumb mode
	bicne	r0, r0, #0xff00		; Thumb mode
	ldreq	r0, [lr,#-4]		; ARM mode
	biceq	r0, r0, #0xff000000	; ARM mode
	bl	SWInterrupt		; SWInterrupt: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	movs	pc, lr			; adjust return

; DataAbortHandler: records the aborting instruction address (lr - 8) in
; DataAbortAddr, then calls the C handler.
DataAbortHandler			; Data Abort handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	ldr	r0, =DataAbortAddr
	sub	r1, lr,#8
	str	r1, [r0]		; Address of instruction causing data abort
	bl	DataAbortInterrupt	; DataAbortInterrupt: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #8		; adjust return

; PrefetchAbortHandler: records the aborting instruction address (lr - 4) in
; PrefetchAbortAddr, then calls the C handler.
PrefetchAbortHandler			; Prefetch Abort handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	ldr	r0, =PrefetchAbortAddr
	sub	r1, lr,#4
	str	r1, [r0]		; Address of instruction causing prefetch abort
	bl	PrefetchAbortInterrupt	; PrefetchAbortInterrupt: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #4		; adjust return

	END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,820 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/armcc/boot.S | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file boot.S
;
; This file contains the initial startup code for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a ecm/sdm 10/20/09 Initial version
; 3.04a sdm 01/02/12 Updated to clear cp15 regs with unknown reset values
; 3.06a sgd 05/15/12 Updated L2CC Auxiliary and Tag RAM Latency control
; register settings.
; 3.06a asa 06/17/12 Modified the TTBR settings and L2 Cache auxiliary
; register settings.
; 3.07a sgd 07/05/12 Updated with reset and start Global Timer
; 3.07a sgd 10/19/12 SMC NOR and SRAM initialization with build option
; 4.2 pkp 06/19/14 Enabled asynchronous abort exception
; 4.2 pkp 08/04/14 Removed PEEP board related code which contained
; initialization of uart smc nor and sram
; 5.0 pkp 16/12/14 Modified initialization code to enable scu after
; MMU is enabled and removed incorrect initialization
; of TLB lockdown register to fix CR#830580
; 5.1 pkp 05/13/15 Changed the initialization order so to first invalidate
; caches and TLB, enable MMU and caches, then enable SMP
; bit in ACTLR. L2Cache invalidation and enabling of L2Cache
; is done later.
; 5.4 asa 12/06/15 Added code to initialize SPSR for all relevant modes.
; 6.0 mus 04/08/16 Added code to detect zynq-7000 base silicon configuration and
; attempt to enable dual core behavior on single cpu zynq-7000s devices
; is prevented from corrupting system behavior.
; 6.0 mus 24/08/16 Check CPU core before putting cpu1 to reset for single core
; zynq-7000s devices
; 6.6 srm 10/25/17 Added timer configuration using XTime_StartTTCTimer API.
;                       Now the TTC instance as specified by the user will be
;                       started.
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
#include "xparameters.h"
#include "xil_errata.h"

#define UART_BAUDRATE	115200

	EXPORT _prestart
	EXPORT _boot

	IMPORT |Image$$ARM_LIB_STACK$$ZI$$Limit|
	IMPORT |Image$$IRQ_STACK$$ZI$$Limit|
	IMPORT |Image$$SPV_STACK$$ZI$$Limit|
	IMPORT |Image$$ABORT_STACK$$ZI$$Limit|
	IMPORT MMUTable
	IMPORT _vector_table
	IMPORT __main
	IMPORT Xil_ExceptionInit
	IMPORT XTime_SetTime
#if defined SLEEP_TIMER_BASEADDR
	IMPORT XTime_StartTTCTimer
#endif

; PS7 peripheral addresses used during boot (PL310 L2 controller, SLCR, eFuse)
PSS_L2CC_BASE_ADDR	EQU	0xF8F02000
PSS_SLCR_BASE_ADDR	EQU	0xF8000000

L2CCWay		EQU	(PSS_L2CC_BASE_ADDR + 0x077C)	;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_INVLD_WAY_OFFSET)
L2CCSync	EQU	(PSS_L2CC_BASE_ADDR + 0x0730)	;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_SYNC_OFFSET)
L2CCCrtl	EQU	(PSS_L2CC_BASE_ADDR + 0x0100)	;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CNTRL_OFFSET)
L2CCAuxCrtl	EQU	(PSS_L2CC_BASE_ADDR + 0x0104)	;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_AUX_CNTRL_OFFSET)
L2CCTAGLatReg	EQU	(PSS_L2CC_BASE_ADDR + 0x0108)	;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_TAG_RAM_CNTRL_OFFSET)
L2CCDataLatReg	EQU	(PSS_L2CC_BASE_ADDR + 0x010C)	;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_DATA_RAM_CNTRL_OFFSET)
L2CCIntClear	EQU	(PSS_L2CC_BASE_ADDR + 0x0220)	;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_IAR_OFFSET)
L2CCIntRaw	EQU	(PSS_L2CC_BASE_ADDR + 0x021C)	;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_ISR_OFFSET)

SLCRlockReg	EQU	(PSS_SLCR_BASE_ADDR + 0x04)	/*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_LOCK_OFFSET)*/
SLCRUnlockReg	EQU	(PSS_SLCR_BASE_ADDR + 0x08)	/*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_UNLOCK_OFFSET)*/
SLCRL2cRamReg	EQU	(PSS_SLCR_BASE_ADDR + 0xA1C)	/*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_L2C_RAM_OFFSET)*/
SLCRCPURSTReg	EQU	(0xF8000000 + 0x244)		;(XPS_SYS_CTRL_BASEADDR + A9_CPU_RST_CTRL_OFFSET)
EFUSEStaus	EQU	(0xF800D000 + 0x10)		;(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)

SLCRlockKey	EQU	0x767B			/* SLCR lock key */
SLCRUnlockKey	EQU	0xDF0D			/* SLCR unlock key */
SLCRL2cRamConfig	EQU	0x00020202	/* SLCR L2C ram configuration */

CRValMmuCac	EQU	2_01000000000101	; Enable IDC, and MMU
CRValHiVectorAddr	EQU	2_10000000000000	; Set the Vector address to high, 0xFFFF0000
L2CCAuxControl	EQU	0x72360000	; Enable all prefetching, Way Size (16 KB) and High Priority for SO and Dev Reads Enable
L2CCControl	EQU	0x01		; Enable L2CC
L2CCTAGLatency	EQU	0x0111		; 7 Cycles of latency for TAG RAM
L2CCDataLatency	EQU	0x0121		; 7 Cycles of latency for DATA RAM
FPEXC_EN	EQU	0x40000000	; FPU enable bit, (1 << 30)

	AREA |.boot|, CODE
	PRESERVE8

;------------------------------------------------------------------------------
; _prestart / _boot: reset entry for the Cortex-A9 (armcc tool flow).
; this initializes the various processor modes. Sequence as implemented:
;   1. Core filter: only the CPU selected by XPAR_CPU_ID proceeds; on
;      single-core devices (eFuse status bit 0x80 set) CPU1 is held in reset.
;   2. Errata 742230/743622 workarounds for the affected r2 revisions.
;   3. VBAR -> _vector_table; invalidate SCU, TLBs, I-cache, branch
;      predictor and D-cache; disable the MMU.
;   4. Program banked stack pointers and SPSRs for IRQ/SVC/ABT/SYS modes.
;   5. Enable the SCU, program TTB0 with MMUTable, enable MMU + caches,
;      then set the ACTLR SMP and maintenance-broadcast bits.
;   6. Unless USE_AMP==1, configure and enable the PL310 L2 cache.
;   7. Enable VFP/NEON access, branch prediction, D-side/L2 prefetch and
;      asynchronous aborts; clear cp15 registers with unknown reset values;
;      start the PMU cycle counter, Global Timer and (optionally) TTC timer.
;   8. Jump to the C library entry __main with argc = argv = 0.
;------------------------------------------------------------------------------
_prestart
_boot
#if XPAR_CPU_ID==0
; only allow cpu0 through
	mrc	p15,0,r1,c0,c0,5	; MPIDR: low nibble is the CPU number
	and	r1, r1, #0xf
	cmp	r1, #0
	beq	CheckEFUSE

EndlessLoop0
	wfe
	b	EndlessLoop0

CheckEFUSE
	ldr	r0,=EFUSEStaus
	ldr	r1,[r0]			; Read eFuse setting
	ands	r1,r1,#0x80		; Check whether device is having single core
	beq	OKToRun
; Single core device, reset CPU1
	ldr	r0,=SLCRUnlockReg	; Load SLCR base address base + unlock register
	ldr	r1,=SLCRUnlockKey	; set unlock key
	str	r1, [r0]		; Unlock SLCR
	ldr	r0,=SLCRCPURSTReg
	ldr	r1,[r0]			; Read CPU Software Reset Control register
	orr	r1,r1,#0x22
	str	r1,[r0]			; Reset CPU1
	ldr	r0,=SLCRlockReg		; Load SLCR base address base + lock register
	ldr	r1,=SLCRlockKey		; set lock key
	str	r1, [r0]		; lock SLCR
#elif XPAR_CPU_ID==1
; only allow cpu1 through
	mrc	p15,0,r1,c0,c0,5
	and	r1, r1, #0xf
	cmp	r1, #1
	beq	CheckEFUSE1
	b	EndlessLoop1

CheckEFUSE1
	ldr	r0,=EFUSEStaus
	ldr	r1,[r0]			; Read eFuse setting
	ands	r1,r1,#0x80		; Check whether device is having single core
	beq	OKToRun

EndlessLoop1
	wfe
	b	EndlessLoop1
#endif

OKToRun
	mrc	p15, 0, r0, c0, c0, 0	/* Get the revision */
	and	r5, r0, #0x00f00000	; r5 = variant (major revision) field
	and	r6, r0, #0x0000000f	; r6 = revision (minor) field
	orr	r6, r6, r5, lsr #20-4	; r6 = packed rNpM revision code

#ifdef CONFIG_ARM_ERRATA_742230
	cmp	r6, #0x22		/* only present up to r2p2 */
	mrcle	p15, 0, r10, c15, c0, 1	/* read diagnostic register */
	orrle	r10, r10, #1 << 4	/* set bit #4 */
	mcrle	p15, 0, r10, c15, c0, 1	/* write diagnostic register */
#endif

#ifdef CONFIG_ARM_ERRATA_743622
	teq	r5, #0x00200000		/* only present in r2p* */
	mrceq	p15, 0, r10, c15, c0, 1	/* read diagnostic register */
	orreq	r10, r10, #1 << 6	/* set bit #6 */
	mcreq	p15, 0, r10, c15, c0, 1	/* write diagnostic register */
#endif

	/* set VBAR to the _vector_table address in scatter file */
	ldr	r0, =_vector_table
	mcr	p15, 0, r0, c12, c0, 0

;invalidate scu
	ldr	r7, =0xf8f0000c		; SCU Invalidate-All register
	ldr	r6, =0xffff
	str	r6, [r7]

;Invalidate caches and TLBs
	mov	r0,#0			; r0 = 0
	mcr	p15, 0, r0, c8, c7, 0	; invalidate TLBs
	mcr	p15, 0, r0, c7, c5, 0	; invalidate icache
	mcr	p15, 0, r0, c7, c5, 6	; Invalidate branch predictor array
	bl	invalidate_dcache	; invalidate dcache

; Disable MMU, if enabled
	mrc	p15, 0, r0, c1, c0, 0	; read CP15 register 1
	bic	r0, r0, #0x1		; clear bit 0
	mcr	p15, 0, r0, c1, c0, 0	; write value back

#ifdef SHAREABLE_DDR
	; Mark the entire DDR memory as shareable
	ldr	r3, =0x3ff		; 1024 entries to cover 1G DDR
	ldr	r0, =TblBase		; MMU Table address in memory
	ldr	r2, =0x15de6		; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
shareable_loop:
	str	r2, [r0]		; write the entry to MMU table
	add	r0, r0, #0x4		; next entry in the table
	add	r2, r2, #0x100000	; next section
	subs	r3, r3, #1
	bge	shareable_loop		; loop till 1G is covered
#endif

; Set up the banked stack pointer for each processor mode. Each step rewrites
; the CPSR mode field, loads r13 from the scatter-file stack limit, and
; initialises that mode's SPSR with the EE bit cleared (little-endian).
; NOTE(review): in UAL, 'msr apsr, r2' updates only the APSR flag fields; the
; mode switch here relies on the toolchain treating APSR as a CPSR synonym,
; as the adjacent comments claim -- confirm against the armasm build.
	mrs	r0, cpsr		; get the current PSR
	mvn	r1, #0x1f		; set up the irq stack pointer
	and	r2, r1, r0
	orr	r2, r2, #0x12		; IRQ mode
	msr	apsr, r2		; was cpsr, apsr is considered synonym
	ldr	r13,=|Image$$IRQ_STACK$$ZI$$Limit|	; IRQ stack pointer
	bic	r2, r2, #(0x1 << 9)	; Set EE bit to little-endian
	msr	spsr_fsxc,r2

	mrs	r0, cpsr		; get the current PSR
	mvn	r1, #0x1f		; set up the supervisor stack pointer
	and	r2, r1, r0
	orr	r2, r2, #0x13		; supervisor mode
	msr	apsr, r2		; was cpsr, apsr is considered synonym
	ldr	r13,=|Image$$SPV_STACK$$ZI$$Limit|	; Supervisor stack pointer
	bic	r2, r2, #(0x1 << 9)	; Set EE bit to little-endian
	msr	spsr_fsxc,r2

	mrs	r0, cpsr		; get the current PSR
	mvn	r1, #0x1f		; set up the Abort stack pointer
	and	r2, r1, r0
	orr	r2, r2, #0x17		; Abort mode
	msr	apsr, r2		; was cpsr, apsr is considered synonym
	ldr	r13,=|Image$$ABORT_STACK$$ZI$$Limit|	; Abort stack pointer
	bic	r2, r2, #(0x1 << 9)	; Set EE bit to little-endian
	msr	spsr_fsxc,r2

	mrs	r0, cpsr		; get the current PSR
	mvn	r1, #0x1f		; set up the system stack pointer
	and	r2, r1, r0
	orr	r2, r2, #0x1f		; SYS mode
	msr	apsr, r2		; was cpsr, apsr is considered synonym
	ldr	r13,=|Image$$ARM_LIB_STACK$$ZI$$Limit|	; SYS stack pointer

;set scu enable bit in scu
	ldr	r7, =0xf8f00000
	ldr	r0, [r7]
	orr	r0, r0, #0x1
	str	r0, [r7]

; enable MMU and cache
	ldr	r0,=MMUTable		; Load MMU translation table base
	orr	r0, r0, #0x5B		; Outer-cacheable, WB
	mcr	p15, 0, r0, c2, c0, 0	; TTB0

	mvn	r0,#0			; all ones: every domain gets manager access
	mcr	p15,0,r0,c3,c0,0	; DACR

; Enable mmu, icache and dcache
	ldr	r0,=CRValMmuCac
	mcr	p15,0,r0,c1,c0,0	; Enable cache and MMU
	dsb				; dsb allow the MMU to start up
	isb				; isb flush prefetch buffer

; Write to ACTLR
	mrc	p15, 0,r0, c1, c0, 1	; Read ACTLR
	orr	r0, r0, #(0x01 << 6)	; SMP bit
	orr	r0, r0, #(0x01 )	; Cache/TLB maintenance broadcast
	mcr	p15, 0,r0, c1, c0, 1	; Write ACTLR

; Invalidate L2 Cache and initialize L2 Cache
; For AMP, assume running on CPU1. Don't initialize L2 Cache (up to Linux)
#if USE_AMP!=1
	ldr	r0,=L2CCCrtl		; Load L2CC base address base + control register
	mov	r1, #0			; force the disable bit
	str	r1, [r0]		; disable the L2 Caches

	ldr	r0,=L2CCAuxCrtl		; Load L2CC base address base + Aux control register
	ldr	r1,[r0]			; read the register
	ldr	r2,=L2CCAuxControl	; set the default bits
	orr	r1,r1,r2
	str	r1, [r0]		; store the Aux Control Register

	ldr	r0,=L2CCTAGLatReg	; Load L2CC base address base + TAG Latency address
	ldr	r1,=L2CCTAGLatency	; set the latencies for the TAG
	str	r1, [r0]		; store the TAG Latency register Register

	ldr	r0,=L2CCDataLatReg	; Load L2CC base address base + Data Latency address
	ldr	r1,=L2CCDataLatency	; set the latencies for the Data
	str	r1, [r0]		; store the Data Latency register Register

	ldr	r0,=L2CCWay		; Load L2CC base address base + way register
	ldr	r2, =0xFFFF
	str	r2, [r0]		; force invalidate

	ldr	r0,=L2CCSync		; need to poll 0x730, PSS_L2CC_CACHE_SYNC_OFFSET
					; Load L2CC base address base + sync register
; poll for completion (sync register reads 0 when the invalidate has drained)
Sync
	ldr	r1, [r0]
	cmp	r1, #0
	bne	Sync

	ldr	r0,=L2CCIntRaw		; clear pending interrupts
	ldr	r1,[r0]
	ldr	r0,=L2CCIntClear
	str	r1,[r0]

	ldr	r0,=SLCRUnlockReg	; Load SLCR base address base + unlock register
	ldr	r1,=SLCRUnlockKey	; set unlock key
	str	r1, [r0]		; Unlock SLCR

	ldr	r0,=SLCRL2cRamReg	; Load SLCR base address base + l2c Ram Control register
	ldr	r1,=SLCRL2cRamConfig	; set the configuration value
	str	r1, [r0]		; store the L2c Ram Control Register

	ldr	r0,=SLCRlockReg		; Load SLCR base address base + lock register
	ldr	r1,=SLCRlockKey		; set lock key
	str	r1, [r0]		; lock SLCR

	ldr	r0,=L2CCCrtl		; Load L2CC base address base + control register
	ldr	r1,[r0]			; read the register
	mov	r2, #L2CCControl	; set the enable bit
	orr	r1,r1,r2
	str	r1, [r0]		; enable the L2 Caches
#endif

	mov	r0, r0			; no-op

; Grant full access to coprocessors 10 and 11 (VFP/NEON), then enable VFP
	mrc	p15, 0, r1, c1, c0, 2	; read cp access control register (CACR) into r1
	orr	r1, r1, #(0xf << 20)	; enable full access for p10 & p11
	mcr	p15, 0, r1, c1, c0, 2	; write back into CACR

; enable vfp
	fmrx	r1, FPEXC		; read the exception register
	orr	r1,r1, #FPEXC_EN	; set VFP enable bit, leave the others in orig state
	fmxr	FPEXC, r1		; write back the exception register

	mrc	p15, 0, r0, c1, c0, 0	; flow prediction enable
	orr	r0, r0, #(0x01 << 11)	; #0x8000
	mcr	p15,0,r0,c1,c0,0

	mrc	p15, 0, r0, c1, c0, 1	; read Auxiliary Control Register
	orr	r0, r0, #(0x1 << 2)	; enable Dside prefetch
	orr	r0, r0, #(0x1 << 1)	; enable L2 prefetch
	mcr	p15, 0, r0, c1, c0, 1	; write Auxiliary Control Register

	mrs	r0, cpsr		/* get the current PSR */
	bic	r0, r0, #0x100		/* enable asynchronous abort exception */
	msr	cpsr_xsf, r0

; Clear cp15 regs with unknown reset values
	mov	r0, #0x0
	mcr	p15, 0, r0, c5, c0, 0	; DFSR
	mcr	p15, 0, r0, c5, c0, 1	; IFSR
	mcr	p15, 0, r0, c6, c0, 0	; DFAR
	mcr	p15, 0, r0, c6, c0, 2	; IFAR
	mcr	p15, 0, r0, c9, c13, 2	; PMXEVCNTR
	mcr	p15, 0, r0, c13, c0, 2	; TPIDRURW
	mcr	p15, 0, r0, c13, c0, 3	; TPIDRURO

; Reset and start Cycle Counter
	mov	r2, #0x80000000		; clear overflow
	mcr	p15, 0, r2, c9, c12, 3
	mov	r2, #0xd		; D, C, E
	mcr	p15, 0, r2, c9, c12, 0
	mov	r2, #0x80000000		; enable cycle counter
	mcr	p15, 0, r2, c9, c12, 1

; Reset and start Global Timer: XTime_SetTime(0), 64-bit zero in r1:r0
	mov	r0, #0x0
	mov	r1, #0x0
	bl	XTime_SetTime

; Reset and start Triple Timer counter
#if defined SLEEP_TIMER_BASEADDR
	bl	XTime_StartTTCTimer
#endif

#ifdef PROFILING			/* defined in Makefile */
	/* Setup profiling stuff */
	bl	_profile_init
#endif					/* PROFILING */

; make sure argc and argv are valid
	mov	r0, #0
	mov	r1, #0

	b	__main			; jump to C startup code
	and	r0, r0, r0		; no op

Ldone	b	Ldone			; Paranoia: we should never get here

; *************************************************************************
; *
; * invalidate_dcache - invalidate the entire d-cache by set/way
; *
; * Note: for Cortex-A9, there is no cp instruction for invalidating
; * the whole D-cache. Need to invalidate each line.
; *
; * Clobbers r0-r5, r7, r9-r11 and flags; returns via bx lr.
; *
; *************************************************************************
invalidate_dcache
	mrc	p15, 1, r0, c0, c0, 1	; read CLIDR
	ands	r3, r0, #0x7000000
	mov	r3, r3, lsr #23		; cache level value (naturally aligned)
	beq	finished
	mov	r10, #0			; start with level 0
loop1
	add	r2, r10, r10, lsr #1	; work out 3xcachelevel
	mov	r1, r0, lsr r2		; bottom 3 bits are the Cache type for this level
	and	r1, r1, #7		; get those 3 bits alone
	cmp	r1, #2
	blt	skip			; no cache or only instruction cache at this level
	mcr	p15, 2, r10, c0, c0, 0	; write the Cache Size selection register
	isb				; isb to sync the change to the CacheSizeID reg
	mrc	p15, 1, r1, c0, c0, 0	; reads current Cache Size ID register
	and	r2, r1, #7		; extract the line length field
	add	r2, r2, #4		; add 4 for the line length offset (log2 16 bytes)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3	; r4 is the max number on the way size (right aligned)
	clz	r5, r4			; r5 is the bit position of the way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13	; r7 is the max number of the index size (right aligned)
loop2
	mov	r9, r4			; r9 working copy of the max way size (right aligned)
loop3
	orr	r11, r10, r9, lsl r5	; factor in the way number and cache number into r11
	orr	r11, r11, r7, lsl r2	; factor in the index number
	mcr	p15, 0, r11, c7, c6, 2	; invalidate by set/way
	subs	r9, r9, #1		; decrement the way number
	bge	loop3
	subs	r7, r7, #1		; decrement the index
	bge	loop2
skip
	add	r10, r10, #2		; increment the cache number
	cmp	r3, r10
	bgt	loop1
finished
	mov	r10, #0			; switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0	; select current cache level in cssr
	isb

	bx	lr

	END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,612 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/armcc/translation_table.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file translation_table.s
;
; This file contains the initialization for the MMU table in RAM
; needed by the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ---- -------- ---------------------------------------------------
; 1.00a ecm 10/20/09 Initial version
; 3.07a sgd 07/05/2012 Configuring device address spaces as shareable device
; instead of strongly-ordered.
; 4.2 pkp 09/02/14 modified translation table entries according to address map
; 4.2 pkp 09/11/14 modified translation table entries to resolve compilation
; error for solving CR#822897
; 6.1 pkp 07/11/16 Corrected comments for memory attributes
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
	EXPORT MMUTable

	AREA |.mmu_tbl|,CODE,ALIGN=14	; ALIGN=14 -> 16 KB alignment (TTBR0 requirement)

;------------------------------------------------------------------------------
; MMUTable: first-level translation table for the Cortex-A9.
; 4096 one-word section descriptors, each mapping 1 MB, covering the full
; 4 GB address space. Generated at assembly time with WHILE/WEND loops:
; 'sect' carries the running section base address, 'count' the loop index.
;------------------------------------------------------------------------------
MMUTable
	; Each table entry occupies one 32-bit word and there are
	; 4096 entries, so the entire table takes up 16KB.
	; Each entry covers a 1MB section.
	GBLA count
	GBLA sect

; 0x00000000 - 0x3fffffff (DDR Cacheable)
count	SETA	0
sect	SETA	0
	WHILE	count<0x400
	DCD	sect + 0x15de6		; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0x40000000 - 0x7fffffff (GpAxi0)
count	SETA	0
	WHILE	count<0x400
	DCD	sect + 0xc02		; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0x80000000 - 0xbfffffff (GpAxi1)
count	SETA	0
	WHILE	count<0x400
	DCD	sect + 0xc02		; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xc0000000 - 0xdfffffff (undef)
count	SETA	0
	WHILE	count<0x200
	DCD	sect			; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xe0000000 - 0xe02fffff (IOP dev)
count	SETA	0
	WHILE	count<0x3
	DCD	sect + 0xc06		; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xe0300000 - 0xe0ffffff (undef/reserved)
count	SETA	0
	WHILE	count<0xD
	DCD	sect			; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xe1000000 - 0xe1ffffff (NAND)
count	SETA	0
	WHILE	count<0x10
	DCD	sect + 0xc06		; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xe2000000 - 0xe3ffffff (NOR)
count	SETA	0
	WHILE	count<0x20
	DCD	sect + 0xc06		; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xe4000000 - 0xe5ffffff (SRAM)
count	SETA	0
	WHILE	count<0x20
	DCD	sect + 0xc0e		; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xe6000000 - 0xf7ffffff (reserved)
count	SETA	0
	WHILE	count<0x120
	DCD	sect			; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
; 0xf8f03000 to 0xf8ffffff are reserved but due to granule size of
; 1MB, it is not possible to define separate regions for them
; 0xf8000000 - 0xf8ffffff (APB device regs)
count	SETA	0
	WHILE	count<0x10
	DCD	sect + 0xc06		; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xf9000000 - 0xfbffffff (reserved)
count	SETA	0
	WHILE	count<0x30
	DCD	sect			; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xfc000000 - 0xfdffffff (QSPI)
count	SETA	0
	WHILE	count<0x20
	DCD	sect + 0xc0a		; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xfe000000 - 0xffefffff (reserved)
count	SETA	0
	WHILE	count<0x1F
	DCD	sect			; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect	SETA	sect+0x100000
count	SETA	count+1
	WEND

; 0xfff00000 to 0xfffb0000 is reserved but due to granule size of
; 1MB, it is not possible to define separate region for it
; 0xfff00000 to 0xfffb0000 (OCM) -- single final entry, completing 4096 words
count	SETA	0			; NOTE(review): unused before the single DCD below
	DCD	sect + 0x4c0e		; S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1
sect	SETA	sect+0x100000

	END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,131 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/iccarm/asm_vectors.s | ;******************************************************************************
; Copyright (c) 2017 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************/
;*****************************************************************************/
;**
; @file asm_vectors.s
;
; This file contains the initial vector table for the Cortex R5 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 6.2 mus 01/27/17 Initial version
; </pre>
;
; @note
;
; None.
;
;*****************************************************************************/
	MODULE ?asm_vectors

	;; Forward declaration of sections.
	SECTION IRQ_STACK:DATA:NOROOT(3)
	SECTION FIQ_STACK:DATA:NOROOT(3)
	SECTION SVC_STACK:DATA:NOROOT(3)
	SECTION ABT_STACK:DATA:NOROOT(3)
	SECTION UND_STACK:DATA:NOROOT(3)
	SECTION CSTACK:DATA:NOROOT(3)

#define UART_BAUDRATE 115200

	IMPORT _prestart
	IMPORT __iar_program_start

	SECTION .intvec:CODE:NOROOT(2)
	PUBLIC _vector_table

	IMPORT FIQInterrupt
	IMPORT IRQInterrupt
	IMPORT SWInterrupt
	IMPORT DataAbortInterrupt
	IMPORT PrefetchAbortInterrupt
	IMPORT UndefinedException
	IMPORT UndefinedExceptionAddr
	IMPORT PrefetchAbortAddr
	IMPORT DataAbortAddr
	IMPORT prof_pc

;------------------------------------------------------------------------------
; Cortex-R5 exception vector table (IAR tool flow). Each slot loads pc from
; the literal pool; reset enters __iar_program_start. The unused "address
; exception" slot is a NOP placeholder.
;------------------------------------------------------------------------------
_vector_table
	ARM
	ldr	pc,=__iar_program_start	; reset
	ldr	pc,=Undefined
	ldr	pc,=SVCHandler
	ldr	pc,=PrefetchAbortHandler
	ldr	pc,=DataAbortHandler
	NOP				; Placeholder for address exception vector
	ldr	pc,=IRQHandler
	ldr	pc,=FIQHandler

	SECTION .text:CODE:NOROOT(2)
	REQUIRE _vector_table
	ARM

; IRQHandler: saves the caller-saved integer registers and - with hardware
; floating point only (#ifndef __SOFTFP__) - the VFP context (d0-d7, FPSCR,
; FPEXC), calls the C IRQInterrupt, restores everything, and returns with
; 'subs pc, lr, #4', which also restores CPSR from SPSR_irq.
IRQHandler				; IRQ vector handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
#ifndef __SOFTFP__
	vpush	{d0-d7}			/* Store floating point registers */
	vmrs	r1, FPSCR
	push	{r1}
	vmrs	r1, FPEXC
	push	{r1}
#endif
	bl	IRQInterrupt		; IRQ vector
#ifndef __SOFTFP__
	pop	{r1}			/* Restore floating point registers */
	vmsr	FPEXC, r1
	pop	{r1}
	vmsr	FPSCR, r1
	vpop	{d0-d7}
#endif
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #4		; adjust return

; FIQHandler: integer-context-only save/restore around FIQInterrupt.
; The FIQLoop label is vestigial -- nothing branches back to it.
FIQHandler				; FIQ vector handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
FIQLoop
	bl	FIQInterrupt		; FIQ vector
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #4		; adjust return

; Undefined: records the faulting instruction address (lr - 4) in
; UndefinedExceptionAddr, then calls the C handler.
Undefined				; Undefined handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	ldr	r0, =UndefinedExceptionAddr
	sub	r1, lr, #4
	str	r1, [r0]		; Store address of instruction causing undefined exception
	bl	UndefinedException	; UndefinedException: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	movs	pc, lr

; SVCHandler: extracts the SVC/SWI immediate from the instruction at lr
; (halfword in Thumb state, word in ARM state) before calling the C handler.
; NOTE(review): 'tst r0, #0x20' tests the caller's r0, not the SPSR T bit --
; conventional code does 'mrs r0, spsr' first; confirm intended behaviour.
SVCHandler				; SWI handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	tst	r0, #0x20		; check the T bit
	ldrneh	r0, [lr,#-2]		; Thumb mode
	bicne	r0, r0, #0xff00		; Thumb mode
	ldreq	r0, [lr,#-4]		; ARM mode
	biceq	r0, r0, #0xff000000	; ARM mode
	bl	SWInterrupt		; SWInterrupt: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	movs	pc, lr			; adjust return

; DataAbortHandler: records the aborting instruction address (lr - 8) in
; DataAbortAddr, then calls the C handler.
DataAbortHandler			; Data Abort handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	ldr	r0, =DataAbortAddr
	sub	r1, lr, #8
	str	r1, [r0]		; Stores instruction causing data abort
	bl	DataAbortInterrupt	; DataAbortInterrupt: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #8		; adjust return

; PrefetchAbortHandler: records the aborting instruction address (lr - 4) in
; PrefetchAbortAddr, then calls the C handler.
PrefetchAbortHandler			; Prefetch Abort handler
	stmdb	sp!,{r0-r3,r12,lr}	; state save from compiled code
	ldr	r0, =PrefetchAbortAddr
	sub	r1, lr, #4
	str	r1, [r0]		; Stores instruction causing prefetch abort
	bl	PrefetchAbortInterrupt	; PrefetchAbortInterrupt: call C function here
	ldmia	sp!,{r0-r3,r12,lr}	; state restore from compiled code
	subs	pc, lr, #4		; adjust return

	END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,976 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/iccarm/boot.s | ;******************************************************************************
; Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
; *****************************************************************************/
; ****************************************************************************/
; **
; @file boot.S
;
; This file contains the initial startup code for the Cortex R5 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ---- -------- ---------------------------------------------------
; 5.00 mus 01/27/17 Initial version
; 6.6 srm 10/18/17 Updated the timer configuration with XTime_StartTTCTimer.
; Now the timer instance as specified by the user will be
; started.
; 6.6 mus 02/23/17 Disable the debug logic in non-JTAG boot mode(when
; processor is in lockstep configuration), based
; on the mld parameter "lockstep_mode_debug".
; 6.8   mus      09/20/18 Clear VINITHI field in RPU_0_CFG/RPU_1_CFG
;                         registers to initialize CortexR5 core with LOVEC
;                         on reset. It fixes CR#1010656.
; 7.0   mus      03/19/19 Disable FPU only in case of softfp, otherwise enable it by
;                         default. CR#1021638
;
; </pre>
;
; @note
;
; None.
;
; *****************************************************************************/
MODULE ?boot
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#include "xparameters.h"
#define UART_BAUDRATE 115200
PUBLIC _prestart
PUBLIC __iar_program_start
IMPORT _vector_table
IMPORT Init_MPU
#ifdef SLEEP_TIMER_BASEADDR
IMPORT XTime_StartTTCTimer
#endif
IMPORT __cmain
vector_base EQU _vector_table
RPU_GLBL_CNTL EQU 0xFF9A0000
RPU_ERR_INJ EQU 0xFF9A0020
RPU_0_CFG EQU 0xFF9A0100
RPU_1_CFG EQU 0xFF9A0200
RST_LPD_DBG EQU 0xFF5E0240
BOOT_MODE_USER EQU 0xFF5E0200
fault_log_enable EQU 0x101
SECTION .boot:CODE:NOROOT(2)
;------------------------------------------------------------------------------
; Reset entry point for the Cortex-R5 (IAR toolchain).
; Clears r0-r12, then visits each ARM processor mode in turn to install a
; per-mode stack pointer (SFE() = end address of the linker-defined stack
; section) and a zeroed link register.  The low five bits of CPSR select the
; mode: 0x12 IRQ, 0x13 supervisor, 0x17 abort, 0x11 FIQ, 0x1B undefined,
; 0x1F system.  The core is left in SYS mode when this sequence completes.
;------------------------------------------------------------------------------
/* this initializes the various processor modes */
_prestart
__iar_program_start
OKToRun
REQUIRE _vector_table ; force the exception vector table to be linked in
/* Initialize processor registers to 0 */
mov r0,#0
mov r1,#0
mov r2,#0
mov r3,#0
mov r4,#0
mov r5,#0
mov r6,#0
mov r7,#0
mov r8,#0
mov r9,#0
mov r10,#0
mov r11,#0
mov r12,#0
/* Initialize stack pointer and banked registers for various mode */
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; mask to clear the CPSR mode field [4:0]
and r2, r1, r0
orr r2, r2, #0x12 ; IRQ mode
msr cpsr, r2
ldr r13,=SFE(IRQ_STACK) ; IRQ stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the supervisor stack pointer
and r2, r1, r0
orr r2, r2, #0x13 ; supervisor mode
msr cpsr, r2
ldr r13,=SFE(SVC_STACK) ; Supervisor stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Abort stack pointer
and r2, r1, r0
orr r2, r2, #0x17 ; Abort mode
msr cpsr, r2
ldr r13,=SFE(ABT_STACK) ; Abort stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the FIQ stack pointer
and r2, r1, r0
orr r2, r2, #0x11 ; FIQ mode
msr cpsr, r2
; r8-r12 are banked in FIQ mode, so the FIQ copies must be cleared separately
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
mov r12, #0
ldr r13,=SFE(FIQ_STACK) ; FIQ stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Undefine stack pointer
and r2, r1, r0
orr r2, r2, #0x1b ; Undefine mode
msr cpsr, r2
ldr r13,=SFE(UND_STACK) ; Undefine stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the system stack pointer
and r2, r1, r0
orr r2, r2, #0x1F ; SYS mode
msr cpsr, r2
ldr r13,=SFE(CSTACK) ; SYS stack pointer
mov r14,#0
;
; Enable access to VFP by enabling access to Coprocessors 10 and 11.
; Enables Full Access i.e. in both privileged and non privileged modes
; (CPACR bits [23:20] = 0b1111).
;
mrc p15, 0, r0, c1, c0, 2 ; Read Coprocessor Access Control Register (CPACR)
orr r0, r0, #(0xF << 20) ; Enable access to CP 10 & 11
mcr p15, 0, r0, c1, c0, 2 ; Write Coprocessor Access Control Register (CPACR)
isb
; enable fpu access by setting FPEXC.EN (bit 30); r3 keeps the original
; FPEXC value so a soft-float build can restore it below
vmrs r3, FPEXC
orr r1, r3, #(1<<30)
vmsr FPEXC, r1
; clear the floating point register (reset values are unpredictable)
mov r1,#0
vmov d0,r1,r1
vmov d1,r1,r1
vmov d2,r1,r1
vmov d3,r1,r1
vmov d4,r1,r1
vmov d5,r1,r1
vmov d6,r1,r1
vmov d7,r1,r1
vmov d8,r1,r1
vmov d9,r1,r1
vmov d10,r1,r1
vmov d11,r1,r1
vmov d12,r1,r1
vmov d13,r1,r1
vmov d14,r1,r1
vmov d15,r1,r1
#ifdef __SOFTFP__
; Disable FPU by restoring previous value for FPU access
vmsr FPEXC,r3
#endif
; Disable MPU and caches while they are reconfigured; SCTLR bit 0 = M (MPU),
; bit 2 = C (data cache), bit 12 = I (instruction cache)
mrc p15, 0, r0, c1, c0, 0 ; Read CP15 Control Register
bic r0, r0, #0x05 ; Disable MPU (M bit) and data cache (C bit)
bic r0, r0, #0x1000 ; Disable instruction cache (I bit)
dsb ; Ensure all previous loads/stores have completed
mcr p15, 0, r0, c1, c0, 0 ; Write CP15 Control Register
isb ; Ensure subsequent insts execute wrt new MPU settings
; Disable Branch prediction, TCM ECC checks
mrc p15, 0, r0, c1, c0, 1 ; Read ACTLR
orr r0, r0, #(0x1 << 17) ; Enable RSDIS bit 17 to disable the return stack
orr r0, r0, #(0x1 << 16) ; Clear BP bit 15 and set BP bit 16
bic r0, r0, #(0x1 << 15) ; Branch always not taken and history table updates disabled
bic r0, r0, #(0x1 << 27) ; Disable B1TCM ECC check
bic r0, r0, #(0x1 << 26) ; Disable B0TCM ECC check
bic r0, r0, #(0x1 << 25) ; Disable ATCM ECC check
orr r0, r0, #(0x1 << 5) ; Enable ECC with no forced write through with [5:3]=b'101
bic r0, r0, #(0x1 << 4)
orr r0, r0, #(0x1 << 3)
mcr p15, 0, r0, c1, c0, 1 ; Write ACTLR
dsb ; Complete all outstanding explicit memory operations
; Invalidate caches
mov r0,#0 ; r0 = 0
dsb
mcr p15, 0, r0, c7, c5, 0 ; invalidate icache
mcr p15, 0, r0, c15, c5, 0 ; Invalidate entire data cache
isb
#if LOCKSTEP_MODE_DEBUG == 0
; enable fault log for lock step
; RPU_GLBL_CNTL bit 3 set means the RPU cores run in split mode, in which
; case no lockstep fault logging is needed
ldr r0,=RPU_GLBL_CNTL
ldr r1, [r0]
ands r1, r1, #0x8
; branch to initialization if split mode
bne init
; check for boot mode if in lock step, branch to init if JTAG boot mode
; (BOOT_MODE_USER low nibble == 0)
ldr r0,=BOOT_MODE_USER
ldr r1, [r0]
ands r1, r1, #0xF
beq init
; reset the debug logic by setting the reset bits in RST_LPD_DBG
ldr r0,=RST_LPD_DBG
ldr r1, [r0]
orr r1, r1, #(0x1 << 1)
orr r1, r1, #(0x1 << 4)
orr r1, r1, #(0x1 << 5)
str r1, [r0]
; enable fault log (fault_log_enable = 0x101, OR-ed into RPU_ERR_INJ)
ldr r0,=RPU_ERR_INJ
ldr r1,=fault_log_enable
ldr r2, [r0]
orr r2, r2, r1
str r2, [r0]
nop
nop
#endif
; Final configuration: program the MPU, re-enable branch prediction and the
; caches, select the vector table location, unmask asynchronous aborts,
; clear cp15 state with unknown reset values, start the cycle counter and
; optional TTC sleep timer, then jump to the IAR C runtime (__cmain).
init
bl Init_MPU ; Initialize MPU
; Enable Branch prediction
mrc p15, 0, r0, c1, c0, 1 ; Read ACTLR
bic r0, r0, #(0x1 << 17) ; Clear RSDIS bit 17 to enable return stack
bic r0, r0, #(0x1 << 16) ; Clear BP bit 15 and BP bit 16:
bic r0, r0, #(0x1 << 15) ; Normal operation, BP is taken from the global history table.
orr r0, r0, #(0x1 << 14) ; Disable DBWR for errata 780125
mcr p15, 0, r0, c1, c0, 1 ; Write ACTLR
; Enable icache and dcache: 0x1005 = SCTLR I (bit 12) | C (bit 2) | M (bit 0)
mrc p15,0,r1,c1,c0,0
ldr r0, =0x1005
orr r1,r1,r0
dsb
mcr p15,0,r1,c1,c0,0 ; Enable cache
isb ; isb flush prefetch buffer
; Set vector table in TCM/LOVEC
#ifndef VEC_TABLE_IN_OCM
mrc p15, 0, r0, c1, c0, 0
mvn r1, #0x2000 ; clear SCTLR.V (bit 13) to select low vectors
and r0, r0, r1
mcr p15, 0, r0, c1, c0, 0
; Clear VINITHI to enable LOVEC on reset
#if XPAR_CPU_ID == 0
ldr r0, =RPU_0_CFG
#else
ldr r0, =RPU_1_CFG
#endif
ldr r1, [r0]
bic r1, r1, #(0x1 << 2) ; VINITHI field
str r1, [r0]
#endif
; enable asynchronous abort exception (clear CPSR A bit, 0x100)
mrs r0, cpsr
bic r0, r0, #0x100
msr cpsr_xsf, r0
; Clear cp15 regs with unknown reset values
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 ; DFSR
mcr p15, 0, r0, c5, c0, 1 ; IFSR
mcr p15, 0, r0, c6, c0, 0 ; DFAR
mcr p15, 0, r0, c6, c0, 2 ; IFAR
mcr p15, 0, r0, c9, c13, 2 ; PMXEVCNTR
mcr p15, 0, r0, c13, c0, 2 ; TPIDRURW
mcr p15, 0, r0, c13, c0, 3 ; TPIDRURO
; Reset and start Cycle Counter
mov r2, #0x80000000 ; clear overflow flag (PMOVSR)
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd ; PMCR: D, C, E
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 ; enable cycle counter (PMCNTENSET)
mcr p15, 0, r2, c9, c12, 1
; configure the timer if TTC is present
#ifdef SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
; make sure argc and argv are valid
mov r0, #0
mov r1, #0
b __cmain ; jump to C startup code
Ldone b Ldone ; Paranoia: we should never get here
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 10,478 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/boot.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup r5_boot_code Cortex R5 Processor Boot Code
* @{
* <h2> boot.S </h2>
* The boot code performs minimum configuration which is required for an
* application to run starting from processor's reset state. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function.
*
* 1. Program vector table base for exception handling
* 2. Program stack pointer for various modes (IRQ, FIQ, supervisor, undefine,
* abort, system)
* 3. Disable instruction cache, data cache and MPU
* 4. Invalidate instruction and data cache
* 5. Configure MPU with short descriptor translation table format and program
* base address of translation table
* 6. Enable data cache, instruction cache and MPU
* 7. Enable Floating point unit
* 8. Transfer control to _start which clears BSS sections and jumping to main
* application
*
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 5.04 pkp 09/11/15 Disabled ACTLR.DBWR bit to avoid potential R5 deadlock
* for errata 780125
* 5.04 pkp 02/04/16 Enabled the fault log for lock-step mode
* 5.04 pkp 02/25/16 Initialized the banked registers for various modes,
* initialized floating point registers and enabled the
* cache ECC check before enabling the fault log for
* lock step mode
* 5.04 pkp 03/24/16 Reset the dbg_lpd_reset before enabling the fault log
* to avoid intervention for lock-step mode
* 5.05 pkp 04/11/16 Enable the comparators for non-JTAG boot mode for
* lock-step to avoid putting debug logic to reset
* 6.02 pkp 02/13/17 Added support for hard float
* 6.6 mus 02/23/17 Enable/Disable the debug logic in non-JTAG boot mode(when
* processor is in lockstep configuration), based
* on the mld parameter "lockstep_mode_debug".
* 6.8 mus 09/20/18 Clear VINITHI field in RPU_0_CFG/RPU_1_CFG
* registers to initialize CortexR5 core with LOVEC
* on reset. It fixes CR#1010656.
* 7.1 mus 03/27/19 Skip reading/writing to the RPU address space registers,
* in case if processor is nonsecure and RPU
* address space is secure. CR#1015725.
* 7.2 mus 10/11/19 Resetting the r5_0 and r5_1 debug logic is sufficient
* to avoid intervention for lock-step mode. So, removed
* code which resets dbg_lpd_reset, to unblock debugger
* access to LPD. Fix for CR#1027983.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.global _prestart
.global _boot
.global __stack
.global __irq_stack
.global __supervisor_stack
.global __abort_stack
.global __fiq_stack
.global __undef_stack
.global _vector_table
/* Stack Pointer locations for boot code */
.set Undef_stack, __undef_stack
.set FIQ_stack, __fiq_stack
.set Abort_stack, __abort_stack
.set SPV_stack, __supervisor_stack
.set IRQ_stack, __irq_stack
.set SYS_stack, __stack
.set vector_base, _vector_table
.set RPU_GLBL_CNTL, 0xFF9A0000
.set RPU_ERR_INJ, 0xFF9A0020
.set RPU_0_CFG, 0xFF9A0100
.set RPU_1_CFG, 0xFF9A0200
#if defined(versal)
.set RST_LPD_DBG, 0xFF5E0338
.set BOOT_MODE_USER, 0xF1260200
#else
.set RST_LPD_DBG, 0xFF5E0240
.set BOOT_MODE_USER, 0xFF5E0200
#endif
.set fault_log_enable, 0x101
/*
* 0th bit of PROCESSOR_ACCESS_VALUE macro signifies trustzone
* setting for RPU address space
*/
#define RPU_TZ_MASK 0x1
.section .boot,"axS"
/*
 * Reset entry point for the Cortex-R5 (GNU toolchain).
 * Clears r0-r12, then visits each ARM processor mode in turn to install a
 * per-mode stack pointer (linker-defined symbols) and a zeroed link
 * register.  The low five bits of CPSR select the mode: 0x12 IRQ,
 * 0x13 supervisor, 0x17 abort, 0x11 FIQ, 0x1B undefined, 0x1F system.
 * The core is left in SYS mode when this sequence completes.
 */
/* this initializes the various processor modes */
_prestart:
_boot:
OKToRun:
/* Initialize processor registers to 0 */
mov r0,#0
mov r1,#0
mov r2,#0
mov r3,#0
mov r4,#0
mov r5,#0
mov r6,#0
mov r7,#0
mov r8,#0
mov r9,#0
mov r10,#0
mov r11,#0
mov r12,#0
/* Initialize stack pointer and banked registers for various mode */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* mask to clear the CPSR mode field [4:0] */
and r2, r1, r0
orr r2, r2, #0x12 /* IRQ mode */
msr cpsr, r2
ldr r13,=IRQ_stack /* IRQ stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the supervisor stack pointer */
and r2, r1, r0
orr r2, r2, #0x13 /* supervisor mode */
msr cpsr, r2
ldr r13,=SPV_stack /* Supervisor stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Abort stack pointer */
and r2, r1, r0
orr r2, r2, #0x17 /* Abort mode */
msr cpsr, r2
ldr r13,=Abort_stack /* Abort stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the FIQ stack pointer */
and r2, r1, r0
orr r2, r2, #0x11 /* FIQ mode */
msr cpsr, r2
/* r8-r12 are banked in FIQ mode, so the FIQ copies are cleared separately */
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
mov r12, #0
ldr r13,=FIQ_stack /* FIQ stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Undefine stack pointer */
and r2, r1, r0
orr r2, r2, #0x1b /* Undefine mode */
msr cpsr, r2
ldr r13,=Undef_stack /* Undefine stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the system stack pointer */
and r2, r1, r0
orr r2, r2, #0x1F /* SYS mode */
msr cpsr, r2
ldr r13,=SYS_stack /* SYS stack pointer */
mov r14,#0
/*
 * Enable access to VFP by enabling access to Coprocessors 10 and 11.
 * Enables Full Access i.e. in both privileged and non privileged modes
 * (CPACR bits [23:20] = 0b1111).
 */
mrc p15, 0, r0, c1, c0, 2 /* Read Coprocessor Access Control Register (CPACR) */
orr r0, r0, #(0xF << 20) /* Enable access to CP 10 & 11 */
mcr p15, 0, r0, c1, c0, 2 /* Write Coprocessor Access Control Register (CPACR) */
isb
/* enable fpu access by setting FPEXC.EN (bit 30); r3 keeps the original
 * FPEXC value so a soft-float build can restore it below */
vmrs r3, FPEXC
orr r1, r3, #(1<<30)
vmsr FPEXC, r1
/* clear the floating point register (reset values are unpredictable) */
mov r1,#0
vmov d0,r1,r1
vmov d1,r1,r1
vmov d2,r1,r1
vmov d3,r1,r1
vmov d4,r1,r1
vmov d5,r1,r1
vmov d6,r1,r1
vmov d7,r1,r1
vmov d8,r1,r1
vmov d9,r1,r1
vmov d10,r1,r1
vmov d11,r1,r1
vmov d12,r1,r1
vmov d13,r1,r1
vmov d14,r1,r1
vmov d15,r1,r1
#ifdef __SOFTFP__
/* Disable the FPU if SOFTFP is defined*/
vmsr FPEXC,r3
#endif
/* Disable MPU and caches while they are reconfigured; SCTLR bit 0 = M (MPU),
 * bit 2 = C (data cache), bit 12 = I (instruction cache) */
mrc p15, 0, r0, c1, c0, 0 /* Read CP15 Control Register*/
bic r0, r0, #0x05 /* Disable MPU (M bit) and data cache (C bit) */
bic r0, r0, #0x1000 /* Disable instruction cache (I bit) */
dsb /* Ensure all previous loads/stores have completed */
mcr p15, 0, r0, c1, c0, 0 /* Write CP15 Control Register */
isb /* Ensure subsequent insts execute wrt new MPU settings */
/* Disable Branch prediction, TCM ECC checks */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
orr r0, r0, #(0x1 << 17) /* Enable RSDIS bit 17 to disable the return stack */
orr r0, r0, #(0x1 << 16) /* Clear BP bit 15 and set BP bit 16:*/
bic r0, r0, #(0x1 << 15) /* Branch always not taken and history table updates disabled*/
orr r0, r0, #(0x1 << 27) /* Enable B1TCM ECC check */
orr r0, r0, #(0x1 << 26) /* Enable B0TCM ECC check */
orr r0, r0, #(0x1 << 25) /* Enable ATCM ECC check */
bic r0, r0, #(0x1 << 5) /* Generate abort on parity errors, with [5:3]=b 000*/
bic r0, r0, #(0x1 << 4)
bic r0, r0, #(0x1 << 3)
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
dsb /* Complete all outstanding explicit memory operations*/
/* Invalidate caches */
mov r0,#0 /* r0 = 0 */
dsb
mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
mcr p15, 0, r0, c15, c5, 0 /* Invalidate entire data cache*/
isb
/* Lockstep fault-log setup.  Only compiled in when lockstep debug is not
 * requested AND this processor has (secure) access to the RPU register
 * space (PROCESSOR_ACCESS_VALUE bit 0). */
#if LOCKSTEP_MODE_DEBUG == 0 && (PROCESSOR_ACCESS_VALUE & RPU_TZ_MASK)
/* enable fault log for lock step */
/* RPU_GLBL_CNTL bit 3 set means the cores run in split mode, in which
 * case no lockstep fault logging is needed */
ldr r0,=RPU_GLBL_CNTL
ldr r1, [r0]
ands r1, r1, #0x8
/* branch to initialization if split mode*/
bne init
/* check for boot mode if in lock step, branch to init if JTAG boot mode*/
ldr r0,=BOOT_MODE_USER
ldr r1, [r0]
ands r1, r1, #0xF
beq init
/* reset the debug logic by setting the reset bits in RST_LPD_DBG */
ldr r0,=RST_LPD_DBG
ldr r1, [r0]
orr r1, r1, #(0x1 << 4)
orr r1, r1, #(0x1 << 5)
str r1, [r0]
/* enable fault log (fault_log_enable = 0x101, OR-ed into RPU_ERR_INJ) */
ldr r0,=RPU_ERR_INJ
ldr r1,=fault_log_enable
ldr r2, [r0]
orr r2, r2, r1
str r2, [r0]
nop
nop
#endif
/* Final configuration: program the MPU, re-enable branch prediction and the
 * caches, select the vector table location, unmask asynchronous aborts,
 * then jump to the C runtime startup (_startup in xil-crt0). */
init:
bl Init_MPU /* Initialize MPU */
/* Enable Branch prediction */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR*/
bic r0, r0, #(0x1 << 17) /* Clear RSDIS bit 17 to enable return stack*/
bic r0, r0, #(0x1 << 16) /* Clear BP bit 15 and BP bit 16:*/
bic r0, r0, #(0x1 << 15) /* Normal operation, BP is taken from the global history table.*/
orr r0, r0, #(0x1 << 14) /* Disable DBWR for errata 780125 */
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
/* Enable icache and dcache: 0x1005 = SCTLR I (bit 12) | C (bit 2) | M (bit 0) */
mrc p15,0,r1,c1,c0,0
ldr r0, =0x1005
orr r1,r1,r0
dsb
mcr p15,0,r1,c1,c0,0 /* Enable cache */
isb /* isb flush prefetch buffer */
/* Warning message to be removed after 2016.1 */
/* USEAMP was introduced in 2015.4 with ZynqMP and caused confusion with USE_AMP */
#ifdef USEAMP
#warning "-DUSEAMP=1 is deprecated, use -DVEC_TABLE_IN_OCM instead to set vector table in OCM"
#endif
/* Set vector table in TCM/LOVEC */
#ifndef VEC_TABLE_IN_OCM
mrc p15, 0, r0, c1, c0, 0
mvn r1, #0x2000 /* clear SCTLR.V (bit 13) to select low vectors */
and r0, r0, r1
mcr p15, 0, r0, c1, c0, 0
/* Check if processor is having access to RPU address space */
#if (PROCESSOR_ACCESS_VALUE & RPU_TZ_MASK)
/* Clear VINITHI to enable LOVEC on reset */
#if XPAR_CPU_ID == 0
ldr r0, =RPU_0_CFG
#else
ldr r0, =RPU_1_CFG
#endif
ldr r1, [r0]
bic r1, r1, #(0x1 << 2) /* VINITHI field */
str r1, [r0]
#endif
#endif
/* enable asynchronous abort exception (clear CPSR A bit, 0x100) */
mrs r0, cpsr
bic r0, r0, #0x100
msr cpsr_xsf, r0
b _startup /* jump to C startup code */
.Ldone: b .Ldone /* Paranoia: we should never get here */
.end
/**
* @} End of "addtogroup r5_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,773 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex R5 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 6.0 mus 27/07/16 Added UndefinedException handler
* 6.3 pkp 02/13/17 Added support for hard float
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SWInterrupt
.globl DataAbortInterrupt
.globl PrefetchAbortInterrupt
.globl IRQHandler
.globl prof_pc
.section .vectors, "a"
/*
 * Cortex-R5 exception vector table, placed in its own ".vectors" section so
 * the linker/boot code can locate it (LOVEC or OCM).  Each slot loads pc
 * from a literal pool, so every handler is reachable regardless of where it
 * is linked.
 */
_vector_table:
ldr pc,=_boot /* reset */
ldr pc,=Undefined /* undefined instruction */
ldr pc,=SVCHandler /* software interrupt (SVC/SWI) */
ldr pc,=PrefetchAbortHandler /* prefetch abort */
ldr pc,=DataAbortHandler /* data abort */
NOP /* Placeholder for address exception vector*/
ldr pc,=IRQHandler /* IRQ */
ldr pc,=FIQHandler /* FIQ */
.text
/*
 * IRQ entry: saves the caller-volatile integer registers, and for hard-float
 * builds also d0-d7 plus FPSCR/FPEXC, before calling the C dispatcher
 * IRQInterrupt.  "subs pc, lr, #4" both corrects the IRQ return address
 * (lr points one instruction past the interrupted one) and restores CPSR
 * from SPSR.
 */
IRQHandler: /* IRQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code*/
#ifndef __SOFTFP__
vpush {d0-d7} /* Store floating point registers */
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
bl IRQInterrupt /* IRQ vector */
#ifndef __SOFTFP__
pop {r1} /* Restore floating point registers */
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/*
 * FIQ entry: saves the caller-volatile integer registers and calls the C
 * dispatcher FIQInterrupt.  Note: unlike IRQHandler, no floating point
 * state is saved here.
 */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
FIQLoop:
bl FIQInterrupt /* FIQ vector */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/*
 * Undefined-instruction entry: records the address of the faulting
 * instruction (lr - 4) in UndefinedExceptionAddr for the C handler, then
 * calls UndefinedException.  "movs pc, lr" returns past the faulting
 * instruction and restores CPSR from SPSR.
 */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] /* Store address of instruction causing undefined exception */
bl UndefinedException /* UndefinedException: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr
/*
 * SVC/SWI entry: extracts the SVC immediate from the instruction that
 * trapped (halfword at lr-2 in Thumb state, word at lr-4 in ARM state,
 * with the opcode bits masked off) and calls the C handler SWInterrupt.
 */
SVCHandler: /* SWI handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
/* NOTE(review): at this point r0 still holds the caller's first argument,
 * not the saved SPSR, so the T-bit test below looks like it was meant to
 * be preceded by an `mrs r0, spsr` -- confirm before relying on the
 * Thumb/ARM distinction here. */
tst r0, #0x20 /* check the T bit */
ldrneh r0, [lr,#-2] /* Thumb mode */
bicne r0, r0, #0xff00 /* Thumb mode */
ldreq r0, [lr,#-4] /* ARM mode */
biceq r0, r0, #0xff000000 /* ARM mode */
bl SWInterrupt /* SWInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr /* adjust return */
/*
 * Data-abort entry: records the address of the aborting instruction
 * (lr - 8 in abort mode) in DataAbortAddr for the C handler, then calls
 * DataAbortInterrupt.  "subs pc, lr, #8" re-executes the aborted
 * instruction and restores CPSR from SPSR.
 */
DataAbortHandler: /* Data Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] /* Stores instruction causing data abort */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* adjust return */
/*
 * Prefetch-abort entry: records the address of the aborting instruction
 * (lr - 4 in abort mode) in PrefetchAbortAddr for the C handler, then calls
 * PrefetchAbortInterrupt.  "subs pc, lr, #4" re-executes the aborted
 * instruction and restores CPSR from SPSR.
 */
PrefetchAbortHandler: /* Prefetch Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] /* Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,572 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/xil-crt0.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 First release
* 5.04 pkp 12/18/15 Initialized global constructor for C++ applications
* 5.04 pkp 02/19/16 Added timer configuration using XTime_StartTimer API when
* TTC3 is present
* 6.4 asa 08/16/17 Added call to Xil_InitializeExistingMPURegConfig to
* initialize the MPU configuration table with the MPU
* configurations already set in Init_Mpu function.
* 6.6 srm 10/18/17 Updated the timer configuration with XTime_StartTTCTimer.
* Now the timer instance as specified by the user will be
* started.
* 7.2 mus 10/22/19 Defined RPU_TZ_MASK as #define instead of variable.
* 7.2 sd 03/20/20 Add clocking support.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
/*
 * 0th bit of PROCESSOR_ACCESS_VALUE macro signifies trustzone
 * setting for RPU address space
 */
#define RPU_TZ_MASK 0x1
.text
/* Literal pool: section boundaries and initial stack, resolved by the
 * linker script. */
.Lsbss_start:
.long __sbss_start
.Lsbss_end:
.long __sbss_end
.Lbss_start:
.long __bss_start__
.Lbss_end:
.long __bss_end__
.Lstack:
.long __stack
.set RPU_0_PWRCTL, 0xFF9A0108
.set RPU_1_PWRCTL, 0xFF9A0208
.set MPIDR_AFF0, 0xFF
.set PWRCTL_MASK, 0x1
/*
 * C runtime startup, entered from boot.S.  Initializes the CPU, clears the
 * .sbss/.bss sections (skipped on warm reset), sets the stack pointer,
 * runs global constructors, calls main(), and finally runs destructors and
 * exit().  Never returns.
 */
.globl _startup
_startup:
bl __cpu_init /* Initialize the CPU first (BSP provides this) */
#if (PROCESSOR_ACCESS_VALUE & RPU_TZ_MASK)
/* Select this core's RPU power-control register via MPIDR affinity 0 */
mrc p15, 0, r0, c0, c0, 5 /* Read MPIDR register */
ands r0, r0, #MPIDR_AFF0 /* Get affinity level 0 */
bne core1
ldr r10, =RPU_0_PWRCTL /* Load PWRCTRL address for core 0 */
b test_boot_status
core1:
ldr r10, =RPU_1_PWRCTL /* Load PWRCTRL address for core 1 */
test_boot_status:
ldr r11, [r10] /* Read PWRCTRL register */
ands r11, r11, #PWRCTL_MASK /* Extract and test core's PWRCTRL */
/* if warm reset, skip the clearing of BSS and SBSS */
bne .Lenclbss
#endif
mov r0, #0
/* clear sbss, one word at a time */
ldr r1,.Lsbss_start /* calculate beginning of the SBSS */
ldr r2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp r1,r2
bge .Lenclsbss /* If no SBSS, no clearing required */
str r0, [r1], #4
b .Lloop_sbss
.Lenclsbss:
/* clear bss, one word at a time */
ldr r1,.Lbss_start /* calculate beginning of the BSS */
ldr r2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp r1,r2
bge .Lenclbss /* If no BSS, no clearing required */
str r0, [r1], #4
b .Lloop_bss
.Lenclbss:
/* set stack pointer */
ldr r13,.Lstack /* stack address */
/* configure the timer if TTC is present */
#ifdef SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
bl Xil_InitializeExistingMPURegConfig /* Initialize MPU config */
/* run global constructors */
bl __libc_init_array
/* make sure argc and argv are valid */
mov r0, #0
mov r1, #0
#ifdef XCLOCKING
bl Xil_ClockInit
#endif
bl main /* Jump to main C code */
/* Cleanup global constructors */
bl __libc_fini_array
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _startup,.Lstart-_startup
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,379 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/cpu_init.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file cpu_init.s
*
* This file contains CPU specific initialization. Invoked from main CRT
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
*
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.text
.global __cpu_init
.align 2
/*
 * Per-CPU initialization called from _startup.  Zeroes cp15 registers whose
 * reset values are unknown (fault status/address, performance and thread-ID
 * registers), then resets and enables the PMU cycle counter.  Clobbers
 * r0, r2; returns via lr.
 */
__cpu_init:
/* Clear cp15 regs with unknown reset values */
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 /* DFSR */
mcr p15, 0, r0, c5, c0, 1 /* IFSR */
mcr p15, 0, r0, c6, c0, 0 /* DFAR */
mcr p15, 0, r0, c6, c0, 2 /* IFAR */
mcr p15, 0, r0, c9, c13, 2 /* PMXEVCNTR */
mcr p15, 0, r0, c13, c0, 2 /* TPIDRURW */
mcr p15, 0, r0, c13, c0, 3 /* TPIDRURO */
/* Reset and start Cycle Counter */
mov r2, #0x80000000 /* clear overflow flag (PMOVSR) */
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd /* PMCR: D, C, E */
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 /* enable cycle counter (PMCNTENSET) */
mcr p15, 0, r2, c9, c12, 1
bx lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 9,069 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/boot.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup a53_32_boot_code Cortex A53 32bit Processor Boot Code
* @{
* <h2> boot.S </h2>
* The boot code performs minimum configuration which is required for an
* application to run starting from processor's reset state. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function.
*
* 1. Program vector table base for exception handling
* 2. Invalidate instruction cache, data cache and TLBs
* 3. Program stack pointer for various modes (IRQ, FIQ, supervisor, undefine,
* abort, system)
* 4. Program counter frequency
* 5. Configure MMU with short descriptor translation table format and program
* base address of translation table
* 6. Enable data cache, instruction cache and MMU
* 7. Transfer control to _start which clears BSS sections and runs global
* constructor before jumping to main application
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 5.4 pkp 09/11/15 Enable I-Cache and D-Cache in the initialization
* 6.0 pkp 07/25/16 Program the counter frequency
* 6.4 mus 07/25/17 Set VFP enable bit in FPEXC register, to support
* hard floating point in BSP
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
/* Symbols exported for the linker script and the rest of the BSP. */
.globl MMUTable
.global _prestart
.global _boot
.global __stack
.global __irq_stack
.global __supervisor_stack
.global __abort_stack
.global __fiq_stack
.global __undef_stack
.global _vector_table
/*
 * NOTE(review): PSS_*, RESERVED, LRemap and CRValMmuCac are not referenced
 * anywhere in this file -- presumably carried over from the Zynq-7000 boot
 * code; verify before removing.
 */
.set PSS_L2CC_BASE_ADDR, 0xF8F02000
.set PSS_SLCR_BASE_ADDR, 0xF8000000
.set RESERVED, 0x0fffff00
.set TblBase , MMUTable
.set LRemap, 0xFE00000F /* set the base address of the peripheral block as not shared */
.set CRValMmuCac, 0b01000000000001 /* Enable IDC, and MMU */
/* Generic timer input clock frequency, generated into xparameters.h. */
.set counterfreq, XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ
/* Stack Pointer locations for boot code (tops come from the linker script) */
.set Undef_stack, __undef_stack
.set FIQ_stack, __fiq_stack
.set Abort_stack, __abort_stack
.set SPV_stack, __supervisor_stack
.set IRQ_stack, __irq_stack
.set SYS_stack, __stack
.set vector_base, _vector_table
.set FPEXC_EN, 0x40000000 /* FPU enable bit, (1 << 30) */
.section .boot,"ax"
/* this initializes the various processor modes */
_prestart:
_boot:
OKToRun:
/* Read MIDR and pack the variant/revision fields into r6. */
mrc p15, 0, r0, c0, c0, 0 /* Get the revision */
and r5, r0, #0x00f00000
and r6, r0, #0x0000000f
orr r6, r6, r5, lsr #20-4
/* set VBAR to the _vector_table address in linker script */
ldr r0, =vector_base
mcr p15, 0, r0, c12, c0, 0
/* Invalidate caches and TLBs */
mov r0,#0 /* r0 = 0 */
mcr p15, 0, r0, c8, c7, 0 /* invalidate TLBs */
mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
mcr p15, 0, r0, c7, c5, 6 /* Invalidate branch predictor array */
bl invalidate_dcache /* invalidate dcache */
/* Disable MMU, if enabled */
mrc p15, 0, r0, c1, c0, 0 /* read CP15 register 1 */
bic r0, r0, #0x1 /* clear bit 0 */
mcr p15, 0, r0, c1, c0, 0 /* write value back */
/*
 * Cycle through each ARM mode by rewriting CPSR.M[4:0] and load the
 * banked stack pointer (r13) for that mode from the linker symbols.
 * The loop ends with the CPU left in SYS mode.
 */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the irq stack pointer */
and r2, r1, r0
orr r2, r2, #0x12 /* IRQ mode */
msr cpsr, r2
ldr r13,=IRQ_stack /* IRQ stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the supervisor stack pointer */
and r2, r1, r0
orr r2, r2, #0x13 /* supervisor mode */
msr cpsr, r2
ldr r13,=SPV_stack /* Supervisor stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Abort stack pointer */
and r2, r1, r0
orr r2, r2, #0x17 /* Abort mode */
msr cpsr, r2
ldr r13,=Abort_stack /* Abort stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the FIQ stack pointer */
and r2, r1, r0
orr r2, r2, #0x11 /* FIQ mode */
msr cpsr, r2
ldr r13,=FIQ_stack /* FIQ stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Undefine stack pointer */
and r2, r1, r0
orr r2, r2, #0x1b /* Undefine mode */
msr cpsr, r2
ldr r13,=Undef_stack /* Undefine stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the system stack pointer */
and r2, r1, r0
orr r2, r2, #0x1F /* SYS mode */
msr cpsr, r2
ldr r13,=SYS_stack /* SYS stack pointer */
/* program the timer counter frequency */
ldr r0,=counterfreq
mcr 15,0,r0,c14,c0,0 /* CNTFRQ <- timestamp clock frequency */
mov r0,#0
mcr 15,0,r0,c2,c0,2 /* N = 0 to use ttbr0 */
/* Write to ACTLR */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
orr r0, r0, #(1 << 0) /* Enable access to CPUECTLR */
orr r0, r0, #(1 << 1)
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR */
/* Write to CPUECTLR */
mrrc p15, 1, r0, r1, c15 /* Read CPUECTLR */
orr r0, r0, #(0x01 << 6) /* Set SMPEN bit */
mcrr p15, 1, r0, r1, c15 /* Write CPUECTLR */
/* enable MMU and cache */
ldr r0,=TblBase /* Load MMU translation table base */
orr r0, r0, #0x5B /* Outer-cacheable, WB */
mcr 15, 0, r0, c2, c0, 0 /* TTB0 */
mov r0,#0x5B
mcr p15,0,r0,c2,c0,1 /* NOTE(review): writes attributes with a zero base to TTBR1; with TTBCR.N=0 above TTBR1 is never used -- verify intent */
mvn r0,#0 /* Load MMU domains -- all ones=manager */
mcr p15,0,r0,c3,c0,0
/* Enable mmu, icache and dcache */
mrc p15,0,r0,c1,c0,0
bic r0, r0, #(0x1 << 13) /* clear bit 13: exception vectors at VBAR, not hivecs */
orr r0, r0, #(0x1 << 12) /* enable I-cache */
orr r0, r0, #(0x1 << 2) /* enable D-Cache */
orr r0, r0, #0x1 /* enable MMU */
dsb /* dsb allow the MMU to start up */
mcr p15,0,r0,c1,c0,0 /* Enable cache and MMU */
isb /* isb flush prefetch buffer */
mov r0, r0
/* Grant full access to CP10/CP11 (VFP/NEON) before touching FPEXC. */
mrc p15, 0, r1, c1, c0, 2 /* read cp access control register (CACR) into r1 */
orr r1, r1, #(0xf << 20) /* enable full access for p10 & p11 */
mcr p15, 0, r1, c1, c0, 2 /* write back into CACR */
/* enable vfp */
vmrs r1, FPEXC /* read the exception register */
orr r1,r1, #FPEXC_EN /* set VFP enable bit, leave the others in orig state */
vmsr FPEXC, r1 /* write back the exception register */
mrc p15,0,r0,c1,c0,0 /* flow prediction enable */
orr r0, r0, #(0x01 << 11) /* bit 11 (#0x800) */
mcr p15,0,r0,c1,c0,0
mrc p15,0,r0,c1,c0,1 /* read Auxiliary Control Register */
orr r0, r0, #(0x1 << 2) /* enable Dside prefetch */
orr r0, r0, #(0x1 << 1) /* enable L2 Prefetch hint */
mcr p15,0,r0,c1,c0,1 /* write Auxiliary Control Register */
mrs r0, cpsr /* get the current PSR */
bic r0, r0, #0x100 /* enable asynchronous abort exception */
msr cpsr_xsf, r0
b _startup /* jump to C startup code */
and r0, r0, r0 /* no op */
.Ldone: b .Ldone /* Paranoia: we should never get here */
/*
*************************************************************************
*
* invalidate_dcache - invalidate the entire d-cache by set/way
*
* Note: for Cortex-A53, there is no cp instruction for invalidating
* the whole D-cache. Need to invalidate each line.
*
*************************************************************************
*/
/*
 * invalidate_dcache -- invalidate the entire data cache by set/way.
 * Walks every cache level reported by CLIDR up to LoC; for each data or
 * unified level it reads CCSIDR and issues DCISW for every set/way.
 * Uses r0-r5, r7, r9-r11 and flags; no stack. Called from _boot while
 * the MMU and caches are still disabled.
 */
invalidate_dcache:
mrc p15, 1, r0, c0, c0, 1 /* read CLIDR */
ands r3, r0, #0x7000000
mov r3, r3, lsr #23 /* cache level value (naturally aligned) */
beq finished
mov r10, #0 /* start with level 0 */
loop1:
add r2, r10, r10, lsr #1 /* work out 3xcachelevel */
mov r1, r0, lsr r2 /* bottom 3 bits are the Cache type for this level */
and r1, r1, #7 /* get those 3 bits alone */
cmp r1, #2
blt skip /* no cache or only instruction cache at this level */
mcr p15, 2, r10, c0, c0, 0 /* write the Cache Size selection register */
isb /* isb to sync the change to the CacheSizeID reg */
mrc p15, 1, r1, c0, c0, 0 /* reads current Cache Size ID register */
and r2, r1, #7 /* extract the line length field */
add r2, r2, #4 /* add 4 for the line length offset (log2 16 bytes) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* r4 is the max number on the way size (right aligned) */
clz r5, r4 /* r5 is the bit position of the way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* r7 is the max number of the index size (right aligned) */
loop2:
mov r9, r4 /* r9 working copy of the max way size (right aligned) */
loop3:
orr r11, r10, r9, lsl r5 /* factor in the way number and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor in the index number */
mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
subs r9, r9, #1 /* decrement the way number */
bge loop3
subs r7, r7, #1 /* decrement the index */
bge loop2
skip:
add r10, r10, #2 /* increment the cache number */
cmp r3, r10
bgt loop1
finished:
mov r10, #0 /* switch back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
bx lr
.end
/**
* @} End of "addtogroup a53_32_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,011 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A53 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 6.0 mus 27/07/16 Added Undefined exception handler
* 6.4 mus 25/07/17 Added support for hard floating point
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SWInterrupt
.globl DataAbortInterrupt
.globl PrefetchAbortInterrupt
.globl IRQHandler
.globl prof_pc
.section .vectors, "a"
/*
 * ARM exception vector table: eight 4-byte slots at the architecturally
 * fixed offsets (reset, undefined, SVC, prefetch abort, data abort,
 * reserved, IRQ, FIQ). VBAR is pointed here by the boot code.
 */
_vector_table:
B _boot
B Undefined
B SVCHandler
B PrefetchAbortHandler
B DataAbortHandler
NOP /* Placeholder for address exception vector*/
B IRQHandler
B FIQHandler
/*
 * IRQ entry: save the AAPCS caller-saved integer registers plus the full
 * VFP/NEON state (d0-d7, upper bank d16-d31, FPSCR, FPEXC) on the IRQ-mode
 * stack, call the C dispatcher IRQInterrupt, restore, and return with
 * "subs pc, lr, #4" which also restores CPSR from SPSR_irq.
 */
IRQHandler: /* IRQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code*/
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
bl IRQInterrupt /* IRQ vector */
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/*
 * FIQ entry: mirrors IRQHandler (integer scratch + full VFP state saved
 * around the C dispatcher FIQInterrupt).
 */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
/* NOTE(review): FIQLoop is never branched to in this file -- presumably a
 * vestigial label from an older polling implementation. */
FIQLoop:
bl FIQInterrupt /* FIQ vector */
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/*
 * Undefined-instruction entry: record the faulting instruction address
 * (lr - 4) in UndefinedExceptionAddr for the C handler, then return to
 * the instruction after the faulting one via "movs pc, lr".
 */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] /* Store address of instruction causing undefined exception */
bl UndefinedException /* UndefinedException: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr
/*
 * SVC (software interrupt) entry: extract the SVC immediate from the
 * instruction that trapped, then call the C dispatcher SWInterrupt.
 * NOTE(review): the "tst r0, #0x20" Thumb-bit check tests r0, which at
 * this point still holds the interrupted code's r0, not SPSR -- looks
 * like it should be "mrs r0, spsr" first; confirm against upstream.
 */
SVCHandler: /* SWI handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
tst r0, #0x20 /* check the T bit */
ldrneh r0, [lr,#-2] /* Thumb mode */
bicne r0, r0, #0xff00 /* Thumb mode */
ldreq r0, [lr,#-4] /* ARM mode */
biceq r0, r0, #0xff000000 /* ARM mode */
bl SWInterrupt /* SWInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr /*return to the next instruction after the SWI instruction */
/*
 * Data abort entry: record the aborting instruction address (lr - 8) in
 * DataAbortAddr, call the C handler, then "subs pc, lr, #8" re-executes
 * the instruction that caused the abort.
 */
DataAbortHandler: /* Data Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] /* Stores instruction causing data abort */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* points to the instruction that caused the Data Abort exception */
/*
 * Prefetch abort entry: record the aborting instruction address (lr - 4)
 * in PrefetchAbortAddr, call the C handler, then re-execute the faulting
 * instruction via "subs pc, lr, #4".
 */
PrefetchAbortHandler: /* Prefetch Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] /* Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* points to the instruction that caused the Prefetch Abort exception */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,156 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/xil-crt0.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 5.4 pkp 18/12/15 Initialized global constructor for C++ applications
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
/* In-.text literal pool holding the linker-provided section bounds. */
.Lsbss_start:
.long __sbss_start
.Lsbss_end:
.long __sbss_end
.Lbss_start:
.long __bss_start__
.Lbss_end:
.long __bss_end__
.Lstack:
.long __stack
.globl _startup
/*
 * _startup -- C runtime entry, branched to from _boot.
 * Zeroes .sbss and .bss word-by-word (NOTE(review): assumes the linker
 * script keeps the bounds 4-byte aligned -- verify), sets sp, runs global
 * constructors, optionally starts the TTC sleep timer, calls main(0, 0),
 * then runs destructors and exit(). Never returns.
 */
_startup:
mov r0, #0
/* clear sbss */
ldr r1,.Lsbss_start /* calculate beginning of the SBSS */
ldr r2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp r1,r2
bge .Lenclsbss /* If no SBSS, no clearing required */
str r0, [r1], #4
b .Lloop_sbss
.Lenclsbss:
/* clear bss */
ldr r1,.Lbss_start /* calculate beginning of the BSS */
ldr r2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp r1,r2
bge .Lenclbss /* If no BSS, no clearing required */
str r0, [r1], #4
b .Lloop_bss
.Lenclbss:
/* set stack pointer */
ldr r13,.Lstack /* stack address */
/* run global constructors */
bl __libc_init_array
/* Reset and start Triple Timer Counter */
#if defined (SLEEP_TIMER_BASEADDR)
bl XTime_StartTTCTimer
#endif
/* make sure argc and argv are valid */
mov r0, #0
mov r1, #0
bl main /* Jump to main C code */
/* Cleanup global constructors */
bl __libc_fini_array
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _startup,.Lstart-_startup
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,374 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/cpu_init.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file cpu_init.s
*
* This file contains CPU specific initialization. Invoked from main CRT
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.text
.global __cpu_init
.align 2
/*
 * __cpu_init -- clear CP15 registers whose reset values are UNKNOWN and
 * start the PMU cycle counter. Leaf routine, clobbers r0 and r2 only.
 */
__cpu_init:
/* Clear cp15 regs with unknown reset values */
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 /* DFSR */
mcr p15, 0, r0, c5, c0, 1 /* IFSR */
mcr p15, 0, r0, c6, c0, 0 /* DFAR */
mcr p15, 0, r0, c6, c0, 2 /* IFAR */
mcr p15, 0, r0, c9, c13, 2 /* PMXEVCNTR */
mcr p15, 0, r0, c13, c0, 2 /* TPIDRURW */
mcr p15, 0, r0, c13, c0, 3 /* TPIDRURO */
/* Reset and start Cycle Counter */
mov r2, #0x80000000 /* clear overflow */
mcr p15, 0, r2, c9, c12, 3 /* PMOVSR: clear cycle-counter overflow flag */
mov r2, #0xd /* D, C, E */
mcr p15, 0, r2, c9, c12, 0 /* PMCR: divide-by-64, reset cycle counter, enable */
mov r2, #0x80000000 /* enable cycle counter */
mcr p15, 0, r2, c9, c12, 1 /* PMCNTENSET bit 31 */
bx lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 5,660 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/platform/ZynqMP/translation_table.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a53_32_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A53. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq ultrascale+
* architecture. It utilizes short descriptor translation table format with each
* section defining 1MB of memory.
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------|-------------------------|---------------------------------|
*| DDR | 0x00000000 - 0x7FFFFFFF | Normal write-back Cacheable |
*| PL | 0x80000000 - 0xBFFFFFFF | Strongly Ordered |
*| QSPI, lower PCIe| 0xC0000000 - 0xEFFFFFFF | Device Memory |
*| Reserved | 0xF0000000 - 0xF7FFFFFF | Unassigned |
*| STM Coresight | 0xF8000000 - 0xF8FFFFFF | Device Memory |
*| GIC | 0xF9000000 - 0xF90FFFFF | Device memory |
*| Reserved | 0xF9100000 - 0xFCFFFFFF | Unassigned |
*| FPS, LPS slaves | 0xFD000000 - 0xFFBFFFFF | Device memory |
*| CSU, PMU | 0xFFC00000 - 0xFFDFFFFF | Device Memory |
*| TCM, OCM | 0xFFE00000 - 0xFFFFFFFF | Normal write-back cacheable |
*
* @note
*
* For DDR in region 0x00000000 - 0x7FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. In region 0xFFC00000 - 0xFFDFFFFF, it contains CSU
* and PMU memory which are marked as Device since it is less than 1MB and
* falls in a region with device memory.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 5.4 pkp 18/12/15 Updated the address map according to proper address map
* 6.0 mus 20/07/16 Added warning for ddrless HW design CR-954977
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
.globl MMUTable
.section .mmu_tbl,"a"
/*
 * Flat-mapped short-descriptor level-1 translation table for the A53 in
 * AArch32. One 32-bit section descriptor per 1MB; 4096 entries = 16KB.
 * The .rept counts below must total exactly 0x1000 entries.
 */
MMUTable:
/* Each table entry occupies one 32-bit word and there are
* 4096 entries, so the entire table takes up 16KB.
* Each entry covers a 1MB section.
*/
.set SECT, 0
#ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_START, XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_END, XPAR_PSU_DDR_0_S_AXI_HIGHADDR
.set DDR_SIZE, (DDR_END - DDR_START)+1
.if DDR_SIZE > 0x80000000
/* If DDR size is larger than 2GB, truncate to 2GB */
.set DDR_REG, 0x800
.else
.set DDR_REG, DDR_SIZE/0x100000
.endif
#else
.set DDR_REG, 0
#warning "There's no DDR in the HW design. MMU translation table marks 2 GB DDR address space as undefined"
#endif
/* Whatever is not DDR below 2GB is marked as a fault region. */
.set UNDEF_REG, 0x800 - DDR_REG
.rept DDR_REG /* DDR Cacheable */
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept UNDEF_REG /* unassigned/reserved */
/* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0x80000000 - 0x9fffffff (FPGA slave0) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 (strongly ordered) */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0xA0000000 - 0xbfffffff (FPGA slave1) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 (strongly ordered) */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0xc0000000 - 0xdfffffff (OSPI IOU)*/
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0100 /* 0xe0000000 - 0xefffffff (Lower PCIe)*/
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x80 /* 0xf0000000 - 0xf7ffffff (unassigned/reserved).
 * Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x10 /* 0xf8000000 - 0xf8ffffff (STM Coresight) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x1 /* 0xf9000000 - 0xf90fffff (RPU_A53_GIC) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x3f /* 0xf9100000 - 0xfcffffff (reserved).*/
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x10 /* 0xfd000000 - 0xfdffffff (FPS Slaves) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x1C /* 0xfe000000 - 0xffbfffff (LPS Slaves; 0x1C sections = 448MB) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x2 /* 0xffc00000 - 0xffdfffff (CSU and PMU) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x02 /* 0xffe00000 - 0xffffffff (TCM and OCM Cacheable) */
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.end
/**
* @} End of "addtogroup a53_32_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 11,334 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/armclang/boot.S | ;/******************************************************************************
;* Copyright (c) 2019 - 2020 Xilinx, Inc. All rights reserved.
;* SPDX-License-Identifier: MIT
;******************************************************************************/
;/*****************************************************************************/
;/**
;* @file boot.S
;*
;* @addtogroup a53_64_boot_code Cortex A53 64bit Processor Boot Code
;* @{
;* <h2> boot.S </h2>
;*
;* The boot code performs minimum configuration which is required for an
;* application. Cortex-A53 starts by checking current exception level. If the
;* current exception level is EL3 and BSP is built for EL3, it will do
;* initialization required for application execution at EL3. Below is a
;* sequence illustrating what all configuration is performed before control
;* reaches to main function for EL3 execution.
;*
;* 1. Program vector table base for exception handling
;* 2. Set reset vector table base address
;* 3. Program stack pointer for EL3
;* 4. Routing of interrupts to EL3
;* 5. Enable ECC protection
;* 6. Program generic counter frequency
;* 7. Invalidate instruction cache, data cache and TLBs
;* 8. Configure MMU registers and program base address of translation table
;* 9. Transfer control to _start which clears BSS sections and runs global
;* constructor before jumping to main application
;*
;* If current exception level is EL1 and BSP is also built for EL1_NONSECURE
;* it will perform initialization required for application execution at EL1
;* non-secure. For all other combination, the execution will go into infinite
;* loop. Below is a sequence illustrating what all configuration is performed
;* before control reaches to main function for EL1 execution.
;*
;* 1. Program vector table base for exception handling
;* 2. Program stack pointer for EL1
;* 3. Invalidate instruction cache, data cache and TLBs
;* 4. Configure MMU registers and program base address of translation table
;* 5. Transfer control to _start which clears BSS sections and runs global
;* constructor before jumping to main application
;*
;* <pre>
;* MODIFICATION HISTORY:
;*
;* Ver Who Date Changes
;* ----- ------- -------- ---------------------------------------------------
;* 7.0 mus 02/26/19 First release
;* 7.2 mus 01/08/19 Added support for versal
;* sd 02/23/20 Clock Init is called
;* sd 03/21/20 Added XCLOCKING flag
;*
;******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
#include "xil_errata.h"
EXPORT _prestart
EXPORT _boot
IMPORT MMUTableL0
IMPORT |Image$$ARM_LIB_STACK$$ZI$$Base|
IMPORT _vector_table
IMPORT __main
#ifdef XCLOCKING
IMPORT Xil_ClockInit
#endif
#ifndef FREERTOS_BSP
IMPORT FPUStatus
#endif
; Base of the per-core A53 reset-vector (RVBAR) registers; the code below
; indexes them at an 8-byte stride per CPU id.
rvbar_base EQU 0xFD5C0040
; NOTE(review): MODE_EL1, DAIF_BIT and TT_S1_* are not referenced in this
; file -- presumably shared with companion sources; verify before removing.
MODE_EL1 EQU 0x5
DAIF_BIT EQU 0x1C0
TT_S1_FAULT EQU 0x0
TT_S1_TABLE EQU 0x3
AREA |.boot|, CODE
; This initializes the various processor modes
_prestart
_boot
; Bring every general-purpose register to a known (zero) state out of reset.
mov x0, #0
mov x1, #0
mov x2, #0
mov x3, #0
mov x4, #0
mov x5, #0
mov x6, #0
mov x7, #0
mov x8, #0
mov x9, #0
mov x10, #0
mov x11, #0
mov x12, #0
mov x13, #0
mov x14, #0
mov x15, #0
mov x16, #0
mov x17, #0
mov x18, #0
mov x19, #0
mov x20, #0
mov x21, #0
mov x22, #0
mov x23, #0
mov x24, #0
mov x25, #0
mov x26, #0
mov x27, #0
mov x28, #0
mov x29, #0
mov x30, #0
OKToRun
; CurrentEL encodes the exception level in bits [3:2]: 0xC = EL3, 0x4 = EL1.
mrs x0, currentEL
cmp x0, #0xC
beq InitEL3
cmp x0, #0x4
beq InitEL1
b error ; Go to error if current exception level is neither EL3 nor EL1
InitEL3
#if (EL3 == 1)
ldr x1, =_vector_table ; Set vector table base address
msr VBAR_EL3, x1
; Publish the vector table as this core's warm-reset vector via RVBAR.
mrs x0, MPIDR_EL1 ; Get the CPU ID
and x0, x0, #0xFF
mov w0, w0
ldr w2, =rvbar_base ; Calculate the rvbar base address for particular CPU core
mov w3, #0x8 ; one 64-bit RVBAR register (8 bytes) per core
mul w0, w0, w3
add w2, w2, w0
str x1, [x2] ; Store vector base address to rvbar
ldr x2, =|Image$$ARM_LIB_STACK$$ZI$$Base| ; Define stack pointer for current exception level
mov sp, x2
; CPTR_EL3.TFP (bit 10): trap FP/SIMD so the BSP can enable it lazily.
mov x0, #0 ; Enable Trapping of SIMD/FPU register for standalone BSP
#ifndef FREERTOS_BSP
orr x0, x0, #(0x1 << 10)
#endif
msr CPTR_EL3, x0
isb
;
; Clear FPUStatus variable to make sure that it contains current
; status of FPU i.e. disabled. In case of a warm restart execution
; when bss sections are not cleared, it may contain previously updated
; value which does not hold true now
;
#ifndef FREERTOS_BSP
ldr x0, =FPUStatus
str xzr, [x0]
#endif
; Configure SCR_EL3
mov w1, #0 ; Initial value of register is unknown
orr w1, w1, #(1 << 11) ; Set ST bit (Secure EL1 can access CNTPS_TVAL_EL1, CNTPS_CTL_EL1 & CNTPS_CVAL_EL1)
orr w1, w1, #(1 << 10) ; Set RW bit (EL1 is AArch64, as this is the Secure world)
orr w1, w1, #(1 << 3) ; Set EA bit (SError routed to EL3)
orr w1, w1, #(1 << 2) ; Set FIQ bit (FIQs routed to EL3)
orr w1, w1, #(1 << 1) ; Set IRQ bit (IRQs routed to EL3)
msr SCR_EL3, x1
; Configure cpu auxiliary control register EL1
ldr x0, =0x80CA000 ; L1 Data prefetch control - 5, Enable device split throttle, 2 independent data prefetch streams
#if (CONFIG_ARM_ERRATA_855873)
;
; Set ENDCCASCI bit in CPUACTLR_EL1 register, to execute data
; cache clean operations as data cache clean and invalidate
;
orr x0, x0, #(1 << 44) ; Set ENDCCASCI bit
#endif
msr S3_1_C15_C2_0, x0 ; CPUACTLR_EL1
; Program the generic counter frequency (CNTFRQ_EL0) from the BSP clock
; definition. The msr must sit OUTSIDE the #if/#else so the frequency is
; written for both versal (A72) and ZynqMP (A53) builds; previously it was
; inside the #else, leaving CNTFRQ_EL0 unprogrammed (and the ldr dead) on
; versal.
#if defined (versal)
ldr x0, =XPAR_CPU_CORTEXA72_0_TIMESTAMP_CLK_FREQ
#else
ldr x0, =XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ
#endif
msr CNTFRQ_EL0, x0
; Enable hardware coherency between cores
mrs x0, S3_1_c15_c2_1 ; Read EL1 CPU Extended Control Register
orr x0, x0, #(1 << 6) ; Set the SMPEN bit
msr S3_1_c15_c2_1, x0 ; Write EL1 CPU Extended Control Register
isb
; Invalidate EL3 TLBs, I-cache and the whole D-cache before enabling the MMU.
tlbi ALLE3
ic IALLU ; Invalidate I cache to PoU
bl invalidate_dcaches
dsb sy
isb
ldr x1, =MMUTableL0 ; Get address of level 0 for TTBR0_EL3
msr TTBR0_EL3, x1 ; Set TTBR0_EL3
;
; Set up memory attributes
; This equates to:
; 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
; 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
; 2 = b00000000 = Device-nGnRnE
; 3 = b00000100 = Device-nGnRE
; 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
;
ldr x1, =0x000000BB0400FF44
msr MAIR_EL3, x1
#if defined (versal)
; Set up TCR_EL3
; Physical Address Size PS = 100 -> 44bits 16 TB
; Granual Size TG0 = 00 -> 4KB
; size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
ldr x1,=0x80843514
#else
;
; Set up TCR_EL3
; Physical Address Size PS = 010 -> 40bits 1TB
; Granule Size TG0 = 00 -> 4KB
; Size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
;
ldr x1, =0x80823518
#endif
msr TCR_EL3, x1
isb
; Enable SError Exception for asynchronous abort
mrs x1, DAIF
mov x2, #(0x1<<8) ; DAIF.A (bit 8)
bic x1, x1, x2
msr DAIF, x1
; Configure SCTLR_EL3
mov x1, #0 ; Most of the SCTLR_EL3 bits are unknown at reset
orr x1, x1, #(1 << 12) ; Enable I cache
orr x1, x1, #(1 << 3) ; Enable SP alignment check
orr x1, x1, #(1 << 2) ; Enable caches
orr x1, x1, #(1 << 0) ; Enable MMU
msr SCTLR_EL3, x1
dsb sy
isb
#ifdef XCLOCKING
; Initialize PS clocks before C startup. Must be a linked branch (bl) so
; control returns here and still reaches __main below, and the callee
; spelling must match the IMPORT Xil_ClockInit declaration above
; (was "b Xil_Clockinit": undefined symbol, and __main became unreachable).
bl Xil_ClockInit
#endif
b __main ; Jump to start
#else
; BSP was not built for EL3 but the core is executing at EL3: hang.
b error ; Present exception level and selected exception level mismatch
#endif
InitEL1
#if (EL1_NONSECURE == 1)
ldr x1, =_vector_table ; Set vector table base address
msr VBAR_EL1, x1
; Trap FP/SIMD accesses at EL1 until first use (lazy enable tracked via
; FPUStatus): clear CPACR_EL1.FPEN, bits [21:20]. The shift must be 20
; (decimal); the previous "#(0x3 << 0x20)" shifted by hex 0x20 = 32 and
; cleared only RES0 bits, leaving FPEN at its UNKNOWN reset value.
mrs x0, CPACR_EL1
mov x2, #(0x3 << 20)
bic x0, x0, x2
msr CPACR_EL1, x0
isb
;
; Clear FPUStatus variable to make sure that it contains current
; status of FPU i.e. disabled. In case of a warm restart execution
; when bss sections are not cleared, it may contain previously updated
; value which does not hold true now
;
#ifndef FREERTOS_BSP
ldr x0, =FPUStatus
str xzr, [x0]
#endif
ldr x2, =|Image$$ARM_LIB_STACK$$ZI$$Base| ; Define stack pointer for current exception level
mov sp, x2
; Disable MMU
mov x1, #0x0
msr SCTLR_EL1, x1
isb
; Invalidate EL1 TLBs, I-cache and the whole D-cache before enabling the MMU.
TLBI VMALLE1
ic IALLU ; Invalidate I cache to PoU
bl invalidate_dcaches
dsb sy
isb
ldr x1, =MMUTableL0 ; Get address of level 0 for TTBR0_EL1
msr TTBR0_EL1, x1 ; Set TTBR0_EL1
;
; Set up memory attributes
; This equates to:
; 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
; 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
; 2 = b00000000 = Device-nGnRnE
; 3 = b00000100 = Device-nGnRE
; 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
;
ldr x1, =0x000000BB0400FF44
msr MAIR_EL1, x1
#if defined (versal)
;
; Set up TCR_EL1
; Physical Address Size PS = 100 -> 44bits 16TB
; Granual Size TG0 = 00 -> 4KB
; size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
;
ldr x1,=0x485800514
#else
;
; Set up TCR_EL1
; Physical Address Size PS = 010 -> 40bits 1TB
; Granule Size TG0 = 00 -> 4KB
; Size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
;
ldr x1, =0x285800518
#endif
msr TCR_EL1, x1
isb
; Enable SError Exception for asynchronous abort
mrs x1,DAIF
mov x2, #(0x1<<8) ; DAIF.A (bit 8)
bic x1,x1,x2
msr DAIF,x1
; Enable MMU
mov x1,#0x0
orr x1, x1, #(1 << 18) ; Set WFE non trapping
orr x1, x1, #(1 << 17) ; Set WFI non trapping
orr x1, x1, #(1 << 5) ; Set CP15 barrier enabled
orr x1, x1, #(1 << 12) ; Set I bit
orr x1, x1, #(1 << 2) ; Set C bit
orr x1, x1, #(1 << 0) ; Set M bit
msr SCTLR_EL1, x1
isb
bl __main ; Jump to start
#else
b error ; present exception level and selected exception level mismatch
#endif
; Dead-end for unsupported EL / build combinations.
error
b error
;
; invalidate_dcaches -- invalidate the entire data cache by set/way.
; Walks CLIDR_EL1 levels up to LoC; for each data/unified level reads
; CCSIDR_EL1 and issues "dc CISW" for every set/way. Uses w0-w9 and
; flags; no stack. Called from boot with the MMU and caches disabled.
;
invalidate_dcaches
dmb ISH
mrs x0, CLIDR_EL1 ; x0 = CLIDR
ubfx w2, w0, #24, #3 ; w2 = CLIDR>Loc
cmp w2, #0 ; LoC is 0?
b.eq invalidateCaches_end ; No cleaning required and enable MMU
mov w1, #0 ; w1 = level iterator
invalidateCaches_flush_level
add w3, w1, w1, lsl #1 ; w3 = w1 * 3 (right-shift for cache type)
lsr w3, w0, w3 ; w3 = w0 >> w3
ubfx w3, w3, #0, #3 ; w3 = cache type of this level
cmp w3, #2 ; No cache at this level?
b.lt invalidateCaches_next_level
lsl w4, w1, #1
msr CSSELR_EL1, x4 ; Select current cache level in CSSELR
isb ; ISB required to reflect new CSIDR
mrs x4, CCSIDR_EL1 ; w4 = CSIDR
ubfx w3, w4, #0, #3
add w3, w3, #2 ; w3 = log2(line size)
ubfx w5, w4, #13, #15 ; w5 = max set number
ubfx w4, w4, #3, #10 ; w4 = Way number
clz w6, w4 ; w6 = 32 - log2(number of ways)
invalidateCaches_flush_set
mov w8, w4 ; w8 = Way number
invalidateCaches_flush_way
lsl w7, w1, #1 ; Fill level field
lsl w9, w5, w3
orr w7, w7, w9 ; Fill index field
lsl w9, w8, w6
orr w7, w7, w9 ; Fill way field
dc CISW, x7 ; Invalidate by set/way to point of coherency
subs w8, w8, #1 ; Decrement way
b.ge invalidateCaches_flush_way
subs w5, w5, #1 ; Decrement set
b.ge invalidateCaches_flush_set
invalidateCaches_next_level
add w1, w1, #1 ; Next level
cmp w2, w1
b.gt invalidateCaches_flush_level
invalidateCaches_end
ret
END
;
; @} End of "addtogroup a53_64_boot_code"
;
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 7,075 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/armclang/asm_vectors.S | ;/******************************************************************************
;* Copyright (c) 2019 - 2020 Xilinx, Inc. All rights reserved.
;* SPDX-License-Identifier: MIT
;******************************************************************************/
;/*****************************************************************************/
;/**
;* @file asm_vectors.s
;*
;* This file contains the initial vector table for the Cortex A53 processor
;*
;* <pre>
;* MODIFICATION HISTORY:
;*
;* Ver Who Date Changes
;* ----- ------- -------- ---------------------------------------------------
;* 7.0 cjp 02/26/19 First release
;* </pre>
;*
;* @note
;*
;* None.
;*
;******************************************************************************/
#include "bspconfig.h"
EXPORT _vector_table
EXPORT FPUStatus
IMPORT _boot
IMPORT FIQInterrupt
IMPORT IRQInterrupt
IMPORT SErrorInterrupt
IMPORT SynchronousInterrupt
;
; FPUContextSize is the size of the array where floating point registers are
; stored when required. The default size corresponds to the case when there is
; no nested interrupt. If there are nested interrupts in application which are
; using floating point operation, the size of FPUContextSize need to be
; increased as per requirement
;
FPUContextSize EQU 528
;
; saveregister: push the general registers that an exception handler may
; clobber (X0-X19 in pairs, plus frame/link registers X29/X30) onto the
; current stack, 16 bytes per push so SP stays 16-byte aligned.
; X20-X28 are callee-saved under AAPCS64, so the C handler preserves them.
; Must be paired with restoreregister (exact reverse order).
;
	MACRO
	saveregister
	stp	X0, X1, [sp,#-0x10]!
	stp	X2, X3, [sp,#-0x10]!
	stp	X4, X5, [sp,#-0x10]!
	stp	X6, X7, [sp,#-0x10]!
	stp	X8, X9, [sp,#-0x10]!
	stp	X10, X11, [sp,#-0x10]!
	stp	X12, X13, [sp,#-0x10]!
	stp	X14, X15, [sp,#-0x10]!
	stp	X16, X17, [sp,#-0x10]!
	stp	X18, X19, [sp,#-0x10]!
	stp	X29, X30, [sp,#-0x10]!
	MEND
;
; restoreregister: pop the registers pushed by saveregister, in the exact
; reverse order, leaving SP back at its pre-saveregister value.
;
	MACRO
	restoreregister
	ldp	X29, X30, [sp], #0x10
	ldp	X18, X19, [sp], #0x10
	ldp	X16, X17, [sp], #0x10
	ldp	X14, X15, [sp], #0x10
	ldp	X12, X13, [sp], #0x10
	ldp	X10, X11, [sp], #0x10
	ldp	X8, X9, [sp], #0x10
	ldp	X6, X7, [sp], #0x10
	ldp	X4, X5, [sp], #0x10
	ldp	X2, X3, [sp], #0x10
	ldp	X0, X1, [sp], #0x10
	MEND
;
; savefloatregister: append the full FP/SIMD state (Q0-Q31 plus FPCR/FPSR)
; to the context array addressed by FPUContextBase, then advance the stored
; pointer.  The post-incremented pointer is what allows nested interrupts to
; stack multiple FPU contexts (bounded by FPUContextSize).
;
	MACRO
	savefloatregister
	ldr	x1, =FPUContextBase	; Load the floating point context array address from FPUContextBase
	ldr	x0, [x1]
	stp	q0, q1, [x0], #0x20	; Save all the floating point register to the array
	stp	q2, q3, [x0], #0x20
	stp	q4, q5, [x0], #0x20
	stp	q6, q7, [x0], #0x20
	stp	q8, q9, [x0], #0x20
	stp	q10, q11, [x0], #0x20
	stp	q12, q13, [x0], #0x20
	stp	q14, q15, [x0], #0x20
	stp	q16, q17, [x0], #0x20
	stp	q18, q19, [x0], #0x20
	stp	q20, q21, [x0], #0x20
	stp	q22, q23, [x0], #0x20
	stp	q24, q25, [x0], #0x20
	stp	q26, q27, [x0], #0x20
	stp	q28, q29, [x0], #0x20
	stp	q30, q31, [x0], #0x20
	mrs	x2, FPCR
	mrs	x3, FPSR
	stp	x2, x3, [x0], #0x10
	str	x0, [x1]		; Save current address of floating point context array to FPUContextBase
	MEND
;
; restorefloatregister: pop one FP/SIMD context (FPCR/FPSR then Q31..Q0)
; from the array addressed by FPUContextBase, walking the pointer backwards
; (pre-decrement), i.e. the exact inverse of savefloatregister.
;
	MACRO
	restorefloatregister
	ldr	x1, =FPUContextBase	; Restore the address of floating point context array from FPUContextBase
	ldr	x0, [x1]
	ldp	x2, x3, [x0,#-0x10]!	; Restore all the floating point register from the array
	msr	FPCR, x2
	msr	FPSR, x3
	ldp	q30, q31, [x0,#-0x20]!
	ldp	q28, q29, [x0,#-0x20]!
	ldp	q26, q27, [x0,#-0x20]!
	ldp	q24, q25, [x0,#-0x20]!
	ldp	q22, q23, [x0,#-0x20]!
	ldp	q20, q21, [x0,#-0x20]!
	ldp	q18, q19, [x0,#-0x20]!
	ldp	q16, q17, [x0,#-0x20]!
	ldp	q14, q15, [x0,#-0x20]!
	ldp	q12, q13, [x0,#-0x20]!
	ldp	q10, q11, [x0,#-0x20]!
	ldp	q8, q9, [x0,#-0x20]!
	ldp	q6, q7, [x0,#-0x20]!
	ldp	q4, q5, [x0,#-0x20]!
	ldp	q2, q3, [x0,#-0x20]!
	ldp	q0, q1, [x0,#-0x20]!
	str	x0, [x1]		; Save current address of floating point context array to FPUContextBase
	MEND
	AREA |.vectors|, CODE
	REQUIRE8 {TRUE}
	PRESERVE8 {TRUE}
	ENTRY			; Define this as an entry point
;
; Exception vector table.  Offset 0 doubles as the reset entry (B _boot);
; the handlers are placed at offsets 0x200/0x280/0x300/0x380, i.e. the
; "current EL with SPx" group of the AArch64 vector layout, via the ALIGN
; directives below.
;
_vector_table
;
; If application is built for XEN GUEST as EL1 Non-secure following image
; header is required by XEN.
;
#if (HYP_GUEST == 1)
	ldr	x16, =_boot	; Valid Image header
	br	x16		; HW reset vector
	; NOTE(review): the header fields below are emitted as 32-bit DCDs,
	; while the gcc twin of this file emits 64-bit .dword fields for the
	; same header - confirm which layout the Xen loader expects.
	DCD	0		; Text offset
	DCD	0		; Image size
	DCD	8		; Flags
	DCD	0		; RES0
	DCD	0
	DCD	0
	DCD	0x644d5241	; Magic
	DCD	0		; RES0
#endif
	B	_boot
	ALIGN	512
	B	SynchronousInterruptHandler
	ALIGN	128
	B	IRQInterruptHandler
	ALIGN	128
	B	FIQInterruptHandler
	ALIGN	128
	B	SErrorInterruptHandler
;
; SynchronousInterruptHandler: entry for all synchronous exceptions.
; Implements lazy FPU enabling: the ESR exception-class field (bits [31:26])
; is compared against EC 0x07 (trapped SIMD/FP access).  On the first FP
; trap the FPU is enabled and FPUStatus/FPUContextBase are initialised; on
; subsequent traps the current FP state is spilled to the context array.
; Anything else is forwarded to the C handler SynchronousInterrupt.
;
SynchronousInterruptHandler
	saveregister
; Check if the Synchronous abort is occurred due to floating point access
#if (EL3 == 1)
	mrs	x0, ESR_EL3
#else
	mrs	x0, ESR_EL1
#endif
	and	x0, x0, #(0x3F << 26)	; isolate ESR.EC (bits [31:26])
	mov	x1, #(0x7 << 26)	; EC 0x07 = trapped SIMD/FP access
	cmp	x0, x1
;
; If exception is not due to floating point access go to synchronous
; handler
;
	bne	synchronoushandler
;
; If exception occurred due to floating point access, Enable the floating point
; access i.e. do not trap floating point instruction
;
#if (EL3 == 1)
	mrs	x1, CPTR_EL3
	mov	x2, #(0x1<<10)		; CPTR_EL3.TFP
	bic	x1, x1, x2
	msr	CPTR_EL3, x1
#else
	mrs	x1, CPACR_EL1
	orr	x1, x1, #(0x1<<20)	; set CPACR_EL1.FPEN bit 20
	msr	CPACR_EL1, x1
#endif
	isb
;
; If the floating point access was previously enabled, store FPU context
; registers(storefloat)
;
	ldr	x0, =FPUStatus
	ldrb	w1, [x0]
	cbnz	w1, storefloat
;
; If the floating point access was not enabled previously, save the status of
; floating point accessibility i.e. enabled and store floating point context
; array address(FPUContext) to FPUContextBase
;
	mov	w1, #0x1
	strb	w1, [x0]
	ldr	x0, =FPUContext
	ldr	x1, =FPUContextBase
	str	x0, [x1]
	b	restorecontext
storefloat
	savefloatregister
	b	restorecontext
synchronoushandler
	bl	SynchronousInterrupt
restorecontext
	restoreregister
	eret
;
; IRQInterruptHandler: saves GPRs plus SPSR/ELR/CPTR(or CPACR), re-traps
; FP access for the duration of the ISR, and calls the C IRQInterrupt
; dispatcher.  If the ISR itself touched the FPU (trap bit cleared /
; enable bit set on return), the interrupted context's FP registers are
; restored before returning.
;
IRQInterruptHandler
	saveregister
; Save the status of SPSR, ELR and CPTR to stack
#if (EL3 == 1)
	mrs	x0, CPTR_EL3
	mrs	x1, ELR_EL3
	mrs	x2, SPSR_EL3
#else
	mrs	x0, CPACR_EL1
	mrs	x1, ELR_EL1
	mrs	x2, SPSR_EL1
#endif
	stp	x0, x1, [sp,#-0x10]!
	str	x2, [sp,#-0x10]!	; 16-byte slot for one register keeps SP aligned
; Trap floating point access
#if (EL3 == 1)
	mrs	x1, CPTR_EL3
	orr	x1, x1, #(0x1<<10)	; set CPTR_EL3.TFP
	msr	CPTR_EL3, x1
#else
	mrs	x1, CPACR_EL1
	mov	x2, #(0x1<<20)
	bic	x1, x1, x2		; clear CPACR_EL1.FPEN bit 20
	msr	CPACR_EL1, x1
#endif
	isb
	bl	IRQInterrupt
;
; If floating point access is enabled during interrupt handling, restore
; floating point registers
;
#if (EL3 == 1)
	mrs	x0, CPTR_EL3
	ands	x0, x0, #(0x1<<10)	; TFP still set -> ISR did not use FP
	bne	RestorePrevState
#else
	mrs	x0, CPACR_EL1
	ands	x0, x0, #(0x1<<20)	; FPEN still clear -> ISR did not use FP
	beq	RestorePrevState
#endif
	restorefloatregister
; Restore the status of SPSR, ELR and CPTR from stack
RestorePrevState
	ldr	x2, [sp], #0x10
	ldp	x0, x1, [sp],#0x10
#if (EL3 == 1)
	msr	CPTR_EL3, x0
	msr	ELR_EL3, x1
	msr	SPSR_EL3, x2
#else
	msr	CPACR_EL1, x0
	msr	ELR_EL1, x1
	msr	SPSR_EL1, x2
#endif
	restoreregister
	eret
;
; FIQInterruptHandler: save GPRs, dispatch to the C FIQInterrupt handler,
; restore and return.  No FPU lazy-save handling on this path.
;
FIQInterruptHandler
	saveregister
	bl	FIQInterrupt
	restoreregister
	eret
;
; SErrorInterruptHandler: save GPRs, dispatch to the C SErrorInterrupt
; handler (asynchronous external abort), restore and return.
;
SErrorInterruptHandler
	saveregister
	bl	SErrorInterrupt
	restoreregister
	eret
	ALIGN	8
; Array to store floating point registers
FPUContext
	SPACE	FPUContextSize
; Stores address for floating point context array (current top of the
; FPU context stack; advanced/rewound by save/restorefloatregister)
FPUContextBase
	SPACE	8
; Non-zero once FP access has been enabled; read byte-wise (ldrb) by
; SynchronousInterruptHandler
FPUStatus
	SPACE	4
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,679 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/xpvxenconsole/hypercall.S | /*
Copyright DornerWorks 2016
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
THIS SOFTWARE IS PROVIDED BY DORNERWORKS FOR USE ON THE CONTRACTED PROJECT, AND ANY EXPRESS OR IMPLIED WARRANTY
IS LIMITED TO THIS USE. FOR ALL OTHER USES THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DORNERWORKS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "xen.h"
/*
 * Xen hypercall stubs (AArch64 guest).  Each stub places the hypercall
 * number in x16 and executes HVC with the Xen hypercall tag #0xEA1;
 * arguments are already in x0.. per AAPCS64 and the result returns in x0.
 */
.globl HYPERVISOR_console_io;
.align 4;
HYPERVISOR_console_io:
	mov x16, __HYPERVISOR_console_io;
	hvc 0xEA1;
	ret;

.globl HYPERVISOR_hvm_op;
.align 4;
HYPERVISOR_hvm_op:
	mov x16, __HYPERVISOR_hvm_op;
	hvc 0xEA1;
	ret;

.globl HYPERVISOR_memory_op;
.align 4;
HYPERVISOR_memory_op:
	mov x16, __HYPERVISOR_memory_op;
	hvc 0xEA1;
	ret;

.globl HYPERVISOR_event_channel_op;
.align 4;
HYPERVISOR_event_channel_op:
	mov x16, __HYPERVISOR_event_channel_op
	hvc 0xEA1;
	ret;
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 13,827 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/gcc/boot.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup a53_64_boot_code Cortex A53 64bit Processor Boot Code
* @{
* <h2> boot.S </h2>
*
* The boot code performs minimum configuration which is required for an
* application. Cortex-A53 starts by checking current exception level. If the
* current exception level is EL3 and BSP is built for EL3, it will do
* initialization required for application execution at EL3. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function for EL3 execution.
*
* 1. Program vector table base for exception handling
* 2. Set reset vector table base address
* 3. Program stack pointer for EL3
* 4. Routing of interrupts to EL3
* 5. Enable ECC protection
* 6. Program generic counter frequency
* 7. Invalidate instruction cache, data cache and TLBs
* 8. Configure MMU registers and program base address of translation table
* 9. Transfer control to _start which clears BSS sections and runs global
* constructor before jumping to main application
*
* If the current exception level is EL1 and BSP is also built for EL1_NONSECURE
* it will perform initialization required for application execution at EL1
* non-secure. For all other combination, the execution will go into infinite
* loop. Below is a sequence illustrating what all configuration is performed
* before control reaches to main function for EL1 execution.
*
* 1. Program vector table base for exception handling
* 2. Program stack pointer for EL1
* 3. Invalidate instruction cache, data cache and TLBs
* 4. Configure MMU registers and program base address of translation table
* 5. Transfer control to _start which clears BSS sections and runs global
* constructor before jumping to main application
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 6.00 pkp 07/25/16 Program the counter frequency
* 6.02 pkp 01/22/17 Added support for EL1 non-secure
* 6.02 pkp 01/24/17 Clearing status of FPUStatus variable to ensure it
* holds correct value.
* 6.3 mus 04/20/17 CPU Cache protection bit in the L2CTLR_EL1 will be in
* set state on reset. So, setting that bit through boot
* code is redundant, hence removed the code which sets
* CPU cache protection bit.
* 6.4 mus 08/11/17 Implemented ARM erratum 855873.It fixes
* CR#982209.
* 6.6 mus 01/19/18 Added isb after writing to the cpacr_el1/cptr_el3,
* to ensure floating-point unit is disabled, before
* any subsequent instruction.
* 7.0 mus 03/26/18 Updated TCR_EL3/TCR_EL1 as per versal address map
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
#include "xil_errata.h"
.globl MMUTableL0
.globl MMUTableL1
.globl MMUTableL2
.global _prestart
.global _boot
.global __el3_stack
.global __el2_stack
.global __el1_stack
.global __el0_stack
.global _vector_table
.set EL3_stack, __el3_stack
.set EL2_stack, __el2_stack
.set EL1_stack, __el1_stack
.set EL0_stack, __el0_stack
.set TT_S1_FAULT, 0x0
.set TT_S1_TABLE, 0x3
.set L0Table, MMUTableL0
.set L1Table, MMUTableL1
.set L2Table, MMUTableL2
.set vector_base, _vector_table
.set rvbar_base, 0xFD5C0040
#if defined (versal)
.set counterfreq, XPAR_CPU_CORTEXA72_0_TIMESTAMP_CLK_FREQ
#else
.set counterfreq, XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ
#endif
.set MODE_EL1, 0x5
.set DAIF_BIT, 0x1C0
.section .boot,"ax"
/* this initializes the various processor modes */
_prestart:
_boot:
	/*
	 * Zero every general-purpose register so the core starts from a
	 * deterministic state before any EL-specific initialisation.
	 */
	mov	x0, #0
	mov	x1, #0
	mov	x2, #0
	mov	x3, #0
	mov	x4, #0
	mov	x5, #0
	mov	x6, #0
	mov	x7, #0
	mov	x8, #0
	mov	x9, #0
	mov	x10, #0
	mov	x11, #0
	mov	x12, #0
	mov	x13, #0
	mov	x14, #0
	mov	x15, #0
	mov	x16, #0
	mov	x17, #0
	mov	x18, #0
	mov	x19, #0
	mov	x20, #0
	mov	x21, #0
	mov	x22, #0
	mov	x23, #0
	mov	x24, #0
	mov	x25, #0
	mov	x26, #0
	mov	x27, #0
	mov	x28, #0
	mov	x29, #0
	mov	x30, #0
#if 0 //don't put other a53 cpus in wfi
	//Which core am I
	// ----------------
	mrs	x0, MPIDR_EL1
	and	x0, x0, #0xFF			//Mask off to leave Aff0
	cbz	x0, OKToRun			//If core 0, run the primary init code
EndlessLoop0:
	wfi
	b	EndlessLoop0
#endif
OKToRun:
	/*
	 * Dispatch on CurrentEL (EL field is bits [3:2]):
	 * 0xC = EL3, 0x4 = EL1.  Any other level is unsupported.
	 */
	mrs	x0, currentEL
	cmp	x0, #0xC
	beq	InitEL3
	cmp	x0, #0x4
	beq	InitEL1
	b	error	// go to error if current exception level is neither EL3 nor EL1
/*
 * InitEL3: EL3 boot path.  Sets VBAR/RVBAR, the EL3 stack, FPU trapping,
 * SCR_EL3 routing, CPUACTLR (incl. erratum 855873), the generic counter
 * frequency, core coherency (SMPEN), then invalidates caches/TLBs and
 * enables the MMU + caches via the static translation table before
 * jumping to _startup.  Register-write ordering and the isb/dsb barriers
 * are architecturally required - do not reorder.
 */
InitEL3:
	.if (EL3 == 1)
	/*Set vector table base address*/
	ldr	x1, =vector_base
	msr	VBAR_EL3,x1

	/* Set reset vector address */
	/* Get the cpu ID */
	mrs	x0, MPIDR_EL1
	and	x0, x0, #0xFF
	mov	w0, w0
	ldr	w2, =rvbar_base
	/* calculate the rvbar base address for particular CPU core */
	mov	w3, #0x8
	mul	w0, w0, w3
	add	w2, w2, w0
	/* store vector base address to RVBAR */
	str	x1, [x2]

	/*Define stack pointer for current exception level*/
	ldr	x2,=EL3_stack
	mov	sp,x2

	/* Enable Trapping of SIMD/FPU register for standalone BSP */
	mov	x0, #0
#ifndef FREERTOS_BSP
	orr	x0, x0, #(0x1 << 10)		// CPTR_EL3.TFP: trap FP for lazy context handling
#endif
	msr	CPTR_EL3, x0
	isb

	/*
	 * Clear FPUStatus variable to make sure that it contains current
	 * status of FPU i.e. disabled. In case of a warm restart execution
	 * when bss sections are not cleared, it may contain previously updated
	 * value which does not hold true now.
	 * BUG FIX: FPUStatus is reserved as a single byte (.skip 1) in
	 * asm_vectors.S and is only ever read with ldrb, so use a byte
	 * store; the previous 64-bit "str xzr" overwrote 7 bytes beyond
	 * the variable.
	 */
#ifndef FREERTOS_BSP
	ldr	x0,=FPUStatus
	strb	wzr, [x0]
#endif
	/* Configure SCR_EL3 */
	mov	w1, #0				//; Initial value of register is unknown
	orr	w1, w1, #(1 << 11)		//; Set ST bit (Secure EL1 can access CNTPS_TVAL_EL1, CNTPS_CTL_EL1 & CNTPS_CVAL_EL1)
	orr	w1, w1, #(1 << 10)		//; Set RW bit (EL1 is AArch64, as this is the Secure world)
	orr	w1, w1, #(1 << 3)		//; Set EA bit (SError routed to EL3)
	orr	w1, w1, #(1 << 2)		//; Set FIQ bit (FIQs routed to EL3)
	orr	w1, w1, #(1 << 1)		//; Set IRQ bit (IRQs routed to EL3)
	msr	SCR_EL3, x1

	/*configure cpu auxiliary control register EL1 */
	ldr	x0,=0x80CA000			// L1 Data prefetch control - 5, Enable device split throttle, 2 independent data prefetch streams
#if CONFIG_ARM_ERRATA_855873
	/*
	 * Set ENDCCASCI bit in CPUACTLR_EL1 register, to execute data
	 * cache clean operations as data cache clean and invalidate
	 *
	 */
	orr	x0, x0, #(1 << 44)		//; Set ENDCCASCI bit
#endif
	msr	S3_1_C15_C2_0, x0		//CPUACTLR_EL1

	/* program the counter frequency */
	ldr	x0,=counterfreq
	msr	CNTFRQ_EL0, x0

	/*Enable hardware coherency between cores*/
	mrs	x0, S3_1_c15_c2_1		//Read EL1 CPU Extended Control Register
	orr	x0, x0, #(1 << 6)		//Set the SMPEN bit
	msr	S3_1_c15_c2_1, x0		//Write EL1 CPU Extended Control Register
	isb

	tlbi	ALLE3
	ic	IALLU				//; Invalidate I cache to PoU
	bl	invalidate_dcaches
	dsb	sy
	isb

	ldr	x1, =L0Table			//; Get address of level 0 for TTBR0_EL3
	msr	TTBR0_EL3, x1			//; Set TTBR0_EL3

	/**********************************************
	 * Set up memory attributes
	 * This equates to:
	 * 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
	 * 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
	 * 2 = b00000000 = Device-nGnRnE
	 * 3 = b00000100 = Device-nGnRE
	 * 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
	 **********************************************/
	ldr	x1, =0x000000BB0400FF44
	msr	MAIR_EL3, x1

#if defined (versal)
	/**********************************************
	 * Set up TCR_EL3
	 * Physical Address Size PS = 100 -> 44bits 16 TB
	 * Granual Size TG0 = 00 -> 4KB
	 * size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
	 ***************************************************/
	ldr	x1,=0x80843514
#else
	/**********************************************
	 * Set up TCR_EL3
	 * Physical Address Size PS = 010 -> 40bits 1TB
	 * Granual Size TG0 = 00 -> 4KB
	 * size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
	 ***************************************************/
	ldr	x1,=0x80823518
#endif
	msr	TCR_EL3, x1
	isb

	/* Enable SError Exception for asynchronous abort */
	mrs	x1,DAIF
	bic	x1,x1,#(0x1<<8)
	msr	DAIF,x1

	/* Configure SCTLR_EL3 */
	mov	x1, #0				//Most of the SCTLR_EL3 bits are unknown at reset
	orr	x1, x1, #(1 << 12)		//Enable I cache
	orr	x1, x1, #(1 << 3)		//Enable SP alignment check
	orr	x1, x1, #(1 << 2)		//Enable caches
	orr	x1, x1, #(1 << 0)		//Enable MMU
	msr	SCTLR_EL3, x1
	dsb	sy
	isb

	b	_startup			//jump to start
	.else
	b	error				// present exception level and selected exception level mismatch
	.endif
/*
 * InitEL1: EL1 non-secure boot path (e.g. baremetal guest under a
 * hypervisor).  Sets VBAR, traps FP access for lazy context handling,
 * sets the EL1 stack, invalidates caches/TLBs and enables the MMU +
 * caches via the static translation table before jumping to _startup.
 */
InitEL1:
	.if (EL1_NONSECURE == 1)
	/*Set vector table base address*/
	ldr	x1, =vector_base
	msr	VBAR_EL1,x1

	/*
	 * Trap floating point access at EL1 by clearing CPACR_EL1.FPEN
	 * (bits [21:20]); the first FP/SIMD instruction then takes a
	 * synchronous exception where asm_vectors.S enables the FPU and
	 * starts lazy FPU context management (mirrors CPTR_EL3.TFP on the
	 * EL3 path).
	 * BUG FIX: the shift amount was written as 0x20 (decimal 32), which
	 * cleared RES0 bits [33:32] instead of FPEN, so FP access was never
	 * trapped.  FPEN is at bits [21:20]: shift by decimal 20.
	 */
	mrs	x0, CPACR_EL1
	bic	x0, x0, #(0x3 << 20)
	msr	CPACR_EL1, x0
	isb

	/*
	 * Clear FPUStatus variable to make sure that it contains current
	 * status of FPU i.e. disabled. In case of a warm restart execution
	 * when bss sections are not cleared, it may contain previously updated
	 * value which does not hold true now.
	 * BUG FIX: FPUStatus is a single byte (.skip 1) and is only read
	 * with ldrb, so store a byte; the previous 64-bit "str xzr"
	 * overwrote 7 bytes beyond the variable.
	 */
#ifndef FREERTOS_BSP
	ldr	x0,=FPUStatus
	strb	wzr, [x0]
#endif
	/*Define stack pointer for current exception level*/
	ldr	x2,=EL1_stack
	mov	sp,x2

	/* Disable MMU first */
	mov	x1,#0x0
	msr	SCTLR_EL1, x1
	isb

	TLBI	VMALLE1

	ic	IALLU				//; Invalidate I cache to PoU
	bl	invalidate_dcaches
	dsb	sy
	isb

	ldr	x1, =L0Table			//; Get address of level 0 for TTBR0_EL1
	msr	TTBR0_EL1, x1			//; Set TTBR0_EL1

	/**********************************************
	 * Set up memory attributes
	 * This equates to:
	 * 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
	 * 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
	 * 2 = b00000000 = Device-nGnRnE
	 * 3 = b00000100 = Device-nGnRE
	 * 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
	 **********************************************/
	ldr	x1, =0x000000BB0400FF44
	msr	MAIR_EL1, x1

#if defined (versal)
	/**********************************************
	 * Set up TCR_EL1
	 * Physical Address Size PS = 100 -> 44bits 16TB
	 * Granual Size TG0 = 00 -> 4KB
	 * size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
	 ***************************************************/
	ldr	x1,=0x485800514
#else
	/**********************************************
	 * Set up TCR_EL1
	 * Physical Address Size PS = 010 -> 44bits 16TB
	 * Granual Size TG0 = 00 -> 4KB
	 * size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
	 ***************************************************/
	ldr	x1,=0x285800518
#endif
	msr	TCR_EL1, x1
	isb

	/* Enable SError Exception for asynchronous abort */
	mrs	x1,DAIF
	bic	x1,x1,#(0x1<<8)
	msr	DAIF,x1

	//; Enable MMU
	mov	x1,#0x0
	orr	x1, x1, #(1 << 18)	// ; Set WFE non trapping
	orr	x1, x1, #(1 << 17)	// ; Set WFI non trapping
	orr	x1, x1, #(1 << 5)	// ; Set CP15 barrier enabled
	orr	x1, x1, #(1 << 12)	// ; Set I bit
	orr	x1, x1, #(1 << 2)	// ; Set C bit
	orr	x1, x1, #(1 << 0)	// ; Set M bit
	msr	SCTLR_EL1, x1
	isb

	bl	_startup		//jump to start
	.else
	b	error			// present exception level and selected exception level mismatch
	.endif

error:	b	error			// unsupported configuration: spin forever
/*
 * invalidate_dcaches: walk every data-cache level up to CLIDR_EL1.LoC and
 * clean+invalidate it by set/way (dc CISW), using CCSIDR_EL1 for line
 * size, set count and way count at each level.  Clobbers x0-x9 and
 * CSSELR_EL1; no stack usage.
 */
invalidate_dcaches:
	dmb	ISH
	mrs	x0, CLIDR_EL1			//; x0 = CLIDR
	ubfx	w2, w0, #24, #3			//; w2 = CLIDR.LoC
	cmp	w2, #0				//; LoC is 0?
	b.eq	invalidateCaches_end		//; No cleaning required and enable MMU
	mov	w1, #0				//; w1 = level iterator

invalidateCaches_flush_level:
	add	w3, w1, w1, lsl #1		//; w3 = w1 * 3 (right-shift for cache type)
	lsr	w3, w0, w3			//; w3 = w0 >> w3
	ubfx	w3, w3, #0, #3			//; w3 = cache type of this level
	cmp	w3, #2				//; No cache at this level?
	b.lt	invalidateCaches_next_level

	lsl	w4, w1, #1
	msr	CSSELR_EL1, x4			//; Select current cache level in CSSELR
	isb					//; ISB required to reflect new CSIDR
	mrs	x4, CCSIDR_EL1			//; w4 = CSIDR

	ubfx	w3, w4, #0, #3
	add	w3, w3, #2			//; w3 = log2(line size)
	ubfx	w5, w4, #13, #15		//; w5 = max set number
	ubfx	w4, w4, #3, #10			//; w4 = Way number
	clz	w6, w4				//; w6 = 32 - log2(number of ways)

invalidateCaches_flush_set:
	mov	w8, w4				//; w8 = Way number
invalidateCaches_flush_way:
	lsl	w7, w1, #1			//; Fill level field
	lsl	w9, w5, w3
	orr	w7, w7, w9			//; Fill index field
	lsl	w9, w8, w6
	orr	w7, w7, w9			//; Fill way field
	dc	CISW, x7			//; Invalidate by set/way to point of coherency
	subs	w8, w8, #1			//; Decrement way
	b.ge	invalidateCaches_flush_way
	subs	w5, w5, #1			//; Decrement set
	b.ge	invalidateCaches_flush_set
invalidateCaches_next_level:
	add	w1, w1, #1			//; Next level
	cmp	w2, w1
	b.gt	invalidateCaches_flush_level

invalidateCaches_end:
	ret

.end
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 7,362 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A53 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 6.02 pkp 12/21/16 Added support for floating point access
* 6.02 pkp 01/22/17 Added support for EL1 non-secure and hypervisor
* baremetal guest
* 6.4 mus 06/14/17 Fixed bug in IRQInterruptHandler code snippet,
* which checks for the FPEN bit of CPACR_EL1
* 6.6 mus 01/19/18 Added isb after writing to the cpacr_el1/cptr_el3,
* to ensure enabling/disabling of floating-point unit
* is completed, before any subsequent instruction.
*
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "bspconfig.h"
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SErrorInterrupt
.globl SynchronousInterrupt
.globl FPUStatus
/*
* FPUContextSize is the size of the array where floating point registers are
* stored when required. The default size corresponds to the case when there is no
* nested interrupt. If there are nested interrupts in application which are using
* floating point operation, the size of FPUContextSize need to be increased as per
* requirement
*/
.set FPUContextSize, 528
/*
 * saveregister: push the general registers an exception handler may
 * clobber (X0-X19 pairs plus X29/X30) in 16-byte pushes, keeping SP
 * 16-byte aligned.  X20-X28 are callee-saved under AAPCS64 and are
 * preserved by the called C handler.  Undone by restoreregister.
 */
.macro saveregister
	stp	X0,X1, [sp,#-0x10]!
	stp	X2,X3, [sp,#-0x10]!
	stp	X4,X5, [sp,#-0x10]!
	stp	X6,X7, [sp,#-0x10]!
	stp	X8,X9, [sp,#-0x10]!
	stp	X10,X11, [sp,#-0x10]!
	stp	X12,X13, [sp,#-0x10]!
	stp	X14,X15, [sp,#-0x10]!
	stp	X16,X17, [sp,#-0x10]!
	stp	X18,X19, [sp,#-0x10]!
	stp	X29,X30, [sp,#-0x10]!
.endm
/*
 * restoreregister: pop the registers pushed by saveregister, in exact
 * reverse order, restoring SP to its pre-saveregister value.
 */
.macro restoreregister
	ldp	X29,X30, [sp], #0x10
	ldp	X18,X19, [sp], #0x10
	ldp	X16,X17, [sp], #0x10
	ldp	X14,X15, [sp], #0x10
	ldp	X12,X13, [sp], #0x10
	ldp	X10,X11, [sp], #0x10
	ldp	X8,X9, [sp], #0x10
	ldp	X6,X7, [sp], #0x10
	ldp	X4,X5, [sp], #0x10
	ldp	X2,X3, [sp], #0x10
	ldp	X0,X1, [sp], #0x10
.endm
/*
 * savefloatregister: append the full FP/SIMD state (Q0-Q31 + FPCR/FPSR)
 * to the context array addressed by FPUContextBase and advance the stored
 * pointer, so nested interrupts can stack contexts (up to FPUContextSize).
 */
.macro savefloatregister

/* Load the floating point context array address from FPUContextBase */
	ldr	x1,=FPUContextBase
	ldr	x0, [x1]

/* Save all the floating point register to the array */
	stp	q0,q1, [x0], #0x20
	stp	q2,q3, [x0], #0x20
	stp	q4,q5, [x0], #0x20
	stp	q6,q7, [x0], #0x20
	stp	q8,q9, [x0], #0x20
	stp	q10,q11, [x0], #0x20
	stp	q12,q13, [x0], #0x20
	stp	q14,q15, [x0], #0x20
	stp	q16,q17, [x0], #0x20
	stp	q18,q19, [x0], #0x20
	stp	q20,q21, [x0], #0x20
	stp	q22,q23, [x0], #0x20
	stp	q24,q25, [x0], #0x20
	stp	q26,q27, [x0], #0x20
	stp	q28,q29, [x0], #0x20
	stp	q30,q31, [x0], #0x20
	mrs	x2, FPCR
	mrs	x3, FPSR
	stp	x2, x3, [x0], #0x10

/* Save current address of floating point context array to FPUContextBase */
	str	x0, [x1]
.endm
/*
 * restorefloatregister: pop one FP/SIMD context (FPCR/FPSR, then Q31..Q0)
 * walking FPUContextBase backwards - the exact inverse of
 * savefloatregister.
 */
.macro restorefloatregister

/* Restore the address of floating point context array from FPUContextBase */
	ldr	x1,=FPUContextBase
	ldr	x0, [x1]

/* Restore all the floating point register from the array */
	ldp	x2, x3, [x0,#-0x10]!
	msr	FPCR, x2
	msr	FPSR, x3
	ldp	q30,q31, [x0,#-0x20]!
	ldp	q28,q29, [x0,#-0x20]!
	ldp	q26,q27, [x0,#-0x20]!
	ldp	q24,q25, [x0,#-0x20]!
	ldp	q22,q23, [x0,#-0x20]!
	ldp	q20,q21, [x0,#-0x20]!
	ldp	q18,q19, [x0,#-0x20]!
	ldp	q16,q17, [x0,#-0x20]!
	ldp	q14,q15, [x0,#-0x20]!
	ldp	q12,q13, [x0,#-0x20]!
	ldp	q10,q11, [x0,#-0x20]!
	ldp	q8,q9, [x0,#-0x20]!
	ldp	q6,q7, [x0,#-0x20]!
	ldp	q4,q5, [x0,#-0x20]!
	ldp	q2,q3, [x0,#-0x20]!
	ldp	q0,q1, [x0,#-0x20]!

/* Save current address of floating point context array to FPUContextBase */
	str	x0, [x1]
.endm
.org 0
.section .vectors, "a"
/*
 * Exception vector table.  Offset 0 doubles as the reset entry (b _boot);
 * handlers occupy offsets 0x200/0x280/0x300/0x380, i.e. the "current EL
 * with SPx" group of the AArch64 vector layout.
 */
_vector_table:

.set	VBAR, _vector_table
.org VBAR
/*
 * if application is built for XEN GUEST as EL1 Non-secure following image
 * header is required by XEN.
 */
.if (HYP_GUEST == 1)
	/* Valid Image header. */
	/* HW reset vector. */
	ldr	x16, =_boot
	br	x16
	/* text offset. */
	.dword	0
	/* image size. */
	.dword	0
	/* flags. */
	.dword	8
	/* RES0 */
	.dword	0
	.dword	0
	.dword	0
	/* magic */
	.dword	0x644d5241
	/* RES0 */
	.dword	0
	/* End of Image header. */
.endif

	b	_boot

.org (VBAR + 0x200)
	b	SynchronousInterruptHandler

.org (VBAR + 0x280)
	b	IRQInterruptHandler

.org (VBAR + 0x300)
	b	FIQInterruptHandler

.org (VBAR + 0x380)
	b	SErrorInterruptHandler
/*
 * SynchronousInterruptHandler: lazy FPU enabling.  The ESR exception
 * class (bits [31:26]) is compared against EC 0x07 (trapped SIMD/FP
 * access); on the first FP trap the FPU is enabled and FPUStatus /
 * FPUContextBase are initialised, on later traps the current FP state is
 * spilled.  Everything else goes to the C handler SynchronousInterrupt.
 */
SynchronousInterruptHandler:
	saveregister

	/* Check if the Synchronous abort is occurred due to floating point access. */
.if (EL3 == 1)
	mrs	x0, ESR_EL3
.else
	mrs	x0, ESR_EL1
.endif
	and	x0, x0, #(0x3F << 26)		/* isolate ESR.EC (bits [31:26]) */
	mov	x1, #(0x7 << 26)		/* EC 0x07 = trapped SIMD/FP access */
	cmp	x0, x1

	/* If exception is not due to floating point access go to synchronous handler */
	bne	synchronoushandler

	/*
	 * If exception occurred due to floating point access, Enable the floating point
	 * access i.e. do not trap floating point instruction
	 */
.if (EL3 == 1)
	mrs	x1,CPTR_EL3
	bic	x1, x1, #(0x1<<10)		/* clear CPTR_EL3.TFP */
	msr	CPTR_EL3, x1
.else
	mrs	x1,CPACR_EL1
	orr	x1, x1, #(0x1<<20)		/* set CPACR_EL1.FPEN bit 20 */
	msr	CPACR_EL1, x1
.endif
	isb

	/* If the floating point access was previously enabled, store FPU context
	 * registers(storefloat).
	 */
	ldr	x0, =FPUStatus
	ldrb	w1,[x0]
	cbnz	w1, storefloat
	/*
	 * If the floating point access was not enabled previously, save the status of
	 * floating point accessibility i.e. enabled and store floating point context
	 * array address(FPUContext) to FPUContextBase.
	 */
	mov	w1, #0x1
	strb	w1, [x0]
	ldr	x0, =FPUContext
	ldr	x1, =FPUContextBase
	str	x0,[x1]
	b	restorecontext

storefloat:
	savefloatregister
	b	restorecontext

synchronoushandler:
	bl	SynchronousInterrupt
restorecontext:
	restoreregister
	eret
/*
 * IRQInterruptHandler: saves GPRs plus SPSR/ELR/CPTR(or CPACR), re-traps
 * FP access for the duration of the ISR, then calls the C IRQInterrupt
 * dispatcher.  If the ISR touched the FPU (trap cleared / enable set on
 * return), the interrupted context's FP registers are restored first.
 */
IRQInterruptHandler:

	saveregister
	/* Save the status of SPSR, ELR and CPTR to stack */
.if (EL3 == 1)
	mrs	x0, CPTR_EL3
	mrs	x1, ELR_EL3
	mrs	x2, SPSR_EL3
.else
	mrs	x0, CPACR_EL1
	mrs	x1, ELR_EL1
	mrs	x2, SPSR_EL1
.endif
	stp	x0, x1, [sp,#-0x10]!
	str	x2, [sp,#-0x10]!	/* 16-byte slot for one register keeps SP aligned */

	/* Trap floating point access */
.if (EL3 == 1)
	mrs	x1,CPTR_EL3
	orr	x1, x1, #(0x1<<10)	/* set CPTR_EL3.TFP */
	msr	CPTR_EL3, x1
.else
	mrs	x1,CPACR_EL1
	bic	x1, x1, #(0x1<<20)	/* clear CPACR_EL1.FPEN bit 20 */
	msr	CPACR_EL1, x1
.endif
	isb

	bl	IRQInterrupt
	/*
	 * If floating point access is enabled during interrupt handling,
	 * restore floating point registers.
	 */
.if (EL3 == 1)
	mrs	x0, CPTR_EL3
	ands	x0, x0, #(0x1<<10)	/* TFP still set -> ISR did not use FP */
	bne	RestorePrevState
.else
	mrs	x0,CPACR_EL1
	ands	x0, x0, #(0x1<<20)	/* FPEN still clear -> ISR did not use FP */
	beq	RestorePrevState
.endif

	restorefloatregister

	/* Restore the status of SPSR, ELR and CPTR from stack */
RestorePrevState:
	ldr	x2,[sp],0x10
	ldp	x0, x1, [sp],0x10
.if (EL3 == 1)
	msr	CPTR_EL3, x0
	msr	ELR_EL3, x1
	msr	SPSR_EL3, x2
.else
	msr	CPACR_EL1, x0
	msr	ELR_EL1, x1
	msr	SPSR_EL1, x2
.endif
	restoreregister
	eret
/*
 * FIQInterruptHandler: save GPRs, dispatch to the C FIQInterrupt handler,
 * restore and return.  No FPU lazy-save handling on this path.
 */
FIQInterruptHandler:
	saveregister

	bl	FIQInterrupt

	restoreregister
	eret
/*
 * SErrorInterruptHandler: save GPRs, dispatch to the C SErrorInterrupt
 * handler (asynchronous external abort), restore and return.
 */
SErrorInterruptHandler:
	saveregister

	bl	SErrorInterrupt

	restoreregister
	eret
.align 8
/* Array to store floating point registers */
FPUContext: .skip FPUContextSize

/* Stores address for floating point context array (current top of the
 * FPU context stack; moved by save/restorefloatregister) */
FPUContextBase: .skip 8

/* Single byte: non-zero once FP access has been enabled; always accessed
 * byte-wise (ldrb/strb) - a wider store would overrun it */
FPUStatus: .skip 1

.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,119 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/gcc/xil-crt0.S | /******************************************************************************
* Copyright (C) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 5.04 pkp 12/18/15 Initialized global constructor for C++ applications
* 5.04 pkp 01/05/16 Set the reset vector register RVBAR equivalent to
* vector table base address
* 6.02 pkp 01/22/17 Added support for EL1 non-secure
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* 6.6 mus 01/29/18 Initialized the xen PV console for Cortexa53 64 bit
* EL1 NS BSP.
* 7.2 sd 02/23/20 Clock Init is called
* 7.2 sd 02/23/20 Clock code added under XCLOCKING flag
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
/* Literal pool of linker-provided section bounds, loaded by _startup
 * below to clear .sbss and .bss. */
.Lsbss_start:
	.quad	__sbss_start

.Lsbss_end:
	.quad	__sbss_end

.Lbss_start:
	.quad	__bss_start__

.Lbss_end:
	.quad	__bss_end__

/* APU power-control register; _startup tests this core's bit to decide
 * whether BSS clearing may be skipped (warm restart).  NOTE(review):
 * confirm address/bit layout against the platform register reference. */
.set APU_PWRCTL, 0xFD5C0090
/*
 * _startup: C runtime bring-up, entered from boot.S with MMU/caches on.
 * Optionally skips BSS clearing (per-core APU_PWRCTL bit, EL3 only),
 * zeroes .sbss/.bss in 8-byte strides, runs global constructors, starts
 * the optional TTC sleep timer / Xen PV console / clock framework, then
 * calls main(0, NULL), destructors and exit; never returns.
 */
.globl	_startup
_startup:

	mov	x0, #0

	.if (EL3 == 1)
	/* Check whether the clearing of bss sections shall be skipped */
	ldr	x10, =APU_PWRCTL	/* Load PWRCTRL address */
	ldr	w11, [x10]		/* Read PWRCTRL register */
	mrs	x2, MPIDR_EL1		/* Read MPIDR_EL1 */
	ubfx	x2, x2, #0, #8		/* Extract CPU ID (affinity level 0) */
	mov	w1, #1
	lsl	w2, w1, w2		/* Shift CPU ID to get one-hot ID */
	ands	w11, w11, w2		/* Get PWRCTRL bit for this core */
	bne	.Lenclbss		/* Skip BSS and SBSS clearing */
	.endif

	/* clear sbss */
	ldr 	x1,.Lsbss_start		/* calculate beginning of the SBSS */
	ldr	x2,.Lsbss_end		/* calculate end of the SBSS */

.Lloop_sbss:
	cmp	x1,x2
	bge	.Lenclsbss		/* If no SBSS, no clearing required */
	str	x0, [x1], #8		/* zero 8 bytes, post-increment */
	b	.Lloop_sbss

.Lenclsbss:
	/* clear bss */
	ldr	x1,.Lbss_start		/* calculate beginning of the BSS */
	ldr	x2,.Lbss_end		/* calculate end of the BSS */

.Lloop_bss:
	cmp	x1,x2
	bge	.Lenclbss		/* If no BSS, no clearing required */
	str	x0, [x1], #8		/* zero 8 bytes, post-increment */
	b	.Lloop_bss

.Lenclbss:

	/* run global constructors */
	bl __libc_init_array

	/* Reset and start Triple Timer Counter */
	#if defined (SLEEP_TIMER_BASEADDR)
	bl XTime_StartTTCTimer
	#endif

	.if (EL1_NONSECURE == 1 && HYP_GUEST == 1 && \
	XEN_USE_PV_CONSOLE == 1)
	bl XPVXenConsole_Init
	.endif

	/* make sure argc and argv are valid */
	mov	x0, #0
	mov	x1, #0

#ifdef XCLOCKING
	bl	Xil_ClockInit
#endif
	bl	main			/* Jump to main C code */

	/* Cleanup global constructors */
	bl	__libc_fini_array

	bl	exit

.Lexit:	/* should never get here */
	b .Lexit

.Lstart:
	.size	_startup,.Lstart-_startup
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,453 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/versal/armclang/translation_table.S | /******************************************************************************
* Copyright (C) 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a72_64_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A72. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for versal
* architecture. It utilizes translation granual size of 4KB with 2MB section
* size for initial 5GB memory and 1GB section size for memory after 5GB.
* The overview of translation table memory attributes is described below.
*
*| Name | Memory Range | Def. in Translation Table |
*|-----------------------|-----------------------------------|-----------------------------|
*| DDR | 0x000_0000_0000 - 0x000_7FFF_FFFF | Normal WB Cacheable |
*| LPD_AFI_FS | 0x000_8000_0000 - 0x000_9FFF_FFFF | Strongly Ordered |
*| Reserved | 0x000_A000_0000 - 0x000_A3FF_FFFF | Unassigned |
*| FPD_AFI_0 | 0x000_A400_0000 - 0x000_AFFF_FFFF | Strongly Ordered |
*| FPD_AFI_1 | 0x000_B000_0000 - 0x000_BFFF_FFFF | Strongly Ordered |
*| QSPI | 0x000_C000_0000 - 0x000_DFFF_FFFF | Strongly Ordered |
*| PCIE region 0 | 0x000_E000_0000 - 0x000_EFFF_FFFF | Strongly Ordered |
*| PMC | 0x000_F000_0000 - 0x000_F7FF_FFFF | Strongly Ordered |
*| STM_CORESIGHT | 0x000_F800_0000 - 0x000_F8FF_FFFF | Strongly Ordered |
*| GIC | 0x000_F900_0000 - 0x000_F90F_FFFF | Strongly Ordered |
*| Reserved | 0x000_F910_0000 - 0x000_FBFF_FFFF | Unassigned |
*| CPM | 0x000_FC00_0000 - 0x000_FCFF_FFFF | Strongly Ordered |
*| FPD slaves | 0x000_FD00_0000 - 0x000_FDFF_FFFF | Strongly Ordered |
*| LPD slaves | 0x000_FE00_0000 - 0x000_FFDF_FFFF | Strongly Ordered |
*| OCM                  | 0x000_FFE0_0000 - 0x000_FFFF_FFFF | Normal WB Cacheable         |
*| PMC region 0-3 | 0x001_0000_0000 - 0x001_1FFF_FFFF | Strongly Ordered |
*| Reserved | 0x001_2000_0000 - 0x001_FFFF_FFFF | Unassigned |
*| ME Array 0-3 | 0x002_0000_0000 - 0x002_FFFF_FFFF | Strongly Ordered |
*| Reserved | 0x003_0000_0000 - 0x003_FFFF_FFFF | Unassigned |
*| PL- via PS | 0x004_0000_0000 - 0x005_FFFF_FFFF | Strongly Ordered |
*| PCIe region 1 | 0x006_0000_0000 - 0x007_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x008_0000_0000 - 0x00F_FFFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x010_0000_0000 - 0x03F_FFFF_FFFF | Unassigned |
*| HBM 0-3 | 0x040_0000_0000 - 0x07F_FFFF_FFFF | Strongly Ordered |
*| PCIe region 2 | 0x080_0000_0000 - 0x0BF_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x0C0_0000_0000 - 0x1B7_7FFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF | Unassigned |
*| PL- Via NoC | 0x200_0000_0000 - 0x3FF_FFFF_FFFF | Strongly Ordered |
*| PL- Via PS | 0x400_0000_0000 - 0x4FF_FFFF_FFFF | Strongly Ordered |
*| DDR CH1-CH3 | 0x500_0000_0000 - 0x7FF_FFFF_FFFF | Normal WB Cacheable |
*| PL- Via NoC | 0x800_0000_0000 - 0xFFF_FFFF_FFFF | Strongly Ordered |
*
* @note
*
* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
* because minimum section size in translation table section is 2MB.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 7.2 mus 01/09/20 Initial version
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
; Export the three translation-table levels so MMU setup code can reference
; them when programming TTBR0.
EXPORT MMUTableL0
EXPORT MMUTableL1
EXPORT MMUTableL2
; Global arithmetic variables driving the WHILE/WEND generator loops below.
; NOTE(review): 'sect' is declared but never used in this file.
GBLA abscnt
GBLA count
GBLA sect
; Attribute templates OR'd into each descriptor. Bits 1:0 = 0b01 mark a
; VMSAv8-64 block entry; 0x405/0x409 also set AF and the AttrIndx field,
; bits 9:8 select shareability, bits 53/54 set PXN/UXN (see Arm ARM, VMSAv8).
Reserved EQU 0x0 ; Fault
#if EL1_NONSECURE
Memory EQU 0x405:OR:(2:SHL:8):OR:0x0 ; normal writeback write allocate outer shared read write
#else
Memory EQU 0x405:OR:(3:SHL:8):OR:0x0 ; normal writeback write allocate inner shared read write
#endif
Device EQU 0x409:OR:(1:SHL:53):OR:(1:SHL:54):OR:0x0 ; strongly ordered read write non executable
AREA |.mmu_tbl0|, CODE, ALIGN=12
MMUTableL0
count SETA 0
WHILE count<0x1f
DCQU MMUTableL1+count*0x1000+0x3 ; 0x0000_0000 - 0x7F_FFFF_FFFF
count SETA count+1
WEND
count SETA 1
WHILE count<0x20
DCQ MMUTableL1+count*0x1000+0x3 ; 0x80_0000_0000 - 0xFFF_FFFF_FFFF
count SETA count+1
WEND
 AREA |.mmu_tbl1|, CODE, ALIGN=12
; Level-1 table: 0x4000 entries of 1GB each (16TB total). The first five
; entries are table descriptors (+0x3) into MMUTableL2 (2MB granularity for
; the first 5GB); everything from 0x1_4000_0000 up is emitted as 1GB block
; descriptors at absolute address Fixlocl1 + abscnt*0x40000000.
MMUTableL1
 DCQU MMUTableL2+0x3 ; 0x0000_0000 - 0x3FFF_FFFF
count SETA 1 ; 0x4000_0000 - 0x1_3FFF_FFFF
 WHILE count<5
 DCQ MMUTableL2+count*0x1000+0x3 ; 1GB DDR, 512MB LPD_AFI_FS, 448MB FPD_AFI_0, 512MB QSPI,
 ; 256MB PCIe region 0, PMC 128MB, GIC 1 MB, reserved 47MB,
 ; 2GB other devices and memory, 512 MB PMC
count SETA count+1
 WEND
; Base address of the first 1GB block entry; abscnt counts 1GB sections
; from here for the remainder of the table.
Fixlocl1 EQU 0x140000000
abscnt SETA 0
count SETA 0
 WHILE count<0x3
 DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x1_4000_0000 - 0x1_FFFF_FFFF
 ; 3GB Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x4
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x2_0000_0000 - 0x2_FFFF_FFFF
 ; 4GB ME Array 0-3
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x4
 DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x3_0000_0000 - 0x3_FFFF_FFFF
 ; 4GB Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x10
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x4_0000_0000 - 0x7_FFFF_FFFF
 ; 8GB PL - via PS, 8GB PCIe region1
count SETA count+1
abscnt SETA abscnt+1
 WEND
; DDR_LOW_1: sized from the hardware design, capped at 32GB (0x20 1GB
; sections); any remainder of the window is emitted as Reserved.
#ifdef XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
DDR_1_START EQU XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
DDR_1_END EQU XPAR_AXI_NOC_DDR_LOW_1_HIGHADDR
DDR_1_SIZE EQU (DDR_1_END - DDR_1_START+1)
#if DDR_1_SIZE > 0x800000000
; If DDR size is larger than 32GB, truncate to 32GB
DDR_1_REG EQU 0x20
#else
DDR_1_REG EQU DDR_1_SIZE/0x40000000
#endif
#else
DDR_1_REG EQU 0
#endif
UNDEF_1_REG EQU (0x20 - DDR_1_REG)
; DDR based on size in hw design
count SETA 0
 WHILE count<DDR_1_REG
 DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
 WEND
; Reserved for region where ddr is absent
count SETA 0
 WHILE count<UNDEF_1_REG
 DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0xC0
 DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x10_0000_0000 - 0x3F_FFFF_FFFF
 ; 192GB Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x100
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x40_0000_0000 - 0x7F_FFFF_FFFF
 ; 256GB HBM 0-3
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x100
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x80_0000_0000 - 0xBF_FFFF_FFFF
 ; 256GB PCIe 2
count SETA count+1
abscnt SETA abscnt+1
 WEND
; DDR_LOW_2: capped at 256GB (0x100 sections), remainder Reserved.
#ifdef XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
DDR_2_START EQU XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
DDR_2_END EQU XPAR_AXI_NOC_DDR_LOW_2_HIGHADDR
DDR_2_SIZE EQU (DDR_2_END - DDR_2_START+1)
#if DDR_2_SIZE > 0x4000000000
; If DDR size is larger than 256 GB, truncate to 256GB
DDR_2_REG EQU 0x100
#else
DDR_2_REG EQU DDR_2_SIZE/0x40000000
#endif
#else
DDR_2_REG EQU 0
#endif
UNDEF_2_REG EQU (0x100 - DDR_2_REG)
; DDR based on size in hw design
count SETA 0
 WHILE count<DDR_2_REG
 DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
 WEND
; Reserved for region where ddr is absent
count SETA 0
 WHILE count<UNDEF_2_REG
 DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
; DDR_LOW_3: capped at 734GB (0x2de sections), remainder Reserved.
#ifdef XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
DDR_3_START EQU XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
DDR_3_END EQU XPAR_AXI_NOC_DDR_LOW_3_HIGHADDR
DDR_3_SIZE EQU (DDR_3_END - DDR_3_START+1)
#if DDR_3_SIZE > 0xB780000000
; If DDR size is larger than 734 GB, truncate to 734GB
DDR_3_REG EQU 0x2de
#else
DDR_3_REG EQU DDR_3_SIZE/0x40000000
#endif
#else
DDR_3_REG EQU 0
#endif
UNDEF_3_REG EQU (0x2de - DDR_3_REG)
; DDR based on size in hw design
count SETA 0
 WHILE count<DDR_3_REG
 DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
 WEND
; Reserved for region where ddr is absent
count SETA 0
 WHILE count<UNDEF_3_REG
 DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x122
 DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF
 ; 290GB reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x800
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x200_0000_0000 - 0x3FF_FFFF_FFFF
 ; 2TB PL- via NoC
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x400
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x400_0000_0000 - 0x4FF_FFFF_FFFF
 ; 1TB PL- via PS
count SETA count+1
abscnt SETA abscnt+1
 WEND
; DDR_CH_1/2/3: each capped at 1TB (0x400 sections), remainder Reserved.
#ifdef XPAR_AXI_NOC_DDR_CH_1_BASEADDR
DDR_CH_1_START EQU XPAR_AXI_NOC_DDR_CH_1_BASEADDR
DDR_CH_1_END EQU XPAR_AXI_NOC_DDR_CH_1_HIGHADDR
DDR_CH_1_SIZE EQU (DDR_CH_1_END - DDR_CH_1_START + 1)
#if DDR_CH_1_SIZE > 0x010000000000
; If DDR size is larger than 1TB, truncate to 1 TB
DDR_CH_1_REG EQU 0x400 ; 0x500_0000_0000 - 0x5FF_FFFF_FFFF
#else
DDR_CH_1_REG EQU DDR_CH_1_SIZE/0x40000000
#endif
#else
DDR_CH_1_REG EQU 0
#endif
UNDEF_CH_1_REG EQU (0x400 - DDR_CH_1_REG)
; DDR based on size in hw design, Max size 1 TB
count SETA 0
 WHILE count<DDR_CH_1_REG
 DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
 WEND
; Reserved for region where ddr is absent
count SETA 0
 WHILE count<UNDEF_CH_1_REG
 DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
#ifdef XPAR_AXI_NOC_DDR_CH_2_BASEADDR
DDR_CH_2_START EQU XPAR_AXI_NOC_DDR_CH_2_BASEADDR
DDR_CH_2_END EQU XPAR_AXI_NOC_DDR_CH_2_HIGHADDR
DDR_CH_2_SIZE EQU (DDR_CH_2_END - DDR_CH_2_START + 1)
#if DDR_CH_2_SIZE > 0x010000000000
; If DDR_CH_2 size is larger than 1TB, truncate to 1 TB
DDR_CH_2_REG EQU 0x400 ; 0x600_0000_0000 - 0x6FF_FFFF_FFFF
#else
DDR_CH_2_REG EQU DDR_CH_2_SIZE/0x40000000
#endif
#else
DDR_CH_2_REG EQU 0
#endif
UNDEF_CH_2_REG EQU (0x400 - DDR_CH_2_REG)
; DDR based on size in hw design, Max size 1 TB
count SETA 0
 WHILE count<DDR_CH_2_REG
 DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
 WEND
; Reserved for region where ddr is absent
count SETA 0
 WHILE count<UNDEF_CH_2_REG
 DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
#ifdef XPAR_AXI_NOC_DDR_CH_3_BASEADDR
DDR_CH_3_START EQU XPAR_AXI_NOC_DDR_CH_3_BASEADDR
DDR_CH_3_END EQU XPAR_AXI_NOC_DDR_CH_3_HIGHADDR
DDR_CH_3_SIZE EQU (DDR_CH_3_END - DDR_CH_3_START+1)
#if DDR_CH_3_SIZE > 0x010000000000
; If DDR_CH_3 size is larger than 1TB, truncate to 1 TB
DDR_CH_3_REG EQU 0x400 ; 0x700_0000_0000 - 0x7FF_FFFF_FFFF
#else
DDR_CH_3_REG EQU DDR_CH_3_SIZE/0x40000000
#endif
#else
DDR_CH_3_REG EQU 0
#endif
UNDEF_CH_3_REG EQU (0x400 - DDR_CH_3_REG)
; DDR based on size in hw design, Max size 1 TB
count SETA 0
 WHILE count<DDR_CH_3_REG
 DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
 WEND
; Reserved for region where ddr is absent
count SETA 0
 WHILE count<UNDEF_CH_3_REG
 DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
 WEND
count SETA 0
 WHILE count<0x2000
 DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x800_0000_0000 - 0xFFF_FFFF_FFFF
 ; 8TB PL - via NoC
count SETA count+1
abscnt SETA abscnt+1
 WEND
AREA |.mmu_tbl2|, CODE, ALIGN=12
MMUTableL2
abscnt SETA 0
#ifdef XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
DDR_0_START EQU XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
DDR_0_END EQU XPAR_AXI_NOC_DDR_LOW_0_HIGHADDR
DDR_0_SIZE EQU (DDR_0_END - DDR_0_START+1)
#if DDR_0_SIZE > 0x80000000
; If DDR size is larger than 2GB, truncate to 2GB
.set DDR_0_REG, 0x400
#else
DDR_0_REG EQU DDR_0_SIZE/0x200000
#endif
#else
DDR_0_REG EQU 0
#endif
UNDEF_0_REG EQU (0x400 - DDR_0_REG)
; DDR based on size in hw design
count SETA 0
WHILE count<DDR_0_REG
DCQU abscnt*0x200000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_0_REG
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x20 ; 0xA000_0000 - 0xA3FF_FFFF
DCQU abscnt*0x200000+Device ; 64MB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x60 ; 0xA400_0000 - 0xAFFF_FFFF
DCQU abscnt*0x200000+Device ; 192MB FPD AFI 0
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x80 ; 0xB000_0000 - 0xBFFF_FFFF
DCQU abscnt*0x200000+Device ; 192MB FPD AFI 1
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100 ; 0xC000_0000 - 0xDFFF_FFFF
DCQU abscnt*0x200000+Device ; 512MB QSPI
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x80 ; 0xE000_0000 - 0xEFFF_FFFF
DCQU abscnt*0x200000+Device ; 256MB lower PCIe
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x40 ; 0xF000_0000 - 0xF7FF_FFFF
DCQU abscnt*0x200000+Device ; 128MB PMC
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x8 ; 0xF800_0000 - 0xF8FF_FFFF
DCQU abscnt*0x200000+Device ; 16MB coresight
count SETA count+1
abscnt SETA abscnt+1
WEND
; 1MB GIC is marked for 2MB region as the minimum block size in
; translation table is 2MB and adjacent 47MB reserved region is
; converted to 46MB
DCQU abscnt*0x200000+Device ; 0xF910_0000 - 0xF90F_FFFF
abscnt SETA abscnt+1
; Reserved 46MB 0xF91FFFFF - 0xFBFFFFFF
count SETA 0
WHILE count<0x17 ; 0xF91F_FFFF - 0xFBFF_FFFF
DCQU abscnt*0x200000+Reserved ; 46MB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x1F ; 0xFC00_0000 - 0xFFDF_FFFF
DCQU abscnt*0x200000+Device ; 16MB CPM,16MB FPS, 30MB LPS slaves
count SETA count+1
abscnt SETA abscnt+1
WEND
DCQU abscnt*0x200000+Memory ; 0xFFE0_0000 - 0xFFFF_FFFF
abscnt SETA abscnt+1
count SETA 0
WHILE count<0x100 ; 0x1_0000_0000 - 0x1_1FFF_FFFF
DCQU abscnt*0x200000+Device ; 512MB PMC 0-3
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100 ; 0x1_2000_0000 - 0x1_3FFF_FFFF
DCQU abscnt*0x200000+Device ; 512MB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
END
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,212 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/versal/gcc/translation_table.S | /******************************************************************************
* Copyright (C) 2018 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a53_64_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A53. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq ultrascale+
* architecture. It utilizes translation granual size of 4KB with 2MB section
* size for initial 5GB memory and 1GB section size for memory after 5GB.
* The overview of translation table memory attributes is described below.
*
*| Name | Memory Range | Def. in Translation Table |
*|-----------------------|-----------------------------------|-----------------------------|
*| DDR | 0x000_0000_0000 - 0x000_7FFF_FFFF | Normal WB Cacheable |
*| LPD_AFI_FS | 0x000_8000_0000 - 0x000_9FFF_FFFF | Strongly Ordered |
*| Reserved | 0x000_A000_0000 - 0x000_A3FF_FFFF | Unassigned |
*| FPD_AFI_0 | 0x000_A400_0000 - 0x000_AFFF_FFFF | Strongly Ordered |
*| FPD_AFI_1 | 0x000_B000_0000 - 0x000_BFFF_FFFF | Strongly Ordered |
*| QSPI | 0x000_C000_0000 - 0x000_DFFF_FFFF | Strongly Ordered |
*| PCIE region 0 | 0x000_E000_0000 - 0x000_EFFF_FFFF | Strongly Ordered |
*| PMC | 0x000_F000_0000 - 0x000_F7FF_FFFF | Strongly Ordered |
*| STM_CORESIGHT | 0x000_F800_0000 - 0x000_F8FF_FFFF | Strongly Ordered |
*| GIC | 0x000_F900_0000 - 0x000_F90F_FFFF | Strongly Ordered |
*| Reserved | 0x000_F910_0000 - 0x000_FBFF_FFFF | Unassigned |
*| CPM | 0x000_FC00_0000 - 0x000_FCFF_FFFF | Strongly Ordered |
*| FPD slaves | 0x000_FD00_0000 - 0x000_FDFF_FFFF | Strongly Ordered |
*| LPD slaves | 0x000_FE00_0000 - 0x000_FFDF_FFFF | Strongly Ordered |
*| OCM                  | 0x000_FFE0_0000 - 0x000_FFFF_FFFF | Normal WB Cacheable         |
*| PMC region 0-3 | 0x001_0000_0000 - 0x001_1FFF_FFFF | Strongly Ordered |
*| Reserved | 0x001_2000_0000 - 0x001_FFFF_FFFF | Unassigned |
*| ME Array 0-3 | 0x002_0000_0000 - 0x002_FFFF_FFFF | Strongly Ordered |
*| Reserved | 0x003_0000_0000 - 0x003_FFFF_FFFF | Unassigned |
*| PL- via PS | 0x004_0000_0000 - 0x005_FFFF_FFFF | Strongly Ordered |
*| PCIe region 1 | 0x006_0000_0000 - 0x007_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x008_0000_0000 - 0x00F_FFFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x010_0000_0000 - 0x03F_FFFF_FFFF | Unassigned |
*| HBM 0-3 | 0x040_0000_0000 - 0x07F_FFFF_FFFF | Strongly Ordered |
*| PCIe region 2 | 0x080_0000_0000 - 0x0BF_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x0C0_0000_0000 - 0x1B7_7FFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF | Unassigned |
*| PL- Via NoC | 0x200_0000_0000 - 0x3FF_FFFF_FFFF | Strongly Ordered |
*| PL- Via PS | 0x400_0000_0000 - 0x4FF_FFFF_FFFF | Strongly Ordered |
*| DDR CH1-CH3 | 0x500_0000_0000 - 0x7FF_FFFF_FFFF | Normal WB Cacheable |
*| PL- Via NoC | 0x800_0000_0000 - 0xFFF_FFFF_FFFF | Strongly Ordered |
*
* @note
*
* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
* because minimum section size in translation table section is 2MB. Region
* 0x00FFC00000 - 0x00FFDFFFFF contains CSU and PMU memory which are marked as
* Device since it is less than 1MB and falls in a region with device memory.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 7.00 mus 05/21/14 Initial version
* 7.00 mus 03/16/19 Updated translation table to mark DDR regions as
* memory, based on the DDR size in hdf
* 7.1 mus 08/29/19 Updated translation table entries for DDR_CH_1,
* DDR_CH_2 and DDR_CH_3 based on respective size in hdf
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.globl MMUTableL0
.globl MMUTableL1
.globl MMUTableL2
/* Attribute templates OR'd into each descriptor. Bits 1:0 = 0b01 mark a
   VMSAv8-64 block entry; 0x405/0x409 also set AF and the AttrIndx field,
   bits 9:8 select shareability, bits 53/54 set PXN/UXN (see Arm ARM). */
.set reserved, 0x0 /* Fault*/
#if EL1_NONSECURE
.set Memory, 0x405 | (2 << 8) | (0x0) /* normal writeback write allocate outer shared read write */
#else
.set Memory, 0x405 | (3 << 8) | (0x0) /* normal writeback write allocate inner shared read write */
#endif
.set Device, 0x409 | (1 << 53)| (1 << 54) |(0x0) /* strongly ordered read write non executable*/
.section .mmu_tbl0,"a"
/* Level-0 table: 32 table descriptors (+0x3), each pointing at one 4KB
   (512-entry) slice of MMUTableL1; 32 x 512GB = 16TB address space. */
MMUTableL0:
.set SECT, MMUTableL1 /* 0x0000_0000 - 0x7F_FFFF_FFFF */
.8byte SECT + 0x3
.rept 0x1f
.set SECT, SECT + 0x1000 /* 0x80_0000_0000 - 0xFFF_FFFF_FFFF */
.8byte SECT + 0x3
.endr
.section .mmu_tbl1,"a"
/* Level-1 table: 0x4000 entries of 1GB each (16TB). The first five entries
   are table descriptors (+0x3) into MMUTableL2 (2MB granularity for the
   first 5GB); the rest are 1GB block descriptors at address SECT. DDR
   windows are sized from the hardware design; unused space is reserved. */
MMUTableL1:
.set SECT, MMUTableL2 /* 0x0000_0000 - 0x3FFF_FFFF */
.8byte SECT + 0x3 /* 1GB DDR */
.rept 0x4 /* 0x4000_0000 - 0x1_3FFF_FFFF */
.set SECT, SECT + 0x1000 /*1GB DDR, 512MB LPD_AFI_FS, 448MB FPD_AFI_0, 512MB QSPI,
256MB PCIe region 0, PMC 128MB, GIC 1 MB, reserved 47MB,
2GB other devices and memory, 512 MB PMC */
.8byte SECT + 0x3
.endr
.set SECT,0x140000000
.rept 0x3 /* 0x1_4000_0000 - 0x1_FFFF_FFFF */
.8byte SECT + reserved /* 3GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x4 /* 0x2_0000_0000 - 0x2_FFFF_FFFF */
.8byte SECT + Device /* 4GB ME Array 0-3*/
.set SECT, SECT + 0x40000000
.endr
.rept 0x4 /* 0x3_0000_0000 - 0x3_FFFF_FFFF */
.8byte SECT + reserved /* 4GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x10 /* 0x4_0000_0000 - 0x7_FFFF_FFFF */
.8byte SECT + Device /* 8GB PL - via PS, 8GB PCIe region1 */
.set SECT, SECT + 0x40000000
.endr
/* DDR_LOW_1: capped at 32GB (0x20 sections); remainder reserved. */
#ifdef XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
.set DDR_1_START, XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
.set DDR_1_END, XPAR_AXI_NOC_DDR_LOW_1_HIGHADDR
.set DDR_1_SIZE, (DDR_1_END - DDR_1_START)+1
.if DDR_1_SIZE > 0x800000000
/* If DDR size is larger than 32GB, truncate to 32GB */
.set DDR_1_REG, 0x20
.else
.set DDR_1_REG, DDR_1_SIZE/0x40000000
.endif
#else
.set DDR_1_REG, 0
#warning "There's no DDR_1 in the HW design. MMU translation table marks 32 GB DDR address space as undefined"
#endif
.set UNDEF_1_REG, 0x20 - DDR_1_REG
.rept DDR_1_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_1_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0xC0 /* 0x10_0000_0000 - 0x3F_FFFF_FFFF */
.8byte SECT + reserved /* 192GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x40_0000_0000 - 0x7F_FFFF_FFFF */
.8byte SECT + Device /* 256GB HBM 0-3*/
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x80_0000_0000 - 0xBF_FFFF_FFFF */
.8byte SECT + Device /* 256GB PCIe 2 */
.set SECT, SECT + 0x40000000
.endr
/* DDR_LOW_2: capped at 256GB (0x100 sections); remainder reserved. */
#ifdef XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
.set DDR_2_START, XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
.set DDR_2_END, XPAR_AXI_NOC_DDR_LOW_2_HIGHADDR
.set DDR_2_SIZE, (DDR_2_END - DDR_2_START)+1
.if DDR_2_SIZE > 0x4000000000
/* If DDR size is larger than 256 GB, truncate to 256GB */
.set DDR_2_REG, 0x100
.else
.set DDR_2_REG, DDR_2_SIZE/0x40000000
.endif
#else
.set DDR_2_REG, 0
#warning "There's no DDR_LOW_2 in the HW design. MMU translation table marks 256 GB DDR address space as undefined"
#endif
.set UNDEF_2_REG, 0x100 - DDR_2_REG
.rept DDR_2_REG /* DDR based on size in hdf 0xC0_0000_0000 - 0xFF_FFFF_FFFF */
.8byte SECT + Memory /* Maximum DDR region size - 256GB */
.set SECT, SECT + 0x40000000
.endr
.rept UNDEF_2_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
/* DDR_LOW_3: capped at 734GB (0x2de sections); remainder reserved. */
#ifdef XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
.set DDR_3_START, XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
.set DDR_3_END, XPAR_AXI_NOC_DDR_LOW_3_HIGHADDR
.set DDR_3_SIZE, (DDR_3_END - DDR_3_START)+1
.if DDR_3_SIZE > 0xB780000000
/* If DDR size is larger than 734 GB, truncate to 734GB */
.set DDR_3_REG, 0x2de
.else
.set DDR_3_REG, DDR_3_SIZE/0x40000000
.endif
#else
.set DDR_3_REG, 0
#warning "There's no DDR_LOW_3 in the HW design. MMU translation table marks 734 GB DDR address space as undefined"
#endif
.set UNDEF_3_REG, 0x2de - DDR_3_REG
.rept DDR_3_REG /* DDR based on size in hdf 0x100_0000_0000 - 0x1B7_7FFF_FFFF */
.8byte SECT + Memory /* Maximum DDR region size - 734GB DDR */
.set SECT, SECT + 0x40000000
.endr
.rept UNDEF_3_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0x122 /* 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF */
.8byte SECT + reserved /* 290GB reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x800 /* 0x200_0000_0000 - 0x3FF_FFFF_FFFF */
.8byte SECT + Device /* 2TB PL- via NoC */
.set SECT, SECT + 0x40000000
.endr
.rept 0x400 /* 0x400_0000_0000 - 0x4FF_FFFF_FFFF */
.8byte SECT + Device /* 1TB PL- via PS */
.set SECT, SECT + 0x40000000
.endr
/* DDR_CH_1/2/3: each capped at 1TB (0x400 sections); remainder reserved. */
#ifdef XPAR_AXI_NOC_DDR_CH_1_BASEADDR
.set DDR_CH_1_START, XPAR_AXI_NOC_DDR_CH_1_BASEADDR
.set DDR_CH_1_END, XPAR_AXI_NOC_DDR_CH_1_HIGHADDR
.set DDR_CH_1_SIZE, (DDR_CH_1_END - DDR_CH_1_START)+1
.if DDR_CH_1_SIZE > 0x010000000000
/* If DDR size is larger than 1TB, truncate to 1 TB */
.set DDR_CH_1_REG, 0x400 /* 0x500_0000_0000 - 0x5FF_FFFF_FFFF */
.else
.set DDR_CH_1_REG, DDR_CH_1_SIZE/0x40000000
.endif
#else
.set DDR_CH_1_REG, 0
#warning "There's no DDR_CH_1 in the HW design. MMU translation table marks 1 TB DDR address space as undefined"
#endif
.set UNDEF_CH_1_REG, 0x400 - DDR_CH_1_REG
.rept DDR_CH_1_REG /* DDR based on size in hdf, Max size 1 TB*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_CH_1_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_CH_2_BASEADDR
.set DDR_CH_2_START, XPAR_AXI_NOC_DDR_CH_2_BASEADDR
.set DDR_CH_2_END, XPAR_AXI_NOC_DDR_CH_2_HIGHADDR
.set DDR_CH_2_SIZE, (DDR_CH_2_END - DDR_CH_2_START)+1
.if DDR_CH_2_SIZE > 0x010000000000
/* If DDR_CH_2 size is larger than 1TB, truncate to 1 TB */
.set DDR_CH_2_REG, 0x400 /* 0x600_0000_0000 - 0x6FF_FFFF_FFFF */
.else
.set DDR_CH_2_REG, DDR_CH_2_SIZE/0x40000000
.endif
#else
.set DDR_CH_2_REG, 0
#warning "There's no DDR_CH_2 in the HW design. MMU translation table marks 1 TB DDR address space as undefined"
#endif
.set UNDEF_CH_2_REG, 0x400 - DDR_CH_2_REG
.rept DDR_CH_2_REG /* DDR based on size in hdf, Max size 1 TB*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_CH_2_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_CH_3_BASEADDR
.set DDR_CH_3_START, XPAR_AXI_NOC_DDR_CH_3_BASEADDR
.set DDR_CH_3_END, XPAR_AXI_NOC_DDR_CH_3_HIGHADDR
.set DDR_CH_3_SIZE, (DDR_CH_3_END - DDR_CH_3_START)+1
.if DDR_CH_3_SIZE > 0x010000000000
/* If DDR_CH_3 size is larger than 1TB, truncate to 1 TB */
.set DDR_CH_3_REG, 0x400 /* 0x700_0000_0000 - 0x7FF_FFFF_FFFF */
.else
.set DDR_CH_3_REG, DDR_CH_3_SIZE/0x40000000
.endif
#else
.set DDR_CH_3_REG, 0
#warning "There's no DDR_CH_3 in the HW design. MMU translation table marks 1 TB DDR address space as undefined"
#endif
.set UNDEF_CH_3_REG, 0x400 - DDR_CH_3_REG
.rept DDR_CH_3_REG /* DDR based on size in hdf, Max size 1 TB*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_CH_3_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0x2000 /* 0x800_0000_0000 - 0xFFF_FFFF_FFFF */
.8byte SECT + Device /* 8TB PL- via NoC*/
.set SECT, SECT + 0x40000000
.endr
.section .mmu_tbl2,"a"
/* Level-2 table: 0xA00 entries of 2MB each, covering the first 5GB
   (0x0 - 0x1_3FFF_FFFF); referenced by the first five MMUTableL1 entries. */
MMUTableL2:
.set SECT, 0
#ifdef XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
.set DDR_0_START, XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
.set DDR_0_END, XPAR_AXI_NOC_DDR_LOW_0_HIGHADDR
.set DDR_0_SIZE, (DDR_0_END - DDR_0_START)+1
.if DDR_0_SIZE > 0x80000000
/* If DDR size is larger than 2GB, truncate to 2GB */
.set DDR_0_REG, 0x400
.else
.set DDR_0_REG, DDR_0_SIZE/0x200000
.endif
#else
.set DDR_0_REG, 0
#warning "There's no DDR_0 in the HW design. MMU translation table marks 2 GB DDR address space as undefined"
#endif
.set UNDEF_0_REG, 0x400 - DDR_0_REG
.rept DDR_0_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x200000
.endr
.rept UNDEF_0_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x200000
.endr
.rept 0x0100 /* 0x8000_0000 - 0x9FFF_FFFF */
.8byte SECT + Device /* 512MB LPD AFI */
.set SECT, SECT+0x200000
.endr
.rept 0x020 /* 0xA000_0000 - 0xA3FF_FFFF */
.8byte SECT + reserved /* 64MB reserved*/
.set SECT, SECT+0x200000
.endr
.rept 0x60 /* 0xA400_0000 - 0xAFFF_FFFF */
.8byte SECT + Device /* 192MB FPD AFI 0 */
.set SECT, SECT+0x200000
.endr
.rept 0x80 /* 0xB000_0000 - 0xBFFF_FFFF */
.8byte SECT + Device /* 256MB FPD AFI 1 */
.set SECT, SECT+0x200000
.endr
.rept 0x100 /* 0xC000_0000 - 0xDFFF_FFFF */
.8byte SECT + Device /* 512MB QSPI */
.set SECT, SECT+0x200000
.endr
.rept 0x080 /* 0xE000_0000 - 0xEFFF_FFFF */
.8byte SECT + Device /* 256MB lower PCIe */
.set SECT, SECT+0x200000
.endr
.rept 0x040 /* 0xF000_0000 - 0xF7FF_FFFF */
.8byte SECT + Device /* 128MB PMC */
.set SECT, SECT+0x200000
.endr
.rept 0x08 /* 0xF800_0000 - 0xF8FF_FFFF */
.8byte SECT + Device /* 16MB coresight */
.set SECT, SECT+0x200000
.endr
/* 1MB GIC is marked for 2MB region as the minimum block size in
translation table is 2MB and adjacent 47MB reserved region is
converted to 46MB */
.8byte SECT + Device /* 0xF900_0000 - 0xF91F_FFFF */
/* Reserved 46MB 0xF9200000 - 0xFBFFFFFF*/
.rept 0x17 /* 0xF920_0000 - 0xFBFF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + reserved /* 46MB reserved */
.endr
.rept 0x1F /* 0xFC00_0000 - 0xFFDF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + Device /* 16MB CPM,16MB FPS, 30MB LPS slaves */
.endr
.set SECT, SECT+0x200000 /* 0xFFE0_0000 - 0xFFFF_FFFF*/
.8byte SECT + Memory /*2MB OCM/TCM*/
.rept 0x100 /* 0x1_0000_0000 - 0x1_1FFF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + Device /* 512MB PMC 0-3 */
.endr
.rept 0x100 /* 0x1_2000_0000 - 0x1_3FFF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + reserved /* 512MB reserved */
.endr
.end
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,548 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/ZynqMP/armclang/translation_table.S | ;/******************************************************************************
;* Copyright (c) 2018 - 2020 Xilinx, Inc. All rights reserved.
;* SPDX-License-Identifier: MIT
;******************************************************************************/
;/*****************************************************************************/
;/**
;* @file translation_table.s
;*
;* @addtogroup a53_64_boot_code
;* @{
;* <h2> translation_table.S </h2>
;* translation_table.S contains a static page table required by MMU for
;* cortex-A53. This translation table is flat mapped (input address = output
;* address) with default memory attributes defined for zynq ultrascale+
;* architecture. It utilizes translation granule size of 4KB with 2MB section
;* size for initial 4GB memory and 1GB section size for memory after 4GB.
;* The overview of translation table memory attributes is described below.
;*
;*| | Memory Range | Definition in Translation Table |
;*|-----------------------|-----------------------------|-----------------------------------|
;*| DDR | 0x0000000000 - 0x007FFFFFFF | Normal write-back Cacheable |
;*| PL | 0x0080000000 - 0x00BFFFFFFF | Strongly Ordered |
;*| QSPI, lower PCIe | 0x00C0000000 - 0x00EFFFFFFF | Strongly Ordered |
;*| Reserved | 0x00F0000000 - 0x00F7FFFFFF | Unassigned |
;*| STM Coresight | 0x00F8000000 - 0x00F8FFFFFF | Strongly Ordered |
;*| GIC | 0x00F9000000 - 0x00F91FFFFF | Strongly Ordered |
;*| Reserved | 0x00F9200000 - 0x00FCFFFFFF | Unassigned |
;*| FPS, LPS slaves | 0x00FD000000 - 0x00FFBFFFFF | Strongly Ordered |
;*| CSU, PMU | 0x00FFC00000 - 0x00FFDFFFFF | Strongly Ordered |
;*| TCM, OCM | 0x00FFE00000 - 0x00FFFFFFFF | Normal inner write-back cacheable |
;*| Reserved | 0x0100000000 - 0x03FFFFFFFF | Unassigned |
;*| PL, PCIe | 0x0400000000 - 0x07FFFFFFFF | Strongly Ordered |
;*| DDR | 0x0800000000 - 0x0FFFFFFFFF | Normal inner write-back cacheable |
;*| PL, PCIe | 0x1000000000 - 0xBFFFFFFFFF | Strongly Ordered |
;*| Reserved | 0xC000000000 - 0xFFFFFFFFFF | Unassigned |
;*
;* @note
;*
;* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
;* 2GB, region after DDR and before PL is marked as undefined/reserved in
;* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
;* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
;* because minimum section size in translation table section is 2MB. Region
;* 0x00FFC00000 - 0x00FFDFFFFF contains CSU and PMU memory which are marked as
;* Device since it is less than 1MB and falls in a region with device memory.
;*
;* <pre>
;* MODIFICATION HISTORY:
;*
;* Ver Who Date Changes
;* ----- ---- -------- ---------------------------------------------------
;* 7.0 cjp 02/26/19 First release
;*
;******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
; Export the three translation-table levels so startup code can install
; MMUTableL0 as the translation table base.
EXPORT MMUTableL0
EXPORT MMUTableL1
EXPORT MMUTableL2
; Global arithmetic variables driving the WHILE/WEND generator loops below:
; abscnt = running absolute section index, count = per-region loop counter.
; NOTE(review): 'sect' is declared but never used in this file - presumably
; a leftover from the GNU-as variant; confirm before removing.
GBLA abscnt
GBLA count
GBLA sect
; Fault: descriptor value 0 has valid-bit clear, so any access faults
Reserved EQU 0
#if (EL1_NONSECURE == 1)
Memory EQU 0x405:OR:(2:SHL:8):OR:0x0 ; Normal writeback write allocate outer shared read write
#else
Memory EQU 0x405:OR:(3:SHL:8):OR:0x0 ; Normal writeback write allocate inner shared read write
#endif
Device EQU 0x409:OR:(1:SHL:53):OR:(1:SHL:54):OR:0x0 ; Strongly ordered read write non executable
AREA |.mmu_tbl0|, CODE, ALIGN=12
; Level-0 table (4KB aligned via ALIGN=12): two entries, each covering a
; 512GB input-address range and pointing at one 4KB level-1 table.
; The +0x3 low bits mark each 64-bit entry as a valid table descriptor.
MMUTableL0
DCQU MMUTableL1+0x3 ; 0x0000_0000 - 0x7F_FFFF_FFFF
DCQU MMUTableL1+0x1000+0x3 ; 0x80_0000_0000 - 0xFF_FFFF_FFFF
AREA |.mmu_tbl1|, CODE, ALIGN=12
; Level-1 table: each entry covers 1GB. The first four entries are table
; descriptors (+0x3) pointing into the four consecutive 4KB level-2 tables
; that describe the first 4GB at 2MB granularity; all later entries are
; 1GB block descriptors built from the Memory/Device/Reserved attributes.
MMUTableL1
;
; 0x0000_0000 - 0xFFFF_FFFF
; 1GB DDR, 1GB PL, 2GB other devices and memory (detailed in MMUTableL2)
;
count SETA 0
WHILE count<0x4
DCQU MMUTableL2+count*0x1000+0x3
count SETA count+1
WEND
; Base address of the first 1GB block entry; abscnt indexes 1GB steps from here.
Fixlocl1 EQU 0x100000000
abscnt SETA 0
;
; 0x0001_0000_0000 - 0x0003_FFFF_FFFF
; 12GB Reserved
;
count SETA 0
WHILE count<0xc
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x0004_0000_0000 - 0x0007_FFFF_FFFF
; 8GB PL, 8GB PCIe
;
count SETA 0
WHILE count<0x10
DCQU Fixlocl1+abscnt*0x40000000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_PSU_DDR_1_S_AXI_BASEADDR
DDR_1_START EQU XPAR_PSU_DDR_1_S_AXI_BASEADDR
DDR_1_END EQU XPAR_PSU_DDR_1_S_AXI_HIGHADDR
DDR_1_SIZE EQU (DDR_1_END - DDR_1_START + 1)
#if (DDR_1_SIZE > 0x800000000)
DDR_1_REG EQU 0x20 ; If DDR size is larger than 32GB, truncate to 32GB
#else
DDR_1_REG EQU DDR_1_SIZE / 0x40000000 ; Number of whole 1GB sections of DDR_1
#endif
#else
DDR_1_REG EQU 0
#endif
; Sections in the 32GB window not backed by DDR_1 are marked Reserved below.
UNDEF_1_REG EQU (0x20 - DDR_1_REG)
; DDR based on size in hdf
count SETA 0
WHILE count<DDR_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x0010_0000_0000 - 0x007F_FFFF_FFFF
; 448 GB PL
;
count SETA 0
WHILE count<0x1C0
DCQU Fixlocl1 + abscnt * 0x40000000 + Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x0080_0000_0000 - 0x00BF_FFFF_FFFF
; 256GB PCIe
;
count SETA 0
WHILE count<0x100
DCQU Fixlocl1+abscnt*0x40000000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x00C0_0000_0000 - 0x00FF_FFFF_FFFF
; 256GB Reserved
;
count SETA 0
WHILE count<0x100
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
AREA |.mmu_tbl2|, CODE, ALIGN=12
; Level-2 tables: four consecutive 4KB tables (512 entries each, 0x800
; entries total) describing the first 4GB at 2MB-block granularity.
; abscnt counts 2MB blocks from address 0; each DCQU emits one descriptor.
MMUTableL2
abscnt SETA 0
#ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR
DDR_0_START EQU XPAR_PSU_DDR_0_S_AXI_BASEADDR
DDR_0_END EQU XPAR_PSU_DDR_0_S_AXI_HIGHADDR
DDR_0_SIZE EQU (DDR_0_END - DDR_0_START + 1)
#if (DDR_0_SIZE > 0x80000000)
DDR_0_REG EQU 0x400 ; If DDR size is larger than 2GB, truncate to 2GB
#else
DDR_0_REG EQU DDR_0_SIZE / 0x200000 ; Number of whole 2MB blocks of DDR_0
#endif
#else
DDR_0_REG EQU 0
#endif
; 2MB blocks in the low 2GB not backed by DDR_0 are marked Reserved below.
UNDEF_0_REG EQU (0x400 - DDR_0_REG)
; DDR based on size in hdf
count SETA 0
WHILE count<DDR_0_REG
DCQU abscnt*0x200000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_0_REG
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x8000_0000 - 0xBFFF_FFFF
; 1GB lower PL
;
count SETA 0
WHILE count<0x0200
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xC000_0000 - 0xDFFF_FFFF
; 512MB QSPI
;
count SETA 0
WHILE count<0x0100
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xE000_0000 - 0xEFFF_FFFF
; 256MB lower PCIe
;
count SETA 0
WHILE count<0x080
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xF000_0000 - 0xF7FF_FFFF
; 128MB Reserved
;
count SETA 0
WHILE count<0x040
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xF800_0000 - 0xF8FF_FFFF
; 16MB Coresight
;
count SETA 0
WHILE count<0x8
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 1MB RPU LLP is marked for 2MB region as the minimum block size in translation
; table is 2MB and adjacent 63MB reserved region is converted to 62MB
;
;
; 0xF900_0000 - 0xF91F_FFFF
; 2MB RPU low latency port
;
count SETA 0
WHILE count<0x1
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xF920_0000 - 0xFCFF_FFFF
; 62MB Reserved
;
count SETA 0
WHILE count<0x1f
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xFD00_0000 - 0xFDFF_FFFF
; 16MB FPS
;
count SETA 0
WHILE count<0x8
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xFE00_0000 - 0xFFBF_FFFF
; 28MB LPS
;
count SETA 0
WHILE count<0xE
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xFFC0_0000 - 0xFFDF_FFFF
; 2MB PMU/CSU
;
DCQU abscnt*0x200000+Device
abscnt SETA abscnt+1
;
; 0xFFE0_0000 - 0xFFFF_FFFF
; 2MB OCM/TCM
;
DCQU abscnt*0x200000+Memory
END
;
; @} End of "addtogroup a53_64_boot_code"
;
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,360 | Chapter05/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/ZynqMP/gcc/translation_table.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a53_64_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A53. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq ultrascale+
* architecture. It utilizes translation granual size of 4KB with 2MB section
* size for initial 4GB memory and 1GB section size for memory after 4GB.
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------------|-----------------------------|-----------------------------------|
*| DDR | 0x0000000000 - 0x007FFFFFFF | Normal write-back Cacheable |
*| PL | 0x0080000000 - 0x00BFFFFFFF | Strongly Ordered |
*| QSPI, lower PCIe | 0x00C0000000 - 0x00EFFFFFFF | Strongly Ordered |
*| Reserved | 0x00F0000000 - 0x00F7FFFFFF | Unassigned |
*| STM Coresight | 0x00F8000000 - 0x00F8FFFFFF | Strongly Ordered |
*| GIC | 0x00F9000000 - 0x00F91FFFFF | Strongly Ordered |
*| Reserved | 0x00F9200000 - 0x00FCFFFFFF | Unassigned |
*| FPS, LPS slaves | 0x00FD000000 - 0x00FFBFFFFF | Strongly Ordered |
*| CSU, PMU | 0x00FFC00000 - 0x00FFDFFFFF | Strongly Ordered |
*| TCM, OCM | 0x00FFE00000 - 0x00FFFFFFFF | Normal inner write-back cacheable |
*| Reserved | 0x0100000000 - 0x03FFFFFFFF | Unassigned |
*| PL, PCIe | 0x0400000000 - 0x07FFFFFFFF | Strongly Ordered |
*| DDR | 0x0800000000 - 0x0FFFFFFFFF | Normal inner write-back cacheable |
*| PL, PCIe | 0x1000000000 - 0xBFFFFFFFFF | Strongly Ordered |
*| Reserved | 0xC000000000 - 0xFFFFFFFFFF | Unassigned |
*
* @note
*
* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
* because minimum section size in translation table section is 2MB. Region
* 0x00FFC00000 - 0x00FFDFFFFF contains CSU and PMU memory which are marked as
* Device since it is less than 1MB and falls in a region with device memory.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 5.04 pkp 12/18/15 Updated the address map according to proper address map
* 6.0 mus 07/20/16 Added warning for ddrless HW design CR-954977
* 6.2 pkp 12/14/16 DDR memory in 0x800000000 - 0xFFFFFFFFF range is marked
* as normal writeback for the size defined in hdf and rest
* of the memory in that 32GB range is marked as reserved.
* 6.4 mus 08/10/17 Marked memory as a outer shareable for EL1 NS execution,
* to support CCI enabled IP's.
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.globl MMUTableL0
.globl MMUTableL1
.globl MMUTableL2
/* Descriptor attribute templates OR'd into each table entry below.
   'reserved' (0) has the valid bit clear, so any access faults. */
.set reserved, 0x0 /* Fault*/
#if EL1_NONSECURE
.set Memory, 0x405 | (2 << 8) | (0x0) /* normal writeback write allocate outer shared read write */
#else
.set Memory, 0x405 | (3 << 8) | (0x0) /* normal writeback write allocate inner shared read write */
#endif
.set Device, 0x409 | (1 << 53)| (1 << 54) |(0x0) /* strongly ordered read write non executable*/
.section .mmu_tbl0,"a"
/* Level-0 table: two entries, each covering 512GB and pointing at one 4KB
   level-1 table. The +0x3 low bits mark a valid table descriptor. */
MMUTableL0:
.set SECT, MMUTableL1 /* 0x0000_0000 - 0x7F_FFFF_FFFF */
.8byte SECT + 0x3
.set SECT, MMUTableL1+0x1000 /* 0x80_0000_0000 - 0xFF_FFFF_FFFF */
.8byte SECT + 0x3
.section .mmu_tbl1,"a"
/* Level-1 table: each entry covers 1GB. The first four entries are table
   descriptors (+0x3) pointing into the four consecutive 4KB level-2 tables
   that map the first 4GB at 2MB granularity; the remaining entries are
   1GB block descriptors using the Memory/Device/reserved attributes. */
MMUTableL1:
.set SECT, MMUTableL2 /* 0x0000_0000 - 0x3FFF_FFFF */
.8byte SECT + 0x3 /* 1GB DDR */
.rept 0x3 /* 0x4000_0000 - 0xFFFF_FFFF */
.set SECT, SECT + 0x1000 /*1GB DDR, 1GB PL, 2GB other devices and memory */
.8byte SECT + 0x3
.endr
.set SECT,0x100000000
.rept 0xC /* 0x0001_0000_0000 - 0x0003_FFFF_FFFF */
.8byte SECT + reserved /* 12GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x10 /* 0x0004_0000_0000 - 0x0007_FFFF_FFFF */
.8byte SECT + Device /* 8GB PL, 8GB PCIe */
.set SECT, SECT + 0x40000000
.endr
#ifdef XPAR_PSU_DDR_1_S_AXI_BASEADDR
.set DDR_1_START, XPAR_PSU_DDR_1_S_AXI_BASEADDR
.set DDR_1_END, XPAR_PSU_DDR_1_S_AXI_HIGHADDR
.set DDR_1_SIZE, (DDR_1_END - DDR_1_START)+1
.if DDR_1_SIZE > 0x800000000
/* If DDR size is larger than 32GB, truncate to 32GB */
.set DDR_1_REG, 0x20
.else
.set DDR_1_REG, DDR_1_SIZE/0x40000000 /* whole 1GB sections of DDR_1 */
.endif
#else
.set DDR_1_REG, 0
#warning "There's no DDR_1 in the HW design. MMU translation table marks 32 GB DDR address space as undefined"
#endif
/* Sections of the 32GB window not backed by DDR_1 are marked reserved. */
.set UNDEF_1_REG, 0x20 - DDR_1_REG
.rept DDR_1_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_1_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0x1C0 /* 0x0010_0000_0000 - 0x007F_FFFF_FFFF */
.8byte SECT + Device /* 448 GB PL */
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x0080_0000_0000 - 0x00BF_FFFF_FFFF */
.8byte SECT + Device /* 256GB PCIe */
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x00C0_0000_0000 - 0x00FF_FFFF_FFFF */
.8byte SECT + reserved /* 256GB reserved */
.set SECT, SECT + 0x40000000
.endr
.section .mmu_tbl2,"a"
/* Level-2 tables: four consecutive 4KB tables (512 entries each, 0x800
   entries total) mapping the first 4GB in 2MB blocks. SECT tracks the
   physical address of the current 2MB block, starting at 0. */
MMUTableL2:
.set SECT, 0
#ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_0_START, XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_0_END, XPAR_PSU_DDR_0_S_AXI_HIGHADDR
.set DDR_0_SIZE, (DDR_0_END - DDR_0_START)+1
.if DDR_0_SIZE > 0x80000000
/* If DDR size is larger than 2GB, truncate to 2GB */
.set DDR_0_REG, 0x400
.else
.set DDR_0_REG, DDR_0_SIZE/0x200000 /* whole 2MB blocks of DDR_0 */
.endif
#else
.set DDR_0_REG, 0
#warning "There's no DDR_0 in the HW design. MMU translation table marks 2 GB DDR address space as undefined"
#endif
/* 2MB blocks of the low 2GB not backed by DDR_0 are marked reserved. */
.set UNDEF_0_REG, 0x400 - DDR_0_REG
.rept DDR_0_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x200000
.endr
.rept UNDEF_0_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x200000
.endr
.rept 0x0200 /* 0x8000_0000 - 0xBFFF_FFFF */
.8byte SECT + Device /* 1GB lower PL */
.set SECT, SECT+0x200000
.endr
.rept 0x0100 /* 0xC000_0000 - 0xDFFF_FFFF */
.8byte SECT + Device /* 512MB QSPI */
.set SECT, SECT+0x200000
.endr
.rept 0x080 /* 0xE000_0000 - 0xEFFF_FFFF */
.8byte SECT + Device /* 256MB lower PCIe */
.set SECT, SECT+0x200000
.endr
.rept 0x040 /* 0xF000_0000 - 0xF7FF_FFFF */
.8byte SECT + reserved /* 128MB Reserved */
.set SECT, SECT+0x200000
.endr
.rept 0x8 /* 0xF800_0000 - 0xF8FF_FFFF */
.8byte SECT + Device /* 16MB coresight */
.set SECT, SECT+0x200000
.endr
/* 1MB RPU LLP is marked for 2MB region as the minimum block size in
translation table is 2MB and adjacent 63MB reserved region is
converted to 62MB */
.rept 0x1 /* 0xF900_0000 - 0xF91F_FFFF */
.8byte SECT + Device /* 2MB RPU low latency port */
.set SECT, SECT+0x200000
.endr
.rept 0x1F /* 0xF920_0000 - 0xFCFF_FFFF */
.8byte SECT + reserved /* 62MB Reserved */
.set SECT, SECT+0x200000
.endr
.rept 0x8 /* 0xFD00_0000 - 0xFDFF_FFFF */
.8byte SECT + Device /* 16MB FPS */
.set SECT, SECT+0x200000
.endr
.rept 0xE /* 0xFE00_0000 - 0xFFBF_FFFF */
.8byte SECT + Device /* 28MB LPS */
.set SECT, SECT+0x200000
.endr
/* 0xFFC0_0000 - 0xFFDF_FFFF */
.8byte SECT + Device /*2MB PMU/CSU */
.set SECT, SECT+0x200000 /* 0xFFE0_0000 - 0xFFFF_FFFF*/
.8byte SECT + Memory /*2MB OCM/TCM*/
.end
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 10,151 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/portasm.S | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/* FreeRTOS includes. */
#include "FreeRTOSConfig.h"
/* Xilinx library includes. */
#include "microblaze_exceptions_g.h"
#include "xparameters.h"
#include "microblaze_instructions.h"
/* The context is oversized to allow functions called from the ISR to write
back into the caller stack. */
/* Frame sizes and register-slot offsets differ between 64-bit MicroBlaze
(__arch64__, 8-byte slots) and 32-bit MicroBlaze (4-byte slots); the FPU
variants add one extra slot for rFSR. */
#if defined (__arch64__)
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
#define portCONTEXT_SIZE 272
#define portMINUS_CONTEXT_SIZE -272
#else
#define portCONTEXT_SIZE 264
#define portMINUS_CONTEXT_SIZE -264
#endif
#else
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
#define portCONTEXT_SIZE 136
#define portMINUS_CONTEXT_SIZE -136
#else
#define portCONTEXT_SIZE 132
#define portMINUS_CONTEXT_SIZE -132
#endif
#endif
/* Offsets from the stack pointer at which saved registers are placed. */
#if defined (__arch64__)
#define portR31_OFFSET 8
#define portR30_OFFSET 16
#define portR29_OFFSET 24
#define portR28_OFFSET 32
#define portR27_OFFSET 40
#define portR26_OFFSET 48
#define portR25_OFFSET 56
#define portR24_OFFSET 64
#define portR23_OFFSET 72
#define portR22_OFFSET 80
#define portR21_OFFSET 88
#define portR20_OFFSET 96
#define portR19_OFFSET 104
#define portR18_OFFSET 112
#define portR17_OFFSET 120
#define portR16_OFFSET 128
#define portR15_OFFSET 136
#define portR14_OFFSET 144
#define portR13_OFFSET 152
#define portR12_OFFSET 160
#define portR11_OFFSET 168
#define portR10_OFFSET 176
#define portR9_OFFSET 184
#define portR8_OFFSET 192
#define portR7_OFFSET 200
#define portR6_OFFSET 208
#define portR5_OFFSET 216
#define portR4_OFFSET 224
#define portR3_OFFSET 232
#define portR2_OFFSET 240
#define portCRITICAL_NESTING_OFFSET 248
#define portMSR_OFFSET 256
#define portFSR_OFFSET 264
#else
#define portR31_OFFSET 4
#define portR30_OFFSET 8
#define portR29_OFFSET 12
#define portR28_OFFSET 16
#define portR27_OFFSET 20
#define portR26_OFFSET 24
#define portR25_OFFSET 28
#define portR24_OFFSET 32
#define portR23_OFFSET 36
#define portR22_OFFSET 40
#define portR21_OFFSET 44
#define portR20_OFFSET 48
#define portR19_OFFSET 52
#define portR18_OFFSET 56
#define portR17_OFFSET 60
#define portR16_OFFSET 64
#define portR15_OFFSET 68
#define portR14_OFFSET 72
#define portR13_OFFSET 76
#define portR12_OFFSET 80
#define portR11_OFFSET 84
#define portR10_OFFSET 88
#define portR9_OFFSET 92
#define portR8_OFFSET 96
#define portR7_OFFSET 100
#define portR6_OFFSET 104
#define portR5_OFFSET 108
#define portR4_OFFSET 112
#define portR3_OFFSET 116
#define portR2_OFFSET 120
#define portCRITICAL_NESTING_OFFSET 124
#define portMSR_OFFSET 128
#define portFSR_OFFSET 132
#endif
/* Symbols defined in the C side of the port / kernel. */
.extern pxCurrentTCB
.extern XIntc_DeviceInterruptHandler
.extern vTaskSwitchContext
.extern uxCriticalNesting
.extern pulISRStack
.extern ulTaskSwitchRequested
.extern vPortExceptionHandler
.extern pulStackPointerOnFunctionEntry
/* Entry points exported by this file. */
.global _interrupt_handler
.global VPortYieldASM
.global vPortStartFirstTask
.global vPortExceptionHandlerEntry
/* portSAVE_CONTEXT: push a complete task context onto the current task's
stack. Allocates a portCONTEXT_SIZE frame, stores r2-r13 and r15-r31
(r14 is stored later by each caller because its value needs adjusting
when the save was triggered by a yield), then the critical nesting
count, rMSR and - when an FPU is configured - rFSR. Finally the
resulting stack pointer is written into *pxCurrentTCB. r18 and r3 are
reused as temporaries only after their own values have been stacked. */
.macro portSAVE_CONTEXT
	/* Make room for the context on the stack. */
	ADDLIK r1, r1, portMINUS_CONTEXT_SIZE
	/* Stack general registers. */
	SI r31, r1, portR31_OFFSET
	SI r30, r1, portR30_OFFSET
	SI r29, r1, portR29_OFFSET
	SI r28, r1, portR28_OFFSET
	SI r27, r1, portR27_OFFSET
	SI r26, r1, portR26_OFFSET
	SI r25, r1, portR25_OFFSET
	SI r24, r1, portR24_OFFSET
	SI r23, r1, portR23_OFFSET
	SI r22, r1, portR22_OFFSET
	SI r21, r1, portR21_OFFSET
	SI r20, r1, portR20_OFFSET
	SI r19, r1, portR19_OFFSET
	SI r18, r1, portR18_OFFSET
	SI r17, r1, portR17_OFFSET
	SI r16, r1, portR16_OFFSET
	SI r15, r1, portR15_OFFSET
	/* R14 is saved later as it needs adjustment if a yield is performed. */
	SI r13, r1, portR13_OFFSET
	SI r12, r1, portR12_OFFSET
	SI r11, r1, portR11_OFFSET
	SI r10, r1, portR10_OFFSET
	SI r9, r1, portR9_OFFSET
	SI r8, r1, portR8_OFFSET
	SI r7, r1, portR7_OFFSET
	SI r6, r1, portR6_OFFSET
	SI r5, r1, portR5_OFFSET
	SI r4, r1, portR4_OFFSET
	SI r3, r1, portR3_OFFSET
	SI r2, r1, portR2_OFFSET
	/* Stack the critical section nesting value. */
	LI r18, r0, uxCriticalNesting
	SI r18, r1, portCRITICAL_NESTING_OFFSET
	/* Stack MSR. */
	mfs r18, rmsr
	SI r18, r1, portMSR_OFFSET
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
	/* Stack FSR. */
	mfs r18, rfsr
	SI r18, r1, portFSR_OFFSET
#endif
	/* Save the top of stack value to the TCB. */
	LI r3, r0, pxCurrentTCB
	STORE r1, r0, r3
	.endm
/* portRESTORE_CONTEXT: the inverse of portSAVE_CONTEXT. Reloads the
stack pointer from *pxCurrentTCB, restores the general registers
(r18 last, since it is needed as a temporary for MSR/FSR/nesting),
then returns to the task. The stacked critical nesting count decides
the return path: non-zero means the task last left the Running state
via a yield (return with rtsd through exit_from_yield, interrupts
left as the task had them); zero means it was preempted by an
interrupt (return with rtid, which re-enables interrupts). */
.macro portRESTORE_CONTEXT
	/* Load the top of stack value from the TCB. */
	LI r18, r0, pxCurrentTCB
	LOAD r1, r0, r18
	/* Restore the general registers. */
	LI r31, r1, portR31_OFFSET
	LI r30, r1, portR30_OFFSET
	LI r29, r1, portR29_OFFSET
	LI r28, r1, portR28_OFFSET
	LI r27, r1, portR27_OFFSET
	LI r26, r1, portR26_OFFSET
	LI r25, r1, portR25_OFFSET
	LI r24, r1, portR24_OFFSET
	LI r23, r1, portR23_OFFSET
	LI r22, r1, portR22_OFFSET
	LI r21, r1, portR21_OFFSET
	LI r20, r1, portR20_OFFSET
	LI r19, r1, portR19_OFFSET
	LI r17, r1, portR17_OFFSET
	LI r16, r1, portR16_OFFSET
	LI r15, r1, portR15_OFFSET
	LI r14, r1, portR14_OFFSET
	LI r13, r1, portR13_OFFSET
	LI r12, r1, portR12_OFFSET
	LI r11, r1, portR11_OFFSET
	LI r10, r1, portR10_OFFSET
	LI r9, r1, portR9_OFFSET
	LI r8, r1, portR8_OFFSET
	LI r7, r1, portR7_OFFSET
	LI r6, r1, portR6_OFFSET
	LI r5, r1, portR5_OFFSET
	LI r4, r1, portR4_OFFSET
	LI r3, r1, portR3_OFFSET
	LI r2, r1, portR2_OFFSET
	/* Reload the rmsr from the stack. */
	LI r18, r1, portMSR_OFFSET
	mts rmsr, r18
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
	/* Reload the FSR from the stack. */
	LI r18, r1, portFSR_OFFSET
	mts rfsr, r18
#endif
	/* Load the critical nesting value. */
	LI r18, r1, portCRITICAL_NESTING_OFFSET
	SI r18, r0, uxCriticalNesting
	/* Test the critical nesting value. If it is non zero then the task last
	exited the running state using a yield. If it is zero, then the task
	last exited the running state through an interrupt. */
	XORI r18, r18, 0
	BNEI r18, exit_from_yield
	/* r18 was being used as a temporary. Now restore its true value from the
	stack. */
	LI r18, r1, portR18_OFFSET
	/* Remove the stack frame. */
	ADDLIK r1, r1, portCONTEXT_SIZE
	/* Return using rtid so interrupts are re-enabled as this function is
	exited. */
	rtid r14, 0
	OR r0, r0, r0
	.endm
/* This function is used to exit portRESTORE_CONTEXT() if the task being
returned to last left the Running state by calling taskYIELD() (rather than
being preempted by an interrupt). */
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
exit_from_yield:
	/* r18 was being used as a temporary. Now restore its true value from the
	stack. */
	LI r18, r1, portR18_OFFSET
	/* Remove the stack frame. */
	ADDLIK r1, r1, portCONTEXT_SIZE
	/* Return to the task. rtsd (rather than rtid) leaves the interrupt
	enable state exactly as the task had it. The OR is the delay slot. */
	rtsd r14, 0
	OR r0, r0, r0
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
/* _interrupt_handler: installed as the MicroBlaze interrupt vector target.
Saves the interrupted task's context, switches to the dedicated ISR
stack, dispatches pending interrupts through the XIntc driver, and - if
any ISR requested a context switch via ulTaskSwitchRequested - selects a
new task before restoring a context. */
_interrupt_handler:
	portSAVE_CONTEXT
	/* Stack the return address. */
	SI r14, r1, portR14_OFFSET
	/* Switch to the ISR stack. */
	LI r1, r0, pulISRStack
	/* The parameter to the interrupt handler. */
	ORI r5, r0, configINTERRUPT_CONTROLLER_TO_USE
	/* Execute any pending interrupts. The OR after BRALID fills the
	branch delay slot (no-op). */
	BRALID r15, XIntc_DeviceInterruptHandler
	OR r0, r0, r0
	/* See if a new task should be selected to execute. */
	LI r18, r0, ulTaskSwitchRequested
	OR r18, r18, r0
	/* If ulTaskSwitchRequested is already zero, then jump straight to
	restoring the task that is already in the Running state. */
	BEQI r18, task_switch_not_requested
	/* Set ulTaskSwitchRequested back to zero as a task switch is about to be
	performed. */
	SI r0, r0, ulTaskSwitchRequested
	/* ulTaskSwitchRequested was not 0 when tested. Select the next task to
	execute. */
	BRALID r15, vTaskSwitchContext
	OR r0, r0, r0
task_switch_not_requested:
	/* Restore the context of the next task scheduled to execute. */
	portRESTORE_CONTEXT
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
/* VPortYieldASM: reached via the portYIELD() software trap. Saves the
calling task's context, adjusts the stacked return address so the task
resumes after the yield call, then runs the scheduler on the ISR stack
and restores whichever task is now selected. */
VPortYieldASM:
	portSAVE_CONTEXT
	/* Modify the return address so a return is done to the instruction after
	the call to VPortYieldASM. */
	ADDI r14, r14, 8
	SI r14, r1, portR14_OFFSET
	/* Switch to use the ISR stack. */
	LI r1, r0, pulISRStack
	/* Select the next task to execute. */
	BRALID r15, vTaskSwitchContext
	OR r0, r0, r0
	/* Restore the context of the next task scheduled to execute. */
	portRESTORE_CONTEXT
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
/* vPortStartFirstTask: called once by the scheduler start-up code. Simply
restores the context of the first task selected to run; never returns. */
vPortStartFirstTask:
	portRESTORE_CONTEXT
#if ( MICROBLAZE_EXCEPTIONS_ENABLED == 1 ) && ( configINSTALL_EXCEPTION_HANDLERS == 1 )
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
/* vPortExceptionHandlerEntry: thin shim in front of the C exception
handler. Records the stack pointer as it was on entry (before the C
function builds its own frame) so vPortExceptionHandler can inspect the
faulting context. */
vPortExceptionHandlerEntry:
	/* Take a copy of the stack pointer before vPortExecptionHandler is called,
	storing its value prior to the function stack frame being created. */
	SI r1, r0, pulStackPointerOnFunctionEntry
	BRALID r15, vPortExceptionHandler
	OR r0, r0, r0
#endif /* ( MICROBLAZE_EXCEPTIONS_ENABLED == 1 ) && ( configINSTALL_EXCEPTION_HANDLERS == 1 ) */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 11,706 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/ARM_CA53/portASM.S | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (C) 2014 - 2020 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/* Select the GIC access method: Versal (A72) uses GICv3 system registers
(MRS/MSR S3_0_C12_...), everything else uses memory-mapped GICv2
registers reached via the ullICC* pointers. */
#if defined (versal) && !defined(ARMR5)
#define GICv3
#else
#define GICv2
#endif
.text
/* Variables and functions. */
.extern ullMaxAPIPriorityMask
.extern pxCurrentTCB
.extern vTaskSwitchContext
.extern vApplicationIRQHandler
.extern ullPortInterruptNesting
.extern ullPortTaskHasFPUContext
.extern ullCriticalNesting
.extern ullPortYieldRequired
.extern ullICCEOIR
.extern ullICCIAR
.extern _freertos_vector_table
/* Entry points exported by this file. */
.global FreeRTOS_IRQ_Handler
.global FreeRTOS_SWI_Handler
.global vPortRestoreTaskContext
/* portSAVE_CONTEXT: save the full context of the current task onto the
task's own (EL0) stack. Pushes X0-X30 (paired with XZR padding so the
SP stays 16-byte aligned), the SPSR/ELR of the exception level being
used (EL1 when built as a GUEST, otherwise EL3), the critical nesting
depth, the FPU-context flag and - when that flag is non-zero - all 32
Q registers. The final SP is stored into *pxCurrentTCB before
switching back to the ELx stack pointer. */
.macro portSAVE_CONTEXT
	/* Switch to use the EL0 stack pointer. */
	MSR SPSEL, #0
	/* Save the entire context. */
	STP X0, X1, [SP, #-0x10]!
	STP X2, X3, [SP, #-0x10]!
	STP X4, X5, [SP, #-0x10]!
	STP X6, X7, [SP, #-0x10]!
	STP X8, X9, [SP, #-0x10]!
	STP X10, X11, [SP, #-0x10]!
	STP X12, X13, [SP, #-0x10]!
	STP X14, X15, [SP, #-0x10]!
	STP X16, X17, [SP, #-0x10]!
	STP X18, X19, [SP, #-0x10]!
	STP X20, X21, [SP, #-0x10]!
	STP X22, X23, [SP, #-0x10]!
	STP X24, X25, [SP, #-0x10]!
	STP X26, X27, [SP, #-0x10]!
	STP X28, X29, [SP, #-0x10]!
	STP X30, XZR, [SP, #-0x10]!
	/* Save the SPSR. */
#if defined( GUEST )
	MRS X3, SPSR_EL1
	MRS X2, ELR_EL1
#else
	MRS X3, SPSR_EL3
	/* Save the ELR. */
	MRS X2, ELR_EL3
#endif
	STP X2, X3, [SP, #-0x10]!
	/* Save the critical section nesting depth. */
	LDR X0, ullCriticalNestingConst
	LDR X3, [X0]
	/* Save the FPU context indicator. */
	LDR X0, ullPortTaskHasFPUContextConst
	LDR X2, [X0]
	/* Save the FPU context, if any (32 128-bit registers). */
	CMP X2, #0
	B.EQ 1f
	STP Q0, Q1, [SP,#-0x20]!
	STP Q2, Q3, [SP,#-0x20]!
	STP Q4, Q5, [SP,#-0x20]!
	STP Q6, Q7, [SP,#-0x20]!
	STP Q8, Q9, [SP,#-0x20]!
	STP Q10, Q11, [SP,#-0x20]!
	STP Q12, Q13, [SP,#-0x20]!
	STP Q14, Q15, [SP,#-0x20]!
	STP Q16, Q17, [SP,#-0x20]!
	STP Q18, Q19, [SP,#-0x20]!
	STP Q20, Q21, [SP,#-0x20]!
	STP Q22, Q23, [SP,#-0x20]!
	STP Q24, Q25, [SP,#-0x20]!
	STP Q26, Q27, [SP,#-0x20]!
	STP Q28, Q29, [SP,#-0x20]!
	STP Q30, Q31, [SP,#-0x20]!
1:
	/* Store the critical nesting count and FPU context indicator. */
	STP X2, X3, [SP, #-0x10]!
	LDR X0, pxCurrentTCBConst
	LDR X1, [X0]
	MOV X0, SP /* Move SP into X0 for saving. */
	STR X0, [X1]
	/* Switch to use the ELx stack pointer. */
	MSR SPSEL, #1
	.endm
/* In GNU AArch64 assembly ';' is a statement separator, not a comment
   leader, so the divider is written as a plain C-style comment. */
/**********************************************************************/
/* portRESTORE_CONTEXT: the inverse of portSAVE_CONTEXT. Loads the SP of
the task selected in *pxCurrentTCB, pops the critical nesting count and
FPU flag, re-programs the GIC priority mask (ICCPMR for GICv2, the
ICC_PMR system register for GICv3) to match the restored nesting depth,
pops the FPU registers when the flag is set, restores SPSR/ELR and
X0-X30, and finally ERETs into the task. */
.macro portRESTORE_CONTEXT
	/* Switch to use the EL0 stack pointer. */
	MSR SPSEL, #0
	/* Set the SP to point to the stack of the task being restored. */
	LDR X0, pxCurrentTCBConst
	LDR X1, [X0]
	LDR X0, [X1]
	MOV SP, X0
	LDP X2, X3, [SP], #0x10 /* Critical nesting and FPU context. */
	/* Set the PMR register to be correct for the current critical nesting
	depth. */
	LDR X0, ullCriticalNestingConst /* X0 holds the address of ullCriticalNesting. */
	MOV X1, #255 /* X1 holds the unmask value. */
#if defined(GICv2)
	LDR X4, ullICCPMRConst /* X4 holds the address of the ICCPMR constant. */
	CMP X3, #0
	LDR X5, [X4] /* X5 holds the address of the ICCPMR register. */
	B.EQ 1f
	LDR X6, ullMaxAPIPriorityMaskConst
	LDR X1, [X6] /* X1 holds the mask value. */
1:
	STR W1, [X5] /* Write the mask value to ICCPMR. */
#else
	CMP X3, #0
	B.EQ 2f
	LDR X6, ullMaxAPIPriorityMaskConst
	LDR X1, [X6] /* X1 holds the mask value. */
2:
	MSR S3_0_C4_C6_0, X1
#endif
	DSB SY /* _RB_Barriers probably not required here. */
	ISB SY
	STR X3, [X0] /* Restore the task's critical nesting count. */
	/* Restore the FPU context indicator. */
	LDR X0, ullPortTaskHasFPUContextConst
	STR X2, [X0]
	/* Restore the FPU context, if any. */
	CMP X2, #0
	B.EQ 1f
	LDP Q30, Q31, [SP], #0x20
	LDP Q28, Q29, [SP], #0x20
	LDP Q26, Q27, [SP], #0x20
	LDP Q24, Q25, [SP], #0x20
	LDP Q22, Q23, [SP], #0x20
	LDP Q20, Q21, [SP], #0x20
	LDP Q18, Q19, [SP], #0x20
	LDP Q16, Q17, [SP], #0x20
	LDP Q14, Q15, [SP], #0x20
	LDP Q12, Q13, [SP], #0x20
	LDP Q10, Q11, [SP], #0x20
	LDP Q8, Q9, [SP], #0x20
	LDP Q6, Q7, [SP], #0x20
	LDP Q4, Q5, [SP], #0x20
	LDP Q2, Q3, [SP], #0x20
	LDP Q0, Q1, [SP], #0x20
1:
	LDP X2, X3, [SP], #0x10 /* SPSR and ELR. */
#if defined( GUEST )
	/* Restore the SPSR. */
	MSR SPSR_EL1, X3
	/* Restore the ELR. */
	MSR ELR_EL1, X2
#else
	/* Restore the SPSR. */
	MSR SPSR_EL3, X3 /*_RB_ Assumes started in EL3. */
	/* Restore the ELR. */
	MSR ELR_EL3, X2
#endif
	LDP X30, XZR, [SP], #0x10
	LDP X28, X29, [SP], #0x10
	LDP X26, X27, [SP], #0x10
	LDP X24, X25, [SP], #0x10
	LDP X22, X23, [SP], #0x10
	LDP X20, X21, [SP], #0x10
	LDP X18, X19, [SP], #0x10
	LDP X16, X17, [SP], #0x10
	LDP X14, X15, [SP], #0x10
	LDP X12, X13, [SP], #0x10
	LDP X10, X11, [SP], #0x10
	LDP X8, X9, [SP], #0x10
	LDP X6, X7, [SP], #0x10
	LDP X4, X5, [SP], #0x10
	LDP X2, X3, [SP], #0x10
	LDP X0, X1, [SP], #0x10
	/* Switch to use the ELx stack pointer. _RB_ Might not be required. */
	MSR SPSEL, #1
	ERET
	.endm
/******************************************************************************
* FreeRTOS_SWI_Handler handler is used to perform a context switch.
*****************************************************************************/
.align 8
.type FreeRTOS_SWI_Handler, %function
/* Reached from the synchronous exception vector. Verifies via the ESR
exception class that the trap really was SVC (GUEST build) or SMC
(EL3 build); anything else spins in FreeRTOS_Abort with the full ESR
still in X0 for post-mortem inspection. */
FreeRTOS_SWI_Handler:
	/* Save the context of the current task and select a new task to run. */
	portSAVE_CONTEXT
#if defined( GUEST )
	MRS X0, ESR_EL1
#else
	MRS X0, ESR_EL3
#endif
	LSR X1, X0, #26 /* Extract the exception class, ESR[31:26]. */
#if defined( GUEST )
	CMP X1, #0x15 /* 0x15 = SVC instruction. */
#else
	CMP X1, #0x17 /* 0x17 = SMC instruction. */
#endif
	B.NE FreeRTOS_Abort
	BL vTaskSwitchContext
	portRESTORE_CONTEXT
FreeRTOS_Abort:
	/* Full ESR is in X0, exception class code is in X1. */
	B .
/******************************************************************************
* vPortRestoreTaskContext is used to start the scheduler.
*****************************************************************************/
.align 8
.type vPortRestoreTaskContext, %function
/* Installs the FreeRTOS vector table into VBAR (EL1 for GUEST builds,
EL3 otherwise) and then restores the first task's context; does not
return. */
vPortRestoreTaskContext:
.set freertos_vector_base, _freertos_vector_table
	/* Install the FreeRTOS interrupt handlers. */
	LDR X1, =freertos_vector_base
#if defined( GUEST )
	MSR VBAR_EL1, X1
#else
	MSR VBAR_EL3, X1
#endif
	DSB SY
	ISB SY
	/* Start the first task. */
	portRESTORE_CONTEXT
/******************************************************************************
* FreeRTOS_IRQ_Handler handles IRQ entry and exit.
*
* Saves only the caller-saved registers plus SPSR/ELR, bumps the interrupt
* nesting count, acknowledges the interrupt (GICv2 ICCIAR read or GICv3
* ICC_IAR system register), calls the C-level vApplicationIRQHandler, then
* signals EOI. On unwinding to nesting level 0 with ullPortYieldRequired
* set, the partial frame is popped and a full portSAVE_CONTEXT /
* vTaskSwitchContext / portRESTORE_CONTEXT sequence performs the switch;
* otherwise the partial frame is simply popped and the handler ERETs.
*****************************************************************************/
.align 8
.type FreeRTOS_IRQ_Handler, %function
FreeRTOS_IRQ_Handler:
	/* Save volatile registers. */
	STP X0, X1, [SP, #-0x10]!
	STP X2, X3, [SP, #-0x10]!
	STP X4, X5, [SP, #-0x10]!
	STP X6, X7, [SP, #-0x10]!
	STP X8, X9, [SP, #-0x10]!
	STP X10, X11, [SP, #-0x10]!
	STP X12, X13, [SP, #-0x10]!
	STP X14, X15, [SP, #-0x10]!
	STP X16, X17, [SP, #-0x10]!
	STP X18, X19, [SP, #-0x10]!
	STP X29, X30, [SP, #-0x10]!
	/* Save the SPSR and ELR. */
#if defined( GUEST )
	MRS X3, SPSR_EL1
	MRS X2, ELR_EL1
#else
	MRS X3, SPSR_EL3
	MRS X2, ELR_EL3
#endif
	STP X2, X3, [SP, #-0x10]!
	/* Increment the interrupt nesting counter. */
	LDR X5, ullPortInterruptNestingConst
	LDR X1, [X5] /* Old nesting count in X1. */
	ADD X6, X1, #1
	STR X6, [X5] /* Address of nesting count variable in X5. */
	/* Maintain the interrupt nesting information across the function call. */
	STP X1, X5, [SP, #-0x10]!
	/* Read value from the interrupt acknowledge register, which is stored in W0
	for future parameter and interrupt clearing use. */
#if defined(GICv2)
	LDR X2, ullICCIARConst
	LDR X3, [X2]
	LDR W0, [X3] /* ICCIAR in W0 as parameter. */
#else
	MRS X0, S3_0_C12_C8_0
#endif
	/* ICCIAR in W0 as parameter. */
	/* Maintain the ICCIAR value across the function call. */
	STP X0, X1, [SP, #-0x10]!
	/* Call the C handler. */
	BL vApplicationIRQHandler
	/* Disable interrupts. */
#if defined(GICv2)
	MSR DAIFSET, #3
#else
	MSR DAIFSET, #2
#endif
	DSB SY
	ISB SY
	/* Restore the ICCIAR value. */
	LDP X0, X1, [SP], #0x10
	/* End IRQ processing by writing ICCIAR to the EOI register. */
#if defined(GICv2)
	LDR X4, ullICCEOIRConst
	LDR X4, [X4]
	STR W0, [X4]
#else
	MSR S3_0_C12_C8_1, X0
#endif
	/* Restore the interrupt nesting count saved on entry (effectively the
	decrement that balances the increment above). */
	LDP X1, X5, [SP], #0x10
	STR X1, [X5]
	/* Has interrupt nesting unwound? */
	CMP X1, #0
	B.NE Exit_IRQ_No_Context_Switch
	/* Is a context switch required? */
	LDR X0, ullPortYieldRequiredConst
	LDR X1, [X0]
	CMP X1, #0
	B.EQ Exit_IRQ_No_Context_Switch
	/* Reset ullPortYieldRequired to 0. */
	MOV X2, #0
	STR X2, [X0]
	/* Restore volatile registers. */
	LDP X4, X5, [SP], #0x10 /* SPSR and ELR. */
#if defined( GUEST )
	MSR SPSR_EL1, X5
	MSR ELR_EL1, X4
#else
	MSR SPSR_EL3, X5 /*_RB_ Assumes started in EL3. */
	MSR ELR_EL3, X4
#endif
	DSB SY
	ISB SY
	LDP X29, X30, [SP], #0x10
	LDP X18, X19, [SP], #0x10
	LDP X16, X17, [SP], #0x10
	LDP X14, X15, [SP], #0x10
	LDP X12, X13, [SP], #0x10
	LDP X10, X11, [SP], #0x10
	LDP X8, X9, [SP], #0x10
	LDP X6, X7, [SP], #0x10
	LDP X4, X5, [SP], #0x10
	LDP X2, X3, [SP], #0x10
	LDP X0, X1, [SP], #0x10
	/* Save the context of the current task and select a new task to run. */
	portSAVE_CONTEXT
	BL vTaskSwitchContext
	portRESTORE_CONTEXT
Exit_IRQ_No_Context_Switch:
	/* Restore volatile registers. */
	LDP X4, X5, [SP], #0x10 /* SPSR and ELR. */
#if defined( GUEST )
	MSR SPSR_EL1, X5
	MSR ELR_EL1, X4
#else
	MSR SPSR_EL3, X5 /*_RB_ Assumes started in EL3. */
	MSR ELR_EL3, X4
#endif
	DSB SY
	ISB SY
	LDP X29, X30, [SP], #0x10
	LDP X18, X19, [SP], #0x10
	LDP X16, X17, [SP], #0x10
	LDP X14, X15, [SP], #0x10
	LDP X12, X13, [SP], #0x10
	LDP X10, X11, [SP], #0x10
	LDP X8, X9, [SP], #0x10
	LDP X6, X7, [SP], #0x10
	LDP X4, X5, [SP], #0x10
	LDP X2, X3, [SP], #0x10
	LDP X0, X1, [SP], #0x10
	ERET
/* Literal pool: 8-byte addresses of the C variables referenced above,
loaded PC-relatively with LDR Xn, <label>. The GICv2 entries are only
emitted when the memory-mapped (non system-register) interface is used. */
.align 8
pxCurrentTCBConst: .dword pxCurrentTCB
ullCriticalNestingConst: .dword ullCriticalNesting
ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
ullMaxAPIPriorityMaskConst: .dword ullMaxAPIPriorityMask
vApplicationIRQHandlerConst: .dword vApplicationIRQHandler
ullPortInterruptNestingConst: .dword ullPortInterruptNesting
ullPortYieldRequiredConst: .dword ullPortYieldRequired
#if defined(GICv2)
ullICCPMRConst: .dword ullICCPMR
ullICCIARConst: .dword ullICCIAR
ullICCEOIRConst: .dword ullICCEOIR
#endif
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 6,362 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/ARM_CA53/port_asm_vectors.S | /******************************************************************************
*
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (C) 2014 - 2020 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A53 processor
* Currently NEON registers are not saved on stack if interrupt is taken.
* It will be implemented.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 5/21/14 Initial version
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5)
#define GICv3
#else
#define GICv2
#endif
.org 0
.text
.globl _boot
.globl _vector_table
.globl _freertos_vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SErrorInterrupt
.globl SynchronousInterrupt
.org 0
.section .vectors, "a"
/* Boot-time vector table, active until vPortRestoreTaskContext installs
_freertos_vector_table. An AArch64 vector table has 16 entries of 0x80
bytes each, in four groups of four (Synchronous, IRQ, FIQ, SError):
Current EL with SP_EL0, Current EL with SP_ELx, Lower EL using AArch64,
Lower EL using AArch32. Only the very first entry is used (it carries
the branch to _boot); every other vector spins in place, as no exception
is expected before the RTOS starts. */
_vector_table:
.set VBAR, _vector_table
.org VBAR
b _boot
.org (VBAR + 0x80)
b .
.org (VBAR + 0x100)
b .
.org (VBAR + 0x180)
b .
.org (VBAR + 0x200)
b .
.org (VBAR + 0x280)
b .
.org (VBAR + 0x300)
b .
.org (VBAR + 0x380)
b .
.org (VBAR + 0x400)
b .
.org (VBAR + 0x480)
b .
.org (VBAR + 0x500)
b .
.org (VBAR + 0x580)
b .
.org (VBAR + 0x600)
b .
.org (VBAR + 0x680)
b .
.org (VBAR + 0x700)
b .
.org (VBAR + 0x780)
b .
/******************************************************************************
* Vector table to use when FreeRTOS is running.
*
* Placed 0x1000 past the boot table. Synchronous exceptions (SVC from
* tasks) route to FreeRTOS_SWI_Handler and IRQs to FreeRTOS_IRQ_Handler,
* for both the "Current EL with SP_EL0" group (+0x000) and the "Current EL
* with SP_ELx" group (+0x200). With GICv3, interrupts can also be signalled
* as FIQ (interrupt group 0), so FIQ vectors route to the IRQ handler too.
* Lower-EL groups (+0x400, +0x600) are unused and spin.
*****************************************************************************/
.set FREERTOS_VBAR, (VBAR+0x1000)
.org(FREERTOS_VBAR)
_freertos_vector_table:
b FreeRTOS_SWI_Handler
.org (FREERTOS_VBAR + 0x80)
b FreeRTOS_IRQ_Handler
.org (FREERTOS_VBAR + 0x100)
#if defined(GICv3)
b FreeRTOS_IRQ_Handler
#else
b .
#endif
.org (FREERTOS_VBAR + 0x180)
b .
.org (FREERTOS_VBAR + 0x200)
b FreeRTOS_SWI_Handler
.org (FREERTOS_VBAR + 0x280)
b FreeRTOS_IRQ_Handler
.org (FREERTOS_VBAR + 0x300)
b FreeRTOS_IRQ_Handler
.org (FREERTOS_VBAR + 0x380)
b .
.org (FREERTOS_VBAR + 0x400)
b .
.org (FREERTOS_VBAR + 0x480)
b .
.org (FREERTOS_VBAR + 0x500)
b .
.org (FREERTOS_VBAR + 0x580)
b .
.org (FREERTOS_VBAR + 0x600)
b .
.org (FREERTOS_VBAR + 0x680)
b .
.org (FREERTOS_VBAR + 0x700)
b .
.org (FREERTOS_VBAR + 0x780)
b .
.org (FREERTOS_VBAR + 0x800) /* Handler code placed just past the 16-entry table. */
/* Standalone synchronous-exception stub: saves registers in 16-byte STP
pairs (keeping SP 16-byte aligned as AArch64 requires), calls the C-level
SynchronousInterrupt() handler, restores and returns with ERET. */
SynchronousInterruptHandler:
stp X0,X1, [sp,#-0x10]!
stp X2,X3, [sp,#-0x10]!
stp X4,X5, [sp,#-0x10]!
stp X6,X7, [sp,#-0x10]!
stp X8,X9, [sp,#-0x10]!
stp X10,X11, [sp,#-0x10]!
stp X12,X13, [sp,#-0x10]!
stp X14,X15, [sp,#-0x10]!
stp X16,X17, [sp,#-0x10]!
stp X18,X19, [sp,#-0x10]!
stp X29,X30, [sp,#-0x10]!
bl SynchronousInterrupt
ldp X29,X30, [sp], #0x10
ldp X18,X19, [sp], #0x10
ldp X16,X17, [sp], #0x10
ldp X14,X15, [sp], #0x10
ldp X12,X13, [sp], #0x10
ldp X10,X11, [sp], #0x10
ldp X8,X9, [sp], #0x10
ldp X6,X7, [sp], #0x10
ldp X4,X5, [sp], #0x10
ldp X2,X3, [sp], #0x10
ldp X0,X1, [sp], #0x10
eret
/* Standalone IRQ stub: same save/call/restore pattern as above, handing
control to the C-level IRQInterrupt() handler. NEON/FP registers are not
saved here (see the file header note). */
IRQInterruptHandler:
stp X0,X1, [sp,#-0x10]!
stp X2,X3, [sp,#-0x10]!
stp X4,X5, [sp,#-0x10]!
stp X6,X7, [sp,#-0x10]!
stp X8,X9, [sp,#-0x10]!
stp X10,X11, [sp,#-0x10]!
stp X12,X13, [sp,#-0x10]!
stp X14,X15, [sp,#-0x10]!
stp X16,X17, [sp,#-0x10]!
stp X18,X19, [sp,#-0x10]!
stp X29,X30, [sp,#-0x10]!
bl IRQInterrupt
ldp X29,X30, [sp], #0x10
ldp X18,X19, [sp], #0x10
ldp X16,X17, [sp], #0x10
ldp X14,X15, [sp], #0x10
ldp X12,X13, [sp], #0x10
ldp X10,X11, [sp], #0x10
ldp X8,X9, [sp], #0x10
ldp X6,X7, [sp], #0x10
ldp X4,X5, [sp], #0x10
ldp X2,X3, [sp], #0x10
ldp X0,X1, [sp], #0x10
eret
/* Standalone FIQ stub: same save/call/restore pattern, handing control to
the C-level FIQInterrupt() handler. */
FIQInterruptHandler:
stp X0,X1, [sp,#-0x10]!
stp X2,X3, [sp,#-0x10]!
stp X4,X5, [sp,#-0x10]!
stp X6,X7, [sp,#-0x10]!
stp X8,X9, [sp,#-0x10]!
stp X10,X11, [sp,#-0x10]!
stp X12,X13, [sp,#-0x10]!
stp X14,X15, [sp,#-0x10]!
stp X16,X17, [sp,#-0x10]!
stp X18,X19, [sp,#-0x10]!
stp X29,X30, [sp,#-0x10]!
bl FIQInterrupt
ldp X29,X30, [sp], #0x10
ldp X18,X19, [sp], #0x10
ldp X16,X17, [sp], #0x10
ldp X14,X15, [sp], #0x10
ldp X12,X13, [sp], #0x10
ldp X10,X11, [sp], #0x10
ldp X8,X9, [sp], #0x10
ldp X6,X7, [sp], #0x10
ldp X4,X5, [sp], #0x10
ldp X2,X3, [sp], #0x10
ldp X0,X1, [sp], #0x10
eret
/* Standalone SError (system error) stub: same save/call/restore pattern,
handing control to the C-level SErrorInterrupt() handler. */
SErrorInterruptHandler:
stp X0,X1, [sp,#-0x10]!
stp X2,X3, [sp,#-0x10]!
stp X4,X5, [sp,#-0x10]!
stp X6,X7, [sp,#-0x10]!
stp X8,X9, [sp,#-0x10]!
stp X10,X11, [sp,#-0x10]!
stp X12,X13, [sp,#-0x10]!
stp X14,X15, [sp,#-0x10]!
stp X16,X17, [sp,#-0x10]!
stp X18,X19, [sp,#-0x10]!
stp X29,X30, [sp,#-0x10]!
bl SErrorInterrupt
ldp X29,X30, [sp], #0x10
ldp X18,X19, [sp], #0x10
ldp X16,X17, [sp], #0x10
ldp X14,X15, [sp], #0x10
ldp X12,X13, [sp], #0x10
ldp X10,X11, [sp], #0x10
ldp X8,X9, [sp], #0x10
ldp X6,X7, [sp], #0x10
ldp X4,X5, [sp], #0x10
ldp X2,X3, [sp], #0x10
ldp X0,X1, [sp], #0x10
eret
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 10,151 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/MicroBlazeV9/portasm.S | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/* FreeRTOS includes. */
#include "FreeRTOSConfig.h"
/* Xilinx library includes. */
#include "microblaze_exceptions_g.h"
#include "xparameters.h"
#include "microblaze_instructions.h"
/* The context is oversized to allow functions called from the ISR to write
back into the caller stack. */
#if defined (__arch64__)
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
#define portCONTEXT_SIZE 272
#define portMINUS_CONTEXT_SIZE -272
#else
#define portCONTEXT_SIZE 264
#define portMINUS_CONTEXT_SIZE -264
#endif
#else
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
#define portCONTEXT_SIZE 136
#define portMINUS_CONTEXT_SIZE -136
#else
#define portCONTEXT_SIZE 132
#define portMINUS_CONTEXT_SIZE -132
#endif
#endif
/* Offsets from the stack pointer at which saved registers are placed. */
#if defined (__arch64__)
#define portR31_OFFSET 8
#define portR30_OFFSET 16
#define portR29_OFFSET 24
#define portR28_OFFSET 32
#define portR27_OFFSET 40
#define portR26_OFFSET 48
#define portR25_OFFSET 56
#define portR24_OFFSET 64
#define portR23_OFFSET 72
#define portR22_OFFSET 80
#define portR21_OFFSET 88
#define portR20_OFFSET 96
#define portR19_OFFSET 104
#define portR18_OFFSET 112
#define portR17_OFFSET 120
#define portR16_OFFSET 128
#define portR15_OFFSET 136
#define portR14_OFFSET 144
#define portR13_OFFSET 152
#define portR12_OFFSET 160
#define portR11_OFFSET 168
#define portR10_OFFSET 176
#define portR9_OFFSET 184
#define portR8_OFFSET 192
#define portR7_OFFSET 200
#define portR6_OFFSET 208
#define portR5_OFFSET 216
#define portR4_OFFSET 224
#define portR3_OFFSET 232
#define portR2_OFFSET 240
#define portCRITICAL_NESTING_OFFSET 248
#define portMSR_OFFSET 256
#define portFSR_OFFSET 264
#else
#define portR31_OFFSET 4
#define portR30_OFFSET 8
#define portR29_OFFSET 12
#define portR28_OFFSET 16
#define portR27_OFFSET 20
#define portR26_OFFSET 24
#define portR25_OFFSET 28
#define portR24_OFFSET 32
#define portR23_OFFSET 36
#define portR22_OFFSET 40
#define portR21_OFFSET 44
#define portR20_OFFSET 48
#define portR19_OFFSET 52
#define portR18_OFFSET 56
#define portR17_OFFSET 60
#define portR16_OFFSET 64
#define portR15_OFFSET 68
#define portR14_OFFSET 72
#define portR13_OFFSET 76
#define portR12_OFFSET 80
#define portR11_OFFSET 84
#define portR10_OFFSET 88
#define portR9_OFFSET 92
#define portR8_OFFSET 96
#define portR7_OFFSET 100
#define portR6_OFFSET 104
#define portR5_OFFSET 108
#define portR4_OFFSET 112
#define portR3_OFFSET 116
#define portR2_OFFSET 120
#define portCRITICAL_NESTING_OFFSET 124
#define portMSR_OFFSET 128
#define portFSR_OFFSET 132
#endif
.extern pxCurrentTCB
.extern XIntc_DeviceInterruptHandler
.extern vTaskSwitchContext
.extern uxCriticalNesting
.extern pulISRStack
.extern ulTaskSwitchRequested
.extern vPortExceptionHandler
.extern pulStackPointerOnFunctionEntry
.global _interrupt_handler
.global VPortYieldASM
.global vPortStartFirstTask
.global vPortExceptionHandlerEntry
/* portSAVE_CONTEXT: pushes a full MicroBlaze task context onto the current
stack - r2-r13 and r15-r31 (r14 is saved separately by each caller
because the return-address adjustment differs between interrupt and
yield entry), the critical nesting count, the MSR and (when an FPU is
configured) the FSR - then stores the resulting stack pointer into the
first member of the current TCB. r18 is saved below and afterwards
reused as a scratch register. Upper-case mnemonics (SI, LI, ADDLIK, ...)
are macros from microblaze_instructions.h selecting 32/64-bit forms. */
.macro portSAVE_CONTEXT
/* Make room for the context on the stack. */
ADDLIK r1, r1, portMINUS_CONTEXT_SIZE
/* Stack general registers. */
SI r31, r1, portR31_OFFSET
SI r30, r1, portR30_OFFSET
SI r29, r1, portR29_OFFSET
SI r28, r1, portR28_OFFSET
SI r27, r1, portR27_OFFSET
SI r26, r1, portR26_OFFSET
SI r25, r1, portR25_OFFSET
SI r24, r1, portR24_OFFSET
SI r23, r1, portR23_OFFSET
SI r22, r1, portR22_OFFSET
SI r21, r1, portR21_OFFSET
SI r20, r1, portR20_OFFSET
SI r19, r1, portR19_OFFSET
SI r18, r1, portR18_OFFSET
SI r17, r1, portR17_OFFSET
SI r16, r1, portR16_OFFSET
SI r15, r1, portR15_OFFSET
/* R14 is saved later as it needs adjustment if a yield is performed. */
SI r13, r1, portR13_OFFSET
SI r12, r1, portR12_OFFSET
SI r11, r1, portR11_OFFSET
SI r10, r1, portR10_OFFSET
SI r9, r1, portR9_OFFSET
SI r8, r1, portR8_OFFSET
SI r7, r1, portR7_OFFSET
SI r6, r1, portR6_OFFSET
SI r5, r1, portR5_OFFSET
SI r4, r1, portR4_OFFSET
SI r3, r1, portR3_OFFSET
SI r2, r1, portR2_OFFSET
/* Stack the critical section nesting value. r18 is free as scratch from
here on - its true value was stacked above. */
LI r18, r0, uxCriticalNesting
SI r18, r1, portCRITICAL_NESTING_OFFSET
/* Stack MSR. */
mfs r18, rmsr
SI r18, r1, portMSR_OFFSET
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
/* Stack FSR. */
mfs r18, rfsr
SI r18, r1, portFSR_OFFSET
#endif
/* Save the top of stack value to the TCB. */
LI r3, r0, pxCurrentTCB
STORE r1, r0, r3
.endm
/* portRESTORE_CONTEXT: loads the stack pointer from the current TCB and
unwinds a context saved by portSAVE_CONTEXT. r18 is deliberately NOT
reloaded with the other registers - it is still needed as scratch and is
restored from the stack immediately before the final return (both here
and in exit_from_yield). The saved critical nesting count selects the
return style: zero means the task was preempted by an interrupt, so
return with rtid (re-enables interrupts); non-zero means it yielded, so
branch to exit_from_yield, which returns with rtsd and leaves the
interrupt-enable state to the MSR reloaded below. */
.macro portRESTORE_CONTEXT
/* Load the top of stack value from the TCB. */
LI r18, r0, pxCurrentTCB
LOAD r1, r0, r18
/* Restore the general registers. */
LI r31, r1, portR31_OFFSET
LI r30, r1, portR30_OFFSET
LI r29, r1, portR29_OFFSET
LI r28, r1, portR28_OFFSET
LI r27, r1, portR27_OFFSET
LI r26, r1, portR26_OFFSET
LI r25, r1, portR25_OFFSET
LI r24, r1, portR24_OFFSET
LI r23, r1, portR23_OFFSET
LI r22, r1, portR22_OFFSET
LI r21, r1, portR21_OFFSET
LI r20, r1, portR20_OFFSET
LI r19, r1, portR19_OFFSET
/* r18 intentionally skipped - restored just before returning. */
LI r17, r1, portR17_OFFSET
LI r16, r1, portR16_OFFSET
LI r15, r1, portR15_OFFSET
LI r14, r1, portR14_OFFSET
LI r13, r1, portR13_OFFSET
LI r12, r1, portR12_OFFSET
LI r11, r1, portR11_OFFSET
LI r10, r1, portR10_OFFSET
LI r9, r1, portR9_OFFSET
LI r8, r1, portR8_OFFSET
LI r7, r1, portR7_OFFSET
LI r6, r1, portR6_OFFSET
LI r5, r1, portR5_OFFSET
LI r4, r1, portR4_OFFSET
LI r3, r1, portR3_OFFSET
LI r2, r1, portR2_OFFSET
/* Reload the rmsr from the stack. */
LI r18, r1, portMSR_OFFSET
mts rmsr, r18
#if( XPAR_MICROBLAZE_USE_FPU != 0 )
/* Reload the FSR from the stack. */
LI r18, r1, portFSR_OFFSET
mts rfsr, r18
#endif
/* Load the critical nesting value. */
LI r18, r1, portCRITICAL_NESTING_OFFSET
SI r18, r0, uxCriticalNesting
/* Test the critical nesting value. If it is non zero then the task last
exited the running state using a yield. If it is zero, then the task
last exited the running state through an interrupt. */
XORI r18, r18, 0
BNEI r18, exit_from_yield
/* r18 was being used as a temporary. Now restore its true value from the
stack. */
LI r18, r1, portR18_OFFSET
/* Remove the stack frame. */
ADDLIK r1, r1, portCONTEXT_SIZE
/* Return using rtid so interrupts are re-enabled as this function is
exited. */
rtid r14, 0
OR r0, r0, r0 /* No-op in the branch delay slot. */
.endm
/* This function is used to exit portRESTORE_CONTEXT() if the task being
returned to last left the Running state by calling taskYIELD() (rather than
being preempted by an interrupt). Unlike the rtid path, rtsd does not
alter the interrupt-enable state - that was already established when the
saved MSR was reloaded inside portRESTORE_CONTEXT. */
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
exit_from_yield:
/* r18 was being used as a temporary. Now restore its true value from the
stack. */
LI r18, r1, portR18_OFFSET
/* Remove the stack frame. */
ADDLIK r1, r1, portCONTEXT_SIZE
/* Return to the task. */
rtsd r14, 0
OR r0, r0, r0 /* No-op in the branch delay slot. */
/* _interrupt_handler: the MicroBlaze hardware interrupt entry point. Saves
the interrupted task's context (r14 stacked unadjusted, so the rtid in
portRESTORE_CONTEXT resumes at the interrupted instruction), switches to
the dedicated ISR stack, runs all pending interrupts through the Xilinx
interrupt controller driver, and performs a context switch only if one of
the ISRs set ulTaskSwitchRequested. */
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
_interrupt_handler:
portSAVE_CONTEXT
/* Stack the return address. */
SI r14, r1, portR14_OFFSET
/* Switch to the ISR stack. */
LI r1, r0, pulISRStack
/* The parameter to the interrupt handler. */
ORI r5, r0, configINTERRUPT_CONTROLLER_TO_USE
/* Execute any pending interrupts. */
BRALID r15, XIntc_DeviceInterruptHandler
OR r0, r0, r0 /* No-op in the branch delay slot. */
/* See if a new task should be selected to execute. */
LI r18, r0, ulTaskSwitchRequested
OR r18, r18, r0
/* If ulTaskSwitchRequested is already zero, then jump straight to
restoring the task that is already in the Running state. */
BEQI r18, task_switch_not_requested
/* Set ulTaskSwitchRequested back to zero as a task switch is about to be
performed. */
SI r0, r0, ulTaskSwitchRequested
/* ulTaskSwitchRequested was not 0 when tested. Select the next task to
execute. */
BRALID r15, vTaskSwitchContext
OR r0, r0, r0 /* No-op in the branch delay slot. */
task_switch_not_requested:
/* Restore the context of the next task scheduled to execute. */
portRESTORE_CONTEXT
/* VPortYieldASM: cooperative-yield entry point. Saves the calling task's
context, advances the stacked return address by 8 so the task resumes at
the instruction after the yield call, switches to the ISR stack, selects
the next task and restores its context. */
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
VPortYieldASM:
portSAVE_CONTEXT
/* Modify the return address so a return is done to the instruction after
the call to VPortYieldASM. */
ADDI r14, r14, 8
SI r14, r1, portR14_OFFSET
/* Switch to use the ISR stack. */
LI r1, r0, pulISRStack
/* Select the next task to execute. */
BRALID r15, vTaskSwitchContext
OR r0, r0, r0 /* No-op in the branch delay slot. */
/* Restore the context of the next task scheduled to execute. */
portRESTORE_CONTEXT
/* vPortStartFirstTask: starts the scheduler by restoring the context of
the first task selected to run. Never returns. */
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
vPortStartFirstTask:
portRESTORE_CONTEXT
#if ( MICROBLAZE_EXCEPTIONS_ENABLED == 1 ) && ( configINSTALL_EXCEPTION_HANDLERS == 1 )
/* vPortExceptionHandlerEntry: thin shim in front of the C exception
handler that records the stack pointer exactly as it was at the point of
the exception, before any C stack frame is created. */
.text
#ifdef __arch64__
.align 8
#else
.align 4
#endif
vPortExceptionHandlerEntry:
/* Take a copy of the stack pointer before vPortExecptionHandler is called,
storing its value prior to the function stack frame being created. */
SI r1, r0, pulStackPointerOnFunctionEntry
BRALID r15, vPortExceptionHandler
OR r0, r0, r0 /* No-op in the branch delay slot. */
#endif /* ( MICROBLAZE_EXCEPTIONS_ENABLED == 1 ) && ( configINSTALL_EXCEPTION_HANDLERS == 1 ) */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,861 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/ARM_CA9/portASM.S | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
.text
.arm
.set SYS_MODE, 0x1f
.set SVC_MODE, 0x13
.set IRQ_MODE, 0x12
/* Hardware registers. */
.extern ulICCIAR
.extern ulICCEOIR
.extern ulICCPMR
/* Variables and functions. */
.extern ulMaxAPIPriorityMask
.extern _freertos_vector_table
.extern pxCurrentTCB
.extern vTaskSwitchContext
.extern vApplicationIRQHandler
.extern ulPortInterruptNesting
.extern ulPortTaskHasFPUContext
.global FreeRTOS_IRQ_Handler
.global FreeRTOS_SWI_Handler
.global vPortRestoreTaskContext
/* portSAVE_CONTEXT: saves a full Cortex-A9 task context onto the task's
system-mode stack - return state (LR/SPSR via SRSDB), R0-R12 and R14,
the critical nesting count, the VFP registers (D0-D31 plus FPSCR) only
when ulPortTaskHasFPUContext is non-zero, then that flag itself - and
finally records SP in the first member of the current TCB. */
.macro portSAVE_CONTEXT
/* Save the LR and SPSR onto the system mode stack before switching to
system mode to save the remaining system mode registers. */
SRSDB sp!, #SYS_MODE
CPS #SYS_MODE
PUSH {R0-R12, R14}
/* Push the critical nesting count. */
LDR R2, ulCriticalNestingConst
LDR R1, [R2]
PUSH {R1}
/* Does the task have a floating point context that needs saving? If
ulPortTaskHasFPUContext is 0 then no. */
LDR R2, ulPortTaskHasFPUContextConst
LDR R3, [R2]
CMP R3, #0
/* Save the floating point context, if any. */
FMRXNE R1, FPSCR
VPUSHNE {D0-D15}
VPUSHNE {D16-D31}
PUSHNE {R1}
/* Save ulPortTaskHasFPUContext itself. */
PUSH {R3}
/* Save the stack pointer in the TCB. */
LDR R0, pxCurrentTCBConst
LDR R1, [R0]
STR SP, [R1]
.endm
; /**********************************************************************/
/* portRESTORE_CONTEXT: mirror of portSAVE_CONTEXT. Reloads SP from the
TCB, conditionally restores the VFP context, restores the critical
nesting count and programs the GIC priority mask (ICCPMR) to match it:
255 (no interrupts masked) at nesting depth zero, otherwise
ulMaxAPIPriorityMask. Ends with RFEIA, which pops PC and CPSR together
to resume the task. */
.macro portRESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
LDR R0, pxCurrentTCBConst
LDR R1, [R0]
LDR SP, [R1]
/* Is there a floating point context to restore? If the restored
ulPortTaskHasFPUContext is zero then no. */
LDR R0, ulPortTaskHasFPUContextConst
POP {R1}
STR R1, [R0]
CMP R1, #0
/* Restore the floating point context, if any. */
POPNE {R0}
VPOPNE {D16-D31}
VPOPNE {D0-D15}
VMSRNE FPSCR, R0
/* Restore the critical section nesting depth. */
LDR R0, ulCriticalNestingConst
POP {R1}
STR R1, [R0]
/* Ensure the priority mask is correct for the critical nesting depth. */
LDR R2, ulICCPMRConst
LDR R2, [R2]
CMP R1, #0
MOVEQ R4, #255 /* Nesting 0: unmask all interrupt priorities. */
LDRNE R4, ulMaxAPIPriorityMaskConst
LDRNE R4, [R4]
STR R4, [R2]
/* Restore all system mode registers other than the SP (which is already
being used). */
POP {R0-R12, R14}
/* Return to the task code, loading CPSR on the way. */
RFEIA sp!
.endm
/******************************************************************************
* SVC handler is used to start the scheduler and to perform cooperative
* context switches (portYIELD issues an SVC): save the current task's
* context, pick the next task, restore its context.
*****************************************************************************/
.align 4
.type FreeRTOS_SWI_Handler, %function
FreeRTOS_SWI_Handler:
/* Save the context of the current task and select a new task to run. */
portSAVE_CONTEXT
LDR R0, vTaskSwitchContextConst
BLX R0
portRESTORE_CONTEXT
/******************************************************************************
* vPortRestoreTaskContext is used to start the scheduler: switch to system
* mode and restore the first task's context. Never returns.
*****************************************************************************/
.type vPortRestoreTaskContext, %function
vPortRestoreTaskContext:
/* Switch to system mode. */
CPS #SYS_MODE
portRESTORE_CONTEXT
/* FreeRTOS_IRQ_Handler: IRQ entry/exit for the Cortex-A9 GIC port.
Adjusts LR (IRQ-mode exceptions return to LR-4), stacks the return state,
re-enters via SVC mode so nested IRQs are possible, acknowledges the
interrupt (ICCIAR) and calls vApplicationIRQHandler() with the interrupt
ID in r0. On exit it writes end-of-interrupt (ICCEOIR) and, only at
nesting depth zero with ulPortYieldRequired set, performs a context
switch. */
.align 4
.type FreeRTOS_IRQ_Handler, %function
FreeRTOS_IRQ_Handler:
/* Return to the interrupted instruction. */
SUB lr, lr, #4
/* Push the return address and SPSR. */
PUSH {lr}
MRS lr, SPSR
PUSH {lr}
/* Change to supervisor mode to allow reentry. */
CPS #SVC_MODE
/* Push used registers. */
PUSH {r0-r4, r12}
/* Increment nesting count. r3 holds the address of ulPortInterruptNesting
for future use. r1 holds the original ulPortInterruptNesting value for
future use. */
LDR r3, ulPortInterruptNestingConst
LDR r1, [r3]
ADD r4, r1, #1
STR r4, [r3]
/* Read value from the interrupt acknowledge register, which is stored in r0
for future parameter and interrupt clearing use. */
LDR r2, ulICCIARConst
LDR r2, [r2]
LDR r0, [r2]
/* Ensure bit 2 of the stack pointer is clear. r2 holds the bit 2 value for
future use. _RB_ Does this ever actually need to be done provided the start
of the stack is 8-byte aligned? */
MOV r2, sp
AND r2, r2, #4
SUB sp, sp, r2
/* Call the interrupt handler. r4 pushed to maintain alignment. */
PUSH {r0-r4, lr}
LDR r1, vApplicationIRQHandlerConst
BLX r1
POP {r0-r4, lr}
ADD sp, sp, r2 /* Undo the alignment adjustment made above. */
CPSID i
DSB
ISB
/* Write the value read from ICCIAR to ICCEOIR. */
LDR r4, ulICCEOIRConst
LDR r4, [r4]
STR r0, [r4]
/* Restore the old nesting count. */
STR r1, [r3]
/* A context switch is never performed if the nesting count is not 0. */
CMP r1, #0
BNE exit_without_switch
/* Did the interrupt request a context switch? r1 holds the address of
ulPortYieldRequired and r0 the value of ulPortYieldRequired for future
use. */
LDR r1, =ulPortYieldRequired
LDR r0, [r1]
CMP r0, #0
BNE switch_before_exit
exit_without_switch:
/* No context switch. Restore used registers, LR_irq and SPSR before
returning. */
POP {r0-r4, r12}
CPS #IRQ_MODE
POP {LR}
MSR SPSR_cxsf, LR
POP {LR}
MOVS PC, LR /* Exception return: restores CPSR from SPSR. */
switch_before_exit:
/* A context switch is to be performed. Clear the context switch pending
flag. */
MOV r0, #0
STR r0, [r1]
/* Restore used registers, LR-irq and SPSR before saving the context
to the task stack. */
POP {r0-r4, r12}
CPS #IRQ_MODE
POP {LR}
MSR SPSR_cxsf, LR
POP {LR}
portSAVE_CONTEXT
/* Call the function that selects the new task to execute.
vTaskSwitchContext() if vTaskSwitchContext() uses LDRD or STRD
instructions, or 8 byte aligned stack allocated data. LR does not need
saving as a new LR will be loaded by portRESTORE_CONTEXT anyway. */
LDR R0, vTaskSwitchContextConst
BLX R0
/* Restore the context of, and branch to, the task selected to execute
next. */
portRESTORE_CONTEXT
/******************************************************************************
* If the application provides an implementation of vApplicationIRQHandler(),
* then it will get called directly without saving the FPU registers on
* interrupt entry, and this weak implementation of
* vApplicationIRQHandler() will not get called.
*
* If the application provides its own implementation of
* vApplicationFPUSafeIRQHandler() then this implementation of
* vApplicationIRQHandler() will be called, save the FPU registers, and then
* call vApplicationFPUSafeIRQHandler().
*
* Therefore, if the application writer wants FPU registers to be saved on
* interrupt entry their IRQ handler must be called
* vApplicationFPUSafeIRQHandler(), and if the application writer does not want
* FPU registers to be saved on interrupt entry their IRQ handler must be
* called vApplicationIRQHandler().
*****************************************************************************/
/* Weak default vApplicationIRQHandler: saves the full VFP state (D0-D31 +
FPSCR) around a call to the application's vApplicationFPUSafeIRQHandler().
See the block comment above for when this default is used versus a
user-supplied vApplicationIRQHandler(). */
.align 4
.weak vApplicationIRQHandler
.type vApplicationIRQHandler, %function
vApplicationIRQHandler:
PUSH {LR}
FMRX R1, FPSCR
VPUSH {D0-D15}
VPUSH {D16-D31}
PUSH {R1}
LDR r1, vApplicationFPUSafeIRQHandlerConst
BLX r1
POP {R0}
VPOP {D16-D31}
VPOP {D0-D15}
VMSR FPSCR, R0
POP {PC} /* Return via the LR pushed on entry. */
/* Literal pool: 4-byte addresses of the C variables/functions referenced
by the macros and handlers above, loaded PC-relatively. */
ulICCIARConst: .word ulICCIAR
ulICCEOIRConst: .word ulICCEOIR
ulICCPMRConst: .word ulICCPMR
pxCurrentTCBConst: .word pxCurrentTCB
ulCriticalNestingConst: .word ulCriticalNesting
ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext
ulMaxAPIPriorityMaskConst: .word ulMaxAPIPriorityMask
vTaskSwitchContextConst: .word vTaskSwitchContext
vApplicationIRQHandlerConst: .word vApplicationIRQHandler
ulPortInterruptNestingConst: .word ulPortInterruptNesting
vApplicationFPUSafeIRQHandlerConst: .word vApplicationFPUSafeIRQHandler
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,010 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/ARM_CA9/port_asm_vectors.S | /******************************************************************************
*
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (C) 2009-2020 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A9 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Save lr when profiling is enabled
* 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
* 'xil_errata.h' for errata description
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xil_errata.h"
.org 0
.text
.arm
.global _boot
.global _freertos_vector_table
.global _vector_table
.global FIQInterrupt
.global DataAbortInterrupt
.global PrefetchAbortInterrupt
.global vPortInstallFreeRTOSVectorTable
.extern FreeRTOS_IRQ_Handler
.extern FreeRTOS_SWI_Handler
.section .vectors
/* AArch32 exception vector table: eight 4-byte entries in architectural
order. SVC and IRQ are dispatched through literal-pool loads (ldr pc)
so the FreeRTOS handlers can be located anywhere in the address space. */
_vector_table:
_freertos_vector_table:
B _boot /* Reset. */
B FreeRTOS_Undefined /* Undefined instruction. */
ldr pc, _swi /* SVC (software interrupt). */
B FreeRTOS_PrefetchAbortHandler /* Prefetch abort. */
B FreeRTOS_DataAbortHandler /* Data abort. */
NOP /* Placeholder for address exception vector*/
LDR PC, _irq /* IRQ. */
B FreeRTOS_FIQHandler /* FIQ. */
_irq: .word FreeRTOS_IRQ_Handler
_swi: .word FreeRTOS_SWI_Handler
/* FIQ stub: saves the AAPCS caller-saved registers, calls the C-level
FIQInterrupt() handler, restores and returns to the interrupted
instruction (FIQ return address is LR-4). */
.align 4
FreeRTOS_FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
FIQLoop:
blx FIQInterrupt /* FIQ vector */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/* Undefined-instruction handler: no recovery possible, spin forever. */
.align 4
FreeRTOS_Undefined: /* Undefined handler */
b .
/* Data abort: optional DSB works around ARM erratum 775420, then the C
handler DataAbortInterrupt() is called with caller-saved registers
stacked. NOTE(review): this returns to LR-4, i.e. the instruction AFTER
the aborting one; architecturally, retrying the aborting instruction
after a data abort requires LR-8 - confirm the intent (the C handler may
never return in practice). */
.align 4
FreeRTOS_DataAbortHandler: /* Data Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
blx DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/* Prefetch abort: optional DSB for ARM erratum 775420, then the C handler
PrefetchAbortInterrupt() is called. LR-4 is the architecturally correct
return address for retrying the aborting instruction fetch. */
.align 4
FreeRTOS_PrefetchAbortHandler: /* Prefetch Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
blx PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
.align 4
.type vPortInstallFreeRTOSVectorTable, %function
/*
 * void vPortInstallFreeRTOSVectorTable(void)
 * Programs VBAR (CP15 c12,c0,0) with the address of _freertos_vector_table
 * so exceptions are routed through the FreeRTOS handlers above. dsb/isb
 * ensure the new vector base is in effect before returning.
 */
vPortInstallFreeRTOSVectorTable:
/* Set VBAR to the vector table that contains the FreeRTOS handlers. */
ldr r0, =_freertos_vector_table
mcr p15, 0, r0, c12, c0, 0
dsb
isb
bx lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 7,431 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/ARM_CR5/portASM.S | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
.text
.arm
.set SYS_MODE, 0x1f
.set SVC_MODE, 0x13
.set IRQ_MODE, 0x12
/* Hardware registers. */
.extern ulICCIAR
.extern ulICCEOIR
.extern ulICCPMR
/* Variables and functions. */
.extern ulMaxAPIPriorityMask
.extern _freertos_vector_table
.extern pxCurrentTCB
.extern vTaskSwitchContext
.extern vApplicationIRQHandler
.extern ulPortInterruptNesting
.extern ulPortTaskHasFPUContext
.global FreeRTOS_IRQ_Handler
.global FreeRTOS_SWI_Handler
.global vPortRestoreTaskContext
/*
 * portSAVE_CONTEXT: push the outgoing task's complete context (return
 * state, R0-R12/R14, critical nesting count, optional FPU state and the
 * FPU-context flag) onto the task's own system-mode stack, then record
 * the resulting stack pointer in the first word of the task's TCB.
 */
.macro portSAVE_CONTEXT
/* Save the LR and SPSR onto the system mode stack before switching to
system mode to save the remaining system mode registers. */
SRSDB sp!, #SYS_MODE
CPS #SYS_MODE
PUSH {R0-R12, R14}
/* Push the critical nesting count. */
LDR R2, ulCriticalNestingConst
LDR R1, [R2]
PUSH {R1}
/* Does the task have a floating point context that needs saving? If
ulPortTaskHasFPUContext is 0 then no. */
LDR R2, ulPortTaskHasFPUContextConst
LDR R3, [R2]
CMP R3, #0
/* Save the floating point context, if any. */
FMRXNE R1, FPSCR
VPUSHNE {D0-D15}
PUSHNE {R1}
/* Save ulPortTaskHasFPUContext itself. */
PUSH {R3}
/* Save the stack pointer in the TCB. */
LDR R0, pxCurrentTCBConst
LDR R1, [R0]
STR SP, [R1]
.endm
; /**********************************************************************/
/*
 * portRESTORE_CONTEXT: mirror of portSAVE_CONTEXT. Loads the incoming
 * task's stack pointer from its TCB, pops the FPU-context flag, optional
 * FPU state, critical nesting count and general registers, reprograms the
 * GIC priority mask (ICCPMR) to match the restored nesting depth, and
 * finally performs the exception return (RFE pops PC and CPSR together).
 */
.macro portRESTORE_CONTEXT
/* Set the SP to point to the stack of the task being restored. */
LDR R0, pxCurrentTCBConst
LDR R1, [R0]
LDR SP, [R1]
/* Is there a floating point context to restore? If the restored
ulPortTaskHasFPUContext is zero then no. */
LDR R0, ulPortTaskHasFPUContextConst
POP {R1}
STR R1, [R0]
CMP R1, #0
/* Restore the floating point context, if any. */
POPNE {R0}
VPOPNE {D0-D15}
VMSRNE FPSCR, R0
/* Restore the critical section nesting depth. */
LDR R0, ulCriticalNestingConst
POP {R1}
STR R1, [R0]
/* Ensure the priority mask is correct for the critical nesting depth. */
LDR R2, ulICCPMRConst
LDR R2, [R2]
CMP R1, #0
MOVEQ R4, #255
LDRNE R4, ulMaxAPIPriorityMaskConst
LDRNE R4, [R4]
STR R4, [R2]
/* Restore all system mode registers other than the SP (which is already
being used). */
POP {R0-R12, R14}
/* Return to the task code, loading CPSR on the way. */
RFEIA sp!
.endm
/******************************************************************************
* SVC handler is used to start the scheduler.
*****************************************************************************/
.align 4
.type FreeRTOS_SWI_Handler, %function
/*
 * Reached via the SVC slot of the vector table (a task yield). Saves the
 * current task's context, lets the kernel pick the next task, then falls
 * through into the restore macro, which performs the exception return.
 */
FreeRTOS_SWI_Handler:
/* Save the context of the current task and select a new task to run. */
portSAVE_CONTEXT
LDR R0, vTaskSwitchContextConst
BLX R0
portRESTORE_CONTEXT
/******************************************************************************
* vPortRestoreTaskContext is used to start the scheduler.
*****************************************************************************/
.type vPortRestoreTaskContext, %function
/*
 * Starts the first task: switches to system mode and restores the context
 * of whichever task pxCurrentTCB points at. Never returns to the caller
 * (portRESTORE_CONTEXT ends with an exception return into the task).
 */
vPortRestoreTaskContext:
/* Switch to system mode. */
CPS #SYS_MODE
portRESTORE_CONTEXT
.align 4
.type FreeRTOS_IRQ_Handler, %function
/*
 * IRQ vector handler. Acknowledges the interrupt at the GIC (ICCIAR),
 * re-enters in SVC mode so nested IRQs are possible while the C handler
 * runs, calls vApplicationIRQHandler(interrupt ID), signals end-of-interrupt
 * (ICCEOIR), and — only at the outermost nesting level — performs a context
 * switch if the handler requested one via ulPortYieldRequired.
 */
FreeRTOS_IRQ_Handler:
/* Return to the interrupted instruction. */
SUB lr, lr, #4
/* Push the return address and SPSR. */
PUSH {lr}
MRS lr, SPSR
PUSH {lr}
/* Change to supervisor mode to allow reentry. */
CPS #SVC_MODE
/* Push used registers. */
PUSH {r0-r4, r12}
/* Increment nesting count. r3 holds the address of ulPortInterruptNesting
for future use. r1 holds the original ulPortInterruptNesting value for
future use. */
LDR r3, ulPortInterruptNestingConst
LDR r1, [r3]
ADD r4, r1, #1
STR r4, [r3]
/* Read value from the interrupt acknowledge register, which is stored in r0
for future parameter and interrupt clearing use. */
LDR r2, ulICCIARConst
LDR r2, [r2]
LDR r0, [r2]
/* Ensure bit 2 of the stack pointer is clear. r2 holds the bit 2 value for
future use. _RB_ Is this ever needed provided the start of the stack is
aligned on an 8-byte boundary? */
MOV r2, sp
AND r2, r2, #4
SUB sp, sp, r2
/* Call the interrupt handler. */
PUSH {r0-r4, lr}
LDR r1, vApplicationIRQHandlerConst
BLX r1
POP {r0-r4, lr}
ADD sp, sp, r2
/* Mask IRQs and synchronise before touching the GIC and shared state. */
CPSID i
DSB
ISB
/* Write the value read from ICCIAR to ICCEOIR. */
LDR r4, ulICCEOIRConst
LDR r4, [r4]
STR r0, [r4]
/* Restore the old nesting count. */
STR r1, [r3]
/* A context switch is never performed if the nesting count is not 0. */
CMP r1, #0
BNE exit_without_switch
/* Did the interrupt request a context switch? r1 holds the address of
ulPortYieldRequired and r0 the value of ulPortYieldRequired for future
use. */
LDR r1, =ulPortYieldRequired
LDR r0, [r1]
CMP r0, #0
BNE switch_before_exit
exit_without_switch:
/* No context switch. Restore used registers, LR_irq and SPSR before
returning. */
POP {r0-r4, r12}
CPS #IRQ_MODE
POP {LR}
MSR SPSR_cxsf, LR
POP {LR}
MOVS PC, LR
switch_before_exit:
/* A context switch is to be performed. Clear the context switch pending
flag. */
MOV r0, #0
STR r0, [r1]
/* Restore used registers, LR-irq and SPSR before saving the context
to the task stack. */
POP {r0-r4, r12}
CPS #IRQ_MODE
POP {LR}
MSR SPSR_cxsf, LR
POP {LR}
portSAVE_CONTEXT
/* Call the function that selects the new task to execute.
vTaskSwitchContext() if vTaskSwitchContext() uses LDRD or STRD
instructions, or 8 byte aligned stack allocated data. LR does not need
saving as a new LR will be loaded by portRESTORE_CONTEXT anyway. */
LDR R0, vTaskSwitchContextConst
BLX R0
/* Restore the context of, and branch to, the task selected to execute
next. */
portRESTORE_CONTEXT
/* Literal pool: addresses of the C variables/functions referenced by the
   handlers and macros above (kept within LDR literal range). */
ulICCIARConst: .word ulICCIAR
ulICCEOIRConst: .word ulICCEOIR
ulICCPMRConst: .word ulICCPMR
pxCurrentTCBConst: .word pxCurrentTCB
ulCriticalNestingConst: .word ulCriticalNesting
ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext
ulMaxAPIPriorityMaskConst: .word ulMaxAPIPriorityMask
vTaskSwitchContextConst: .word vTaskSwitchContext
vApplicationIRQHandlerConst: .word vApplicationIRQHandler
ulPortInterruptNestingConst: .word ulPortInterruptNesting
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,873 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/freertos10_xilinx_v1_6/src/Source/portable/GCC/ARM_CR5/port_asm_vectors.S | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* Copyright (C) 2014 - 2020 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl FreeRTOS_IRQ_Handler
.globl FreeRTOS_SWI_Handler
.globl DataAbortInterrupt
.globl PrefetchAbortInterrupt
.section .vectors,"a"
/*
 * ARM exception vector table. The IRQ and SVC slots dispatch through the
 * literal-pool words below to the FreeRTOS handlers; the remaining slots
 * use "ldr pc,=" so the linker places the target addresses in a literal
 * pool automatically.
 */
_vector_table:
ldr pc,=_boot /* 0x00 reset */
ldr pc,=Undefined /* 0x04 undefined instruction */
ldr pc, _swi /* 0x08 SVC/SWI -> FreeRTOS_SWI_Handler */
ldr pc,=PrefetchAbortHandler /* 0x0C prefetch abort */
ldr pc,=DataAbortHandler /* 0x10 data abort */
NOP /* Placeholder for address exception vector*/
ldr pc, _irq /* 0x18 IRQ -> FreeRTOS_IRQ_Handler */
ldr pc,=FIQHandler /* 0x1C FIQ */
_irq: .word FreeRTOS_IRQ_Handler
_swi: .word FreeRTOS_SWI_Handler
.text
/*
 * FIQ vector handler: save caller-saved registers, call the C-level
 * FIQInterrupt() routine, restore and return to the interrupted
 * instruction (lr - 4).
 */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
FIQLoop:
bl FIQInterrupt /* FIQ vector */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
/*
 * Undefined-instruction handler: restores the just-saved state and
 * restarts the application via _prestart (defined elsewhere in the BSP).
 * NOTE(review): the "movs pc, lr" after the unconditional branch is
 * unreachable — presumably a leftover return path; confirm.
 */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
b _prestart
movs pc, lr
/*
 * Data abort handler: call the C-level DataAbortInterrupt() routine and
 * return with lr - 8, re-executing the aborted instruction.
 */
DataAbortHandler: /* Data Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* adjust return */
/*
 * Prefetch abort handler: call the C-level PrefetchAbortInterrupt()
 * routine and return with lr - 4 (architectural prefetch-abort adjustment).
 */
PrefetchAbortHandler: /* Prefetch Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,044 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_disable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_disable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable L1 icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_disable_icache
.ent microblaze_disable_icache
.align 2
/*
 * void microblaze_disable_icache(void)
 * Clears the ICache Enable bit (0x20) in the MSR. When the processor has
 * the atomic MSR instructions, msrclr does this in the delay slot of the
 * return; otherwise read-modify-write the MSR via a scratch register.
 */
microblaze_disable_icache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrclr r0, 0x20 /* clear ICE in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Clear the icache enable bit
andi r8, r8, ~(0x20)
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif
.end microblaze_disable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,198 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_init_dcache_range.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_init_dcache_range (unsigned int cache_start, unsigned int cache_len)
*
* Invalidate dcache on the microblaze
*
* Parameters:
* 'cache_start' - address in the Dcache where invalidation begins
* 'cache_len' - length (in bytes) worth of Dcache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
/* MSR bit masks: data cache enable and interrupt enable. */
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_init_dcache_range
.ent microblaze_init_dcache_range
.align 2
/*
 * void microblaze_init_dcache_range(unsigned int cache_start, unsigned int cache_len)
 * In:  r5 = cache_start, r6 = cache_len (bytes)
 * Invalidates every dcache line in [cache_start, cache_start + cache_len],
 * with the dcache and interrupts disabled for the duration; the original
 * MSR is restored in the return delay slot. Clobbers r5, r6, r9, r10, r18.
 */
microblaze_init_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
mfs r9, rmsr /* Disable Dcache and interrupts before invalidating */
andi r10, r9, (~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE))
mts rmsr, r10
andi r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align to cache line */
add r6, r5, r6 /* Compute end */
andi r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align to cache line */
L_start:
wdc r5, r0 /* Invalidate the cache line */
cmpu r18, r5, r6 /* Are we at the end ? */
blei r18, L_done
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance one cache line (delay slot) */
L_done:
rtsd r15, 8 /* Return */
mts rmsr, r9 /* Restore the saved MSR (delay slot) */
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_init_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,428 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext()
*
*Invalidate the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
/* External (L2) cache line length in words, and total cacheable span. */
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
.text
.globl microblaze_invalidate_cache_ext
.ent microblaze_invalidate_cache_ext
.align 2
/*
 * void microblaze_invalidate_cache_ext(void)
 * Invalidates the entire L2 (external) cache by walking wdc.ext.clear
 * over every line offset, from the top of the cacheable region down to 0.
 * r5 = aligned cache base, r6 = descending line offset. Only compiled in
 * when the AXI interconnect (3) and the dcache are configured.
 */
microblaze_invalidate_cache_ext:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN))
ADDIK r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
Loop_start:
wdc.ext.clear r5, r6 /* invalidate the L2 line at base + offset */
#if defined (__arch64__ )
addlik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bgtid r6,Loop_start
addik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* step down one line (delay slot) */
#endif
#endif
rtsd r15, 8
nop
.end microblaze_invalidate_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,488 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_dcache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_dcache_range (unsigned int cacheaddr, unsigned int len)
*
* Invalidate a Dcache range
*
* Parameters:
* 'cacheaddr' - address in the Dcache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
/* MSR bit masks: data cache enable and interrupt enable. */
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
/* Writeback parameter absent => MicroBlaze older than v7.20; that scheme
   disables the dcache/interrupts around the loop instead of using wdc.clear. */
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#define MB_HAS_WRITEBACK_SET 0
#else
#define MB_HAS_WRITEBACK_SET XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#endif
.text
.globl microblaze_invalidate_dcache_range
.ent microblaze_invalidate_dcache_range
.align 2
/*
 * void microblaze_invalidate_dcache_range(unsigned int cacheaddr, unsigned int len)
 * In:  r5 = cacheaddr, r6 = len (bytes); returns immediately when len == 0.
 * Invalidates every dcache line overlapping [cacheaddr, cacheaddr + len).
 * Clobbers r5, r6, r18 (plus r9, r10 on pre-v7.20 cores).
 */
microblaze_invalidate_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
BEQI r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align start down to cache line */
#if MB_HAS_WRITEBACK_SET == 0 /* Use a different scheme for MB version < v7.20 or when caches are write-through */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wdc r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
#else
RSUBK r6, r5, r6
/* r6 will now contain (count of bytes - (4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) */
L_start:
wdc.clear r5, r6 /* Invalidate the cache line only if the address matches */
#if defined (__arch64__ )
addlik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
beagei r6, L_start
#else
bneid r6, L_start
addik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
#endif
#endif
L_done:
rtsd r15, 8
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8
nop
#endif
.end microblaze_invalidate_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,555 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_disable_interrupts.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* @file microblaze_disable_interrupts.S
*
* @addtogroup microblaze_pseudo_asm_macro
* @{
* <h2> microblaze_disable_interrupts.S </h2>
* - API to disable Interrupts: void microblaze_disable_interrupts(void)
*
* This API Disables interrupts on the MicroBlaze processor. It can be
* called when entering a critical section of code where a context switch is
* undesirable.
*
* <pre>
* File : microblaze_disable_interrupts.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable interrupts on the microblaze.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_disable_interrupts
.ent microblaze_disable_interrupts
.align 2
/*
 * void microblaze_disable_interrupts(void)
 * Clears the Interrupt Enable bit (0x2) in the MSR. With the atomic MSR
 * instructions, msrclr runs in the delay slot of the return; otherwise
 * the MSR is read, masked and written back via r12.
 */
microblaze_disable_interrupts:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrclr r0, 0x2 /* clear IE in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r12, rmsr
#Clear the interrupt enable bit
andi r12, r12, ~(0x2)
#Save the MSR register
mts rmsr, r12
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_disable_interrupts
/**
* @} End of "addtogroup microblaze_pseudo_asm_macro".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 5,784 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_scrub.S | /******************************************************************************
* Copyright (c) 2012 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_scrub ()
*
* Scrub LMB memory and all internal BRAMs (data cache, instruction cache,
* MMU UTLB and branch target cache) in MicroBlaze to reduce the possibility
* of an uncorrectable error when fault tolerance support is enabled.
*
* This routine assumes that the processor is in privileged mode when it is
* called, if the MMU is enabled.
*
* Call this routine regularly from a timer interrupt.
*
* Parameters:
* None
*
*
*******************************************************************************/
#include "xparameters.h"
/* Define if fault tolerance is used */
#ifdef XPAR_MICROBLAZE_FAULT_TOLERANT
#if XPAR_MICROBLAZE_FAULT_TOLERANT > 0
#define FAULT_TOLERANT
#endif
#endif
/* Define if LMB is used and can be scrubbed */
#if defined(XPAR_MICROBLAZE_D_LMB) && \
defined(XPAR_DLMB_CNTLR_BASEADDR) && \
defined(XPAR_DLMB_CNTLR_HIGHADDR)
#if XPAR_MICROBLAZE_D_LMB == 1
#define HAS_SCRUBBABLE_LMB
#define DLMB_MASK (XPAR_DLMB_CNTLR_HIGHADDR - XPAR_DLMB_CNTLR_BASEADDR)
#endif
#endif
/* Set default cache line lengths */
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 4
#endif
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 4
#endif
/* Define if internal Data Cache BRAMs are used */
#if defined(XPAR_MICROBLAZE_USE_DCACHE) && defined(XPAR_MICROBLAZE_DCACHE_BYTE_SIZE)
#if XPAR_MICROBLAZE_USE_DCACHE == 1 && XPAR_MICROBLAZE_DCACHE_BYTE_SIZE > 1024
#define HAS_BRAM_DCACHE
#define DCACHE_INCREMENT (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
#define DCACHE_MASK (XPAR_MICROBLAZE_DCACHE_BYTE_SIZE - 1)
#endif
#endif
/* Define if internal Instruction Cache BRAMs are used */
#if defined(XPAR_MICROBLAZE_USE_ICACHE) && defined(XPAR_MICROBLAZE_CACHE_BYTE_SIZE)
#if XPAR_MICROBLAZE_USE_ICACHE == 1 && XPAR_MICROBLAZE_CACHE_BYTE_SIZE > 1024
#define HAS_BRAM_ICACHE
#define ICACHE_INCREMENT (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)
#define ICACHE_MASK (XPAR_MICROBLAZE_CACHE_BYTE_SIZE - 1)
#endif
#endif
/* Define if internal MMU UTLB BRAM is used */
#ifdef XPAR_MICROBLAZE_USE_MMU
#if XPAR_MICROBLAZE_USE_MMU > 1
#define HAS_BRAM_MMU_UTLB
#endif
#endif
/* Define if internal BTC BRAM is used, and match BTC clear to a complete cache scrub */
#if defined(XPAR_MICROBLAZE_USE_BRANCH_TARGET_CACHE) && \
defined(XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE)
#if XPAR_MICROBLAZE_USE_BRANCH_TARGET_CACHE == 1
#if XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE == 0 || \
XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE > 4
#define HAS_BRAM_BRANCH_TARGET_CACHE
#ifdef HAS_BRAM_DCACHE
#define BTC_MASK_D (XPAR_MICROBLAZE_DCACHE_BYTE_SIZE/DCACHE_INCREMENT-1)
#else
#define BTC_MASK_D 256
#endif
#ifdef HAS_BRAM_ICACHE
#define BTC_MASK_I (XPAR_MICROBLAZE_CACHE_BYTE_SIZE/ICACHE_INCREMENT-1)
#else
#define BTC_MASK_I 256
#endif
#if BTC_MASK_D > BTC_MASK_I
#define BTC_MASK BTC_MASK_D
#else
#define BTC_MASK BTC_MASK_I
#endif
#endif
#endif
#endif
/* Define index offsets to persistent data used by this routine */
#define DLMB_INDEX_OFFSET 0
#define DCACHE_INDEX_OFFSET 4
#define ICACHE_INDEX_OFFSET 8
#define MMU_INDEX_OFFSET 12
#define BTC_CALL_COUNT_OFFSET 16
.text
.globl microblaze_scrub
.ent microblaze_scrub
.align 2
/*
 * void microblaze_scrub(void)
 * Incremental scrubber for fault-tolerant configurations: on each call it
 * touches ONE unit of each enabled structure (one LMB word read/written
 * back, one dcache line, one icache line, one UTLB entry) and advances the
 * corresponding index stored in L_persistent_data, wrapping via the masks
 * computed above. The BTC is flushed (bri 4) only when the call counter
 * wraps. Compiles to a bare return when fault tolerance is off.
 * Clobbers r5, r6, r7.
 */
microblaze_scrub:
#ifdef FAULT_TOLERANT
la r6, r0, L_persistent_data /* Get pointer to data */
#ifdef HAS_SCRUBBABLE_LMB
L_dlmb:
lwi r5, r6, DLMB_INDEX_OFFSET /* Get dlmb index */
lw r7, r5, r0 /* Load and store */
sw r7, r5, r0
addik r5, r5, 4 /* Increment and save dlmb index */
andi r5, r5, DLMB_MASK
swi r5, r6, DLMB_INDEX_OFFSET
#endif /* HAS_SCRUBBABLE_LMB */
#ifdef HAS_BRAM_DCACHE
L_dcache:
lwi r5, r6, DCACHE_INDEX_OFFSET /* Get dcache line index */
wdc r5, r0 /* Invalidate data cache line */
addik r5, r5, DCACHE_INCREMENT /* Increment and save entry index */
andi r5, r5, DCACHE_MASK
swi r5, r6, DCACHE_INDEX_OFFSET
#endif /* HAS_BRAM_DCACHE */
#ifdef HAS_BRAM_ICACHE
L_icache:
lwi r5, r6, ICACHE_INDEX_OFFSET /* Get icache line index */
wic r5, r0 /* Invalidate instruction cache line */
addik r5, r5, ICACHE_INCREMENT /* Increment and save entry index */
andi r5, r5, ICACHE_MASK
swi r5, r6, ICACHE_INDEX_OFFSET
#endif /* HAS_BRAM_ICACHE */
#ifdef HAS_BRAM_MMU_UTLB
L_mmu:
lwi r5, r6, MMU_INDEX_OFFSET /* Get UTLB entry index */
mts rtlbx, r5 /* Access next entry in UTLB */
mts rtlbhi, r0 /* Clear the UTLB entry */
addik r5, r5, 1 /* Increment and save entry index */
andi r5, r5, 0x3F
swi r5, r6, MMU_INDEX_OFFSET
#endif /* HAS_BRAM_MMU_UTLB */
#ifdef HAS_BRAM_BRANCH_TARGET_CACHE
L_btc:
lwi r5, r6, BTC_CALL_COUNT_OFFSET /* Get BTC call count offset */
addik r5, r5, 1 /* Increment and save call count */
andi r5, r5, BTC_MASK
swi r5, r6, BTC_CALL_COUNT_OFFSET
bnei r5, L_skip_btc_scrub /* Skip scrub unless count wrap */
bri 4 /* Clear branch target cache */
L_skip_btc_scrub:
#endif /* HAS_BRAM_BRANCH_TARGET_CACHE */
#endif /* FAULT_TOLERANT */
L_done:
rtsd r15, 8 /* Return */
nop
.end microblaze_scrub
/* Persistent data used by this routine */
.data
.align 2
L_persistent_data:
.long 0 /* dlmb index */
.long 0 /* dcache index */
.long 0 /* icache index */
.long 0 /* mmu entry index */
.long 0 /* btc call count */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,075 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_enable_dcache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable L1 dcache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_dcache
.ent microblaze_enable_dcache
.align 2
/*
 * void microblaze_enable_dcache(void)
 * Sets the DCache Enable bit (0x80) in the MSR. With the atomic MSR
 * instructions, msrset runs in the delay slot of the return; otherwise
 * the MSR is read, OR-ed and written back via r8.
 */
microblaze_enable_dcache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x80 /* set DCE in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the dcache enable bit
ori r8, r8, 0x80
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,540 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_dcache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_dcache_range (unsigned int cacheaddr, unsigned int len)
*
* Flush a L1 DCache range
*
* Parameters:
* 'cacheaddr' - address in the Dcache where the flush begins
* 'len ' - length (in bytes) worth of Dcache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
/* MSR bit masks: data cache enable and interrupt enable. */
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
/* Writeback parameter absent => MicroBlaze older than v7.20; that scheme
   disables the dcache/interrupts around the loop instead of using wdc.flush. */
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#define MB_HAS_WRITEBACK_SET 0
#else
#define MB_HAS_WRITEBACK_SET XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#endif
.text
.globl microblaze_flush_dcache_range
.ent microblaze_flush_dcache_range
.align 2
/*
 * void microblaze_flush_dcache_range(unsigned int cacheaddr, unsigned int len)
 * In:  r5 = cacheaddr, r6 = len (bytes); returns immediately when len == 0.
 * Flushes (writeback caches, wdc.flush) or invalidates (write-through
 * caches, wdc) every dcache line overlapping [cacheaddr, cacheaddr + len).
 * Clobbers r5, r6, r18 (plus r9, r10 on pre-v7.20 cores).
 */
microblaze_flush_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
beqi r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align start down to cache line */
#if MB_HAS_WRITEBACK_SET == 0 /* Use a different scheme for MB version < v7.20 or when caches are write-through */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wdc r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
#else
RSUBK r6, r5, r6
/* r6 will now contain (count of bytes - (4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) */
L_start:
wdc.flush r5, r6 /* Flush the cache line */
#if defined (__arch64__ )
addlik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
beagei r6, L_start
#else
bneid r6, L_start
addik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
#endif
#endif
L_done:
rtsd r15, 8
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_flush_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,200 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_init_icache_range.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_init_icache_range (unsigned int cache_start, unsigned int cache_len)
*
* Invalidate icache on the microblaze
*
* Parameters:
* 'cache_start' - address in the Icache where invalidation begins
* 'cache_len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
/* MSR bit masks: instruction cache enable and interrupt enable. */
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
.text
.globl microblaze_init_icache_range
.ent microblaze_init_icache_range
.align 2
/*
 * void microblaze_init_icache_range(unsigned int cache_start, unsigned int cache_len)
 * In:  r5 = cache_start, r6 = cache_len (bytes)
 * Invalidates every icache line in [cache_start, cache_start + cache_len],
 * with the icache and interrupts disabled for the duration; the original
 * MSR is restored in the return delay slot. Clobbers r5, r6, r9, r10, r18.
 */
microblaze_init_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
mfs r9, rmsr /* Disable Icache and interrupts before invalidating */
andi r10, r9, (~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE))
mts rmsr, r10
andi r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align to cache line */
add r6, r5, r6 /* Compute end */
andi r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align to cache line */
L_start:
wic r5, r0 /* Invalidate the cache line */
cmpu r18, r5, r6 /* Are we at the end ? */
blei r18, L_done
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Advance one cache line (delay slot) */
L_done:
rtsd r15, 8 /* Return */
mts rmsr, r9 /* Restore the saved MSR (delay slot) */
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_init_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,385 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_icache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache()
*
* Invalidate the entire ICache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_icache
.ent microblaze_invalidate_icache
.align 2
microblaze_invalidate_icache:
/*
 * void microblaze_invalidate_icache(void)
 * Invalidates the entire instruction cache line by line, starting at
 * XPAR_MICROBLAZE_ICACHE_BASEADDR for XPAR_MICROBLAZE_CACHE_BYTE_SIZE bytes.
 * When MB_VERSION_LT_v720 is defined (see above: the
 * XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK parameter is absent), the icache and
 * interrupts are disabled around the loop and the MSR is restored on return.
 * Clobbers: r5, r6, r18 (plus r9, r10 on the pre-v7.20 path).
 */
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Icache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_ICACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)) /* Align to cache line */
ADDIK r6, r5, XPAR_MICROBLAZE_CACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)) /* Compute end */
L_start:
wic r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start /* Branch back to the loop (64-bit build) */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,575 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_enable_interrupts.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* @file microblaze_enable_interrupts.S
*
* @addtogroup microblaze_pseudo_asm_macro
* @{
* <h2> microblaze_enable_interrupts.S </h2>
* - API to Enable Interrupts: void microblaze_enable_interrupts(void)
*
* This API Enables interrupts on the MicroBlaze processor. When the MicroBlaze
* processor starts up, interrupts are disabled. Interrupts must be explicitly
* turned on using this function.
*
* <pre>
*
* File : microblaze_enable_interrupts.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable interrupts on the microblaze.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_interrupts
.ent microblaze_enable_interrupts
.align 2
microblaze_enable_interrupts:
# void microblaze_enable_interrupts(void)
# Sets MSR[IE] (mask 0x2) to enable interrupts. Uses the atomic msrset
# instruction when the core is configured with MSR instructions; otherwise
# falls back to a read-modify-write of rmsr through the volatile register r12.
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
#Return, setting MSR[IE] in the branch delay slot
rtsd r15, 8
msrset r0, 0x2
nop
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r12, rmsr
#Set the interrupt enable bit
ori r12, r12, 0x2
#Save the MSR register
mts rmsr, r12
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_interrupts
/**
* @} End of "addtogroup microblaze_pseudo_asm_macro".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,550 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_update_dcache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* File : microblaze_update_dcache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update dcache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_update_dcache
.ent microblaze_update_dcache
.align 2
microblaze_update_dcache:
/*
 * microblaze_update_dcache(r5 = cache tag line, r6 = cache data,
 *                          r7 = lock/valid bits) -- see table in file header.
 * Only performs a real tag/data update when the dcache line length is one
 * word; for longer lines the sole valid use is line invalidation, so it
 * tail-calls microblaze_init_dcache_range with the line length as r6.
 * Clobbers: r5, r10, r18 (and r6 on the tail-call path).
 */
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#if XPAR_MICROBLAZE_DCACHE_LINE_LEN == 1
/* Read the MSR register into a temp register */
mfs r18, rmsr
/* Clear the dcache enable bit to disable the cache
Register r10,r18 are volatile registers and hence do not need to be saved before use */
andi r10, r18, ~128
mts rmsr, r10
/* Update the lock and valid info */
andi r5, r5, 0xfffffffc
or r5, r5, r7
/* Update dcache */
wdc r5, r6
/* Return; the entry MSR is restored in the delay slot */
rtsd r15, 8
mts rmsr, r18
#else
/* The only valid usage of this routine for larger cache line lengths is to invalidate a data cache line
So call microblaze_init_dcache_range appropriately to do the job */
brid microblaze_init_dcache_range
addik r6, r0, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
/* We don't have a return instruction here. This is tail call optimization :) */
#endif /* XPAR_MICROBLAZE_DCACHE_LINE_LEN == 1 */
#else
rtsd r15, 8
nop
#endif
.end microblaze_update_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,815 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_flush_dcache()
*
* Flush the L1 DCache
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_flush_dcache
.ent microblaze_flush_dcache
.align 2
microblaze_flush_dcache:
/*
 * void microblaze_flush_dcache(void)
 * Flushes the whole L1 data cache with wdc.flush, one line at a time, from
 * XPAR_MICROBLAZE_DCACHE_BASEADDR for XPAR_MICROBLAZE_DCACHE_BYTE_SIZE bytes.
 * Clobbers: r5, r6, r18. MSR is not touched here (unlike the range/init
 * variants). Compiles to a plain return when the dcache is absent or not
 * writable.
 */
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Align to cache line */
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc.flush r5, r0 /* Flush the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
BRI L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_flush_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,676 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_disable_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* File : microblaze_disable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable the L1 dcache on the microblaze.
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
.text
.globl microblaze_disable_dcache
.ent microblaze_disable_dcache
.align 2
microblaze_disable_dcache:
/*
 * void microblaze_disable_dcache(void)
 * Clears MSR bit 0x80 (dcache enable) to turn the L1 dcache off. When the
 * cache is configured as write-back, microblaze_flush_dcache is called first
 * so dirty lines reach memory before the cache is disabled; r15 is spilled
 * to a small stack frame across that call. Uses msrclr when the core has
 * MSR instructions, otherwise a read-modify-write of rmsr through r11.
 */
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
ADDIK r1, r1, -8 /* Frame to preserve the return address across the call */
SI r15, r1, 0
BRLID r15, microblaze_flush_dcache /* microblaze_flush_dcache does not use r1*/
nop
LI r15, r1, 0
ADDIK r1, r1, 8
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
rtsd r15, 8
msrclr r0, 0x80 /* Clear the dcache enable bit in the return delay slot */
#else /* XPAR_MICROBLAZE_USE_MSR_INSTR == 1 */
ADDIK r1, r1, -8
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
SI r15, r1, 0
BRLID r15, microblaze_flush_dcache
nop
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
mfs r11, rmsr
andi r11, r11, ~(0x80) /* Clear the dcache enable bit */
mts rmsr, r11
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
LI r15, r1, 0
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
ADDIK r1, r1, 8
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_disable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,555 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_update_icache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_update_icache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update icache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
.text
.globl microblaze_update_icache
.ent microblaze_update_icache
.align 2
microblaze_update_icache:
/*
 * microblaze_update_icache(r5 = cache tag line, r6 = cache data,
 *                          r7 = lock/valid bits) -- see table in file header.
 * Only performs a real tag/data update when the icache line length is one
 * word; for longer lines the sole valid use is line invalidation, so it
 * tail-calls microblaze_init_icache_range with the line length as r6.
 * Clobbers: r5, r10, r18 (and r6 on the tail-call path).
 */
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#if XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1
/* Read the MSR register into a temp register */
mfs r18, rmsr
/* Clear the icache enable bit to disable the cache
Register r10,r18 are volatile registers and hence do not need to be saved before use */
andi r10, r18, ~32
mts rmsr, r10
/* Update the lock and valid info */
andi r5, r5, 0xfffffffc
or r5, r5, r7
/* Update icache */
wic r5, r6
/* Return; the entry MSR is restored in the delay slot */
rtsd r15, 8
mts rmsr, r18
#else
/* The only valid usage of this routine for larger cache line lengths is to invalidate an instruction cache line
So call microblaze_init_icache_range appropriately to do the job */
brid microblaze_init_icache_range
addik r6, r0, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)
/* We don't have a return instruction here. This is tail call optimization :) */
#endif /* XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1 */
#else
rtsd r15, 8
nop
#endif
.end microblaze_update_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 26,672 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/hw_exception_handler.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* Microblaze HW Exception Handler
* - Non self-modifying exception handler for the following exception conditions
* - Unalignment
* - Instruction bus error
* - Data bus error
* - Illegal instruction opcode
* - Divide-by-zero
* - Stack protection violation
*******************************************************************************/
#include "microblaze_exceptions_g.h"
#include "xparameters.h"
#include "microblaze_instructions.h"
/* 64-bit definitions */
#if defined (__arch64__)
#define INTPTR_DATAITEM .quad
#define REGSIZE 8
#define DATAALIGN 4
#else
#define INTPTR_DATAITEM .long
#define REGSIZE 4
#define DATAALIGN 2
#endif /* 64-bit definitions */
/* Helpful Macros */
#define EX_HANDLER_STACK_SIZ (REGSIZE*21)
#define RMSR_OFFSET (20 * REGSIZE)
#define R17_OFFSET (0)
#define REG_OFFSET(regnum) (REGSIZE * (regnum + 1))
#define NUM_TO_REG(num) r ## num
#define R3_TO_STACK(regnum) SI r3, r1, REG_OFFSET(regnum)
#define R3_FROM_STACK(regnum) LI r3, r1, REG_OFFSET(regnum)
#define PUSH_REG(regnum) SI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
#define POP_REG(regnum) LI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
/* Uses r5 */
#define PUSH_MSR \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET;
#define PUSH_MSR_AND_ENABLE_EXC \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET; \
ori r5, r5, 0x100; /* Turn ON the EE bit*/ \
mts rmsr, r5;
/* Uses r5 */
#define POP_MSR \
lwi r5, r1, RMSR_OFFSET; \
mts rmsr, r5;
/* Push r17 */
#define PUSH_R17 SI r17, r1, R17_OFFSET
/* Pop r17 */
#define POP_R17 LI r17, r1, R17_OFFSET
#define LWREG_NOP \
BRI ex_handler_unhandled; \
nop;
#define SWREG_NOP \
BRI ex_handler_unhandled; \
nop;
/* r3 is the source */
#define R3_TO_LWREG_V(regnum) \
R3_TO_STACK (regnum); \
BRI ex_handler_done;
/* r3 is the source */
#define R3_TO_LWREG(regnum) \
OR NUM_TO_REG (regnum), r0, r3; \
BRI ex_handler_done;
/* r3 is the target */
#define SWREG_TO_R3_V(regnum) \
R3_FROM_STACK (regnum); \
BRI ex_sw_tail;
/* r3 is the target */
#define SWREG_TO_R3(regnum) \
OR r3, r0, NUM_TO_REG (regnum); \
BRI ex_sw_tail;
/* regnum is the source */
#define FP_EX_OPB_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_b; \
nop; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPB_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_b; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPA_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_a; \
nop; \
BRI handle_fp_ex_done;
/* regnum is the source */
#define FP_EX_OPA_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_a; \
BRI handle_fp_ex_done;
#define FP_EX_UNHANDLED \
BRI fp_ex_unhandled; \
nop; \
nop;
/* ESR masks */
#define ESR_EXC_MASK 0x0000001F
#define ESR_REG_MASK 0x000003E0
#define ESR_LW_SW_MASK 0x00000400
#define ESR_WORD_MASK 0x00000800
#define ESR_DS_MASK 0x00001000
#define ESR_LONG_MASK 0x00002000
/* Extern declarations */
.extern XNullHandler
#ifdef MICROBLAZE_EXCEPTIONS_ENABLED /* If exceptions are enabled in the processor */
/*
* hw_exception_handler - Handler for unaligned exceptions
* Exception handler notes:
* - Does not handle exceptions other than unaligned exceptions
* - Does not handle exceptions during load into r17, r1, r0.
* - Does not handle exceptions during store from r17 (cannot be done) and r1 (slows down common case)
*
* Relevant register structures
*
* EAR - |----|----|----|----|----|----|----|----|
* - < ## 32 or 64 bit faulting address ## >
*
* ESR - |----|----|----|----|----| - | - |-----|-----|
* - W S REG EXC
*
*
* STACK FRAME STRUCTURE
* ---------------------
*
* +-------------+ + 0
* | r17 |
* +-------------+ + 4 (32-bit) + 8 (64-bit)
* | Args for |
* | next func |
* +-------------+ + 8 (32-bit) + 16 (64-bit)
* | r1 |
* | . |
* | . |
* | . |
* | . |
* | r18 |
* +-------------+ + 80 (32-bit) + 160 (64-bit)
* | MSR |
* +-------------+ + 84 (32-bit) + 168 (64-bit)
* | . |
* | . |
*/
.global _hw_exception_handler
.section .text
.align 2
.ent _hw_exception_handler
.type _hw_exception_handler, @function
_hw_exception_handler:
/*
 * Common hardware exception entry point.
 * - Builds a frame of EX_HANDLER_STACK_SIZ bytes on the current stack (r1),
 *   saves the volatile registers it uses plus r17 and MSR, then decodes
 *   ESR[EXC] and dispatches:
 *     unaligned access -> fixed up in-line via the lw_table/sw_table jump
 *                         tables below, unless a user handler was registered
 *                         in MB_ExceptionVectorTable for that slot;
 *     FP exception     -> when FP decode is enabled, the excepting
 *                         instruction's operand registers are saved into
 *                         mb_fpex_op_a/mb_fpex_op_b before dispatch;
 *     everything else  -> dispatched through MB_ExceptionVectorTable
 *                         (handler pointer + argument pairs).
 * - If the exception hit a delay-slot instruction (ESR[DS]), the resume
 *   address is taken from rbtr into r17.
 * - Returns with "rted r17, 0". Stack protection violations halt in place
 *   without touching the stack.
 */
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* Immediately halt for stack protection violation exception without using any stack */
SI r3, r0, mb_sp_save_r3; /* Save temporary register */
mfs r3, resr; /* Extract ESR[DS] */
andi r3, r3, ESR_EXC_MASK;
xori r3, r3, 0x7; /* Check for stack protection violation */
BNEI r3, ex_handler_not_sp_violation;
ex_handler_sp_violation:
bri 0; /* Halt here if stack protection violation */
ex_handler_not_sp_violation:
LI r3, r0, mb_sp_save_r3; /* Restore temporary register */
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
ADDIK r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
PUSH_REG(3);
PUSH_REG(4);
PUSH_REG(5);
PUSH_REG(6);
#ifdef MICROBLAZE_CAN_HANDLE_EXCEPTIONS_IN_DELAY_SLOTS
mfs r6, resr;
andi r6, r6, ESR_DS_MASK; /* Exception in a delay slot? */
BEQI r6, ex_handler_no_ds;
mfs r17, rbtr; /* Yes: resume at the branch target from rbtr */
ex_handler_no_ds:
#endif
PUSH_R17;
PUSH_MSR_AND_ENABLE_EXC; /* Exceptions enabled here. This will allow nested exceptions */
mfs r3, resr;
andi r5, r3, ESR_EXC_MASK; /* Extract ESR[EXC] */
#ifndef NO_UNALIGNED_EXCEPTIONS
xori r6, r5, 1; /* 00001 = Unaligned Exception */
BNEI r6, handle_ex_regular;
ADDIK r4, r0, MB_ExceptionVectorTable; /* Check if user has registered an unaligned exception handler */
#if defined (__arch64__)
LI r4, r4, 16;
#else
LI r4, r4, 8;
#endif
ADDIK r6, r0, XNullHandler; /* If exceptionvectortable entry is still XNullHandler, use */
XOR r6, r4, r6; /* the default exception handler */
BEQI r6, handle_unaligned_ex ;
handle_ex_regular:
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
xori r6, r5, 6; /* 00110 = FPU exception */
BEQI r6, handle_fp_ex; /* Go and decode the FP exception */
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
handle_other_ex: /* Handle Other exceptions here */
ori r6, r0, 20;
cmp r6, r5, r6; /* >= 20 are exceptions we do not handle. */
BLEI r6, ex_handler_unhandled;
ori r6, r0, 7;
cmp r6, r5, r6; /* Convert MMU exception indices into an ordinal of 7 */
BGTI r6, handle_other_ex_tail;
ori r5, r0, 0x7;
handle_other_ex_tail:
PUSH_REG(7); /* Save other volatiles before we make procedure calls below */
PUSH_REG(8);
PUSH_REG(9);
PUSH_REG(10);
PUSH_REG(11);
PUSH_REG(12);
PUSH_REG(15);
PUSH_REG(18);
ADDIK r4, r0, MB_ExceptionVectorTable; /* Load the Exception vector table base address */
ADDK r7, r5, r5; /* Calculate exception vector offset = r5 * 8 (32-bit) */
ADDK r7, r7, r7;
ADDK r7, r7, r7;
#if defined (__arch64__)
ADDK r7, r7, r7; /* or r5 * 16 (64-bit) */
#endif
ADDK r7, r7, r4; /* Get pointer to exception vector */
LI r5, r7, REGSIZE; /* Load argument to exception handler from table */
LOAD r7, r7, r0; /* Load vector itself here */
brald r15, r7; /* Branch to handler */
nop;
POP_REG(7); /* Restore other volatiles */
POP_REG(8);
POP_REG(9);
POP_REG(10);
POP_REG(11);
POP_REG(12);
POP_REG(15);
POP_REG(18);
BRI ex_handler_done; /* Complete exception handling */
#ifndef NO_UNALIGNED_EXCEPTIONS
handle_unaligned_ex:
andi r6, r3, ESR_REG_MASK; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
srl r6, r6;
srl r6, r6;
sbi r6, r0, ex_reg_op; /* Store the register operand in a temporary location */
mfs r4, rear; /* r4 = faulting address from EAR */
andi r6, r3, ESR_LW_SW_MASK; /* Extract ESR[S] */
BNEI r6, ex_sw;
#if defined (__arch64__)
ex_ll:
andi r6, r3, ESR_LONG_MASK; /* Extract ESR[L] */
BEQI r6, ex_lw;
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a long, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lbui r5, r4, 2;
sbi r5, r0, ex_tmp_data_loc_2;
lbui r5, r4, 3;
sbi r5, r0, ex_tmp_data_loc_3;
lbui r5, r4, 4;
sbi r5, r0, ex_tmp_data_loc_4;
lbui r5, r4, 5;
sbi r5, r0, ex_tmp_data_loc_5;
lbui r5, r4, 6;
sbi r5, r0, ex_tmp_data_loc_6;
lbui r5, r4, 7;
sbi r5, r0, ex_tmp_data_loc_7;
lli r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
BRI ex_lw_tail;
#endif
ex_lw:
andi r6, r3, ESR_WORD_MASK; /* Extract ESR[W] */
BEQI r6, ex_lhw;
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a word, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lbui r5, r4, 2;
sbi r5, r0, ex_tmp_data_loc_2;
lbui r5, r4, 3;
sbi r5, r0, ex_tmp_data_loc_3;
lwi r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
BRI ex_lw_tail;
ex_lhw:
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a half-word, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lhui r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
ex_lw_tail:
lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
ADDIK r6, r0, lw_table; /* Form load_word jump table offset (lw_table + (8 * regnum)) */
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r6;
bra r5; /* Jump into lw_table entry for the destination register */
ex_lw_end: /* Exception handling of load word, ends */
ex_sw:
lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
ADDIK r6, r0, sw_table; /* Form store_word jump table offset (sw_table + (8 * regnum)) */
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r6;
bra r5; /* Jump into sw_table entry for the source register */
ex_sw_tail:
#if defined (__arch64__)
ex_sl:
mfs r6, resr;
andi r6, r6, ESR_LONG_MASK; /* Extract ESR[L] */
BEQI r6, ex_not_sl;
sli r3, r0, ex_tmp_data_loc_0;
lbui r3, r0, ex_tmp_data_loc_0; /* Store the long, byte-by-byte into destination address */
sbi r3, r4, 0;
lbui r3, r0, ex_tmp_data_loc_1;
sbi r3, r4, 1;
lbui r3, r0, ex_tmp_data_loc_2;
sbi r3, r4, 2;
lbui r3, r0, ex_tmp_data_loc_3;
sbi r3, r4, 3;
lbui r3, r0, ex_tmp_data_loc_4;
sbi r3, r4, 4;
lbui r3, r0, ex_tmp_data_loc_5;
sbi r3, r4, 5;
lbui r3, r0, ex_tmp_data_loc_6;
sbi r3, r4, 6;
lbui r3, r0, ex_tmp_data_loc_7;
sbi r3, r4, 7;
BRI ex_handler_done;
ex_not_sl:
#endif
mfs r6, resr;
andi r6, r6, ESR_WORD_MASK; /* Extract ESR[W] */
BEQI r6, ex_shw;
swi r3, r0, ex_tmp_data_loc_0;
lbui r3, r0, ex_tmp_data_loc_0; /* Store the word, byte-by-byte into destination address */
sbi r3, r4, 0;
lbui r3, r0, ex_tmp_data_loc_1;
sbi r3, r4, 1;
lbui r3, r0, ex_tmp_data_loc_2;
sbi r3, r4, 2;
lbui r3, r0, ex_tmp_data_loc_3;
sbi r3, r4, 3;
BRI ex_handler_done;
ex_shw:
swi r3, r0, ex_tmp_data_loc_0; /* Store the lower half-word, byte-by-byte into destination address */
#ifdef __LITTLE_ENDIAN__
lbui r3, r0, ex_tmp_data_loc_0;
#else
lbui r3, r0, ex_tmp_data_loc_2;
#endif
sbi r3, r4, 0;
#ifdef __LITTLE_ENDIAN__
lbui r3, r0, ex_tmp_data_loc_1;
#else
lbui r3, r0, ex_tmp_data_loc_3;
#endif
sbi r3, r4, 1;
ex_sw_end: /* Exception handling of store word, ends. */
BRI ex_handler_done;
#endif /* !NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
handle_fp_ex:
ADDIK r3, r17, -4; /* r17 contains (addr of exception causing FP instruction + 4) */
lw r4, r0, r3; /* We might find ourselves in a spot here. Unguaranteed load */
handle_fp_ex_opb:
ADDIK r6, r0, fp_table_opb; /* Decode opB and store its value in mb_fpex_op_b */
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
andi r3, r4, 0x1F; /* r3 = opB register number from the instruction word */
ADDK r3, r3, r3; /* Calculate (fp_table_opb + (regno * 12)) in r5 */
ADDK r3, r3, r3;
ADDK r5, r3, r3;
ADDK r5, r5, r3;
ADDK r5, r5, r6;
bra r5;
handle_fp_ex_opa:
ADDIK r6, r0, fp_table_opa; /* Decode opA and store its value in mb_fpex_op_a */
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
andi r3, r4, 0x1F; /* r3 = opA register number from the instruction word */
ADDK r3, r3, r3; /* Calculate (fp_table_opb + (regno * 12)) in r5 */
ADDK r3, r3, r3;
ADDK r5, r3, r3;
ADDK r5, r5, r3;
ADDK r5, r5, r6;
bra r5;
handle_fp_ex_done:
ori r5, r0, 6; /* Set exception number back to 6 */
BRI handle_other_ex_tail;
fp_ex_unhandled:
bri 0; /* Unsupported FP operand register: trap in place */
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
ex_handler_done:
POP_R17;
POP_MSR;
POP_REG(3);
POP_REG(4);
POP_REG(5);
POP_REG(6);
ADDIK r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
rted r17, 0 /* Return from exception to the address in r17 */
nop
ex_handler_unhandled:
bri 0 /* UNHANDLED. TRAP HERE */
.end _hw_exception_handler
#ifndef NO_UNALIGNED_EXCEPTIONS
/*
* hw_exception_handler Jump Table
* - Contains code snippets for each register that caused the unaligned exception.
* - Hence exception handler is NOT self-modifying
* - Separate table for load exceptions and store exceptions.
* - Each table is of size: (8 * 32) = 256 bytes
*/
.section .text
.align 4
lw_table:
/* Unaligned-load fixup table: one fixed-size entry per destination register
   number (selected by computed branch from ex_lw_tail). Each entry moves the
   reassembled value from r3 into rN -- or into rN's saved stack slot for the
   registers the handler itself uses (r3-r6, the _V variants) -- then jumps to
   ex_handler_done. r1 and r17 are deliberately unsupported (LWREG_NOP traps). */
lw_r0: R3_TO_LWREG (0);
lw_r1: LWREG_NOP;
lw_r2: R3_TO_LWREG (2);
lw_r3: R3_TO_LWREG_V (3);
lw_r4: R3_TO_LWREG_V (4);
lw_r5: R3_TO_LWREG_V (5);
lw_r6: R3_TO_LWREG_V (6);
lw_r7: R3_TO_LWREG (7);
lw_r8: R3_TO_LWREG (8);
lw_r9: R3_TO_LWREG (9);
lw_r10: R3_TO_LWREG (10);
lw_r11: R3_TO_LWREG (11);
lw_r12: R3_TO_LWREG (12);
lw_r13: R3_TO_LWREG (13);
lw_r14: R3_TO_LWREG (14);
lw_r15: R3_TO_LWREG (15);
lw_r16: R3_TO_LWREG (16);
lw_r17: LWREG_NOP;
lw_r18: R3_TO_LWREG (18);
lw_r19: R3_TO_LWREG (19);
lw_r20: R3_TO_LWREG (20);
lw_r21: R3_TO_LWREG (21);
lw_r22: R3_TO_LWREG (22);
lw_r23: R3_TO_LWREG (23);
lw_r24: R3_TO_LWREG (24);
lw_r25: R3_TO_LWREG (25);
lw_r26: R3_TO_LWREG (26);
lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
lw_r31: R3_TO_LWREG (31);
sw_table:
/* Unaligned-store fixup table: mirror of lw_table, selected from ex_sw.
   Each entry copies the source register rN (or its saved stack copy for
   r3-r6) into r3 and jumps to ex_sw_tail, which writes the bytes out. */
sw_r0: SWREG_TO_R3 (0);
sw_r1: SWREG_NOP;
sw_r2: SWREG_TO_R3 (2);
sw_r3: SWREG_TO_R3_V (3);
sw_r4: SWREG_TO_R3_V (4);
sw_r5: SWREG_TO_R3_V (5);
sw_r6: SWREG_TO_R3_V (6);
sw_r7: SWREG_TO_R3 (7);
sw_r8: SWREG_TO_R3 (8);
sw_r9: SWREG_TO_R3 (9);
sw_r10: SWREG_TO_R3 (10);
sw_r11: SWREG_TO_R3 (11);
sw_r12: SWREG_TO_R3 (12);
sw_r13: SWREG_TO_R3 (13);
sw_r14: SWREG_TO_R3 (14);
sw_r15: SWREG_TO_R3 (15);
sw_r16: SWREG_TO_R3 (16);
sw_r17: SWREG_NOP;
sw_r18: SWREG_TO_R3 (18);
sw_r19: SWREG_TO_R3 (19);
sw_r20: SWREG_TO_R3 (20);
sw_r21: SWREG_TO_R3 (21);
sw_r22: SWREG_TO_R3 (22);
sw_r23: SWREG_TO_R3 (23);
sw_r24: SWREG_TO_R3 (24);
sw_r25: SWREG_TO_R3 (25);
sw_r26: SWREG_TO_R3 (26);
sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
sw_r31: SWREG_TO_R3 (31);
/* Temporary data structures used in the handler */
.section .data
.align DATAALIGN
ex_tmp_data_loc_0:
/* Scratch bytes used by the unaligned-access fixup code to (re)assemble a
   half-word/word (and, on 64-bit builds, a long) one byte at a time. */
.byte 0
ex_tmp_data_loc_1:
.byte 0
ex_tmp_data_loc_2:
.byte 0
ex_tmp_data_loc_3:
.byte 0
#if defined (__arch64__)
ex_tmp_data_loc_4:
.byte 0
ex_tmp_data_loc_5:
.byte 0
ex_tmp_data_loc_6:
.byte 0
ex_tmp_data_loc_7:
.byte 0
#endif
ex_reg_op:
/* Register number (0-31) extracted from ESR for the faulting load/store. */
.byte 0
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
/*
* FP exception decode jump table.
* - Contains code snippets for each register that could have been a source operand for an excepting FP instruction
* - Hence exception handler is NOT self-modifying
* - Separate table for opA and opB
* - Each table is of size: (12 * 32) = 384 bytes
*/
.section .text
.align 4
fp_table_opa:
/* FP-exception operand-A capture table: one fixed-size entry per register
   number decoded from the excepting instruction. Each entry stores rN (or
   its saved stack copy for r3-r6) into mb_fpex_op_a and jumps to
   handle_fp_ex_done. Registers that cannot legitimately be FP operands
   (r1, r14-r17) trap via FP_EX_UNHANDLED. */
opa_r0: FP_EX_OPA_SAVE (0);
opa_r1: FP_EX_UNHANDLED;
opa_r2: FP_EX_OPA_SAVE (2);
opa_r3: FP_EX_OPA_SAVE_V (3);
opa_r4: FP_EX_OPA_SAVE_V (4);
opa_r5: FP_EX_OPA_SAVE_V (5);
opa_r6: FP_EX_OPA_SAVE_V (6);
opa_r7: FP_EX_OPA_SAVE (7);
opa_r8: FP_EX_OPA_SAVE (8);
opa_r9: FP_EX_OPA_SAVE (9);
opa_r10: FP_EX_OPA_SAVE (10);
opa_r11: FP_EX_OPA_SAVE (11);
opa_r12: FP_EX_OPA_SAVE (12);
opa_r13: FP_EX_OPA_SAVE (13);
opa_r14: FP_EX_UNHANDLED;
opa_r15: FP_EX_UNHANDLED;
opa_r16: FP_EX_UNHANDLED;
opa_r17: FP_EX_UNHANDLED;
opa_r18: FP_EX_OPA_SAVE (18);
opa_r19: FP_EX_OPA_SAVE (19);
opa_r20: FP_EX_OPA_SAVE (20);
opa_r21: FP_EX_OPA_SAVE (21);
opa_r22: FP_EX_OPA_SAVE (22);
opa_r23: FP_EX_OPA_SAVE (23);
opa_r24: FP_EX_OPA_SAVE (24);
opa_r25: FP_EX_OPA_SAVE (25);
opa_r26: FP_EX_OPA_SAVE (26);
opa_r27: FP_EX_OPA_SAVE (27);
opa_r28: FP_EX_OPA_SAVE (28);
opa_r29: FP_EX_OPA_SAVE (29);
opa_r30: FP_EX_OPA_SAVE (30);
opa_r31: FP_EX_OPA_SAVE (31);
fp_table_opb:
/* FP-exception operand-B capture table: same layout as fp_table_opa, but
   each entry stores the register into mb_fpex_op_b and then continues at
   handle_fp_ex_opa to decode the other operand. */
opb_r0: FP_EX_OPB_SAVE (0);
opb_r1: FP_EX_UNHANDLED;
opb_r2: FP_EX_OPB_SAVE (2);
opb_r3: FP_EX_OPB_SAVE_V (3);
opb_r4: FP_EX_OPB_SAVE_V (4);
opb_r5: FP_EX_OPB_SAVE_V (5);
opb_r6: FP_EX_OPB_SAVE_V (6);
opb_r7: FP_EX_OPB_SAVE (7);
opb_r8: FP_EX_OPB_SAVE (8);
opb_r9: FP_EX_OPB_SAVE (9);
opb_r10: FP_EX_OPB_SAVE (10);
opb_r11: FP_EX_OPB_SAVE (11);
opb_r12: FP_EX_OPB_SAVE (12);
opb_r13: FP_EX_OPB_SAVE (13);
opb_r14: FP_EX_UNHANDLED;
opb_r15: FP_EX_UNHANDLED;
opb_r16: FP_EX_UNHANDLED;
opb_r17: FP_EX_UNHANDLED;
opb_r18: FP_EX_OPB_SAVE (18);
opb_r19: FP_EX_OPB_SAVE (19);
opb_r20: FP_EX_OPB_SAVE (20);
opb_r21: FP_EX_OPB_SAVE (21);
opb_r22: FP_EX_OPB_SAVE (22);
opb_r23: FP_EX_OPB_SAVE (23);
opb_r24: FP_EX_OPB_SAVE (24);
opb_r25: FP_EX_OPB_SAVE (25);
opb_r26: FP_EX_OPB_SAVE (26);
opb_r27: FP_EX_OPB_SAVE (27);
opb_r28: FP_EX_OPB_SAVE (28);
opb_r29: FP_EX_OPB_SAVE (29);
opb_r30: FP_EX_OPB_SAVE (30);
opb_r31: FP_EX_OPB_SAVE (31);
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(MICROBLAZE_FP_EXCEPTION_ENABLED) && defined(MICROBLAZE_FP_EXCEPTION_DECODE)
/* This is where we store the opA and opB of the last excepting FP instruction */
.section .data
.align DATAALIGN
.global mb_fpex_op_a
.global mb_fpex_op_b
mb_fpex_op_a:
/* Value of operand A of the last excepting FP instruction (written by the
   fp_table_opa entries above). */
INTPTR_DATAITEM 0
mb_fpex_op_b:
/* Value of operand B of the last excepting FP instruction (written by the
   fp_table_opb entries above). */
INTPTR_DATAITEM 0
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* This is where we store the register used to check which exception occurred */
.section .data
.align DATAALIGN
mb_sp_save_r3:
/* Spill slot for r3 while the handler checks for a stack protection
   violation before it can safely use the stack. */
INTPTR_DATAITEM 0
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
/* The exception vector table */
.section .data
.align DATAALIGN
.global MB_ExceptionVectorTable
/* Eight two-word entries, indexed by hardware exception cause: each entry is
 * a handler address followed by a data word (presumably the callback argument
 * registered with the handler — verify against the C exception API). All
 * handlers start out as the no-op XNullHandler. */
MB_ExceptionVectorTable:
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 0 /* -- FSL Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 1 /* -- Unaligned Access Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 2 /* -- Illegal Opcode Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 3 /* -- Instruction Bus Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 4 /* -- Data Bus Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 5 /* -- Div-by-0 Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 6 /* -- FPU Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 7 /* -- MMU Exceptions -- */
#else /* Dummy exception handler, in case exceptions are not present in the processor */
.global _hw_exception_handler
.section .text
.align 2
.ent _hw_exception_handler
/* Fallback stub: `bri 0` branches to itself (relative offset 0), spinning
 * forever. A core configured without exceptions should never get here. */
_hw_exception_handler:
bri 0;
.end _hw_exception_handler
#endif /* MICROBLAZE_EXCEPTIONS_ENABLED */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,879 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_icache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache_range(unsigned int cacheaddr, unsigned int len)
*
* Invalidate an ICache range
*
* Parameters:
* 'cacheaddr' - address in the Icache where invalidation begins
* 'len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_icache_range
.ent microblaze_invalidate_icache_range
.align 2
/* void microblaze_invalidate_icache_range(UINTPTR cacheaddr, u32 len)
 * In (MicroBlaze ABI): r5 = start address, r6 = length in bytes.
 * Invalidates every ICache line that overlaps [r5, r5+r6).
 * Clobbers: r18 (plus r9/r10 on cores older than v7.20).
 * Compiles to a plain return if the ICache is absent or not writable. */
microblaze_invalidate_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Icache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR; restored in the return delay slot below */
andi r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
BEQI r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1 /* Make the end inclusive (address of last byte) */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align start down to cache line */
L_start:
CMPU r18, r5, r6 /* Are we at the end? (loop while r5 <= r6, end inclusive) */
BLTI r18, L_done
wic r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Advance to the next cache line */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Advance to the next cache line (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,482 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Flush a L2 Cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where the flush begins
* 'len ' - length (in bytes) worth of L2 cache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_flush_cache_ext_range
.ent microblaze_flush_cache_ext_range
.align 2
/* void microblaze_flush_cache_ext_range(UINTPTR cacheaddr, u32 len)
 * In: r5 = start address, r6 = length in bytes.
 * Flushes the L2 (external) cache lines covering [r5, r5+r6).
 * Only built when the core uses the ACE interconnect (==3) with a DCache;
 * otherwise it is a plain return. */
microblaze_flush_cache_ext_range:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done /* Nothing to do for a zero-length range */
ADDIK r6, r6, -1 /* Make the end inclusive */
ADD r6, r5, r6
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Align end down to ext cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Align start down to ext cache line */
RSUBK r6, r5, r6 /* r6 = byte offset of last line from r5 */
Loop_start:
wdc.ext.flush r5, r6 /* Flush the L2 line at address r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bneid r6, Loop_start /* Loop until offset reaches zero */
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Step back one line (delay slot) */
#endif
Loop_done:
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_flush_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,396 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_flush_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext()
*
* Flush the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
.text
.globl microblaze_flush_cache_ext
.ent microblaze_flush_cache_ext
.align 2
/* void microblaze_flush_cache_ext(void)
 * Flushes the entire L2 (external) cache by walking every ext-cache line
 * in the cacheable region, from the highest offset down to zero.
 * Only built for the ACE interconnect (==3) with a DCache; otherwise a
 * plain return. Clobbers r5, r6. */
microblaze_flush_cache_ext:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* r5 = aligned region base */
ADDIK r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* r6 = offset of last line */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
Loop_start:
wdc.ext.flush r5, r6 /* Flush the L2 line at address r5 + r6 */
#if defined (__arch64__)
addlik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bgtid r6,Loop_start /* Loop while offset is still positive */
addik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Step back one line (delay slot) */
#endif
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_flush_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,365 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_dcache()
*
* Invalidate the entire L1 DCache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_dcache
.ent microblaze_invalidate_dcache
.align 2
/* void microblaze_invalidate_dcache(void)
 * Invalidates the entire L1 DCache by issuing `wdc` for every line of the
 * cacheable region. Note: plain `wdc` discards dirty data without writing
 * it back. Clobbers r5, r6, r18 (plus r9/r10 on cores older than v7.20).
 * Plain return when the DCache is absent or not writable. */
microblaze_invalidate_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR; restored in the return delay slot */
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* r5 = aligned cache base */
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance to the next cache line */
breai L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance to the next cache line (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,070 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_enable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_icache
.ent microblaze_enable_icache
.align 2
/* void microblaze_enable_icache(void)
 * Sets the ICache-enable bit (0x20) in the MSR. Uses the atomic `msrset`
 * instruction when available (executed in the return delay slot);
 * otherwise falls back to a read-modify-write of rmsr via r8. */
microblaze_enable_icache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x20 /* Set ICache enable bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the instruction cache enable bit (0x20)
ori r8, r8, 0x20
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,519 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze_invalidate_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Invalidate an L2 cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_invalidate_cache_ext_range
.ent microblaze_invalidate_cache_ext_range
.align 2
/* void microblaze_invalidate_cache_ext_range(UINTPTR cacheaddr, u32 len)
 * In: r5 = start address, r6 = length in bytes.
 * Invalidates (without write-back) the L2 cache lines covering [r5, r5+r6).
 * Only built for the ACE interconnect (==3) with a DCache; otherwise a
 * plain return. */
microblaze_invalidate_cache_ext_range:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done /* Nothing to do for a zero-length range */
ADD r6, r5, r6
ADDIK r6, r6, -1 /* Make the end inclusive */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Align end down to ext cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Align start down to ext cache line */
RSUBK r6, r5, r6 /* r6 = byte offset of last line from r5 */
Loop_start:
wdc.ext.clear r5, r6 /* Invalidate the L2 line at address r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bneid r6, Loop_start /* Loop until offset reaches zero */
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Step back one line (delay slot) */
#endif
Loop_done:
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_invalidate_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,044 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_disable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_disable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable L1 icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_disable_icache
.ent microblaze_disable_icache
.align 2
/* void microblaze_disable_icache(void)
 * Clears the ICache-enable bit (0x20) in the MSR. Uses the atomic `msrclr`
 * instruction when available (executed in the return delay slot);
 * otherwise falls back to a read-modify-write of rmsr via r8. */
microblaze_disable_icache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrclr r0, 0x20 /* Clear ICache enable bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Clear the icache enable bit
andi r8, r8, ~(0x20)
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif
.end microblaze_disable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,198 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_init_dcache_range.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_init_dcache_range (unsigned int cache_start, unsigned int cache_len)
*
* Invalidate dcache on the microblaze
*
* Parameters:
* 'cache_start' - address in the Dcache where invalidation begins
* 'cache_len' - length (in bytes) worth of Dcache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_init_dcache_range
.ent microblaze_init_dcache_range
.align 2
/* void microblaze_init_dcache_range(u32 cache_start, u32 cache_len)
 * In: r5 = start address, r6 = length in bytes.
 * Invalidates the DCache lines covering the range, with the DCache and
 * interrupts disabled for the duration; the saved MSR (r9) is restored in
 * the return delay slot. Note: unlike the *_range variants elsewhere, the
 * end address here is exclusive and a zero length is not special-cased.
 * Clobbers r9, r10, r18. Plain return when the DCache is not writable. */
microblaze_init_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
mfs r9, rmsr /* Disable Dcache and interrupts before invalidating */
andi r10, r9, (~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE))
mts rmsr, r10
andi r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align to cache line */
add r6, r5, r6 /* Compute end */
andi r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align to cache line */
L_start:
wdc r5, r0 /* Invalidate the cache line */
cmpu r18, r5, r6 /* Are we at the end ? */
blei r18, L_done
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance to the next cache line (delay slot) */
L_done:
rtsd r15, 8 /* Return */
mts rmsr, r9 /* Restore saved MSR (delay slot) */
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_init_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,428 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext()
*
*Invalidate the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
.text
.globl microblaze_invalidate_cache_ext
.ent microblaze_invalidate_cache_ext
.align 2
/* void microblaze_invalidate_cache_ext(void)
 * Invalidates the entire L2 (external) cache by walking every ext-cache
 * line in the cacheable region from the highest offset down to zero.
 * Only built for the ACE interconnect (==3) with a DCache; otherwise a
 * plain return. Clobbers r5, r6. */
microblaze_invalidate_cache_ext:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)) /* r5 = aligned region base */
ADDIK r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* r6 = offset of last line */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
Loop_start:
wdc.ext.clear r5, r6 /* Invalidate the L2 line at address r5 + r6 */
#if defined (__arch64__ )
addlik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bgtid r6,Loop_start /* Loop while offset is still positive */
addik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN) /* Step back one line (delay slot) */
#endif
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_invalidate_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,488 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_dcache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_dcache_range (unsigned int cacheaddr, unsigned int len)
*
* Invalidate a Dcache range
*
* Parameters:
* 'cacheaddr' - address in the Dcache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#define MB_HAS_WRITEBACK_SET 0
#else
#define MB_HAS_WRITEBACK_SET XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#endif
.text
.globl microblaze_invalidate_dcache_range
.ent microblaze_invalidate_dcache_range
.align 2
/* void microblaze_invalidate_dcache_range(UINTPTR cacheaddr, u32 len)
 * In: r5 = start address, r6 = length in bytes.
 * Invalidates the DCache lines covering [r5, r5+r6). Two schemes:
 *  - write-through / pre-v7.20 cores: address-walk with plain `wdc`
 *    (caches and interrupts disabled around the walk on old cores);
 *  - write-back cores: offset-countdown with `wdc.clear`, which only
 *    invalidates a line if its tag matches the given address.
 * Clobbers r18 (plus r9/r10 on cores older than v7.20). */
microblaze_invalidate_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr /* r9 = saved MSR; restored in the return delay slot */
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
BEQI r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1 /* Make the end inclusive */
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align start down to cache line */
#if MB_HAS_WRITEBACK_SET == 0 /* Use a different scheme for MB version < v7.20 or when caches are write-through */
L_start:
CMPU r18, r5, r6 /* Are we at the end? (loop while r5 <= r6) */
BLTI r18, L_done
wdc r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance to the next cache line */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Advance to the next cache line (delay slot) */
#endif
#else
RSUBK r6, r5, r6
/* r6 will now contain (count of bytes - (4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) */
L_start:
wdc.clear r5, r6 /* Invalidate the cache line only if the address matches */
#if defined (__arch64__ )
addlik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
beagei r6, L_start
#else
bneid r6, L_start /* Loop until offset reaches zero */
addik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Step back one line (delay slot) */
#endif
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,555 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_disable_interrupts.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* @file microblaze_disable_interrupts.S
*
* @addtogroup microblaze_pseudo_asm_macro
* @{
* <h2> microblaze_disable_interrupts.S </h2>
* - API to disable Interrupts: void microblaze_disable_interrupts(void)
*
* This API Disables interrupts on the MicroBlaze processor. It can be
* called when entering a critical section of code where a context switch is
* undesirable.
*
* <pre>
* File : microblaze_disable_interrupts.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable interrupts on the microblaze.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_disable_interrupts
.ent microblaze_disable_interrupts
.align 2
/* void microblaze_disable_interrupts(void)
 * Clears the interrupt-enable bit (0x2) in the MSR. Uses the atomic
 * `msrclr` instruction when available (executed in the return delay slot);
 * otherwise falls back to a read-modify-write of rmsr via r12. */
microblaze_disable_interrupts:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrclr r0, 0x2 /* Clear IE bit in the return delay slot */
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r12, rmsr
#Clear the interrupt enable bit
andi r12, r12, ~(0x2)
#Save the MSR register
mts rmsr, r12
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_disable_interrupts
/**
* @} End of "addtogroup microblaze_pseudo_asm_macro".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 5,784 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_scrub.S | /******************************************************************************
* Copyright (c) 2012 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_scrub ()
*
* Scrub LMB memory and all internal BRAMs (data cache, instruction cache,
* MMU UTLB and branch target cache) in MicroBlaze to reduce the possibility
* of an uncorrectable error when fault tolerance support is enabled.
*
* This routine assumes that the processor is in privileged mode when it is
* called, if the MMU is enabled.
*
* Call this routine regularly from a timer interrupt.
*
* Parameters:
* None
*
*
*******************************************************************************/
#include "xparameters.h"
/* Define if fault tolerance is used */
#ifdef XPAR_MICROBLAZE_FAULT_TOLERANT
#if XPAR_MICROBLAZE_FAULT_TOLERANT > 0
#define FAULT_TOLERANT
#endif
#endif
/* Define if LMB is used and can be scrubbed */
#if defined(XPAR_MICROBLAZE_D_LMB) && \
defined(XPAR_DLMB_CNTLR_BASEADDR) && \
defined(XPAR_DLMB_CNTLR_HIGHADDR)
#if XPAR_MICROBLAZE_D_LMB == 1
#define HAS_SCRUBBABLE_LMB
#define DLMB_MASK (XPAR_DLMB_CNTLR_HIGHADDR - XPAR_DLMB_CNTLR_BASEADDR)
#endif
#endif
/* Set default cache line lengths */
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 4
#endif
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 4
#endif
/* Define if internal Data Cache BRAMs are used */
#if defined(XPAR_MICROBLAZE_USE_DCACHE) && defined(XPAR_MICROBLAZE_DCACHE_BYTE_SIZE)
#if XPAR_MICROBLAZE_USE_DCACHE == 1 && XPAR_MICROBLAZE_DCACHE_BYTE_SIZE > 1024
#define HAS_BRAM_DCACHE
#define DCACHE_INCREMENT (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
#define DCACHE_MASK (XPAR_MICROBLAZE_DCACHE_BYTE_SIZE - 1)
#endif
#endif
/* Define if internal Instruction Cache BRAMs are used */
#if defined(XPAR_MICROBLAZE_USE_ICACHE) && defined(XPAR_MICROBLAZE_CACHE_BYTE_SIZE)
#if XPAR_MICROBLAZE_USE_ICACHE == 1 && XPAR_MICROBLAZE_CACHE_BYTE_SIZE > 1024
#define HAS_BRAM_ICACHE
#define ICACHE_INCREMENT (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)
#define ICACHE_MASK (XPAR_MICROBLAZE_CACHE_BYTE_SIZE - 1)
#endif
#endif
/* Define if internal MMU UTLB BRAM is used */
#ifdef XPAR_MICROBLAZE_USE_MMU
#if XPAR_MICROBLAZE_USE_MMU > 1
#define HAS_BRAM_MMU_UTLB
#endif
#endif
/* Define if internal BTC BRAM is used, and match BTC clear to a complete cache scrub */
#if defined(XPAR_MICROBLAZE_USE_BRANCH_TARGET_CACHE) && \
defined(XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE)
#if XPAR_MICROBLAZE_USE_BRANCH_TARGET_CACHE == 1
#if XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE == 0 || \
XPAR_MICROBLAZE_BRANCH_TARGET_CACHE_SIZE > 4
#define HAS_BRAM_BRANCH_TARGET_CACHE
#ifdef HAS_BRAM_DCACHE
#define BTC_MASK_D (XPAR_MICROBLAZE_DCACHE_BYTE_SIZE/DCACHE_INCREMENT-1)
#else
#define BTC_MASK_D 256
#endif
#ifdef HAS_BRAM_ICACHE
#define BTC_MASK_I (XPAR_MICROBLAZE_CACHE_BYTE_SIZE/ICACHE_INCREMENT-1)
#else
#define BTC_MASK_I 256
#endif
#if BTC_MASK_D > BTC_MASK_I
#define BTC_MASK BTC_MASK_D
#else
#define BTC_MASK BTC_MASK_I
#endif
#endif
#endif
#endif
/* Define index offsets to persistent data used by this routine */
#define DLMB_INDEX_OFFSET 0
#define DCACHE_INDEX_OFFSET 4
#define ICACHE_INDEX_OFFSET 8
#define MMU_INDEX_OFFSET 12
#define BTC_CALL_COUNT_OFFSET 16
.text
.globl microblaze_scrub
.ent microblaze_scrub
.align 2
/* void microblaze_scrub(void)
 * Incremental scrubber for fault-tolerant configurations: each call
 * touches ONE unit of each enabled memory (one LMB word read+rewritten,
 * one DCache line, one ICache line, one UTLB entry) and advances the
 * corresponding persistent index; the BTC is cleared only when its call
 * counter wraps. Call periodically (e.g. from a timer interrupt); assumes
 * privileged mode if the MMU is enabled. Clobbers r5, r6, r7.
 * Compiles to a plain return when fault tolerance is off. */
microblaze_scrub:
#ifdef FAULT_TOLERANT
la r6, r0, L_persistent_data /* Get pointer to data */
#ifdef HAS_SCRUBBABLE_LMB
L_dlmb:
lwi r5, r6, DLMB_INDEX_OFFSET /* Get dlmb index */
lw r7, r5, r0 /* Load and store (ECC corrects on read; rewrite stores clean word) */
sw r7, r5, r0
addik r5, r5, 4 /* Increment and save dlmb index */
andi r5, r5, DLMB_MASK /* Wrap within the LMB region */
swi r5, r6, DLMB_INDEX_OFFSET
#endif /* HAS_SCRUBBABLE_LMB */
#ifdef HAS_BRAM_DCACHE
L_dcache:
lwi r5, r6, DCACHE_INDEX_OFFSET /* Get dcache line index */
wdc r5, r0 /* Invalidate data cache line */
addik r5, r5, DCACHE_INCREMENT /* Increment and save entry index */
andi r5, r5, DCACHE_MASK
swi r5, r6, DCACHE_INDEX_OFFSET
#endif /* HAS_BRAM_DCACHE */
#ifdef HAS_BRAM_ICACHE
L_icache:
lwi r5, r6, ICACHE_INDEX_OFFSET /* Get icache line index */
wic r5, r0 /* Invalidate instruction cache line */
addik r5, r5, ICACHE_INCREMENT /* Increment and save entry index */
andi r5, r5, ICACHE_MASK
swi r5, r6, ICACHE_INDEX_OFFSET
#endif /* HAS_BRAM_ICACHE */
#ifdef HAS_BRAM_MMU_UTLB
L_mmu:
lwi r5, r6, MMU_INDEX_OFFSET /* Get UTLB entry index */
mts rtlbx, r5 /* Access next entry in UTLB */
mts rtlbhi, r0 /* Clear the UTLB entry */
addik r5, r5, 1 /* Increment and save entry index */
andi r5, r5, 0x3F /* Wrap at 64 UTLB entries */
swi r5, r6, MMU_INDEX_OFFSET
#endif /* HAS_BRAM_MMU_UTLB */
#ifdef HAS_BRAM_BRANCH_TARGET_CACHE
L_btc:
lwi r5, r6, BTC_CALL_COUNT_OFFSET /* Get BTC call count offset */
addik r5, r5, 1 /* Increment and save call count */
andi r5, r5, BTC_MASK
swi r5, r6, BTC_CALL_COUNT_OFFSET
bnei r5, L_skip_btc_scrub /* Skip scrub unless count wrap */
bri 4 /* Clear branch target cache (an unconditional branch flushes the BTC) */
L_skip_btc_scrub:
#endif /* HAS_BRAM_BRANCH_TARGET_CACHE */
#endif /* FAULT_TOLERANT */
L_done:
rtsd r15, 8 /* Return */
nop
.end microblaze_scrub
/* Persistent scrub state, one word per scrubbed memory (see *_OFFSET defines) */
.data
.align 2
L_persistent_data:
.long 0 /* dlmb index */
.long 0 /* dcache index */
.long 0 /* icache index */
.long 0 /* mmu entry index */
.long 0 /* btc call count */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.