repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,075 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_enable_dcache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable L1 dcache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_dcache
.ent microblaze_enable_dcache
.align 2
microblaze_enable_dcache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x80
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the interrupt enable bit
ori r8, r8, 0x80
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,540 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_dcache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_dcache_range (unsigned int cacheaddr, unsigned int len)
*
* Flush a L1 DCache range
*
* Parameters:
* 'cacheaddr' - address in the Dcache where the flush begins
* 'len ' - length (in bytes) worth of Dcache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#define MB_HAS_WRITEBACK_SET 0
#else
#define MB_HAS_WRITEBACK_SET XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#endif
.text
.globl microblaze_flush_dcache_range
.ent microblaze_flush_dcache_range
.align 2
microblaze_flush_dcache_range:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
beqi r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN) /* Align start down to cache line */
#if MB_HAS_WRITEBACK_SET == 0 /* Use a different scheme for MB version < v7.20 or when caches are write-through */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wdc r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
#else
RSUBK r6, r5, r6
/* r6 will now contain (count of bytes - (4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) */
L_start:
wdc.flush r5, r6 /* Flush the cache line */
#if defined (__arch64__ )
addlik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
beagei r6, L_start
#else
bneid r6, L_start
addik r6, r6, -(XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
#endif
#endif
L_done:
rtsd r15, 8
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_flush_dcache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,200 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_init_icache_range.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_init_icache_range (unsigned int cache_start, unsigned int cache_len)
*
* Invalidate icache on the microblaze
*
* Parameters:
* 'cache_start' - address in the Icache where invalidation begins
* 'cache_len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
.text
.globl microblaze_init_icache_range
.ent microblaze_init_icache_range
.align 2
microblaze_init_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
mfs r9, rmsr /* Disable Icache and interrupts before invalidating */
andi r10, r9, (~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE))
mts rmsr, r10
andi r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align to cache line */
add r6, r5, r6 /* Compute end */
andi r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align to cache line */
L_start:
wic r5, r0 /* Invalidate the Cache (delay slot) */
cmpu r18, r5, r6 /* Are we at the end ? */
blei r18, L_done
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
L_done:
rtsd r15, 8 /* Return */
mts rmsr, r9
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_init_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,385 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_icache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache()
*
* Invalidate the entire ICache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_icache
.ent microblaze_invalidate_icache
.align 2
microblaze_invalidate_icache:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Icache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_ICACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)) /* Align to cache line */
ADDIK r6, r5, XPAR_MICROBLAZE_CACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN)) /* Compute end */
L_start:
wic r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,575 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_enable_interrupts.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* @file microblaze_enable_interrupts.S
*
* @addtogroup microblaze_pseudo_asm_macro
* @{
* <h2> microblaze_enable_interrupts.S </h2>
* - API to Enable Interrupts: void microblaze_enable_interrupts(void)
*
* This API Enables interrupts on the MicroBlaze processor. When the MicroBlaze
* processor starts up, interrupts are disabled. Interrupts must be explicitly
* turned on using this function.
*
* <pre>
*
* File : microblaze_enable_interrupts.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable interrupts on the microblaze.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_interrupts
.ent microblaze_enable_interrupts
.align 2
microblaze_enable_interrupts:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x2
nop
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r12, rmsr
#Set the interrupt enable bit
ori r12, r12, 0x2
#Save the MSR register
mts rmsr, r12
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_interrupts
/**
* @} End of "addtogroup microblaze_pseudo_asm_macro".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,550 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_update_dcache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* File : microblaze_update_dcache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update dcache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_update_dcache
.ent microblaze_update_dcache
.align 2
microblaze_update_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#if XPAR_MICROBLAZE_DCACHE_LINE_LEN == 1
/* Read the MSR register into a temp register */
mfs r18, rmsr
/* Clear the dcache enable bit to disable the cache
Register r10,r18 are volatile registers and hence do not need to be saved before use */
andi r10, r18, ~128
mts rmsr, r10
/* Update the lock and valid info */
andi r5, r5, 0xfffffffc
or r5, r5, r7
/* Update dcache */
wdc r5, r6
/* Return */
rtsd r15, 8
mts rmsr, r18
#else
/* The only valid usage of this routine for larger cache line lengths is to invalidate a data cache line
So call microblaze_init_dcache_range appropriately to do the job */
brid microblaze_init_dcache_range
addik r6, r0, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4)
/* We don't have a return instruction here. This is tail call optimization :) */
#endif /* XPAR_MICROBLAZE_DCACHE_LINE_LEN == 1 */
#else
rtsd r15, 8
nop
#endif
.end microblaze_update_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,815 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_flush_dcache()
*
* Flush the L1 DCache
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
.text
.globl microblaze_flush_dcache
.ent microblaze_flush_dcache
.align 2
microblaze_flush_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Align to cache line */
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc.flush r5, r0 /* Flush the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
BRI L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
#endif
rtsd r15, 8 /* Return */
nop
.end microblaze_flush_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,676 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_disable_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* File : microblaze_disable_dcache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Disable the L1 dcache on the microblaze.
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
.text
.globl microblaze_disable_dcache
.ent microblaze_disable_dcache
.align 2
microblaze_disable_dcache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
ADDIK r1, r1, -8
SI r15, r1, 0
BRLID r15, microblaze_flush_dcache /* microblaze_flush_dcache does not use r1*/
nop
LI r15, r1, 0
ADDIK r1, r1, 8
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
rtsd r15, 8
msrclr r0, 0x80
#else /* XPAR_MICROBLAZE_USE_MSR_INSTR == 1 */
ADDIK r1, r1, -8
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
SI r15, r1, 0
BRLID r15, microblaze_flush_dcache
nop
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
mfs r11, rmsr
andi r11, r11, ~(0x80)
mts rmsr, r11
#if XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0
LI r15, r1, 0
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK != 0 */
ADDIK r1, r1, 8
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_disable_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,555 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_update_icache.S | /******************************************************************************
* Copyright (c) 2006 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_update_icache.s
* Date : 2003, September 24
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Update icache on the microblaze.
* Takes in three parameters
* r5 : Cache Tag Line
* r6 : Cache Data
* r7 : Lock/Valid information
* Bit 30 is Lock [ 1 indicates locked ]
* Bit 31 is Valid [ 1 indicates valid ]
*
* --------------------------------------------------------------
* | Lock | Valid | Effect
* --------------------------------------------------------------
* | 0 | 0 | Invalidate Cache
* | 0 | 1 | Valid, but unlocked cacheline
* | 1 | 0 | Invalidate Cache, No effect of lock
* | 1 | 1 | Valid cache. Locked to a
* | | | particular address
* --------------------------------------------------------------
*
*
**********************************************************************************/
#include "xparameters.h"
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
.text
.globl microblaze_update_icache
.ent microblaze_update_icache
.align 2
microblaze_update_icache:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#if XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1
/* Read the MSR register into a temp register */
mfs r18, rmsr
/* Clear the icache enable bit to disable the cache
Register r10,r18 are volatile registers and hence do not need to be saved before use */
andi r10, r18, ~32
mts rmsr, r10
/* Update the lock and valid info */
andi r5, r5, 0xfffffffc
or r5, r5, r7
/* Update icache */
wic r5, r6
/* Return */
rtsd r15, 8
mts rmsr, r18
#else
/* The only valid usage of this routine for larger cache line lengths is to invalidate an instruction cache line
So call microblaze_init_icache_range appropriately to do the job */
brid microblaze_init_icache_range
addik r6, r0, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4)
/* We don't have a return instruction here. This is tail call optimization :) */
#endif /* XPAR_MICROBLAZE_ICACHE_LINE_LEN == 1 */
#else
rtsd r15, 8
nop
#endif
.end microblaze_update_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 26,672 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/hw_exception_handler.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
* Microblaze HW Exception Handler
* - Non self-modifying exception handler for the following exception conditions
* - Unalignment
* - Instruction bus error
* - Data bus error
* - Illegal instruction opcode
* - Divide-by-zero
* - Stack protection violation
*******************************************************************************/
#include "microblaze_exceptions_g.h"
#include "xparameters.h"
#include "microblaze_instructions.h"
/* 64-bit definitions */
#if defined (__arch64__)
#define INTPTR_DATAITEM .quad
#define REGSIZE 8
#define DATAALIGN 4
#else
#define INTPTR_DATAITEM .long
#define REGSIZE 4
#define DATAALIGN 2
#endif /* 64-bit definitions */
/* Helpful Macros */
#define EX_HANDLER_STACK_SIZ (REGSIZE*21)
#define RMSR_OFFSET (20 * REGSIZE)
#define R17_OFFSET (0)
#define REG_OFFSET(regnum) (REGSIZE * (regnum + 1))
#define NUM_TO_REG(num) r ## num
#define R3_TO_STACK(regnum) SI r3, r1, REG_OFFSET(regnum)
#define R3_FROM_STACK(regnum) LI r3, r1, REG_OFFSET(regnum)
#define PUSH_REG(regnum) SI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
#define POP_REG(regnum) LI NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
/* Uses r5 */
#define PUSH_MSR \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET;
#define PUSH_MSR_AND_ENABLE_EXC \
mfs r5, rmsr; \
swi r5, r1, RMSR_OFFSET; \
ori r5, r5, 0x100; /* Turn ON the EE bit*/ \
mts rmsr, r5;
/* Uses r5 */
#define POP_MSR \
lwi r5, r1, RMSR_OFFSET; \
mts rmsr, r5;
/* Push r17 */
#define PUSH_R17 SI r17, r1, R17_OFFSET
/* Pop r17 */
#define POP_R17 LI r17, r1, R17_OFFSET
#define LWREG_NOP \
BRI ex_handler_unhandled; \
nop;
#define SWREG_NOP \
BRI ex_handler_unhandled; \
nop;
/* r3 is the source */
#define R3_TO_LWREG_V(regnum) \
R3_TO_STACK (regnum); \
BRI ex_handler_done;
/* r3 is the source */
#define R3_TO_LWREG(regnum) \
OR NUM_TO_REG (regnum), r0, r3; \
BRI ex_handler_done;
/* r3 is the target */
#define SWREG_TO_R3_V(regnum) \
R3_FROM_STACK (regnum); \
BRI ex_sw_tail;
/* r3 is the target */
#define SWREG_TO_R3(regnum) \
OR r3, r0, NUM_TO_REG (regnum); \
BRI ex_sw_tail;
/* regnum is the source */
#define FP_EX_OPB_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_b; \
nop; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPB_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_b; \
BRI handle_fp_ex_opa;
/* regnum is the source */
#define FP_EX_OPA_SAVE(regnum) \
SI NUM_TO_REG (regnum), r0, mb_fpex_op_a; \
nop; \
BRI handle_fp_ex_done;
/* regnum is the source */
#define FP_EX_OPA_SAVE_V(regnum) \
R3_FROM_STACK (regnum); \
SI r3, r0, mb_fpex_op_a; \
BRI handle_fp_ex_done;
#define FP_EX_UNHANDLED \
BRI fp_ex_unhandled; \
nop; \
nop;
/* ESR masks */
#define ESR_EXC_MASK 0x0000001F
#define ESR_REG_MASK 0x000003E0
#define ESR_LW_SW_MASK 0x00000400
#define ESR_WORD_MASK 0x00000800
#define ESR_DS_MASK 0x00001000
#define ESR_LONG_MASK 0x00002000
/* Extern declarations */
.extern XNullHandler
#ifdef MICROBLAZE_EXCEPTIONS_ENABLED /* If exceptions are enabled in the processor */
/*
* hw_exception_handler - Handler for unaligned exceptions
* Exception handler notes:
* - Does not handle exceptions other than unaligned exceptions
* - Does not handle exceptions during load into r17, r1, r0.
* - Does not handle exceptions during store from r17 (cannot be done) and r1 (slows down common case)
*
* Relevant register structures
*
* EAR - |----|----|----|----|----|----|----|----|
* - < ## 32 or 64 bit faulting address ## >
*
* ESR - |----|----|----|----|----| - | - |-----|-----|
* - W S REG EXC
*
*
* STACK FRAME STRUCTURE
* ---------------------
*
* +-------------+ + 0
* | r17 |
* +-------------+ + 4 (32-bit) + 8 (64-bit)
* | Args for |
* | next func |
* +-------------+ + 8 (32-bit) + 16 (64-bit)
* | r1 |
* | . |
* | . |
* | . |
* | . |
* | r18 |
* +-------------+ + 80 (32-bit) + 160 (64-bit)
* | MSR |
* +-------------+ + 84 (32-bit) + 168 (64-bit)
* | . |
* | . |
*/
.global _hw_exception_handler
.section .text
.align 2
.ent _hw_exception_handler
.type _hw_exception_handler, @function
_hw_exception_handler:
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* Immediately halt for stack protection violation exception without using any stack */
SI r3, r0, mb_sp_save_r3; /* Save temporary register */
mfs r3, resr; /* Extract ESR[DS] */
andi r3, r3, ESR_EXC_MASK;
xori r3, r3, 0x7; /* Check for stack protection violation */
BNEI r3, ex_handler_not_sp_violation;
ex_handler_sp_violation:
bri 0; /* Halt here if stack protection violation */
ex_handler_not_sp_violation:
LI r3, r0, mb_sp_save_r3; /* Restore temporary register */
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
ADDIK r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
PUSH_REG(3);
PUSH_REG(4);
PUSH_REG(5);
PUSH_REG(6);
#ifdef MICROBLAZE_CAN_HANDLE_EXCEPTIONS_IN_DELAY_SLOTS
mfs r6, resr;
andi r6, r6, ESR_DS_MASK;
BEQI r6, ex_handler_no_ds;
mfs r17, rbtr;
ex_handler_no_ds:
#endif
PUSH_R17;
PUSH_MSR_AND_ENABLE_EXC; /* Exceptions enabled here. This will allow nested exceptions */
mfs r3, resr;
andi r5, r3, ESR_EXC_MASK; /* Extract ESR[EXC] */
#ifndef NO_UNALIGNED_EXCEPTIONS
xori r6, r5, 1; /* 00001 = Unaligned Exception */
BNEI r6, handle_ex_regular;
ADDIK r4, r0, MB_ExceptionVectorTable; /* Check if user has registered an unaligned exception handler */
#if defined (__arch64__)
LI r4, r4, 16;
#else
LI r4, r4, 8;
#endif
ADDIK r6, r0, XNullHandler; /* If exceptionvectortable entry is still XNullHandler, use */
XOR r6, r4, r6; /* the default exception handler */
BEQI r6, handle_unaligned_ex ;
handle_ex_regular:
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
xori r6, r5, 6; /* 00110 = FPU exception */
BEQI r6, handle_fp_ex; /* Go and decode the FP exception */
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
handle_other_ex: /* Handle Other exceptions here */
ori r6, r0, 20;
cmp r6, r5, r6; /* >= 20 are exceptions we do not handle. */
BLEI r6, ex_handler_unhandled;
ori r6, r0, 7;
cmp r6, r5, r6; /* Convert MMU exception indices into an ordinal of 7 */
BGTI r6, handle_other_ex_tail;
ori r5, r0, 0x7;
handle_other_ex_tail:
PUSH_REG(7); /* Save other volatiles before we make procedure calls below */
PUSH_REG(8);
PUSH_REG(9);
PUSH_REG(10);
PUSH_REG(11);
PUSH_REG(12);
PUSH_REG(15);
PUSH_REG(18);
ADDIK r4, r0, MB_ExceptionVectorTable; /* Load the Exception vector table base address */
ADDK r7, r5, r5; /* Calculate exception vector offset = r5 * 8 (32-bit) */
ADDK r7, r7, r7;
ADDK r7, r7, r7;
#if defined (__arch64__)
ADDK r7, r7, r7; /* or r5 * 16 (64-bit) */
#endif
ADDK r7, r7, r4; /* Get pointer to exception vector */
LI r5, r7, REGSIZE; /* Load argument to exception handler from table */
LOAD r7, r7, r0; /* Load vector itself here */
brald r15, r7; /* Branch to handler */
nop;
POP_REG(7); /* Restore other volatiles */
POP_REG(8);
POP_REG(9);
POP_REG(10);
POP_REG(11);
POP_REG(12);
POP_REG(15);
POP_REG(18);
BRI ex_handler_done; /* Complete exception handling */
#ifndef NO_UNALIGNED_EXCEPTIONS
handle_unaligned_ex:
andi r6, r3, ESR_REG_MASK; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
srl r6, r6;
srl r6, r6;
sbi r6, r0, ex_reg_op; /* Store the register operand in a temporary location */
mfs r4, rear;
andi r6, r3, ESR_LW_SW_MASK; /* Extract ESR[S] */
BNEI r6, ex_sw;
#if defined (__arch64__)
ex_ll:
andi r6, r3, ESR_LONG_MASK; /* Extract ESR[L] */
BEQI r6, ex_lw;
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a long, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lbui r5, r4, 2;
sbi r5, r0, ex_tmp_data_loc_2;
lbui r5, r4, 3;
sbi r5, r0, ex_tmp_data_loc_3;
lbui r5, r4, 4;
sbi r5, r0, ex_tmp_data_loc_4;
lbui r5, r4, 5;
sbi r5, r0, ex_tmp_data_loc_5;
lbui r5, r4, 6;
sbi r5, r0, ex_tmp_data_loc_6;
lbui r5, r4, 7;
sbi r5, r0, ex_tmp_data_loc_7;
lli r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
BRI ex_lw_tail;
#endif
ex_lw:
andi r6, r3, ESR_WORD_MASK; /* Extract ESR[W] */
BEQI r6, ex_lhw;
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a word, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lbui r5, r4, 2;
sbi r5, r0, ex_tmp_data_loc_2;
lbui r5, r4, 3;
sbi r5, r0, ex_tmp_data_loc_3;
lwi r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
BRI ex_lw_tail;
ex_lhw:
lbui r5, r4, 0; /* Exception address in r4 */
sbi r5, r0, ex_tmp_data_loc_0; /* Load a half-word, byte-by-byte from destination address and save it in tmp space */
lbui r5, r4, 1;
sbi r5, r0, ex_tmp_data_loc_1;
lhui r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
ex_lw_tail:
lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
ADDIK r6, r0, lw_table; /* Form load_word jump table offset (lw_table + (8 * regnum)) */
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r6;
bra r5;
ex_lw_end: /* Exception handling of load word, ends */
ex_sw:
lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
ADDIK r6, r0, sw_table; /* Form store_word jump table offset (sw_table + (8 * regnum)) */
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r5;
ADDK r5, r5, r6;
bra r5;
ex_sw_tail:
#if defined (__arch64__)
ex_sl:
mfs r6, resr;
andi r6, r6, ESR_LONG_MASK; /* Extract ESR[L] */
BEQI r6, ex_not_sl;
sli r3, r0, ex_tmp_data_loc_0;
lbui r3, r0, ex_tmp_data_loc_0; /* Store the long, byte-by-byte into destination address */
sbi r3, r4, 0;
lbui r3, r0, ex_tmp_data_loc_1;
sbi r3, r4, 1;
lbui r3, r0, ex_tmp_data_loc_2;
sbi r3, r4, 2;
lbui r3, r0, ex_tmp_data_loc_3;
sbi r3, r4, 3;
lbui r3, r0, ex_tmp_data_loc_4;
sbi r3, r4, 4;
lbui r3, r0, ex_tmp_data_loc_5;
sbi r3, r4, 5;
lbui r3, r0, ex_tmp_data_loc_6;
sbi r3, r4, 6;
lbui r3, r0, ex_tmp_data_loc_7;
sbi r3, r4, 7;
BRI ex_handler_done;
ex_not_sl:
#endif
mfs r6, resr;
andi r6, r6, ESR_WORD_MASK; /* Extract ESR[W] */
BEQI r6, ex_shw;
swi r3, r0, ex_tmp_data_loc_0;
lbui r3, r0, ex_tmp_data_loc_0; /* Store the word, byte-by-byte into destination address */
sbi r3, r4, 0;
lbui r3, r0, ex_tmp_data_loc_1;
sbi r3, r4, 1;
lbui r3, r0, ex_tmp_data_loc_2;
sbi r3, r4, 2;
lbui r3, r0, ex_tmp_data_loc_3;
sbi r3, r4, 3;
BRI ex_handler_done;
ex_shw:
swi r3, r0, ex_tmp_data_loc_0; /* Store the lower half-word, byte-by-byte into destination address */
#ifdef __LITTLE_ENDIAN__
lbui r3, r0, ex_tmp_data_loc_0;
#else
lbui r3, r0, ex_tmp_data_loc_2;
#endif
sbi r3, r4, 0;
#ifdef __LITTLE_ENDIAN__
lbui r3, r0, ex_tmp_data_loc_1;
#else
lbui r3, r0, ex_tmp_data_loc_3;
#endif
sbi r3, r4, 1;
ex_sw_end: /* Exception handling of store word, ends. */
BRI ex_handler_done;
#endif /* !NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
handle_fp_ex:
ADDIK r3, r17, -4; /* r17 contains (addr of exception causing FP instruction + 4) */
lw r4, r0, r3; /* We might find ourselves in a spot here. Unguaranteed load */
handle_fp_ex_opb:
ADDIK r6, r0, fp_table_opb; /* Decode opB and store its value in mb_fpex_op_b */
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
andi r3, r4, 0x1F;
ADDK r3, r3, r3; /* Calculate (fp_table_opb + (regno * 12)) in r5 */
ADDK r3, r3, r3;
ADDK r5, r3, r3;
ADDK r5, r5, r3;
ADDK r5, r5, r6;
bra r5;
handle_fp_ex_opa:
ADDIK r6, r0, fp_table_opa; /* Decode opA and store its value in mb_fpex_op_a */
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
srl r4, r4;
andi r3, r4, 0x1F;
ADDK r3, r3, r3; /* Calculate (fp_table_opb + (regno * 12)) in r5 */
ADDK r3, r3, r3;
ADDK r5, r3, r3;
ADDK r5, r5, r3;
ADDK r5, r5, r6;
bra r5;
handle_fp_ex_done:
ori r5, r0, 6; /* Set exception number back to 6 */
BRI handle_other_ex_tail;
fp_ex_unhandled:
bri 0;
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
ex_handler_done:
POP_R17;
POP_MSR;
POP_REG(3);
POP_REG(4);
POP_REG(5);
POP_REG(6);
ADDIK r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
rted r17, 0
nop
ex_handler_unhandled:
bri 0 /* UNHANDLED. TRAP HERE */
.end _hw_exception_handler
#ifndef NO_UNALIGNED_EXCEPTIONS
/*
* hw_exception_handler Jump Table
* - Contains code snippets for each register that caused the unaligned exception.
* - Hence exception handler is NOT self-modifying
* - Separate table for load exceptions and store exceptions.
* - Each table is of size: (8 * 32) = 256 bytes
*/
.section .text
.align 4
lw_table:
lw_r0: R3_TO_LWREG (0);
lw_r1: LWREG_NOP;
lw_r2: R3_TO_LWREG (2);
lw_r3: R3_TO_LWREG_V (3);
lw_r4: R3_TO_LWREG_V (4);
lw_r5: R3_TO_LWREG_V (5);
lw_r6: R3_TO_LWREG_V (6);
lw_r7: R3_TO_LWREG (7);
lw_r8: R3_TO_LWREG (8);
lw_r9: R3_TO_LWREG (9);
lw_r10: R3_TO_LWREG (10);
lw_r11: R3_TO_LWREG (11);
lw_r12: R3_TO_LWREG (12);
lw_r13: R3_TO_LWREG (13);
lw_r14: R3_TO_LWREG (14);
lw_r15: R3_TO_LWREG (15);
lw_r16: R3_TO_LWREG (16);
lw_r17: LWREG_NOP;
lw_r18: R3_TO_LWREG (18);
lw_r19: R3_TO_LWREG (19);
lw_r20: R3_TO_LWREG (20);
lw_r21: R3_TO_LWREG (21);
lw_r22: R3_TO_LWREG (22);
lw_r23: R3_TO_LWREG (23);
lw_r24: R3_TO_LWREG (24);
lw_r25: R3_TO_LWREG (25);
lw_r26: R3_TO_LWREG (26);
lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
lw_r31: R3_TO_LWREG (31);
sw_table:
sw_r0: SWREG_TO_R3 (0);
sw_r1: SWREG_NOP;
sw_r2: SWREG_TO_R3 (2);
sw_r3: SWREG_TO_R3_V (3);
sw_r4: SWREG_TO_R3_V (4);
sw_r5: SWREG_TO_R3_V (5);
sw_r6: SWREG_TO_R3_V (6);
sw_r7: SWREG_TO_R3 (7);
sw_r8: SWREG_TO_R3 (8);
sw_r9: SWREG_TO_R3 (9);
sw_r10: SWREG_TO_R3 (10);
sw_r11: SWREG_TO_R3 (11);
sw_r12: SWREG_TO_R3 (12);
sw_r13: SWREG_TO_R3 (13);
sw_r14: SWREG_TO_R3 (14);
sw_r15: SWREG_TO_R3 (15);
sw_r16: SWREG_TO_R3 (16);
sw_r17: SWREG_NOP;
sw_r18: SWREG_TO_R3 (18);
sw_r19: SWREG_TO_R3 (19);
sw_r20: SWREG_TO_R3 (20);
sw_r21: SWREG_TO_R3 (21);
sw_r22: SWREG_TO_R3 (22);
sw_r23: SWREG_TO_R3 (23);
sw_r24: SWREG_TO_R3 (24);
sw_r25: SWREG_TO_R3 (25);
sw_r26: SWREG_TO_R3 (26);
sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
sw_r31: SWREG_TO_R3 (31);
/* Temporary data structures used in the handler */
.section .data
.align DATAALIGN
ex_tmp_data_loc_0:
.byte 0
ex_tmp_data_loc_1:
.byte 0
ex_tmp_data_loc_2:
.byte 0
ex_tmp_data_loc_3:
.byte 0
#if defined (__arch64__)
ex_tmp_data_loc_4:
.byte 0
ex_tmp_data_loc_5:
.byte 0
ex_tmp_data_loc_6:
.byte 0
ex_tmp_data_loc_7:
.byte 0
#endif
ex_reg_op:
.byte 0
#endif /* ! NO_UNALIGNED_EXCEPTIONS */
#if defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE)
/*
* FP exception decode jump table.
* - Contains code snippets for each register that could have been a source operand for an excepting FP instruction
* - Hence exception handler is NOT self-modifying
* - Separate table for opA and opB
* - Each table is of size: (12 * 32) = 384 bytes
*/
.section .text
.align 4
fp_table_opa:
opa_r0: FP_EX_OPA_SAVE (0);
opa_r1: FP_EX_UNHANDLED;
opa_r2: FP_EX_OPA_SAVE (2);
opa_r3: FP_EX_OPA_SAVE_V (3);
opa_r4: FP_EX_OPA_SAVE_V (4);
opa_r5: FP_EX_OPA_SAVE_V (5);
opa_r6: FP_EX_OPA_SAVE_V (6);
opa_r7: FP_EX_OPA_SAVE (7);
opa_r8: FP_EX_OPA_SAVE (8);
opa_r9: FP_EX_OPA_SAVE (9);
opa_r10: FP_EX_OPA_SAVE (10);
opa_r11: FP_EX_OPA_SAVE (11);
opa_r12: FP_EX_OPA_SAVE (12);
opa_r13: FP_EX_OPA_SAVE (13);
opa_r14: FP_EX_UNHANDLED;
opa_r15: FP_EX_UNHANDLED;
opa_r16: FP_EX_UNHANDLED;
opa_r17: FP_EX_UNHANDLED;
opa_r18: FP_EX_OPA_SAVE (18);
opa_r19: FP_EX_OPA_SAVE (19);
opa_r20: FP_EX_OPA_SAVE (20);
opa_r21: FP_EX_OPA_SAVE (21);
opa_r22: FP_EX_OPA_SAVE (22);
opa_r23: FP_EX_OPA_SAVE (23);
opa_r24: FP_EX_OPA_SAVE (24);
opa_r25: FP_EX_OPA_SAVE (25);
opa_r26: FP_EX_OPA_SAVE (26);
opa_r27: FP_EX_OPA_SAVE (27);
opa_r28: FP_EX_OPA_SAVE (28);
opa_r29: FP_EX_OPA_SAVE (29);
opa_r30: FP_EX_OPA_SAVE (30);
opa_r31: FP_EX_OPA_SAVE (31);
fp_table_opb:
opb_r0: FP_EX_OPB_SAVE (0);
opb_r1: FP_EX_UNHANDLED;
opb_r2: FP_EX_OPB_SAVE (2);
opb_r3: FP_EX_OPB_SAVE_V (3);
opb_r4: FP_EX_OPB_SAVE_V (4);
opb_r5: FP_EX_OPB_SAVE_V (5);
opb_r6: FP_EX_OPB_SAVE_V (6);
opb_r7: FP_EX_OPB_SAVE (7);
opb_r8: FP_EX_OPB_SAVE (8);
opb_r9: FP_EX_OPB_SAVE (9);
opb_r10: FP_EX_OPB_SAVE (10);
opb_r11: FP_EX_OPB_SAVE (11);
opb_r12: FP_EX_OPB_SAVE (12);
opb_r13: FP_EX_OPB_SAVE (13);
opb_r14: FP_EX_UNHANDLED;
opb_r15: FP_EX_UNHANDLED;
opb_r16: FP_EX_UNHANDLED;
opb_r17: FP_EX_UNHANDLED;
opb_r18: FP_EX_OPB_SAVE (18);
opb_r19: FP_EX_OPB_SAVE (19);
opb_r20: FP_EX_OPB_SAVE (20);
opb_r21: FP_EX_OPB_SAVE (21);
opb_r22: FP_EX_OPB_SAVE (22);
opb_r23: FP_EX_OPB_SAVE (23);
opb_r24: FP_EX_OPB_SAVE (24);
opb_r25: FP_EX_OPB_SAVE (25);
opb_r26: FP_EX_OPB_SAVE (26);
opb_r27: FP_EX_OPB_SAVE (27);
opb_r28: FP_EX_OPB_SAVE (28);
opb_r29: FP_EX_OPB_SAVE (29);
opb_r30: FP_EX_OPB_SAVE (30);
opb_r31: FP_EX_OPB_SAVE (31);
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(MICROBLAZE_FP_EXCEPTION_ENABLED) && defined(MICROBLAZE_FP_EXCEPTION_DECODE)
/* This is where we store the opA and opB of the last excepting FP instruction */
.section .data
.align DATAALIGN
.global mb_fpex_op_a
.global mb_fpex_op_b
mb_fpex_op_a:
INTPTR_DATAITEM 0
mb_fpex_op_b:
INTPTR_DATAITEM 0
#endif /* defined (MICROBLAZE_FP_EXCEPTION_ENABLED) && defined (MICROBLAZE_FP_EXCEPTION_DECODE) */
#if defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1)
/* This is where we store the register used to check which exception occurred */
.section .data
.align DATAALIGN
mb_sp_save_r3:
INTPTR_DATAITEM 0
#endif /* defined(XPAR_MICROBLAZE_USE_STACK_PROTECTION) && (XPAR_MICROBLAZE_USE_STACK_PROTECTION == 1) */
/* The exception vector table */
.section .data
.align DATAALIGN
.global MB_ExceptionVectorTable
MB_ExceptionVectorTable:
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 0 /* -- FSL Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 1 /* -- Unaligned Access Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 2 /* -- Illegal Opcode Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 3 /* -- Instruction Bus Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 4 /* -- Data Bus Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 5 /* -- Div-by-0 Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 6 /* -- FPU Exception -- */
INTPTR_DATAITEM XNullHandler
INTPTR_DATAITEM 7 /* -- MMU Exceptions -- */
#else /* Dummy exception handler, in case exceptions are not present in the processor */
.global _hw_exception_handler
.section .text
.align 2
.ent _hw_exception_handler
_hw_exception_handler:
bri 0;
.end _hw_exception_handler
#endif /* MICROBLAZE_EXCEPTIONS_ENABLED */
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,879 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_icache_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
*
* microblaze_invalidate_icache_range(unsigned int cacheaddr, unsigned int len)
*
* Invalidate an ICache range
*
* Parameters:
* 'cacheaddr' - address in the Icache where invalidation begins
* 'len' - length (in bytes) worth of Icache to be invalidated
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_ICACHE_ENABLE 0x00000020
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_ICACHE_LINE_LEN
#define XPAR_MICROBLAZE_ICACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_icache_range
.ent microblaze_invalidate_icache_range
.align 2
microblaze_invalidate_icache_range:
#if (XPAR_MICROBLAZE_USE_ICACHE==1) && (XPAR_MICROBLAZE_ALLOW_ICACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Icache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_ICACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
BEQI r6, L_done /* Skip loop if size is zero */
ADD r6, r5, r6 /* Compute end address */
ADDIK r6, r6, -1
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align end down to cache line */
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_ICACHE_LINE_LEN) /* Align start down to cache line */
L_start:
CMPU r18, r5, r6 /* Are we at the end? */
BLTI r18, L_done
wic r5, r0 /* Invalidate the cache line */
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start /* Branch to the beginning of the loop */
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_ICACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_icache_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,482 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Flush a L2 Cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where the flush begins
* 'len ' - length (in bytes) worth of L2 cache to be flushed
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_flush_cache_ext_range
.ent microblaze_flush_cache_ext_range
.align 2
microblaze_flush_cache_ext_range:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done
ADDIK r6, r6, -1
ADD r6, r5, r6
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
RSUBK r6, r5, r6
Loop_start:
wdc.ext.flush r5, r6
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bneid r6, Loop_start
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
#endif
Loop_done:
#endif
rtsd r15, 8
nop
.end microblaze_flush_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,396 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_flush_cache_ext.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_flush_cache_ext()
*
* Flush the entire L2 Cache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
#define CACHEABLE_REGION_SIZE (XPAR_MICROBLAZE_DCACHE_HIGHADDR - XPAR_MICROBLAZE_DCACHE_BASEADDR)
.text
.globl microblaze_flush_cache_ext
.ent microblaze_flush_cache_ext
.align 2
microblaze_flush_cache_ext:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN))
ADDIK r6, r0, CACHEABLE_REGION_SIZE-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
Loop_start:
wdc.ext.flush r5, r6
#if defined (__arch64__)
addlik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bgtid r6,Loop_start
addik r6, r6,-(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
#endif
#endif
rtsd r15, 8
nop
.end microblaze_flush_cache_ext
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,365 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_dcache.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_dcache()
*
* Invalidate the entire L1 DCache
*
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define MICROBLAZE_MSR_DCACHE_ENABLE 0x00000080
#define MICROBLAZE_MSR_INTR_ENABLE 0x00000002
#ifndef XPAR_MICROBLAZE_DCACHE_LINE_LEN
#define XPAR_MICROBLAZE_DCACHE_LINE_LEN 1
#endif
#ifndef XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK
#define MB_VERSION_LT_v720
#endif
.text
.globl microblaze_invalidate_dcache
.ent microblaze_invalidate_dcache
.align 2
microblaze_invalidate_dcache:
#if (XPAR_MICROBLAZE_USE_DCACHE==1) && (XPAR_MICROBLAZE_ALLOW_DCACHE_WR==1)
#ifdef MB_VERSION_LT_v720 /* Disable Dcache and interrupts before invalidating */
mfs r9, rmsr
andi r10, r9, ~(MICROBLAZE_MSR_DCACHE_ENABLE | MICROBLAZE_MSR_INTR_ENABLE)
mts rmsr, r10
#endif
ADDIK r5, r0, XPAR_MICROBLAZE_DCACHE_BASEADDR & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN))
ADDIK r6, r5, XPAR_MICROBLAZE_DCACHE_BYTE_SIZE & (-(4 * XPAR_MICROBLAZE_DCACHE_LINE_LEN)) /* Compute end */
L_start:
wdc r5, r0 /* Invalidate the Cache */
CMPU r18, r5, r6 /* Are we at the end? */
BLEI r18, L_done
#if defined (__arch64__ )
addlik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 */
breai L_start
#else
brid L_start /* Branch to the beginning of the loop */
addik r5, r5, (XPAR_MICROBLAZE_DCACHE_LINE_LEN * 4) /* Increment the address by 4 (delay slot) */
#endif
L_done:
rtsd r15, 8 /* Return */
#ifdef MB_VERSION_LT_v720 /* restore MSR only for MB version < v7.20 */
mts rmsr, r9
#else
nop
#endif
#else
rtsd r15, 8 /* Return */
nop
#endif
.end microblaze_invalidate_dcache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,070 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_enable_icache.S | /******************************************************************************
* Copyright (c) 2004 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* File : microblaze_enable_icache.s
* Date : 2002, March 20.
* Company: Xilinx
* Group : Emerging Software Technologies
*
* Summary:
* Enable icache on the microblaze.
*
*
******************************************************************************/
#include "xparameters.h"
.text
.globl microblaze_enable_icache
.ent microblaze_enable_icache
.align 2
microblaze_enable_icache:
#if XPAR_MICROBLAZE_USE_MSR_INSTR == 1
rtsd r15, 8
msrset r0, 0x20
#else /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
#Read the MSR register
mfs r8, rmsr
#Set the interrupt enable bit
ori r8, r8, 0x20
#Save the MSR register
mts rmsr, r8
#Return
rtsd r15, 8
nop
#endif /*XPAR_MICROBLAZE_USE_MSR_INSTR == 1*/
.end microblaze_enable_icache
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,519 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/microblaze/microblaze_invalidate_cache_ext_range.S | /******************************************************************************
* Copyright (c) 2008 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/******************************************************************************
*
* microblaze_invalidate_cache_ext_range (unsigned int cacheaddr, unsigned int len)
*
*Invalidate an L2 cache range
*
*Parameters:
* 'cacheaddr' - address in the L2 cache where invalidation begins
* 'len ' - length (in bytes) worth of Dcache to be invalidated
*
*******************************************************************************/
#include "xparameters.h"
#include "microblaze_instructions.h"
#define XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN 16
.text
.globl microblaze_invalidate_cache_ext_range
.ent microblaze_invalidate_cache_ext_range
.align 2
microblaze_invalidate_cache_ext_range:
#if ((XPAR_MICROBLAZE_INTERCONNECT==3) && (XPAR_MICROBLAZE_USE_DCACHE==1))
beqi r6, Loop_done
ADD r6, r5, r6
ADDIK r6, r6, -1
ANDI r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
ANDI r5, r5, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
RSUBK r6, r5, r6
Loop_start:
wdc.ext.clear r5, r6
#if defined (__arch64__ )
addlik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
beagei r6, Loop_start
#else
bneid r6, Loop_start
addik r6, r6, -(4 * XPAR_MICROBLAZE_EXT_CACHE_LINE_LEN)
#endif
Loop_done:
#endif
rtsd r15, 8
nop
.end microblaze_invalidate_cache_ext_range
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,765 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/iccarm/asm_vectors.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file asm_vectors.s
;
; This file contains the initial vector table for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a Initial version
; 4.2 pkp 06/27/14 Modified return addresses for interrupt
; handlers
; 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
; abort and prefetch abort into DataAbortAddr and
; PrefetchAbortAddr for further use to fix CR#854523
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
MODULE ?asm_vectors
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#include "xparameters.h"
;#include "xtime_l.h"
#define UART_BAUDRATE 115200
IMPORT _prestart
IMPORT __iar_program_start
SECTION .intvec:CODE:NOROOT(2)
PUBLIC _vector_table
IMPORT IRQInterrupt
IMPORT FIQInterrupt
IMPORT SWInterrupt
IMPORT DataAbortInterrupt
IMPORT PrefetchAbortInterrupt
IMPORT DataAbortAddr
IMPORT PrefetchAbortAddr
_vector_table
ARM
B __iar_program_start
B Undefined
B SVCHandler
B PrefetchAbortHandler
B DataAbortHandler
NOP ; Placeholder for address exception vector
B IRQHandler
B FIQHandler
SECTION .text:CODE:NOROOT(2)
REQUIRE _vector_table
ARM
IRQHandler ; IRQ vector handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
bl IRQInterrupt ; IRQ vector
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
FIQHandler ; FIQ vector handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
FIQLoop
bl FIQInterrupt ; FIQ vector
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
Undefined ; Undefined handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
b _prestart
movs pc, lr
SVCHandler ; SWI handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
tst r0, #0x20 ; check the T bit
ldrneh r0, [lr,#-2] ; Thumb mode
bicne r0, r0, #0xff00 ; Thumb mode
ldreq r0, [lr,#-4] ; ARM mode
biceq r0, r0, #0xff000000 ; ARM mode
bl SWInterrupt ; SWInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
movs pc, lr ; adjust return
DataAbortHandler ; Data Abort handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =DataAbortAddr
sub r1, lr,#8
str r1, [r0] ;Address of instruction causing data abort
bl DataAbortInterrupt ;DataAbortInterrupt :call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #8 ; adjust return
PrefetchAbortHandler ; Prefetch Abort handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =PrefetchAbortAddr
sub r1, lr,#4
str r1, [r0] ;Address of instruction causing prefetch abort
bl PrefetchAbortInterrupt ; PrefetchAbortInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,764 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/iccarm/translation_table.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file translation_table.s
;
; This file contains the initialization for the MMU table in RAM
; needed by the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ---- -------- ---------------------------------------------------
; 1.00a ecm 10/20/09 Initial version
; 3.07a sgd 07/05/12 Configuring device address spaces as shareable device
; instead of strongly-ordered.
; 4.2 pkp 09/02/14 modified translation table entries according to address map
; 4.2 pkp 09/11/14 modified translation table entries to resolve compilation
; error for solving CR#822897
; 6.1 pkp 07/11/16 Corrected comments for memory attributes
; 6.8 mus 07/12/2018 Mark DDR memory as inner cacheable, if BSP is built
; with the USE_AMP flag.
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
EXPORT MMUTable
;ARMCC AREA |.mmu_tbl|,CODE,ALIGN=14
; RSEG mmu_tbl:CODE:ROOT (14)
SECTION .mmu_tbl:CODE:ROOT(14)
MMUTable
; Each table entry occupies one 32-bit word and there are
; 4096 entries, so the entire table takes up 16KB.
; Each entry covers a 1MB section.
; 0x00000000 - 0x3ffffff (DDR Cacheable)
count SETA 0
sect SETA 0
REPT 0x400
#ifndef USE_AMP
DCD sect + 0x15de6 ; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
#else
DCD sect + 0x14de6 ; S=1, TEX=b100 AP=b11, Domain=b1111, C=b0, B=b1
#endif
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0x40000000 - 0x7fffffff (GpAxi0)
count SETA 0
REPT 0x400
DCD sect + 0xc02 ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0x80000000 - 0xbfffffff (GpAxi1)
count SETA 0
REPT 0x400
DCD sect + 0xc02 ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xc0000000 - 0xdfffffff (undef)
count SETA 0
REPT 0x200
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe0000000 - 0xe02fffff (IOP dev)
count SETA 0
REPT 0x3
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe0300000 - 0xe0ffffff (undef/reserved)
count SETA 0
REPT 0xD
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe1000000 - 0xe1ffffff (NAND)
count SETA 0
REPT 0x10
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe2000000 - 0xe3ffffff (NOR)
count SETA 0
REPT 0x20
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe4000000 - 0xe5ffffff (SRAM)
count SETA 0
REPT 0x20
DCD sect + 0xc0e ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xe6000000 - 0xf7ffffff (reserved)
count SETA 0
REPT 0x0120
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
; 0xf8f03000 to 0xf8ffffff are reserved but due to granual size of
; 1MB, it is not possible to define separate regions for them
; 0xf8000000 - 0xf8ffffff (APB device regs)
count SETA 0
REPT 0x10
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xf9000000 - 0xfbffffff (reserved)
count SETA 0
REPT 0x30
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xfc000000 - 0xfdffffff (QSPI)
count SETA 0
REPT 0x20
DCD sect + 0xc0a ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xfe000000 - 0xffefffff (reserved)
count SETA 0
REPT 0x1F
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
ENDR
; 0xfff00000 to 0xfffb0000 is reserved but due to granual size of
; 1MB, it is not possible to define separate region for it
; 0xfff00000 to 0xfffb0000 (OCM)
count SETA 0
DCD sect + 0x4c0e ; S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1
sect SETA sect+0x100000
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,408 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/iccarm/boot.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file boot.s
;
; This file contains the initial vector table for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a Initial version
; 4.2 pkp 08/04/14 Removed PEEP board related code which contained
; initialization of uart smc nor and sram
; 5.0 pkp 16/12/14 Modified initialization code to enable scu after
; MMU is enabled and removed incorrect initialization
; of TLB lockdown register to fix CR#830580
; 5.1 pkp 05/13/15 Changed the initialization order so to first invalidate
; caches and TLB, enable MMU and caches, then enable SMP
; bit in ACTLR. L2Cache invalidation and enabling of L2Cache
; is done later.
; 6.0 mus 08/04/16 Added code to detect zynq-7000 base silicon configuration and
; attempt to enable dual core behavior on single cpu zynq-7000s devices
; is prevented from corrupting system behavior.
; 6.6 srm 10/25/17 Added timer configuration using XTime_StartTTCTimer API.
; Now the TTC instance as specified by the user will be
; started.
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
MODULE ?boot
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#include "xparameters.h"
;#include "xtime_l.h"
#define UART_BAUDRATE 115200
PUBLIC _prestart
PUBLIC __iar_program_start
IMPORT _vector_table
IMPORT MMUTable
IMPORT __cmain
IMPORT Xil_ExceptionInit
IMPORT XTime_SetTime
#if defined SLEEP_TIMER_BASEADDR
IMPORT XTime_StartTTCTimer
#endif
PSS_L2CC_BASE_ADDR EQU 0xF8F02000
PSS_SLCR_BASE_ADDR EQU 0xF8000000
RESERVED EQU 0x0fffff00
TblBase EQU MMUTable
LRemap EQU 0xFE00000F ; set the base address of the peripheral block as not shared
L2CCWay EQU (PSS_L2CC_BASE_ADDR + 0x077C) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_INVLD_WAY_OFFSET)
L2CCSync EQU (PSS_L2CC_BASE_ADDR + 0x0730) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_SYNC_OFFSET)
L2CCCrtl EQU (PSS_L2CC_BASE_ADDR + 0x0100) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CNTRL_OFFSET)
L2CCAuxCrtl EQU (PSS_L2CC_BASE_ADDR + 0x0104) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_AUX_CNTRL_OFFSET)
L2CCTAGLatReg EQU (PSS_L2CC_BASE_ADDR + 0x0108) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_TAG_RAM_CNTRL_OFFSET)
L2CCDataLatReg EQU (PSS_L2CC_BASE_ADDR + 0x010C) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_DATA_RAM_CNTRL_OFFSET)
L2CCIntClear EQU (PSS_L2CC_BASE_ADDR + 0x0220) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_IAR_OFFSET)
L2CCIntRaw EQU (PSS_L2CC_BASE_ADDR + 0x021C) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_ISR_OFFSET)
SLCRlockReg EQU (PSS_SLCR_BASE_ADDR + 0x04) ;(PSS_SLCR_BASE_ADDR + XPSS_SLCR_LOCK_OFFSET)
SLCRUnlockReg EQU (PSS_SLCR_BASE_ADDR + 0x08) ;(PSS_SLCR_BASE_ADDR + XPSS_SLCR_UNLOCK_OFFSET)
SLCRL2cRamReg EQU (PSS_SLCR_BASE_ADDR + 0xA1C) ;(PSS_SLCR_BASE_ADDR + XPSS_SLCR_L2C_RAM_OFFSET)
SLCRCPURSTReg EQU (0xF8000000 + 0x244) ;(XPS_SYS_CTRL_BASEADDR + A9_CPU_RST_CTRL_OFFSET)
EFUSEStaus EQU (0xF800D000 + 0x10) ;(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)
/* workaround for simulation not working when L1 D and I caches,MMU and L2 cache enabled - DT568997 */
#if SIM_MODE == 1
CRValMmuCac EQU 00000000000000b ; Disable IDC, and MMU
#else
CRValMmuCac EQU 01000000000101b ; Enable IDC, and MMU
#endif
CRValHiVectorAddr EQU 10000000000000b ; Set the Vector address to high, 0xFFFF0000
L2CCAuxControl EQU 0x72360000 ; Enable all prefetching, Way Size (16 KB) and High Priority for SO and Dev Reads Enable
L2CCControl EQU 0x01 ; Enable L2CC
L2CCTAGLatency EQU 0x0111 ; 7 Cycles of latency for TAG RAM
L2CCDataLatency EQU 0x0121 ; 7 Cycles of latency for DATA RAM
SLCRlockKey EQU 0x767B ; SLCR lock key
SLCRUnlockKey EQU 0xDF0D ; SLCR unlock key
SLCRL2cRamConfig EQU 0x00020202 ; SLCR L2C ram configuration
vector_base EQU _vector_table
FPEXC_EN EQU 0x40000000 ; FPU enable bit, (1 << 30)
SECTION .intvec:CODE:NOROOT(2)
; this initializes the various processor modes
_prestart
__iar_program_start
#if XPAR_CPU_ID==0
; only allow cp0 through
mrc p15,0,r1,c0,c0,5
and r1, r1, #0xf
cmp r1, #0
beq OKToRun
EndlessLoop0
wfe
b EndlessLoop0
#elif XPAR_CPU_ID==1
; only allow cp1 through
mrc p15,0,r1,c0,c0,5
and r1, r1, #0xf
cmp r1, #1
beq OKToRun
EndlessLoop1
wfe
b EndlessLoop1
#endif
OKToRun
ldr r0,=EFUSEStaus
ldr r1,[r0] ; Read eFuse to detect zynq silicon configuration
ands r1,r1,#0x80 ; Check whether cpu1 is disabled through eFuse
beq DualCPU
; cpu1 is disabled through eFuse,reset cpu1
ldr r0,=SLCRUnlockReg ; Load SLCR base address base + unlock register
ldr r1,=SLCRUnlockKey ; set unlock key
str r1, [r0] ; Unlock SLCR
ldr r0,=SLCRCPURSTReg
ldr r1,[r0] ; Read CPU Software Reset Control register
orr r1,r1,#0x22
str r1,[r0] ; Reset CPU1
ldr r0,=SLCRlockReg ; Load SLCR base address base + lock register
ldr r1,=SLCRlockKey ; set lock key
str r1, [r0] ; lock SLCR
DualCPU
mrc p15, 0, r0, c0, c0, 0 ; Get the revision
and r5, r0, #0x00f00000
and r6, r0, #0x0000000f
orr r6, r6, r5, lsr #20-4
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 ; only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 ; read diagnostic register
orrle r10, r10, #1 << 4 ; set bit #4
mcrle p15, 0, r10, c15, c0, 1 ; write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r5, #0x00200000 ; only present in r2p*
mrceq p15, 0, r10, c15, c0, 1 ; read diagnostic register
orreq r10, r10, #1 << 6 ; set bit #6
mcreq p15, 0, r10, c15, c0, 1 ; write diagnostic register
#endif
; set VBAR to the _vector_table address in linker script
ldr r0, =vector_base
mcr p15, 0, r0, c12, c0, 0
;invalidate scu
ldr r7, =0xf8f0000c
ldr r6, =0xffff
str r6, [r7]
;Invalidate caches and TLBs
mov r0,#0 ; r0 = 0
mcr p15, 0, r0, c8, c7, 0 ; invalidate TLBs
mcr p15, 0, r0, c7, c5, 0 ; invalidate icache
mcr p15, 0, r0, c7, c5, 6 ; Invalidate branch predictor array
bl invalidate_dcache ; invalidate dcache
; Disable MMU, if enabled
mrc p15, 0, r0, c1, c0, 0 ; read CP15 register 1
bic r0, r0, #0x1 ; clear bit 0
mcr p15, 0, r0, c1, c0, 0 ; write value back
#ifdef SHAREABLE_DDR
; Mark the entire DDR memory as shareable
ldr r3, =0x3ff ; 1024 entries to cover 1G DDR
ldr r0, =TblBase ; MMU Table address in memory
ldr r2, =0x15de6 ; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
shareable_loop
str r2, [r0] ; write the entry to MMU table
add r0, r0, #0x4 ; next entry in the table
add r2, r2, #0x100000 ; next section
subs r3, r3, #1
bge shareable_loop ; loop till 1G is covered
#endif
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the irq stack pointer
and r2, r1, r0
orr r2, r2, #0x12 ; IRQ mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(IRQ_STACK) ; IRQ stack pointer
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the supervisor stack pointer
and r2, r1, r0
orr r2, r2, #0x13 ; supervisor mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(SVC_STACK) ; Supervisor stack pointer
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Abort stack pointer
and r2, r1, r0
orr r2, r2, #0x17 ; Abort mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(ABT_STACK) ; Abort stack pointer
mrs r0, cpsr ; was cpsr, get the current PSR
mvn r1, #0x1f ; set up the FIQ stack pointer
and r2, r1, r0
orr r2, r2, #0x11 ; FIQ mode
msr cpsr, r2 ; was cpsr
ldr r13,=SFE(FIQ_STACK) ; FIQ stack pointer
mrs r0, cpsr ; was cpsr, get the current PSR
mvn r1, #0x1f ; set up the Undefine stack pointer
and r2, r1, r0
orr r2, r2, #0x1b ; Undefine mode
msr cpsr, r2 ; was cpsr
ldr r13,=SFE(UND_STACK) ; Undefine stack pointer
mrs r0, cpsr ; was cpsr, get the current PSR
mvn r1, #0x1f ; set up the system stack pointer
and r2, r1, r0
orr r2, r2, #0x1f ; SYS mode
msr cpsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=SFE(CSTACK) ; SYS stack pointer
;set scu enable bit in scu
ldr r7, =0xf8f00000
ldr r0, [r7]
orr r0, r0, #0x1
str r0, [r7]
; enable MMU and cache
ldr r0,=TblBase ; Load MMU translation table base
orr r0, r0, #0x5B ; Outer-cacheable, WB
mcr p15, 0, r0, c2, c0, 0 ; TTB0
mvn r0,#0 ; Load MMU domains -- all ones=manager
mcr p15,0,r0,c3,c0,0
; Enable mmu, icahce and dcache
ldr r0,=CRValMmuCac
mcr p15,0,r0,c1,c0,0 ; Enable cache and MMU
dsb ; dsb allow the MMU to start up
isb ; isb flush prefetch buffer
; Write to ACTLR
mrc p15, 0,r0, c1, c0, 1 ; Read ACTLR
orr r0, r0, #(0x01 << 6) ; SMP bit
orr r0, r0, #(0x01 ) ; Cache/TLB maintenance broadcast
mcr p15, 0,r0, c1, c0, 1 ; Write ACTLR
; Invalidate L2 Cache and initialize L2 Cache
; For AMP, assume running on CPU1. Don't initialize L2 Cache (up to Linux)
#if USE_AMP!=1
ldr r0,=L2CCCrtl ; Load L2CC base address base + control register
mov r1, #0 ; force the disable bit
str r1, [r0] ; disable the L2 Caches
ldr r0,=L2CCAuxCrtl ; Load L2CC base address base + Aux control register
ldr r1,[r0] ; read the register
ldr r2,=L2CCAuxControl ; set the default bits
orr r1,r1,r2
str r1, [r0] ; store the Aux Control Register
ldr r0,=L2CCTAGLatReg ; Load L2CC base address base + TAG Latency address
ldr r1,=L2CCTAGLatency ; set the latencies for the TAG
str r1, [r0] ; store the TAG Latency register Register
ldr r0,=L2CCDataLatReg ; Load L2CC base address base + Data Latency address
ldr r1,=L2CCDataLatency ; set the latencies for the Data
str r1, [r0] ; store the Data Latency register Register
ldr r0,=L2CCWay ; Load L2CC base address base + way register
ldr r2, =0xFFFF
str r2, [r0] ; force invalidate
ldr r0,=L2CCSync ; need to poll 0x730, PSS_L2CC_CACHE_SYNC_OFFSET
; Load L2CC base address base + sync register
; poll for completion
Sync
ldr r1, [r0]
cmp r1, #0
bne Sync
ldr r0,=L2CCIntRaw ; clear pending interrupts
ldr r1,[r0]
ldr r0,=L2CCIntClear
str r1,[r0]
ldr r0,=SLCRUnlockReg ; Load SLCR base address base + unlock register
ldr r1,=SLCRUnlockKey ; set unlock key
str r1, [r0] ; Unlock SLCR
ldr r0,=SLCRL2cRamReg ; Load SLCR base address base + l2c Ram Control register
str r1, [r0] ; store the L2c Ram Control Register
ldr r0,=SLCRlockReg ; Load SLCR base address base + lock register
ldr r1,=SLCRlockKey ; set lock key
str r1, [r0] ; lock SLCR
ldr r0,=L2CCCrtl ; Load L2CC base address base + control register
ldr r1,[r0] ; read the register
mov r2, #L2CCControl ; set the enable bit
orr r1,r1,r2
str r1, [r0] ; enable the L2 Caches
#endif
mov r0, r0
mrc p15, 0, r1, c1, c0, 2 ; read cp access control register (CACR) into r1
orr r1, r1, #(0xf << 20) ; enable full access for p10 & p11
mcr p15, 0, r1, c1, c0, 2 ; write back into CACR
; enable vfp
fmrx r1, FPEXC ; read the exception register
orr r1,r1, #FPEXC_EN ; set VFP enable bit, leave the others in orig state
fmxr FPEXC, r1 ; write back the exception register
mrc p15, 0, r0, c1, c0, 0 ; flow prediction enable
orr r0, r0, #(0x01 << 11) ; #0x8000
mcr p15,0,r0,c1,c0,0
mrc p15, 0, r0, c1, c0, 1 ; read Auxiliary Control Register
orr r0, r0, #(0x1 << 2) ; enable Dside prefetch
orr r0, r0, #(0x1 << 1) ; enable L2 prefetch
mcr p15, 0, r0, c1, c0, 1 ; write Auxiliary Control Register
; Initialize the vector table
;bl Xil_ExceptionInit
; Clear cp15 regs with unknown reset values
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 ; DFSR
mcr p15, 0, r0, c5, c0, 1 ; IFSR
mcr p15, 0, r0, c6, c0, 0 ; DFAR
mcr p15, 0, r0, c6, c0, 2 ; IFAR
mcr p15, 0, r0, c9, c13, 2 ; PMXEVCNTR
mcr p15, 0, r0, c13, c0, 2 ; TPIDRURW
mcr p15, 0, r0, c13, c0, 3 ; TPIDRURO
; Reset and start Cycle Counter
mov r2, #0x80000000 ; clear overflow
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd ; D, C, E
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 ; enable cycle counter
mcr p15, 0, r2, c9, c12, 1
; Reset and start Global Timer
mov r0, #0x0
mov r1, #0x0
bl XTime_SetTime
; Reset and start Triple Timer counter
#if defined SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
; make sure argc and argv are valid
mov r0, #0
mov r1, #0
b __cmain ; jump to C startup code
and r0, r0, r0 ; no op
Ldone b Ldone ; Paranoia: we should never get here
; *************************************************************************
; *
; * invalidate_dcache - invalidate the entire d-cache by set/way
; *
; * Note: for Cortex-A9, there is no cp instruction for invalidating
; * the whole D-cache. Need to invalidate each line.
; *
; *************************************************************************
invalidate_dcache
mrc p15, 1, r0, c0, c0, 1 ; read CLIDR
ands r3, r0, #0x7000000
mov r3, r3, lsr #23 ; cache level value (naturally aligned)
beq finished
mov r10, #0 ; start with level 0
loop1
add r2, r10, r10, lsr #1 ; work out 3xcachelevel
mov r1, r0, lsr r2 ; bottom 3 bits are the Cache type for this level
and r1, r1, #7 ; get those 3 bits alone
cmp r1, #2
blt skip ; no cache or only instruction cache at this level
mcr p15, 2, r10, c0, c0, 0 ; write the Cache Size selection register
isb ; isb to sync the change to the CacheSizeID reg
mrc p15, 1, r1, c0, c0, 0 ; reads current Cache Size ID register
and r2, r1, #7 ; extract the line length field
add r2, r2, #4 ; add 4 for the line length offset (log2 16 bytes)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 ; r4 is the max number on the way size (right aligned)
clz r5, r4 ; r5 is the bit position of the way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 ; r7 is the max number of the index size (right aligned)
loop2
mov r9, r4 ; r9 working copy of the max way size (right aligned)
loop3
orr r11, r10, r9, lsl r5 ; factor in the way number and cache number into r11
orr r11, r11, r7, lsl r2 ; factor in the index number
mcr p15, 0, r11, c7, c6, 2 ; invalidate by set/way
subs r9, r9, #1 ; decrement the way number
bge loop3
subs r7, r7, #1 ; decrement the index
bge loop2
skip
add r10, r10, #2 ; increment the cache number
cmp r3, r10
bgt loop1
finished
mov r10, #0 ; switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 ; select current cache level in cssr
dsb
isb
bx lr
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 17,391 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/boot.S | /******************************************************************************
* Copyright (c) 2010 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup a9_boot_code Cortex A9 Processor Boot Code
* @{
* <h2> boot.S </h2>
* The boot code performs minimum configuration which is required for an
* application to run starting from processor's reset state. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function.
*
* 1. Program vector table base for exception handling
* 2. Invalidate instruction cache, data cache and TLBs
* 3. Program stack pointer for various modes (IRQ, FIQ, supervisor, undefine,
* abort, system)
* 4. Configure MMU with short descriptor translation table format and program
* base address of translation table
* 5. Enable data cache, instruction cache and MMU
* 6. Enable Floating point unit
* 7. Transfer control to _start which clears BSS sections, initializes
* global timer and runs global constructor before jumping to main
* application
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.06a sgd 05/15/12 Updated L2CC Auxiliary and Tag RAM Latency control
* register settings.
* 3.06a asa 06/17/12 Modified the TTBR settings and L2 Cache auxiliary
* register settings.
* 3.07a asa 07/16/12 Modified the L2 Cache controller settings to improve
* performance. Changed the property of the ".boot"
* section.
* 3.07a sgd 08/21/12 Modified the L2 Cache controller and cp15 Aux Control
* Register settings
* 3.09a sgd 02/06/13 Updated SLCR l2c Ram Control register to a
* value of 0x00020202. Fix for CR 697094 (SI#687034).
* 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
* 'xil_errata.h' for errata description
* 4.2 pkp 06/19/14 Enabled asynchronous abort exception
* 5.0 pkp 16/15/14 Modified initialization code to enable scu after
* MMU is enabled
* 5.1 pkp 05/13/15 Changed the initialization order so to first invalidate
* caches and TLB, enable MMU and caches, then enable SMP
* bit in ACTLR. L2Cache invalidation and enabling of L2Cache
* is done later.
* 5.4 asa 12/6/15 Added code to initialize SPSR for all relevant modes.
* 6.0 mus 08/04/16 Added code to detect zynq-7000 base silicon configuration and
* attempt to enable dual core behavior on single cpu zynq-7000s
* devices is prevented from corrupting system behavior.
* 6.0 mus 08/24/16 Check CPU core before putting cpu1 to reset for single core
* zynq-7000s devices
*
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
#include "xil_errata.h"
.globl MMUTable
.global _prestart
.global _boot
.global __stack
.global __irq_stack
.global __supervisor_stack
.global __abort_stack
.global __fiq_stack
.global __undef_stack
.global _vector_table
.set PSS_L2CC_BASE_ADDR, 0xF8F02000
.set PSS_SLCR_BASE_ADDR, 0xF8000000
.set RESERVED, 0x0fffff00
.set TblBase , MMUTable
.set LRemap, 0xFE00000F /* set the base address of the peripheral block as not shared */
.set L2CCWay, (PSS_L2CC_BASE_ADDR + 0x077C) /*(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_INVLD_WAY_OFFSET)*/
.set L2CCSync, (PSS_L2CC_BASE_ADDR + 0x0730) /*(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_SYNC_OFFSET)*/
.set L2CCCrtl, (PSS_L2CC_BASE_ADDR + 0x0100) /*(PSS_L2CC_BASE_ADDR + PSS_L2CC_CNTRL_OFFSET)*/
.set L2CCAuxCrtl, (PSS_L2CC_BASE_ADDR + 0x0104) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_AUX_CNTRL_OFFSET)*/
.set L2CCTAGLatReg, (PSS_L2CC_BASE_ADDR + 0x0108) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_TAG_RAM_CNTRL_OFFSET)*/
.set L2CCDataLatReg, (PSS_L2CC_BASE_ADDR + 0x010C) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_DATA_RAM_CNTRL_OFFSET)*/
.set L2CCIntClear, (PSS_L2CC_BASE_ADDR + 0x0220) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_IAR_OFFSET)*/
.set L2CCIntRaw, (PSS_L2CC_BASE_ADDR + 0x021C) /*(PSS_L2CC_BASE_ADDR + XPSS_L2CC_ISR_OFFSET)*/
.set SLCRlockReg, (PSS_SLCR_BASE_ADDR + 0x04) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_LOCK_OFFSET)*/
.set SLCRUnlockReg, (PSS_SLCR_BASE_ADDR + 0x08) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_UNLOCK_OFFSET)*/
.set SLCRL2cRamReg, (PSS_SLCR_BASE_ADDR + 0xA1C) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_L2C_RAM_OFFSET)*/
.set SLCRCPURSTReg, (0xF8000000 + 0x244) /*(XPS_SYS_CTRL_BASEADDR + A9_CPU_RST_CTRL_OFFSET)*/
.set EFUSEStaus, (0xF800D000 + 0x10) /*(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)*/
/* workaround for simulation not working when L1 D and I caches,MMU and L2 cache enabled - DT568997 */
.if SIM_MODE == 1
.set CRValMmuCac, 0b00000000000000 /* Disable IDC, and MMU */
.else
.set CRValMmuCac, 0b01000000000101 /* Enable IDC, and MMU */
.endif
.set CRValHiVectorAddr, 0b10000000000000 /* Set the Vector address to high, 0xFFFF0000 */
.set L2CCAuxControl, 0x72360000 /* Enable all prefetching, Cache replacement policy, Parity enable,
Event monitor bus enable and Way Size (64 KB) */
.set L2CCControl, 0x01 /* Enable L2CC */
.set L2CCTAGLatency, 0x0111 /* latency for TAG RAM */
.set L2CCDataLatency, 0x0121 /* latency for DATA RAM */
.set SLCRlockKey, 0x767B /* SLCR lock key */
.set SLCRUnlockKey, 0xDF0D /* SLCR unlock key */
.set SLCRL2cRamConfig, 0x00020202 /* SLCR L2C ram configuration */
/* Stack Pointer locations for boot code */
.set Undef_stack, __undef_stack
.set FIQ_stack, __fiq_stack
.set Abort_stack, __abort_stack
.set SPV_stack, __supervisor_stack
.set IRQ_stack, __irq_stack
.set SYS_stack, __stack
.set vector_base, _vector_table
.set FPEXC_EN, 0x40000000 /* FPU enable bit, (1 << 30) */
.section .boot,"ax"
/* this initializes the various processor modes */
_prestart:
_boot:
#if XPAR_CPU_ID==0
/* only allow cpu0 through */
mrc p15,0,r1,c0,c0,5
and r1, r1, #0xf
cmp r1, #0
beq CheckEFUSE
EndlessLoop0:
wfe
b EndlessLoop0
CheckEFUSE:
ldr r0,=EFUSEStaus
ldr r1,[r0] /* Read eFuse setting */
ands r1,r1,#0x80 /* Check whether device is having single core */
beq OKToRun
/* single core device, reset cpu1 */
ldr r0,=SLCRUnlockReg /* Load SLCR base address base + unlock register */
ldr r1,=SLCRUnlockKey /* set unlock key */
str r1, [r0] /* Unlock SLCR */
ldr r0,=SLCRCPURSTReg
ldr r1,[r0] /* Read CPU Software Reset Control register */
orr r1,r1,#0x22
str r1,[r0] /* Reset CPU1 */
ldr r0,=SLCRlockReg /* Load SLCR base address base + lock register */
ldr r1,=SLCRlockKey /* set lock key */
str r1, [r0] /* lock SLCR */
#elif XPAR_CPU_ID==1
/* only allow cpu1 through */
mrc p15,0,r1,c0,c0,5
and r1, r1, #0xf
cmp r1, #1
beq CheckEFUSE1
b EndlessLoop1
CheckEFUSE1:
ldr r0,=EFUSEStaus
ldr r1,[r0] /* Read eFuse setting */
ands r1,r1,#0x80 /* Check whether device is having single core */
beq OKToRun
EndlessLoop1:
wfe
b EndlessLoop1
#endif
OKToRun:
mrc p15, 0, r0, c0, c0, 0 /* Get the revision */
and r5, r0, #0x00f00000
and r6, r0, #0x0000000f
orr r6, r6, r5, lsr #20-4
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 /* only present up to r2p2 */
mrcle p15, 0, r10, c15, c0, 1 /* read diagnostic register */
orrle r10, r10, #1 << 4 /* set bit #4 */
mcrle p15, 0, r10, c15, c0, 1 /* write diagnostic register */
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r5, #0x00200000 /* only present in r2p* */
mrceq p15, 0, r10, c15, c0, 1 /* read diagnostic register */
orreq r10, r10, #1 << 6 /* set bit #6 */
mcreq p15, 0, r10, c15, c0, 1 /* write diagnostic register */
#endif
/* set VBAR to the _vector_table address in linker script */
ldr r0, =vector_base
mcr p15, 0, r0, c12, c0, 0
/*invalidate scu*/
ldr r7, =0xf8f0000c
ldr r6, =0xffff
str r6, [r7]
/* Invalidate caches and TLBs */
mov r0,#0 /* r0 = 0 */
mcr p15, 0, r0, c8, c7, 0 /* invalidate TLBs */
mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
mcr p15, 0, r0, c7, c5, 6 /* Invalidate branch predictor array */
bl invalidate_dcache /* invalidate dcache */
/* Disable MMU, if enabled */
mrc p15, 0, r0, c1, c0, 0 /* read CP15 register 1 */
bic r0, r0, #0x1 /* clear bit 0 */
mcr p15, 0, r0, c1, c0, 0 /* write value back */
#ifdef SHAREABLE_DDR
/* Mark the entire DDR memory as shareable */
ldr r3, =0x3ff /* 1024 entries to cover 1G DDR */
ldr r0, =TblBase /* MMU Table address in memory */
ldr r2, =0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
shareable_loop:
str r2, [r0] /* write the entry to MMU table */
add r0, r0, #0x4 /* next entry in the table */
add r2, r2, #0x100000 /* next section */
subs r3, r3, #1
bge shareable_loop /* loop till 1G is covered */
#endif
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the irq stack pointer */
and r2, r1, r0
orr r2, r2, #0x12 /* IRQ mode */
msr cpsr, r2
ldr r13,=IRQ_stack /* IRQ stack pointer */
bic r2, r2, #(0x1 << 9) /* Set EE bit to little-endian */
msr spsr_fsxc,r2
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the supervisor stack pointer */
and r2, r1, r0
orr r2, r2, #0x13 /* supervisor mode */
msr cpsr, r2
ldr r13,=SPV_stack /* Supervisor stack pointer */
bic r2, r2, #(0x1 << 9) /* Set EE bit to little-endian */
msr spsr_fsxc,r2
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Abort stack pointer */
and r2, r1, r0
orr r2, r2, #0x17 /* Abort mode */
msr cpsr, r2
ldr r13,=Abort_stack /* Abort stack pointer */
bic r2, r2, #(0x1 << 9) /* Set EE bit to little-endian */
msr spsr_fsxc,r2
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the FIQ stack pointer */
and r2, r1, r0
orr r2, r2, #0x11 /* FIQ mode */
msr cpsr, r2
ldr r13,=FIQ_stack /* FIQ stack pointer */
bic r2, r2, #(0x1 << 9) /* Set EE bit to little-endian */
msr spsr_fsxc,r2
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Undefine stack pointer */
and r2, r1, r0
orr r2, r2, #0x1b /* Undefine mode */
msr cpsr, r2
ldr r13,=Undef_stack /* Undefine stack pointer */
bic r2, r2, #(0x1 << 9) /* Set EE bit to little-endian */
msr spsr_fsxc,r2
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the system stack pointer */
and r2, r1, r0
orr r2, r2, #0x1F /* SYS mode */
msr cpsr, r2
ldr r13,=SYS_stack /* SYS stack pointer */
/*set scu enable bit in scu*/
ldr r7, =0xf8f00000
ldr r0, [r7]
orr r0, r0, #0x1
str r0, [r7]
/* enable MMU and cache */
ldr r0,=TblBase /* Load MMU translation table base */
orr r0, r0, #0x5B /* Outer-cacheable, WB */
mcr 15, 0, r0, c2, c0, 0 /* TTB0 */
mvn r0,#0 /* Load MMU domains -- all ones=manager */
mcr p15,0,r0,c3,c0,0
/* Enable mmu, icahce and dcache */
ldr r0,=CRValMmuCac
mcr p15,0,r0,c1,c0,0 /* Enable cache and MMU */
dsb /* dsb allow the MMU to start up */
isb /* isb flush prefetch buffer */
/* Write to ACTLR */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR*/
orr r0, r0, #(0x01 << 6) /* set SMP bit */
orr r0, r0, #(0x01 ) /* Cache/TLB maintenance broadcast */
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
/* Invalidate L2 Cache and enable L2 Cache*/
/* For AMP, assume running on CPU1. Don't initialize L2 Cache (up to Linux) */
#if USE_AMP!=1
ldr r0,=L2CCCrtl /* Load L2CC base address base + control register */
mov r1, #0 /* force the disable bit */
str r1, [r0] /* disable the L2 Caches */
ldr r0,=L2CCAuxCrtl /* Load L2CC base address base + Aux control register */
ldr r1,[r0] /* read the register */
ldr r2,=L2CCAuxControl /* set the default bits */
orr r1,r1,r2
str r1, [r0] /* store the Aux Control Register */
ldr r0,=L2CCTAGLatReg /* Load L2CC base address base + TAG Latency address */
ldr r1,=L2CCTAGLatency /* set the latencies for the TAG*/
str r1, [r0] /* store the TAG Latency register Register */
ldr r0,=L2CCDataLatReg /* Load L2CC base address base + Data Latency address */
ldr r1,=L2CCDataLatency /* set the latencies for the Data*/
str r1, [r0] /* store the Data Latency register Register */
ldr r0,=L2CCWay /* Load L2CC base address base + way register*/
ldr r2, =0xFFFF
str r2, [r0] /* force invalidate */
ldr r0,=L2CCSync /* need to poll 0x730, PSS_L2CC_CACHE_SYNC_OFFSET */
/* Load L2CC base address base + sync register*/
/* poll for completion */
Sync: ldr r1, [r0]
cmp r1, #0
bne Sync
ldr r0,=L2CCIntRaw /* clear pending interrupts */
ldr r1,[r0]
ldr r0,=L2CCIntClear
str r1,[r0]
ldr r0,=SLCRUnlockReg /* Load SLCR base address base + unlock register */
ldr r1,=SLCRUnlockKey /* set unlock key */
str r1, [r0] /* Unlock SLCR */
ldr r0,=SLCRL2cRamReg /* Load SLCR base address base + l2c Ram Control register */
ldr r1,=SLCRL2cRamConfig /* set the configuration value */
str r1, [r0] /* store the L2c Ram Control Register */
ldr r0,=SLCRlockReg /* Load SLCR base address base + lock register */
ldr r1,=SLCRlockKey /* set lock key */
str r1, [r0] /* lock SLCR */
ldr r0,=L2CCCrtl /* Load L2CC base address base + control register */
ldr r1,[r0] /* read the register */
mov r2, #L2CCControl /* set the enable bit */
orr r1,r1,r2
str r1, [r0] /* enable the L2 Caches */
#endif
mov r0, r0
mrc p15, 0, r1, c1, c0, 2 /* read cp access control register (CACR) into r1 */
orr r1, r1, #(0xf << 20) /* enable full access for p10 & p11 */
mcr p15, 0, r1, c1, c0, 2 /* write back into CACR */
/* enable vfp */
fmrx r1, FPEXC /* read the exception register */
orr r1,r1, #FPEXC_EN /* set VFP enable bit, leave the others in orig state */
fmxr FPEXC, r1 /* write back the exception register */
mrc p15,0,r0,c1,c0,0 /* flow prediction enable */
orr r0, r0, #(0x01 << 11) /* #0x8000 */
mcr p15,0,r0,c1,c0,0
mrc p15,0,r0,c1,c0,1 /* read Auxiliary Control Register */
orr r0, r0, #(0x1 << 2) /* enable Dside prefetch */
orr r0, r0, #(0x1 << 1) /* enable L2 Prefetch hint */
mcr p15,0,r0,c1,c0,1 /* write Auxiliary Control Register */
mrs r0, cpsr /* get the current PSR */
bic r0, r0, #0x100 /* enable asynchronous abort exception */
msr cpsr_xsf, r0
b _start /* jump to C startup code */
and r0, r0, r0 /* no op */
.Ldone: b .Ldone /* Paranoia: we should never get here */
/*
*************************************************************************
*
* invalidate_dcache - invalidate the entire d-cache by set/way
*
* Note: for Cortex-A9, there is no cp instruction for invalidating
* the whole D-cache. Need to invalidate each line.
*
*************************************************************************
*/
invalidate_dcache:
mrc p15, 1, r0, c0, c0, 1 /* read CLIDR */
ands r3, r0, #0x7000000
mov r3, r3, lsr #23 /* cache level value (naturally aligned) */
beq finished
mov r10, #0 /* start with level 0 */
loop1:
add r2, r10, r10, lsr #1 /* work out 3xcachelevel */
mov r1, r0, lsr r2 /* bottom 3 bits are the Cache type for this level */
and r1, r1, #7 /* get those 3 bits alone */
cmp r1, #2
blt skip /* no cache or only instruction cache at this level */
mcr p15, 2, r10, c0, c0, 0 /* write the Cache Size selection register */
isb /* isb to sync the change to the CacheSizeID reg */
mrc p15, 1, r1, c0, c0, 0 /* reads current Cache Size ID register */
and r2, r1, #7 /* extract the line length field */
add r2, r2, #4 /* add 4 for the line length offset (log2 16 bytes) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* r4 is the max number on the way size (right aligned) */
clz r5, r4 /* r5 is the bit position of the way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* r7 is the max number of the index size (right aligned) */
loop2:
mov r9, r4 /* r9 working copy of the max way size (right aligned) */
loop3:
orr r11, r10, r9, lsl r5 /* factor in the way number and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor in the index number */
mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
subs r9, r9, #1 /* decrement the way number */
bge loop3
subs r7, r7, #1 /* decrement the index */
bge loop2
skip:
add r10, r10, #2 /* increment the cache number */
cmp r3, r10
bgt loop1
finished:
mov r10, #0 /* switch back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
bx lr
.end
/**
* @} End of "addtogroup a9_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,892 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A9 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Save lr when profiling is enabled
* 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
* 'xil_errata.h' for errata description
* 4.00a pkp 22/01/14 Modified return addresses for interrupt
* handlers (DataAbortHandler and SVCHandler)
* to fix CR#767251
* 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
* abort and prefetch abort into DataAbortAddr and
* PrefetchAbortAddr for further use to fix CR#854523
* 5.4 pkp 12/03/15 Added handler for undefined exception
* 6.8 mus 04/27/18 Removed __ARM_NEON__ flag definition. Now,
* saving/restoring of of HW floating point register
* would be done through newly introduced flag
* FPU_HARD_FLOAT_ABI_ENABLED. This new flag will be
* configured based on the -mfpu-abi option in extra
* compiler flags.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xil_errata.h"
#include "bspconfig.h"
.org 0
.text
.globl _vector_table
.section .vectors
_vector_table:
B _boot
B Undefined
B SVCHandler
B PrefetchAbortHandler
B DataAbortHandler
NOP /* Placeholder for address exception vector*/
B IRQHandler
B FIQHandler
IRQHandler: /* IRQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code*/
#if FPU_HARD_FLOAT_ABI_ENABLED
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
#ifdef PROFILING
ldr r2, =prof_pc
subs r3, lr, #0
str r3, [r2]
#endif
bl IRQInterrupt /* IRQ vector */
#if FPU_HARD_FLOAT_ABI_ENABLED
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
#if FPU_HARD_FLOAT_ABI_ENABLED
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
FIQLoop:
bl FIQInterrupt /* FIQ vector */
#if FPU_HARD_FLOAT_ABI_ENABLED
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] /* Store address of instruction causing undefined exception */
bl UndefinedException /* UndefinedException: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr
SVCHandler: /* SWI handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
tst r0, #0x20 /* check the T bit */
ldrneh r0, [lr,#-2] /* Thumb mode */
bicne r0, r0, #0xff00 /* Thumb mode */
ldreq r0, [lr,#-4] /* ARM mode */
biceq r0, r0, #0xff000000 /* ARM mode */
bl SWInterrupt /* SWInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr /*return to the next instruction after the SWI instruction */
DataAbortHandler: /* Data Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] /* Stores instruction causing data abort */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* points to the instruction that caused the Data Abort exception */
PrefetchAbortHandler: /* Prefetch Abort handler */
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] /* Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* points to the instruction that caused the Prefetch Abort exception */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,023 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/translation_table.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a9_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A9. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq architecture. It
* utilizes short descriptor translation table format with each section defining
* 1MB of memory.
*
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------------|-------------------------|-----------------------------------|
*| DDR | 0x00000000 - 0x3FFFFFFF | Normal write-back Cacheable |
*| PL | 0x40000000 - 0xBFFFFFFF | Strongly Ordered |
*| Reserved | 0xC0000000 - 0xDFFFFFFF | Unassigned |
*| Memory mapped devices | 0xE0000000 - 0xE02FFFFF | Device Memory |
*| Reserved | 0xE0300000 - 0xE0FFFFFF | Unassigned |
*| NAND, NOR | 0xE1000000 - 0xE3FFFFFF | Device memory |
*| SRAM | 0xE4000000 - 0xE5FFFFFF | Normal write-back Cacheable |
*| Reserved | 0xE6000000 - 0xF7FFFFFF | Unassigned |
*| AMBA APB Peripherals | 0xF8000000 - 0xF8FFFFFF | Device Memory |
*| Reserved | 0xF9000000 - 0xFBFFFFFF | Unassigned |
*| Linear QSPI - XIP | 0xFC000000 - 0xFDFFFFFF | Normal write-through cacheable |
*| Reserved | 0xFE000000 - 0xFFEFFFFF | Unassigned |
*| OCM | 0xFFF00000 - 0xFFFFFFFF | Normal inner write-back cacheable |
*
* @note
*
* For region 0x00000000 - 0x3FFFFFFF, a system where DDR is less than 1GB,
* region after DDR and before PL is marked as undefined/reserved in translation
* table. In 0xF8000000 - 0xF8FFFFFF, 0xF8000C00 - 0xF8000FFF, 0xF8010000 -
* 0xF88FFFFF and 0xF8F03000 to 0xF8FFFFFF are reserved but due to granual size
* of 1MB, it is not possible to define separate regions for them. For region
* 0xFFF00000 - 0xFFFFFFFF, 0xFFF00000 to 0xFFFB0000 is reserved but due to 1MB
* granual size, it is not possible to define separate region for it
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 1.00a ecm 10/20/09 Initial version
* 3.04a sdm 01/13/12 Updated MMU table to mark DDR memory as Shareable
* 3.07a sgd 07/05/2012 Configuring device address spaces as shareable device
* instead of strongly-ordered.
* 3.07a asa 07/17/2012 Changed the property of the ".mmu_tbl" section.
* 4.2 pkp 09/02/2014 added entries for 0xfe000000 to 0xffefffff as reserved
* and 0xe0000000 - 0xe1ffffff is broken down into
* 0xe0000000 - 0xe02fffff (memory mapped divides)
* 0xe0300000 - 0xe0ffffff (reserved) and
* 0xe1000000 - 0xe1ffffff (NAND)
* 5.2 pkp 06/08/2015 put a check for XPAR_PS7_DDR_0_S_AXI_BASEADDR to confirm
* if DDR is present or not and accordingly generate the
* translation table
* 6.1 pkp 07/11/2016 Corrected comments for memory attributes
* 6.8 mus 07/12/2018 Mark DDR memory as inner cacheable, if BSP is built
* with the USE_AMP flag.
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
.globl MMUTable
.section .mmu_tbl,"a"
MMUTable:
/* Each table entry occupies one 32-bit word and there are
* 4096 entries, so the entire table takes up 16KB.
* Each entry covers a 1MB section.
*/
.set SECT, 0
#ifdef XPAR_PS7_DDR_0_S_AXI_BASEADDR
.set DDR_START, XPAR_PS7_DDR_0_S_AXI_BASEADDR
.set DDR_END, XPAR_PS7_DDR_0_S_AXI_HIGHADDR
.set DDR_SIZE, (DDR_END - DDR_START)+1
.set DDR_REG, DDR_SIZE/0x100000
#else
.set DDR_REG, 0
#endif
.set UNDEF_REG, 0x3FF - DDR_REG
#ifndef USE_AMP
/*0x00000000 - 0x00100000 (inner and outer cacheable )*/
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
#else
/*0x00000000 - 0x00100000 (inner cacheable )*/
.word SECT + 0x14de6 /* S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b1 */
#endif
.set SECT, SECT+0x100000
.rept DDR_REG /* (DDR Cacheable) */
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept UNDEF_REG /* (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0400 /* 0x40000000 - 0x7fffffff (FPGA slave0) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0400 /* 0x80000000 - 0xbfffffff (FPGA slave1) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0xc0000000 - 0xdfffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x003 /* 0xe0000000 - 0xe02fffff (Memory mapped devices)
* UART/USB/IIC/SPI/CAN/GEM/GPIO/QSPI/SD/NAND */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0D /* 0xe0300000 - 0xe0ffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0010 /* 0xe1000000 - 0xe1ffffff (NAND) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0020 /* 0xe2000000 - 0xe3ffffff (NOR) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0020 /* 0xe4000000 - 0xe5ffffff (SRAM) */
.word SECT + 0xc0e /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0120 /* 0xe6000000 - 0xf7ffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
/* 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
0xf8f03000 to 0xf8ffffff are reserved but due to granual size of
1MB, it is not possible to define separate regions for them */
.rept 0x0010 /* 0xf8000000 - 0xf8ffffff (AMBA APB Peripherals) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0030 /* 0xf9000000 - 0xfbffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0020 /* 0xfc000000 - 0xfdffffff (Linear QSPI - XIP) */
.word SECT + 0xc0a /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x001F /* 0xfe000000 - 0xffefffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
/* 0xfff00000 to 0xfffb0000 is reserved but due to granual size of
1MB, it is not possible to define separate region for it
0xfff00000 - 0xffffffff
256K OCM when mapped to high address space
inner-cacheable */
.word SECT + 0x4c0e /* S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1 */
.set SECT, SECT+0x100000
.end
/**
* @} End of "addtogroup a9_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,033 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/xil-crt0.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 1.00a ecm 10/20/09 Initial version
* 3.05a sdm 02/02/12 Added code for profiling
* 3.06a sgd 05/16/12 Added global constructors and cleanup code
* Uart initialization based on compiler flag
* 3.07a sgd 07/05/12 Updated with reset and start Global Timer
* 3.07a sgd 10/19/12 SMC NOR and SRAM initialization with build option
* 4.2 pkp 08/04/14 Removed PEEP board related code which contained
* initialization of uart smc nor and sram
* 5.3 pkp 10/07/15 Added support for OpenAMP by not initializing global
* timer when USE_AMP flag is defined
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "bspconfig.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
.Lsbss_start:
.long __sbss_start
.Lsbss_end:
.long __sbss_end
.Lbss_start:
.long __bss_start
.Lbss_end:
.long __bss_end
.Lstack:
.long __stack
.globl _start
_start:
bl __cpu_init /* Initialize the CPU first (BSP provides this) */
mov r0, #0
/* clear sbss */
ldr r1,.Lsbss_start /* calculate beginning of the SBSS */
ldr r2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp r1,r2
bge .Lenclsbss /* If no SBSS, no clearing required */
str r0, [r1], #4
b .Lloop_sbss
.Lenclsbss:
/* clear bss */
ldr r1,.Lbss_start /* calculate beginning of the BSS */
ldr r2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp r1,r2
bge .Lenclbss /* If no BSS, no clearing required */
str r0, [r1], #4
b .Lloop_bss
.Lenclbss:
/* set stack pointer */
ldr r13,.Lstack /* stack address */
/* Reset and start Global Timer */
mov r0, #0x0
mov r1, #0x0
/* Reset and start Triple Timer Counter */
#if defined SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
#if USE_AMP != 1
bl XTime_SetTime
#endif
#ifdef PROFILING /* defined in Makefile */
/* Setup profiling stuff */
bl _profile_init
#endif /* PROFILING */
/* run global constructors */
bl __libc_init_array
/* make sure argc and argv are valid */
mov r0, #0
mov r1, #0
/* Let her rip */
bl main
/* Cleanup global constructors */
bl __libc_fini_array
#ifdef PROFILING
/* Cleanup profiling stuff */
bl _profile_clean
#endif /* PROFILING */
/* All done */
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _start,.Lstart-_start
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,555 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/gcc/cpu_init.S | /******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file cpu_init.s
*
* This file contains CPU specific initialization. Invoked from main CRT
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 1.00a ecm/sdm 10/20/09 Initial version
* 3.04a sdm 01/02/12 Updated to clear cp15 regs with unknown reset values
* 5.0 pkp 12/16/14 removed incorrect initialization of TLB lockdown
* register to fix CR#830580
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.text
.global __cpu_init
.align 2
__cpu_init:
/* Clear cp15 regs with unknown reset values */
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 /* DFSR */
mcr p15, 0, r0, c5, c0, 1 /* IFSR */
mcr p15, 0, r0, c6, c0, 0 /* DFAR */
mcr p15, 0, r0, c6, c0, 2 /* IFAR */
mcr p15, 0, r0, c9, c13, 2 /* PMXEVCNTR */
mcr p15, 0, r0, c13, c0, 2 /* TPIDRURW */
mcr p15, 0, r0, c13, c0, 3 /* TPIDRURO */
/* Reset and start Cycle Counter */
mov r2, #0x80000000 /* clear overflow */
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd /* D, C, E */
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 /* enable cycle counter */
mcr p15, 0, r2, c9, c12, 1
bx lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,043 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/armcc/asm_vectors.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file asm_vectors.s
;
; This file contains the initial vector table for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a ecm/sdm 10/20/09 Initial version
; 3.11a asa 9/17/13 Added support for neon.
; 4.00 pkp 01/22/14 Modified return addresses for interrupt
; handlers
; 5.1 pkp 05/13/15 Saved the addresses of instruction causing data
; abort and prefetch abort into DataAbortAddr and
; PrefetchAbortAddr for further use to fix CR#854523
; 5.4 pkp 12/03/15 Added handler for undefined exception
;</pre>
;
; @note
;
; None.
;
;****************************************************************************
EXPORT _vector_table
EXPORT IRQHandler
IMPORT _boot
IMPORT _prestart
IMPORT IRQInterrupt
IMPORT FIQInterrupt
IMPORT SWInterrupt
IMPORT DataAbortInterrupt
IMPORT PrefetchAbortInterrupt
IMPORT UndefinedException
IMPORT DataAbortAddr
IMPORT PrefetchAbortAddr
IMPORT UndefinedExceptionAddr
AREA |.vectors|, CODE
REQUIRE8 {TRUE}
PRESERVE8 {TRUE}
ENTRY ; define this as an entry point
_vector_table
B _boot
B Undefined
B SVCHandler
B PrefetchAbortHandler
B DataAbortHandler
NOP ; Placeholder for address exception vector
B IRQHandler
B FIQHandler
IRQHandler ; IRQ vector handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
bl IRQInterrupt ; IRQ vector
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
FIQHandler ; FIQ vector handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
FIQLoop
bl FIQInterrupt ; FIQ vector
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
Undefined ; Undefined handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =UndefinedExceptionAddr
sub r1, lr,#4
str r1, [r0] ; Address of instruction causing undefined exception
bl UndefinedException ; UndefinedException: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
movs pc, lr
SVCHandler ; SWI handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
tst r0, #0x20 ; check the T bit
ldrneh r0, [lr,#-2] ; Thumb mode
bicne r0, r0, #0xff00 ; Thumb mode
ldreq r0, [lr,#-4] ; ARM mode
biceq r0, r0, #0xff000000 ; ARM mode
bl SWInterrupt ; SWInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
movs pc, lr ; adjust return
DataAbortHandler ; Data Abort handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =DataAbortAddr
sub r1, lr,#8
str r1, [r0] ;Address of instruction causing data abort
bl DataAbortInterrupt ;DataAbortInterrupt :call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #8 ; adjust return
PrefetchAbortHandler ; Prefetch Abort handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =PrefetchAbortAddr
sub r1, lr,#4
str r1, [r0] ;Address of instruction causing prefetch abort
bl PrefetchAbortInterrupt ; PrefetchAbortInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,820 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/armcc/boot.S | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file boot.S
;
; This file contains the initial startup code for the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 1.00a ecm/sdm 10/20/09 Initial version
; 3.04a sdm 01/02/12 Updated to clear cp15 regs with unknown reset values
; 3.06a sgd 05/15/12 Updated L2CC Auxiliary and Tag RAM Latency control
; register settings.
; 3.06a asa 06/17/12 Modified the TTBR settings and L2 Cache auxiliary
; register settings.
; 3.07a sgd 07/05/12 Updated with reset and start Global Timer
; 3.07a sgd 10/19/12 SMC NOR and SRAM initialization with build option
; 4.2 pkp 06/19/14 Enabled asynchronous abort exception
; 4.2 pkp 08/04/14 Removed PEEP board related code which contained
; initialization of uart smc nor and sram
; 5.0 pkp 16/12/14 Modified initialization code to enable scu after
; MMU is enabled and removed incorrect initialization
; of TLB lockdown register to fix CR#830580
; 5.1 pkp 05/13/15 Changed the initialization order so to first invalidate
; caches and TLB, enable MMU and caches, then enable SMP
; bit in ACTLR. L2Cache invalidation and enabling of L2Cache
; is done later.
; 5.4 asa 12/06/15 Added code to initialize SPSR for all relevant modes.
; 6.0 mus 04/08/16 Added code to detect zynq-7000 base silicon configuration and
; attempt to enable dual core behavior on single cpu zynq-7000s devices
; is prevented from corrupting system behavior.
; 6.0 mus 24/08/16 Check CPU core before putting cpu1 to reset for single core
; zynq-7000s devices
; 6.6 srm 10/25/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
#include "xparameters.h"
#include "xil_errata.h"
#define UART_BAUDRATE 115200
EXPORT _prestart
EXPORT _boot
IMPORT |Image$$ARM_LIB_STACK$$ZI$$Limit|
IMPORT |Image$$IRQ_STACK$$ZI$$Limit|
IMPORT |Image$$SPV_STACK$$ZI$$Limit|
IMPORT |Image$$ABORT_STACK$$ZI$$Limit|
IMPORT MMUTable
IMPORT _vector_table
IMPORT __main
IMPORT Xil_ExceptionInit
IMPORT XTime_SetTime
#if defined SLEEP_TIMER_BASEADDR
IMPORT XTime_StartTTCTimer
#endif
PSS_L2CC_BASE_ADDR EQU 0xF8F02000
PSS_SLCR_BASE_ADDR EQU 0xF8000000
L2CCWay EQU (PSS_L2CC_BASE_ADDR + 0x077C) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_INVLD_WAY_OFFSET)
L2CCSync EQU (PSS_L2CC_BASE_ADDR + 0x0730) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CACHE_SYNC_OFFSET)
L2CCCrtl EQU (PSS_L2CC_BASE_ADDR + 0x0100) ;(PSS_L2CC_BASE_ADDR + PSS_L2CC_CNTRL_OFFSET)
L2CCAuxCrtl EQU (PSS_L2CC_BASE_ADDR + 0x0104) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_AUX_CNTRL_OFFSET)
L2CCTAGLatReg EQU (PSS_L2CC_BASE_ADDR + 0x0108) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_TAG_RAM_CNTRL_OFFSET)
L2CCDataLatReg EQU (PSS_L2CC_BASE_ADDR + 0x010C) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_DATA_RAM_CNTRL_OFFSET)
L2CCIntClear EQU (PSS_L2CC_BASE_ADDR + 0x0220) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_IAR_OFFSET)
L2CCIntRaw EQU (PSS_L2CC_BASE_ADDR + 0x021C) ;(PSS_L2CC_BASE_ADDR + XPSS_L2CC_ISR_OFFSET)
SLCRlockReg EQU (PSS_SLCR_BASE_ADDR + 0x04) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_LOCK_OFFSET)*/
SLCRUnlockReg EQU (PSS_SLCR_BASE_ADDR + 0x08) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_UNLOCK_OFFSET)*/
SLCRL2cRamReg EQU (PSS_SLCR_BASE_ADDR + 0xA1C) /*(PSS_SLCR_BASE_ADDR + XPSS_SLCR_L2C_RAM_OFFSET)*/
SLCRCPURSTReg EQU (0xF8000000 + 0x244) ;(XPS_SYS_CTRL_BASEADDR + A9_CPU_RST_CTRL_OFFSET)
EFUSEStaus EQU (0xF800D000 + 0x10) ;(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)
SLCRlockKey EQU 0x767B /* SLCR lock key */
SLCRUnlockKey EQU 0xDF0D /* SLCR unlock key */
SLCRL2cRamConfig EQU 0x00020202 /* SLCR L2C ram configuration */
CRValMmuCac EQU 2_01000000000101 ; Enable IDC, and MMU
CRValHiVectorAddr EQU 2_10000000000000 ; Set the Vector address to high, 0xFFFF0000
L2CCAuxControl EQU 0x72360000 ; Enable all prefetching, Way Size (16 KB) and High Priority for SO and Dev Reads Enable
L2CCControl EQU 0x01 ; Enable L2CC
L2CCTAGLatency EQU 0x0111 ; 7 Cycles of latency for TAG RAM
L2CCDataLatency EQU 0x0121 ; 7 Cycles of latency for DATA RAM
FPEXC_EN EQU 0x40000000 ; FPU enable bit, (1 << 30)
AREA |.boot|, CODE
PRESERVE8
; this initializes the various processor modes
_prestart
_boot
#if XPAR_CPU_ID==0
; only allow cp0 through
mrc p15,0,r1,c0,c0,5
and r1, r1, #0xf
cmp r1, #0
beq CheckEFUSE
EndlessLoop0
wfe
b EndlessLoop0
CheckEFUSE
ldr r0,=EFUSEStaus
ldr r1,[r0] ; Read eFuse setting
ands r1,r1,#0x80 ; Check whether device is having single core
beq OKToRun
; Single core device, reset CPU1
ldr r0,=SLCRUnlockReg ; Load SLCR base address base + unlock register
ldr r1,=SLCRUnlockKey ; set unlock key
str r1, [r0] ; Unlock SLCR
ldr r0,=SLCRCPURSTReg
ldr r1,[r0] ; Read CPU Software Reset Control register
orr r1,r1,#0x22
str r1,[r0] ; Reset CPU1
ldr r0,=SLCRlockReg ; Load SLCR base address base + lock register
ldr r1,=SLCRlockKey ; set lock key
str r1, [r0] ; lock SLCR
#elif XPAR_CPU_ID==1
; only allow cpu1 through
mrc p15,0,r1,c0,c0,5
and r1, r1, #0xf
cmp r1, #1
beq CheckEFUSE1
b EndlessLoop1
CheckEFUSE1
ldr r0,=EFUSEStaus
ldr r1,[r0] ; Read eFuse setting
ands r1,r1,#0x80 ; Check whether device is having single core
beq OKToRun
EndlessLoop1
wfe
b EndlessLoop1
#endif
OKToRun
mrc p15, 0, r0, c0, c0, 0 /* Get the revision */
and r5, r0, #0x00f00000
and r6, r0, #0x0000000f
orr r6, r6, r5, lsr #20-4
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 /* only present up to r2p2 */
mrcle p15, 0, r10, c15, c0, 1 /* read diagnostic register */
orrle r10, r10, #1 << 4 /* set bit #4 */
mcrle p15, 0, r10, c15, c0, 1 /* write diagnostic register */
#endif
#ifdef CONFIG_ARM_ERRATA_743622
teq r5, #0x00200000 /* only present in r2p* */
mrceq p15, 0, r10, c15, c0, 1 /* read diagnostic register */
orreq r10, r10, #1 << 6 /* set bit #6 */
mcreq p15, 0, r10, c15, c0, 1 /* write diagnostic register */
#endif
/* set VBAR to the _vector_table address in scatter file */
ldr r0, =_vector_table
mcr p15, 0, r0, c12, c0, 0
;invalidate scu
ldr r7, =0xf8f0000c
ldr r6, =0xffff
str r6, [r7]
;Invalidate caches and TLBs
mov r0,#0 ; r0 = 0
mcr p15, 0, r0, c8, c7, 0 ; invalidate TLBs
mcr p15, 0, r0, c7, c5, 0 ; invalidate icache
mcr p15, 0, r0, c7, c5, 6 ; Invalidate branch predictor array
bl invalidate_dcache ; invalidate dcache
; Disable MMU, if enabled
mrc p15, 0, r0, c1, c0, 0 ; read CP15 register 1
bic r0, r0, #0x1 ; clear bit 0
mcr p15, 0, r0, c1, c0, 0 ; write value back
#ifdef SHAREABLE_DDR
; Mark the entire DDR memory as shareable
ldr r3, =0x3ff ; 1024 entries to cover 1G DDR
ldr r0, =TblBase ; MMU Table address in memory
ldr r2, =0x15de6 ; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
shareable_loop:
str r2, [r0] ; write the entry to MMU table
add r0, r0, #0x4 ; next entry in the table
add r2, r2, #0x100000 ; next section
subs r3, r3, #1
bge shareable_loop ; loop till 1G is covered
#endif
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the irq stack pointer
and r2, r1, r0
orr r2, r2, #0x12 ; IRQ mode
msr apsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=|Image$$IRQ_STACK$$ZI$$Limit| ; IRQ stack pointer
bic r2, r2, #(0x1 << 9) ; Set EE bit to little-endian
msr spsr_fsxc,r2
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the supervisor stack pointer
and r2, r1, r0
orr r2, r2, #0x13 ; supervisor mode
msr apsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=|Image$$SPV_STACK$$ZI$$Limit| ; Supervisor stack pointer
bic r2, r2, #(0x1 << 9) ; Set EE bit to little-endian
msr spsr_fsxc,r2
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Abort stack pointer
and r2, r1, r0
orr r2, r2, #0x17 ; Abort mode
msr apsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=|Image$$ABORT_STACK$$ZI$$Limit| ; Abort stack pointer
bic r2, r2, #(0x1 << 9) ; Set EE bit to little-endian
msr spsr_fsxc,r2
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the system stack pointer
and r2, r1, r0
orr r2, r2, #0x1f ; SYS mode
msr apsr, r2 ; was cpsr, apsr is considered synonym
ldr r13,=|Image$$ARM_LIB_STACK$$ZI$$Limit| ; SYS stack pointer
;set scu enable bit in scu
ldr r7, =0xf8f00000
ldr r0, [r7]
orr r0, r0, #0x1
str r0, [r7]
; enable MMU and cache
ldr r0,=MMUTable ; Load MMU translation table base
orr r0, r0, #0x5B ; Outer-cacheable, WB
mcr p15, 0, r0, c2, c0, 0 ; TTB0
mvn r0,#0
mcr p15,0,r0,c3,c0,0
; Enable mmu, icahce and dcache
ldr r0,=CRValMmuCac
mcr p15,0,r0,c1,c0,0 ; Enable cache and MMU
dsb ; dsb allow the MMU to start up
isb ; isb flush prefetch buffer
; Write to ACTLR
mrc p15, 0,r0, c1, c0, 1 ; Read ACTLR
orr r0, r0, #(0x01 << 6) ; SMP bit
orr r0, r0, #(0x01 ) ; Cache/TLB maintenance broadcast
mcr p15, 0,r0, c1, c0, 1 ; Write ACTLR
; Invalidate L2 Cache and initialize L2 Cache
; For AMP, assume running on CPU1. Don't initialize L2 Cache (up to Linux)
#if USE_AMP!=1
ldr r0,=L2CCCrtl ; Load L2CC base address base + control register
mov r1, #0 ; force the disable bit
str r1, [r0] ; disable the L2 Caches
ldr r0,=L2CCAuxCrtl ; Load L2CC base address base + Aux control register
ldr r1,[r0] ; read the register
ldr r2,=L2CCAuxControl ; set the default bits
orr r1,r1,r2
str r1, [r0] ; store the Aux Control Register
ldr r0,=L2CCTAGLatReg ; Load L2CC base address base + TAG Latency address
ldr r1,=L2CCTAGLatency ; set the latencies for the TAG
str r1, [r0] ; store the TAG Latency register Register
ldr r0,=L2CCDataLatReg ; Load L2CC base address base + Data Latency address
ldr r1,=L2CCDataLatency ; set the latencies for the Data
str r1, [r0] ; store the Data Latency register Register
ldr r0,=L2CCWay ; Load L2CC base address base + way register
ldr r2, =0xFFFF
str r2, [r0] ; force invalidate
ldr r0,=L2CCSync ; need to poll 0x730, PSS_L2CC_CACHE_SYNC_OFFSET
; Load L2CC base address base + sync register
; poll for completion
Sync
ldr r1, [r0]
cmp r1, #0
bne Sync
ldr r0,=L2CCIntRaw ; clear pending interrupts
ldr r1,[r0]
ldr r0,=L2CCIntClear
str r1,[r0]
ldr r0,=SLCRUnlockReg ;Load SLCR base address base + unlock register
ldr r1,=SLCRUnlockKey ;set unlock key
str r1, [r0] ;Unlock SLCR
ldr r0,=SLCRL2cRamReg ;Load SLCR base address base + l2c Ram Control register
ldr r1,=SLCRL2cRamConfig ;set the configuration value
str r1, [r0] ;store the L2c Ram Control Register
ldr r0,=SLCRlockReg ;Load SLCR base address base + lock register
ldr r1,=SLCRlockKey ;set lock key
str r1, [r0] ;lock SLCR
ldr r0,=L2CCCrtl ; Load L2CC base address base + control register
ldr r1,[r0] ; read the register
mov r2, #L2CCControl ; set the enable bit
orr r1,r1,r2
str r1, [r0] ; enable the L2 Caches
#endif
mov r0, r0
mrc p15, 0, r1, c1, c0, 2 ; read cp access control register (CACR) into r1
orr r1, r1, #(0xf << 20) ; enable full access for p10 & p11
mcr p15, 0, r1, c1, c0, 2 ; write back into CACR
; enable vfp
fmrx r1, FPEXC ; read the exception register
orr r1,r1, #FPEXC_EN ; set VFP enable bit, leave the others in orig state
fmxr FPEXC, r1 ; write back the exception register
mrc p15, 0, r0, c1, c0, 0 ; flow prediction enable
orr r0, r0, #(0x01 << 11) ; #0x8000
mcr p15,0,r0,c1,c0,0
mrc p15, 0, r0, c1, c0, 1 ; read Auxiliary Control Register
orr r0, r0, #(0x1 << 2) ; enable Dside prefetch
orr r0, r0, #(0x1 << 1) ; enable L2 prefetch
mcr p15, 0, r0, c1, c0, 1 ; write Auxiliary Control Register
mrs r0, cpsr /* get the current PSR */
bic r0, r0, #0x100 /* enable asynchronous abort exception */
msr cpsr_xsf, r0
; Clear cp15 regs with unknown reset values
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 ; DFSR
mcr p15, 0, r0, c5, c0, 1 ; IFSR
mcr p15, 0, r0, c6, c0, 0 ; DFAR
mcr p15, 0, r0, c6, c0, 2 ; IFAR
mcr p15, 0, r0, c9, c13, 2 ; PMXEVCNTR
mcr p15, 0, r0, c13, c0, 2 ; TPIDRURW
mcr p15, 0, r0, c13, c0, 3 ; TPIDRURO
; Reset and start Cycle Counter
mov r2, #0x80000000 ; clear overflow
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd ; D, C, E
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 ; enable cycle counter
mcr p15, 0, r2, c9, c12, 1
; Reset and start Global Timer
mov r0, #0x0
mov r1, #0x0
bl XTime_SetTime
; Reset and start Triple Timer counter
#if defined SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
#ifdef PROFILING /* defined in Makefile */
/* Setup profiling stuff */
bl _profile_init
#endif /* PROFILING */
; make sure argc and argv are valid
mov r0, #0
mov r1, #0
b __main ; jump to C startup code
and r0, r0, r0 ; no op
Ldone b Ldone ; Paranoia: we should never get here
; *************************************************************************
; *
; * invalidate_dcache - invalidate the entire d-cache by set/way
; *
; * Note: for Cortex-A9, there is no cp instruction for invalidating
; * the whole D-cache. Need to invalidate each line.
; *
; *************************************************************************
invalidate_dcache
mrc p15, 1, r0, c0, c0, 1 ; read CLIDR
ands r3, r0, #0x7000000
mov r3, r3, lsr #23 ; cache level value (naturally aligned)
beq finished
mov r10, #0 ; start with level 0
loop1
add r2, r10, r10, lsr #1 ; work out 3xcachelevel
mov r1, r0, lsr r2 ; bottom 3 bits are the Cache type for this level
and r1, r1, #7 ; get those 3 bits alone
cmp r1, #2
blt skip ; no cache or only instruction cache at this level
mcr p15, 2, r10, c0, c0, 0 ; write the Cache Size selection register
isb ; isb to sync the change to the CacheSizeID reg
mrc p15, 1, r1, c0, c0, 0 ; reads current Cache Size ID register
and r2, r1, #7 ; extract the line length field
add r2, r2, #4 ; add 4 for the line length offset (log2 16 bytes)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 ; r4 is the max number on the way size (right aligned)
clz r5, r4 ; r5 is the bit position of the way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 ; r7 is the max number of the index size (right aligned)
loop2
mov r9, r4 ; r9 working copy of the max way size (right aligned)
loop3
orr r11, r10, r9, lsl r5 ; factor in the way number and cache number into r11
orr r11, r11, r7, lsl r2 ; factor in the index number
mcr p15, 0, r11, c7, c6, 2 ; invalidate by set/way
subs r9, r9, #1 ; decrement the way number
bge loop3
subs r7, r7, #1 ; decrement the index
bge loop2
skip
add r10, r10, #2 ; increment the cache number
cmp r3, r10
bgt loop1
finished
mov r10, #0 ; switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 ; select current cache level in cssr
isb
bx lr
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,612 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexa9/armcc/translation_table.s | ;******************************************************************************
; Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************
;****************************************************************************
;**
; @file translation_table.s
;
; This file contains the initialization for the MMU table in RAM
; needed by the Cortex A9 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ---- -------- ---------------------------------------------------
; 1.00a ecm 10/20/09 Initial version
; 3.07a sgd 07/05/2012 Configuring device address spaces as shareable device
; instead of strongly-ordered.
; 4.2 pkp 09/02/14 modified translation table entries according to address map
; 4.2 pkp 09/11/14 modified translation table entries to resolve compilation
; error for solving CR#822897
; 6.1 pkp 07/11/16 Corrected comments for memory attributes
; </pre>
;
; @note
;
; None.
;
;****************************************************************************
EXPORT MMUTable
AREA |.mmu_tbl|,CODE,ALIGN=14
MMUTable
; Each table entry occupies one 32-bit word and there are
; 4096 entries, so the entire table takes up 16KB.
; Each entry covers a 1MB section.
GBLA count
GBLA sect
; 0x00000000 - 0x3ffffff (DDR Cacheable)
count SETA 0
sect SETA 0
WHILE count<0x400
DCD sect + 0x15de6 ; S=1, TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1
sect SETA sect+0x100000
count SETA count+1
WEND
; 0x40000000 - 0x7fffffff (GpAxi0)
count SETA 0
WHILE count<0x400
DCD sect + 0xc02 ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0x80000000 - 0xbfffffff (GpAxi1)
count SETA 0
WHILE count<0x400
DCD sect + 0xc02 ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xc0000000 - 0xdfffffff (undef)
count SETA 0
WHILE count<0x200
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xe0000000 - 0xe02fffff (IOP dev)
count SETA 0
WHILE count<0x3
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xe0300000 - 0xe0ffffff (undef/reserved)
count SETA 0
WHILE count<0xD
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xe1000000 - 0xe1ffffff (NAND)
count SETA 0
WHILE count<0x10
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xe2000000 - 0xe3ffffff (NOR)
count SETA 0
WHILE count<0x20
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xe4000000 - 0xe5ffffff (SRAM)
count SETA 0
WHILE count<0x20
DCD sect + 0xc0e ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b1
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xe6000000 - 0xf7ffffff (reserved)
count SETA 0
WHILE count<0x120
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
; 0xf8f03000 to 0xf8ffffff are reserved but due to granual size of
; 1MB, it is not possible to define separate regions for them
; 0xf8000000 - 0xf8ffffff (APB device regs)
count SETA 0
WHILE count<0x10
DCD sect + 0xc06 ; S=0, TEX=b010 AP=b11, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xf9000000 - 0xfbffffff (reserved)
count SETA 0
WHILE count<0x30
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xfc000000 - 0xfdffffff (QSPI)
count SETA 0
WHILE count<0x20
DCD sect + 0xc0a ; S=b0 TEX=b000 AP=b11, Domain=b0, C=b1, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xfe000000 - 0xffefffff (reserved)
count SETA 0
WHILE count<0x1F
DCD sect ; S=0, TEX=b000 AP=b00, Domain=b0, C=b0, B=b0
sect SETA sect+0x100000
count SETA count+1
WEND
; 0xfff00000 to 0xfffb0000 is reserved but due to granual size of
; 1MB, it is not possible to define separate region for it
; 0xfff00000 to 0xfffb0000 (OCM)
count SETA 0
DCD sect + 0x4c0e ; S=b0 TEX=b100 AP=b11, Domain=b0, C=b1, B=b1
sect SETA sect+0x100000
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,131 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/iccarm/asm_vectors.s | ;******************************************************************************
; Copyright (c) 2017 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
;*****************************************************************************/
;*****************************************************************************/
;**
; @file asm_vectors.s
;
; This file contains the initial vector table for the Cortex R5 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ------- -------- ---------------------------------------------------
; 6.2 mus 01/27/17 Initial version
; </pre>
;
; @note
;
; None.
;
;*****************************************************************************/
MODULE ?asm_vectors
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#define UART_BAUDRATE 115200
IMPORT _prestart
IMPORT __iar_program_start
SECTION .intvec:CODE:NOROOT(2)
PUBLIC _vector_table
IMPORT FIQInterrupt
IMPORT IRQInterrupt
IMPORT SWInterrupt
IMPORT DataAbortInterrupt
IMPORT PrefetchAbortInterrupt
IMPORT UndefinedException
IMPORT UndefinedExceptionAddr
IMPORT PrefetchAbortAddr
IMPORT DataAbortAddr
IMPORT prof_pc
_vector_table
ARM
ldr pc,=__iar_program_start
ldr pc,=Undefined
ldr pc,=SVCHandler
ldr pc,=PrefetchAbortHandler
ldr pc,=DataAbortHandler
NOP ; Placeholder for address exception vector
ldr pc,=IRQHandler
ldr pc,=FIQHandler
SECTION .text:CODE:NOROOT(2)
REQUIRE _vector_table
ARM
IRQHandler ; IRQ vector handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
#ifndef __SOFTFP__
vpush {d0-d7} /* Store floating point registers */
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
bl IRQInterrupt ; IRQ vector
#ifndef __SOFTFP__
pop {r1} /* Restore floating point registers */
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
FIQHandler ; FIQ vector handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
FIQLoop
bl FIQInterrupt ; FIQ vector
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #4 ; adjust return
Undefined ; Undefined handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] ; Store address of instruction causing undefined exception
bl UndefinedException ; UndefinedException: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
movs pc, lr
SVCHandler ; SWI handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
tst r0, #0x20 ; check the T bit
ldrneh r0, [lr,#-2] ; Thumb mode
bicne r0, r0, #0xff00 ; Thumb mode
ldreq r0, [lr,#-4] ; ARM mode
biceq r0, r0, #0xff000000 ; ARM mode
bl SWInterrupt ; SWInterrupt: call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
movs pc, lr ; adjust return
DataAbortHandler ; Data Abort handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] ; Stores instruction causing data abort
bl DataAbortInterrupt ;DataAbortInterrupt :call C function here
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code
subs pc, lr, #8 ; adjust return
PrefetchAbortHandler ; Prefetch Abort handler
stmdb sp!,{r0-r3,r12,lr} ; state save from compiled code
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] ; Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt ; PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} ; state restore from compiled code */
subs pc, lr, #4 ; adjust return */
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,976 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/iccarm/boot.s | ;******************************************************************************
; Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
; SPDX-License-Identifier: MIT
; *****************************************************************************/
; ****************************************************************************/
; **
; @file boot.S
;
; This file contains the initial startup code for the Cortex R5 processor
;
; <pre>
; MODIFICATION HISTORY:
;
; Ver Who Date Changes
; ----- ---- -------- ---------------------------------------------------
; 5.00 mus 01/27/17 Initial version
; 6.6 srm 10/18/17 Updated the timer configuration with XTime_StartTTCTimer.
; Now the timer instance as specified by the user will be
; started.
; 6.6 mus 02/23/17 Disable the debug logic in non-JTAG boot mode(when
; processor is in lockstep configuration), based
; on the mld parameter "lockstep_mode_debug".
* 6.8 mus 09/20/18 Clear VINITHI field in RPU_0_CFG/RPU_1_CFG
* registers to initialize CortexR5 core with LOVEC
* on reset. It fixes CR#1010656.
* 7.0 mus 03/19/19 Disable FPU only in case of softp, otherwise enable it by
* default. CR#1021638
;
; </pre>
;
; @note
;
; None.
;
; *****************************************************************************/
MODULE ?boot
;; Forward declaration of sections.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
#include "xparameters.h"
#define UART_BAUDRATE 115200
PUBLIC _prestart
PUBLIC __iar_program_start
IMPORT _vector_table
IMPORT Init_MPU
#ifdef SLEEP_TIMER_BASEADDR
IMPORT XTime_StartTTCTimer
#endif
IMPORT __cmain
vector_base EQU _vector_table
RPU_GLBL_CNTL EQU 0xFF9A0000
RPU_ERR_INJ EQU 0xFF9A0020
RPU_0_CFG EQU 0xFF9A0100
RPU_1_CFG EQU 0xFF9A0200
RST_LPD_DBG EQU 0xFF5E0240
BOOT_MODE_USER EQU 0xFF5E0200
fault_log_enable EQU 0x101
SECTION .boot:CODE:NOROOT(2)
/* this initializes the various processor modes */
_prestart
__iar_program_start
OKToRun
REQUIRE _vector_table
/* Initialize processor registers to 0 */
mov r0,#0
mov r1,#0
mov r2,#0
mov r3,#0
mov r4,#0
mov r5,#0
mov r6,#0
mov r7,#0
mov r8,#0
mov r9,#0
mov r10,#0
mov r11,#0
mov r12,#0
/* Initialize stack pointer and banked registers for various mode */
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the irq stack pointer
and r2, r1, r0
orr r2, r2, #0x12 ; IRQ mode
msr cpsr, r2
ldr r13,=SFE(IRQ_STACK) ; IRQ stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the supervisor stack pointer
and r2, r1, r0
orr r2, r2, #0x13 ; supervisor mode
msr cpsr, r2
ldr r13,=SFE(SVC_STACK) ; Supervisor stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Abort stack pointer
and r2, r1, r0
orr r2, r2, #0x17 ; Abort mode
msr cpsr, r2
ldr r13,=SFE(ABT_STACK) ; Abort stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the FIQ stack pointer
and r2, r1, r0
orr r2, r2, #0x11 ; FIQ mode
msr cpsr, r2
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
mov r12, #0
ldr r13,=SFE(FIQ_STACK) ; FIQ stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the Undefine stack pointer
and r2, r1, r0
orr r2, r2, #0x1b ; Undefine mode
msr cpsr, r2
ldr r13,=SFE(UND_STACK) ; Undefine stack pointer
mov r14,#0
mrs r0, cpsr ; get the current PSR
mvn r1, #0x1f ; set up the system stack pointer
and r2, r1, r0
orr r2, r2, #0x1F ; SYS mode
msr cpsr, r2
ldr r13,=SFE(CSTACK) ; SYS stack pointer
mov r14,#0
;
; Enable access to VFP by enabling access to Coprocessors 10 and 11.
; Enables Full Access i.e. in both privileged and non privileged modes
;
mrc p15, 0, r0, c1, c0, 2 ; Read Coprocessor Access Control Register (CPACR)
orr r0, r0, #(0xF << 20) ; Enable access to CP 10 & 11
mcr p15, 0, r0, c1, c0, 2 ; Write Coprocessor Access Control Register (CPACR)
isb
; enable fpu access
vmrs r3, FPEXC
orr r1, r3, #(1<<30)
vmsr FPEXC, r1
; clear the floating point register
mov r1,#0
vmov d0,r1,r1
vmov d1,r1,r1
vmov d2,r1,r1
vmov d3,r1,r1
vmov d4,r1,r1
vmov d5,r1,r1
vmov d6,r1,r1
vmov d7,r1,r1
vmov d8,r1,r1
vmov d9,r1,r1
vmov d10,r1,r1
vmov d11,r1,r1
vmov d12,r1,r1
vmov d13,r1,r1
vmov d14,r1,r1
vmov d15,r1,r1
#ifdef __SOFTFP__
; Disable FPU by restoring previous value for FPU access
vmsr FPEXC,r3
#endif
; Disable MPU and caches
mrc p15, 0, r0, c1, c0, 0 ; Read CP15 Control Register
bic r0, r0, #0x05 ; Disable MPU (M bit) and data cache (C bit)
bic r0, r0, #0x1000 ; Disable instruction cache (I bit)
dsb ; Ensure all previous loads/stores have completed
mcr p15, 0, r0, c1, c0, 0 ; Write CP15 Control Register
isb ; Ensure subsequent insts execute wrt new MPU settings
; Disable Branch prediction, TCM ECC checks
mrc p15, 0, r0, c1, c0, 1 ; Read ACTLR
orr r0, r0, #(0x1 << 17) ; Enable RSDIS bit 17 to disable the return stack
orr r0, r0, #(0x1 << 16) ; Clear BP bit 15 and set BP bit 16
bic r0, r0, #(0x1 << 15) ; Branch always not taken and history table updates disabled
bic r0, r0, #(0x1 << 27) ; Disable B1TCM ECC check
bic r0, r0, #(0x1 << 26) ; Disable B0TCM ECC check
bic r0, r0, #(0x1 << 25) ; Disable ATCM ECC check
orr r0, r0, #(0x1 << 5) ; Enable ECC with no forced write through with [5:3]=b'101
bic r0, r0, #(0x1 << 4)
orr r0, r0, #(0x1 << 3)
mcr p15, 0, r0, c1, c0, 1 ; Write ACTLR*/
dsb ; Complete all outstanding explicit memory operations*/
; Invalidate caches
mov r0,#0 ; r0 = 0
dsb
mcr p15, 0, r0, c7, c5, 0 ; invalidate icache
mcr p15, 0, r0, c15, c5, 0 ; Invalidate entire data cache
isb
#if LOCKSTEP_MODE_DEBUG == 0
; enable fault log for lock step
ldr r0,=RPU_GLBL_CNTL
ldr r1, [r0]
ands r1, r1, #0x8
; branch to initialization if split mode
bne init
; check for boot mode if in lock step, branch to init if JTAG boot mode
ldr r0,=BOOT_MODE_USER
ldr r1, [r0]
ands r1, r1, #0xF
beq init
; reset the debug logic
ldr r0,=RST_LPD_DBG
ldr r1, [r0]
orr r1, r1, #(0x1 << 1)
orr r1, r1, #(0x1 << 4)
orr r1, r1, #(0x1 << 5)
str r1, [r0]
; enable fault log
ldr r0,=RPU_ERR_INJ
ldr r1,=fault_log_enable
ldr r2, [r0]
orr r2, r2, r1
str r2, [r0]
nop
nop
#endif
init
bl Init_MPU ; Initialize MPU
; Enable Branch prediction
mrc p15, 0, r0, c1, c0, 1 ; Read ACTLR
bic r0, r0, #(0x1 << 17) ; Clear RSDIS bit 17 to enable return stack
bic r0, r0, #(0x1 << 16) ; Clear BP bit 15 and BP bit 16:
bic r0, r0, #(0x1 << 15) ; Normal operation, BP is taken from the global history table.
orr r0, r0, #(0x1 << 14) ; Disable DBWR for errata 780125
mcr p15, 0, r0, c1, c0, 1 ; Write ACTLR
; Enable icahce and dcache
mrc p15,0,r1,c1,c0,0
ldr r0, =0x1005
orr r1,r1,r0
dsb
mcr p15,0,r1,c1,c0,0 ; Enable cache
isb ; isb flush prefetch buffer
; Set vector table in TCM/LOVEC
#ifndef VEC_TABLE_IN_OCM
mrc p15, 0, r0, c1, c0, 0
mvn r1, #0x2000
and r0, r0, r1
mcr p15, 0, r0, c1, c0, 0
; Clear VINITHI to enable LOVEC on reset
#if XPAR_CPU_ID == 0
ldr r0, =RPU_0_CFG
#else
ldr r0, =RPU_1_CFG
#endif
ldr r1, [r0]
bic r1, r1, #(0x1 << 2)
str r1, [r0]
#endif
; enable asynchronous abort exception
mrs r0, cpsr
bic r0, r0, #0x100
msr cpsr_xsf, r0
; Clear cp15 regs with unknown reset values
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 ; DFSR
mcr p15, 0, r0, c5, c0, 1 ; IFSR
mcr p15, 0, r0, c6, c0, 0 ; DFAR
mcr p15, 0, r0, c6, c0, 2 ; IFAR
mcr p15, 0, r0, c9, c13, 2 ; PMXEVCNTR
mcr p15, 0, r0, c13, c0, 2 ; TPIDRURW
mcr p15, 0, r0, c13, c0, 3 ; TPIDRURO
; Reset and start Cycle Counter
mov r2, #0x80000000 ; clear overflow
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd ; D, C, E
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 ; enable cycle counter
mcr p15, 0, r2, c9, c12, 1
; configure the timer if TTC is present
#ifdef SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
; make sure argc and argv are valid
mov r0, #0
mov r1, #0
b __cmain ; jump to C startup code
Ldone b Ldone ; Paranoia: we should never get here
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 10,478 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/boot.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup r5_boot_code Cortex R5 Processor Boot Code
* @{
* <h2> boot.S </h2>
* The boot code performs minimum configuration which is required for an
* application to run starting from processor's reset state. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function.
*
* 1. Program vector table base for exception handling
* 2. Program stack pointer for various modes (IRQ, FIQ, supervisor, undefine,
* abort, system)
* 3. Disable instruction cache, data cache and MPU
* 4. Invalidate instruction and data cache
* 5. Configure MPU with short descriptor translation table format and program
* base address of translation table
* 6. Enable data cache, instruction cache and MPU
* 7. Enable Floating point unit
* 8. Transfer control to _start which clears BSS sections and jumping to main
* application
*
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 5.04 pkp 09/11/15 Disabled ACTLR.DBWR bit to avoid potential R5 deadlock
* for errata 780125
* 5.04 pkp 02/04/16 Enabled the fault log for lock-step mode
* 5.04 pkp 02/25/16 Initialized the banked registers for various modes,
* initialized floating point registers and enabled the
* cache ECC check before enabling the fault log for
* lock step mode
* 5.04 pkp 03/24/16 Reset the dbg_lpd_reset before enabling the fault log
* to avoid intervention for lock-step mode
* 5.05 pkp 04/11/16 Enable the comparators for non-JTAG boot mode for
* lock-step to avoid putting debug logic to reset
* 6.02 pkp 02/13/17 Added support for hard float
* 6.6 mus 02/23/17 Enable/Disable the debug logic in non-JTAG boot mode(when
* processor is in lockstep configuration), based
* on the mld parameter "lockstep_mode_debug".
* 6.8 mus 09/20/18 Clear VINITHI field in RPU_0_CFG/RPU_1_CFG
* registers to initialize CortexR5 core with LOVEC
* on reset. It fixes CR#1010656.
* 7.1 mus 03/27/19 Skip reading/writing to the RPU address space registers,
* in case if processor is nonsecure and RPU
* address space is secure. CR#1015725.
* 7.2 mus 10/11/19 Resetting the r5_0 and r5_1 debug logic is sufficient
* to avoid intervention for lock-step mode. So, removed
* code which resets dbg_lpd_reset, to unblock debugger
* access to LPD. Fix for CR#1027983.
* </pre>
*
******************************************************************************/
#include "xparameters.h"
.global _prestart
.global _boot
.global __stack
.global __irq_stack
.global __supervisor_stack
.global __abort_stack
.global __fiq_stack
.global __undef_stack
.global _vector_table
/* Stack Pointer locations for boot code */
.set Undef_stack, __undef_stack
.set FIQ_stack, __fiq_stack
.set Abort_stack, __abort_stack
.set SPV_stack, __supervisor_stack
.set IRQ_stack, __irq_stack
.set SYS_stack, __stack
.set vector_base, _vector_table
.set RPU_GLBL_CNTL, 0xFF9A0000
.set RPU_ERR_INJ, 0xFF9A0020
.set RPU_0_CFG, 0xFF9A0100
.set RPU_1_CFG, 0xFF9A0200
#if defined(versal)
.set RST_LPD_DBG, 0xFF5E0338
.set BOOT_MODE_USER, 0xF1260200
#else
.set RST_LPD_DBG, 0xFF5E0240
.set BOOT_MODE_USER, 0xFF5E0200
#endif
.set fault_log_enable, 0x101
/*
* 0th bit of PROCESSOR_ACCESS_VALUE macro signifies trustzone
* setting for RPU address space
*/
#define RPU_TZ_MASK 0x1
.section .boot,"axS"
/* this initializes the various processor modes */
_prestart:
_boot:
OKToRun:
/* Initialize processor registers to 0 */
mov r0,#0
mov r1,#0
mov r2,#0
mov r3,#0
mov r4,#0
mov r5,#0
mov r6,#0
mov r7,#0
mov r8,#0
mov r9,#0
mov r10,#0
mov r11,#0
mov r12,#0
/* Initialize stack pointer and banked registers for various mode */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the irq stack pointer */
and r2, r1, r0
orr r2, r2, #0x12 /* IRQ mode */
msr cpsr, r2
ldr r13,=IRQ_stack /* IRQ stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the supervisor stack pointer */
and r2, r1, r0
orr r2, r2, #0x13 /* supervisor mode */
msr cpsr, r2
ldr r13,=SPV_stack /* Supervisor stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Abort stack pointer */
and r2, r1, r0
orr r2, r2, #0x17 /* Abort mode */
msr cpsr, r2
ldr r13,=Abort_stack /* Abort stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the FIQ stack pointer */
and r2, r1, r0
orr r2, r2, #0x11 /* FIQ mode */
msr cpsr, r2
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
mov r12, #0
ldr r13,=FIQ_stack /* FIQ stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Undefine stack pointer */
and r2, r1, r0
orr r2, r2, #0x1b /* Undefine mode */
msr cpsr, r2
ldr r13,=Undef_stack /* Undefine stack pointer */
mov r14,#0
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the system stack pointer */
and r2, r1, r0
orr r2, r2, #0x1F /* SYS mode */
msr cpsr, r2
ldr r13,=SYS_stack /* SYS stack pointer */
mov r14,#0
/*
* Enable access to VFP by enabling access to Coprocessors 10 and 11.
* Enables Full Access i.e. in both privileged and non privileged modes
*/
mrc p15, 0, r0, c1, c0, 2 /* Read Coprocessor Access Control Register (CPACR) */
orr r0, r0, #(0xF << 20) /* Enable access to CP 10 & 11 */
mcr p15, 0, r0, c1, c0, 2 /* Write Coprocessor Access Control Register (CPACR) */
isb
/* enable fpu access */
vmrs r3, FPEXC
orr r1, r3, #(1<<30)
vmsr FPEXC, r1
/* clear the floating point register*/
mov r1,#0
vmov d0,r1,r1
vmov d1,r1,r1
vmov d2,r1,r1
vmov d3,r1,r1
vmov d4,r1,r1
vmov d5,r1,r1
vmov d6,r1,r1
vmov d7,r1,r1
vmov d8,r1,r1
vmov d9,r1,r1
vmov d10,r1,r1
vmov d11,r1,r1
vmov d12,r1,r1
vmov d13,r1,r1
vmov d14,r1,r1
vmov d15,r1,r1
#ifdef __SOFTFP__
/* Disable the FPU if SOFTFP is defined*/
vmsr FPEXC,r3
#endif
/* Disable MPU and caches */
mrc p15, 0, r0, c1, c0, 0 /* Read CP15 Control Register*/
bic r0, r0, #0x05 /* Disable MPU (M bit) and data cache (C bit) */
bic r0, r0, #0x1000 /* Disable instruction cache (I bit) */
dsb /* Ensure all previous loads/stores have completed */
mcr p15, 0, r0, c1, c0, 0 /* Write CP15 Control Register */
isb /* Ensure subsequent insts execute wrt new MPU settings */
/* Disable Branch prediction, TCM ECC checks */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
orr r0, r0, #(0x1 << 17) /* Enable RSDIS bit 17 to disable the return stack */
orr r0, r0, #(0x1 << 16) /* Clear BP bit 15 and set BP bit 16:*/
bic r0, r0, #(0x1 << 15) /* Branch always not taken and history table updates disabled*/
orr r0, r0, #(0x1 << 27) /* Enable B1TCM ECC check */
orr r0, r0, #(0x1 << 26) /* Enable B0TCM ECC check */
orr r0, r0, #(0x1 << 25) /* Enable ATCM ECC check */
bic r0, r0, #(0x1 << 5) /* Generate abort on parity errors, with [5:3]=b 000*/
bic r0, r0, #(0x1 << 4)
bic r0, r0, #(0x1 << 3)
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
dsb /* Complete all outstanding explicit memory operations*/
/* Invalidate caches */
mov r0,#0 /* r0 = 0 */
dsb
mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
mcr p15, 0, r0, c15, c5, 0 /* Invalidate entire data cache*/
isb
#if LOCKSTEP_MODE_DEBUG == 0 && (PROCESSOR_ACCESS_VALUE & RPU_TZ_MASK)
/* enable fault log for lock step */
ldr r0,=RPU_GLBL_CNTL
ldr r1, [r0]
ands r1, r1, #0x8
/* branch to initialization if split mode*/
bne init
/* check for boot mode if in lock step, branch to init if JTAG boot mode*/
ldr r0,=BOOT_MODE_USER
ldr r1, [r0]
ands r1, r1, #0xF
beq init
/* reset the debug logic */
ldr r0,=RST_LPD_DBG
ldr r1, [r0]
orr r1, r1, #(0x1 << 4)
orr r1, r1, #(0x1 << 5)
str r1, [r0]
/* enable fault log */
ldr r0,=RPU_ERR_INJ
ldr r1,=fault_log_enable
ldr r2, [r0]
orr r2, r2, r1
str r2, [r0]
nop
nop
#endif
init:
bl Init_MPU /* Initialize MPU */
/* Enable Branch prediction */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR*/
bic r0, r0, #(0x1 << 17) /* Clear RSDIS bit 17 to enable return stack*/
bic r0, r0, #(0x1 << 16) /* Clear BP bit 15 and BP bit 16:*/
bic r0, r0, #(0x1 << 15) /* Normal operation, BP is taken from the global history table.*/
orr r0, r0, #(0x1 << 14) /* Disable DBWR for errata 780125 */
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
/* Enable icahce and dcache */
mrc p15,0,r1,c1,c0,0
ldr r0, =0x1005
orr r1,r1,r0
dsb
mcr p15,0,r1,c1,c0,0 /* Enable cache */
isb /* isb flush prefetch buffer */
/* Warning message to be removed after 2016.1 */
/* USEAMP was introduced in 2015.4 with ZynqMP and caused confusion with USE_AMP */
#ifdef USEAMP
#warning "-DUSEAMP=1 is deprecated, use -DVEC_TABLE_IN_OCM instead to set vector table in OCM"
#endif
/* Set vector table in TCM/LOVEC */
#ifndef VEC_TABLE_IN_OCM
mrc p15, 0, r0, c1, c0, 0
mvn r1, #0x2000
and r0, r0, r1
mcr p15, 0, r0, c1, c0, 0
/* Check if processor is having access to RPU address space */
#if (PROCESSOR_ACCESS_VALUE & RPU_TZ_MASK)
/* Clear VINITHI to enable LOVEC on reset */
#if XPAR_CPU_ID == 0
ldr r0, =RPU_0_CFG
#else
ldr r0, =RPU_1_CFG
#endif
ldr r1, [r0]
bic r1, r1, #(0x1 << 2)
str r1, [r0]
#endif
#endif
/* enable asynchronous abort exception */
mrs r0, cpsr
bic r0, r0, #0x100
msr cpsr_xsf, r0
b _startup /* jump to C startup code */
.Ldone: b .Ldone /* Paranoia: we should never get here */
.end
/**
* @} End of "addtogroup r5_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,773 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex R5 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 6.0 mus 27/07/16 Added UndefinedException handler
* 6.3 pkp 02/13/17 Added support for hard float
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SWInterrupt
.globl DataAbortInterrupt
.globl PrefetchAbortInterrupt
.globl IRQHandler
.globl prof_pc
.section .vectors, "a"
_vector_table:
ldr pc,=_boot
ldr pc,=Undefined
ldr pc,=SVCHandler
ldr pc,=PrefetchAbortHandler
ldr pc,=DataAbortHandler
NOP /* Placeholder for address exception vector*/
ldr pc,=IRQHandler
ldr pc,=FIQHandler
.text
IRQHandler: /* IRQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code*/
#ifndef __SOFTFP__
vpush {d0-d7} /* Store floating point registers */
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
#endif
bl IRQInterrupt /* IRQ vector */
#ifndef __SOFTFP__
pop {r1} /* Restore floating point registers */
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d0-d7}
#endif
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
FIQLoop:
bl FIQInterrupt /* FIQ vector */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] /* Store address of instruction causing undefined exception */
bl UndefinedException /* UndefinedException: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr
SVCHandler: /* SWI handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
tst r0, #0x20 /* check the T bit */
ldrneh r0, [lr,#-2] /* Thumb mode */
bicne r0, r0, #0xff00 /* Thumb mode */
ldreq r0, [lr,#-4] /* ARM mode */
biceq r0, r0, #0xff000000 /* ARM mode */
bl SWInterrupt /* SWInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr /* adjust return */
DataAbortHandler: /* Data Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] /* Stores instruction causing data abort */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* adjust return */
PrefetchAbortHandler: /* Prefetch Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] /* Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,572 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/xil-crt0.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 First release
* 5.04 pkp 12/18/15 Initialized global constructor for C++ applications
* 5.04 pkp 02/19/16 Added timer configuration using XTime_StartTimer API when
* TTC3 is present
* 6.4 asa 08/16/17 Added call to Xil_InitializeExistingMPURegConfig to
* initialize the MPU configuration table with the MPU
* configurations already set in Init_Mpu function.
* 6.6 srm 10/18/17 Updated the timer configuration with XTime_StartTTCTimer.
* Now the timer instance as specified by the user will be
* started.
* 7.2 mus 10/22/19 Defined RPU_TZ_MASK as #define instead of variable.
* 7.2 sd 03/20/20 Add clocking support.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
/*
* 0th bit of PROCESSOR_ACCESS_VALUE macro signifies trustzone
* setting for RPU address space
*/
#define RPU_TZ_MASK 0x1
.text
.Lsbss_start:
.long __sbss_start
.Lsbss_end:
.long __sbss_end
.Lbss_start:
.long __bss_start__
.Lbss_end:
.long __bss_end__
.Lstack:
.long __stack
.set RPU_0_PWRCTL, 0xFF9A0108
.set RPU_1_PWRCTL, 0xFF9A0208
.set MPIDR_AFF0, 0xFF
.set PWRCTL_MASK, 0x1
.globl _startup
_startup:
bl __cpu_init /* Initialize the CPU first (BSP provides this) */
#if (PROCESSOR_ACCESS_VALUE & RPU_TZ_MASK)
mrc p15, 0, r0, c0, c0, 5 /* Read MPIDR register */
ands r0, r0, #MPIDR_AFF0 /* Get affinity level 0 */
bne core1
ldr r10, =RPU_0_PWRCTL /* Load PWRCTRL address for core 0 */
b test_boot_status
core1:
ldr r10, =RPU_1_PWRCTL /* Load PWRCTRL address for core 1 */
test_boot_status:
ldr r11, [r10] /* Read PWRCTRL register */
ands r11, r11, #PWRCTL_MASK /* Extract and test core's PWRCTRL */
/* if warm reset, skip the clearing of BSS and SBSS */
bne .Lenclbss
#endif
mov r0, #0
/* clear sbss */
ldr r1,.Lsbss_start /* calculate beginning of the SBSS */
ldr r2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp r1,r2
bge .Lenclsbss /* If no SBSS, no clearing required */
str r0, [r1], #4
b .Lloop_sbss
.Lenclsbss:
/* clear bss */
ldr r1,.Lbss_start /* calculate beginning of the BSS */
ldr r2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp r1,r2
bge .Lenclbss /* If no BSS, no clearing required */
str r0, [r1], #4
b .Lloop_bss
.Lenclbss:
/* set stack pointer */
ldr r13,.Lstack /* stack address */
/* configure the timer if TTC is present */
#ifdef SLEEP_TIMER_BASEADDR
bl XTime_StartTTCTimer
#endif
bl Xil_InitializeExistingMPURegConfig /* Initialize MPU config */
/* run global constructors */
bl __libc_init_array
/* make sure argc and argv are valid */
mov r0, #0
mov r1, #0
#ifdef XCLOCKING
bl Xil_ClockInit
#endif
bl main /* Jump to main C code */
/* Cleanup global constructors */
bl __libc_fini_array
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _startup,.Lstart-_startup
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,379 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/cortexr5/gcc/cpu_init.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file cpu_init.s
*
* This file contains CPU specific initialization. Invoked from main CRT
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
*
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.text
.global __cpu_init
.align 2
__cpu_init:
/* Clear cp15 regs with unknown reset values */
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 /* DFSR */
mcr p15, 0, r0, c5, c0, 1 /* IFSR */
mcr p15, 0, r0, c6, c0, 0 /* DFAR */
mcr p15, 0, r0, c6, c0, 2 /* IFAR */
mcr p15, 0, r0, c9, c13, 2 /* PMXEVCNTR */
mcr p15, 0, r0, c13, c0, 2 /* TPIDRURW */
mcr p15, 0, r0, c13, c0, 3 /* TPIDRURO */
/* Reset and start Cycle Counter */
mov r2, #0x80000000 /* clear overflow */
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd /* D, C, E */
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 /* enable cycle counter */
mcr p15, 0, r2, c9, c12, 1
bx lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 9,069 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/boot.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup a53_32_boot_code Cortex A53 32bit Processor Boot Code
* @{
* <h2> boot.S </h2>
* The boot code performs minimum configuration which is required for an
* application to run starting from processor's reset state. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function.
*
* 1. Program vector table base for exception handling
* 2. Invalidate instruction cache, data cache and TLBs
* 3. Program stack pointer for various modes (IRQ, FIQ, supervisor, undefine,
* abort, system)
* 4. Program counter frequency
* 5. Configure MMU with short descriptor translation table format and program
* base address of translation table
* 6. Enable data cache, instruction cache and MMU
* 7. Transfer control to _start which clears BSS sections and runs global
* constructor before jumping to main application
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 5.4 pkp 09/11/15 Enable I-Cache and D-Cache in the initialization
* 6.0 pkp 07/25/16 Program the counter frequency
* 6.4 mus 07/25/17 Set VFP enable bit in FPEXC register, to support
* hard floating point in BSP
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
.globl MMUTable
.global _prestart
.global _boot
.global __stack
.global __irq_stack
.global __supervisor_stack
.global __abort_stack
.global __fiq_stack
.global __undef_stack
.global _vector_table
.set PSS_L2CC_BASE_ADDR, 0xF8F02000
.set PSS_SLCR_BASE_ADDR, 0xF8000000
.set RESERVED, 0x0fffff00
.set TblBase , MMUTable
.set LRemap, 0xFE00000F /* set the base address of the peripheral block as not shared */
.set CRValMmuCac, 0b01000000000001 /* Enable IDC, and MMU */
.set counterfreq, XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ
/* Stack Pointer locations for boot code */
.set Undef_stack, __undef_stack
.set FIQ_stack, __fiq_stack
.set Abort_stack, __abort_stack
.set SPV_stack, __supervisor_stack
.set IRQ_stack, __irq_stack
.set SYS_stack, __stack
.set vector_base, _vector_table
.set FPEXC_EN, 0x40000000 /* FPU enable bit, (1 << 30) */
.section .boot,"ax"
/* this initializes the various processor modes */
_prestart:
_boot:
OKToRun:
mrc p15, 0, r0, c0, c0, 0 /* Get the revision */
and r5, r0, #0x00f00000
and r6, r0, #0x0000000f
orr r6, r6, r5, lsr #20-4
/* set VBAR to the _vector_table address in linker script */
ldr r0, =vector_base
mcr p15, 0, r0, c12, c0, 0
/* Invalidate caches and TLBs */
mov r0,#0 /* r0 = 0 */
mcr p15, 0, r0, c8, c7, 0 /* invalidate TLBs */
mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
mcr p15, 0, r0, c7, c5, 6 /* Invalidate branch predictor array */
bl invalidate_dcache /* invalidate dcache */
/* Disable MMU, if enabled */
mrc p15, 0, r0, c1, c0, 0 /* read CP15 register 1 */
bic r0, r0, #0x1 /* clear bit 0 */
mcr p15, 0, r0, c1, c0, 0 /* write value back */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the irq stack pointer */
and r2, r1, r0
orr r2, r2, #0x12 /* IRQ mode */
msr cpsr, r2
ldr r13,=IRQ_stack /* IRQ stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the supervisor stack pointer */
and r2, r1, r0
orr r2, r2, #0x13 /* supervisor mode */
msr cpsr, r2
ldr r13,=SPV_stack /* Supervisor stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Abort stack pointer */
and r2, r1, r0
orr r2, r2, #0x17 /* Abort mode */
msr cpsr, r2
ldr r13,=Abort_stack /* Abort stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the FIQ stack pointer */
and r2, r1, r0
orr r2, r2, #0x11 /* FIQ mode */
msr cpsr, r2
ldr r13,=FIQ_stack /* FIQ stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the Undefine stack pointer */
and r2, r1, r0
orr r2, r2, #0x1b /* Undefine mode */
msr cpsr, r2
ldr r13,=Undef_stack /* Undefine stack pointer */
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the system stack pointer */
and r2, r1, r0
orr r2, r2, #0x1F /* SYS mode */
msr cpsr, r2
ldr r13,=SYS_stack /* SYS stack pointer */
/* program the timer counter frequency */
ldr r0,=counterfreq
mcr 15,0,r0,c14,c0,0
mov r0,#0
mcr 15,0,r0,c2,c0,2 /* N = 0 to use ttbr0 */
/* Write to ACTLR */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
orr r0, r0, #(1 << 0) /* Enable access to CPUECTLR */
orr r0, r0, #(1 << 1)
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR */
/* Write to CPUECTLR */
mrrc p15, 1, r0, r1, c15 /* Read CPUECTLR */
orr r0, r0, #(0x01 << 6) /* Set SMPEN bit */
mcrr p15, 1, r0, r1, c15 /* Write CPUECTLR */
/* enable MMU and cache */
ldr r0,=TblBase /* Load MMU translation table base */
orr r0, r0, #0x5B /* Outer-cacheable, WB */
mcr 15, 0, r0, c2, c0, 0 /* TTB0 */
mov r0,#0x5B
mcr p15,0,r0,c2,c0,1
mvn r0,#0 /* Load MMU domains -- all ones=manager */
mcr p15,0,r0,c3,c0,0
/* Enable mmu, icahce and dcache */
mrc p15,0,r0,c1,c0,0
bic r0, r0, #(0x1 << 13)
orr r0, r0, #(0x1 << 12) /* enable I-cache */
orr r0, r0, #(0x1 << 2) /* enable D-Cache */
orr r0, r0, #0x1 /* enable MMU */
dsb /* dsb allow the MMU to start up */
mcr p15,0,r0,c1,c0,0 /* Enable cache and MMU */
isb /* isb flush prefetch buffer */
mov r0, r0
mrc p15, 0, r1, c1, c0, 2 /* read cp access control register (CACR) into r1 */
orr r1, r1, #(0xf << 20) /* enable full access for p10 & p11 */
mcr p15, 0, r1, c1, c0, 2 /* write back into CACR */
/* enable vfp */
vmrs r1, FPEXC /* read the exception register */
orr r1,r1, #FPEXC_EN /* set VFP enable bit, leave the others in orig state */
vmsr FPEXC, r1 /* write back the exception register */
mrc p15,0,r0,c1,c0,0 /* flow prediction enable */
orr r0, r0, #(0x01 << 11) /* #0x8000 */
mcr p15,0,r0,c1,c0,0
mrc p15,0,r0,c1,c0,1 /* read Auxiliary Control Register */
orr r0, r0, #(0x1 << 2) /* enable Dside prefetch */
orr r0, r0, #(0x1 << 1) /* enable L2 Prefetch hint */
mcr p15,0,r0,c1,c0,1 /* write Auxiliary Control Register */
mrs r0, cpsr /* get the current PSR */
bic r0, r0, #0x100 /* enable asynchronous abort exception */
msr cpsr_xsf, r0
b _startup /* jump to C startup code */
and r0, r0, r0 /* no op */
.Ldone: b .Ldone /* Paranoia: we should never get here */
/*
*************************************************************************
*
* invalidate_dcache - invalidate the entire d-cache by set/way
*
* Note: for Cortex-A53, there is no cp instruction for invalidating
* the whole D-cache. Need to invalidate each line.
*
*************************************************************************
*/
invalidate_dcache:
mrc p15, 1, r0, c0, c0, 1 /* read CLIDR */
ands r3, r0, #0x7000000
mov r3, r3, lsr #23 /* cache level value (naturally aligned) */
beq finished
mov r10, #0 /* start with level 0 */
loop1:
add r2, r10, r10, lsr #1 /* work out 3xcachelevel */
mov r1, r0, lsr r2 /* bottom 3 bits are the Cache type for this level */
and r1, r1, #7 /* get those 3 bits alone */
cmp r1, #2
blt skip /* no cache or only instruction cache at this level */
mcr p15, 2, r10, c0, c0, 0 /* write the Cache Size selection register */
isb /* isb to sync the change to the CacheSizeID reg */
mrc p15, 1, r1, c0, c0, 0 /* reads current Cache Size ID register */
and r2, r1, #7 /* extract the line length field */
add r2, r2, #4 /* add 4 for the line length offset (log2 16 bytes) */
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 /* r4 is the max number on the way size (right aligned) */
clz r5, r4 /* r5 is the bit position of the way size increment */
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 /* r7 is the max number of the index size (right aligned) */
loop2:
mov r9, r4 /* r9 working copy of the max way size (right aligned) */
loop3:
orr r11, r10, r9, lsl r5 /* factor in the way number and cache number into r11 */
orr r11, r11, r7, lsl r2 /* factor in the index number */
mcr p15, 0, r11, c7, c6, 2 /* invalidate by set/way */
subs r9, r9, #1 /* decrement the way number */
bge loop3
subs r7, r7, #1 /* decrement the index */
bge loop2
skip:
add r10, r10, #2 /* increment the cache number */
cmp r3, r10
bgt loop1
finished:
mov r10, #0 /* switch back to cache level 0 */
mcr p15, 2, r10, c0, c0, 0 /* select current cache level in cssr */
dsb
isb
bx lr
.end
/**
* @} End of "addtogroup a53_32_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 4,011 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A53 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 6.0 mus 27/07/16 Added Undefined exception handler
* 6.4 mus 25/07/17 Added support for hard floating point
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SWInterrupt
.globl DataAbortInterrupt
.globl PrefetchAbortInterrupt
.globl IRQHandler
.globl prof_pc
.section .vectors, "a"
_vector_table:
B _boot
B Undefined
B SVCHandler
B PrefetchAbortHandler
B DataAbortHandler
NOP /* Placeholder for address exception vector*/
B IRQHandler
B FIQHandler
IRQHandler: /* IRQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code*/
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
bl IRQInterrupt /* IRQ vector */
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
FIQHandler: /* FIQ vector handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
vpush {d0-d7}
vpush {d16-d31}
vmrs r1, FPSCR
push {r1}
vmrs r1, FPEXC
push {r1}
FIQLoop:
bl FIQInterrupt /* FIQ vector */
pop {r1}
vmsr FPEXC, r1
pop {r1}
vmsr FPSCR, r1
vpop {d16-d31}
vpop {d0-d7}
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* adjust return */
Undefined: /* Undefined handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =UndefinedExceptionAddr
sub r1, lr, #4
str r1, [r0] /* Store address of instruction causing undefined exception */
bl UndefinedException /* UndefinedException: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr
SVCHandler: /* SWI handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
tst r0, #0x20 /* check the T bit */
ldrneh r0, [lr,#-2] /* Thumb mode */
bicne r0, r0, #0xff00 /* Thumb mode */
ldreq r0, [lr,#-4] /* ARM mode */
biceq r0, r0, #0xff000000 /* ARM mode */
bl SWInterrupt /* SWInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
movs pc, lr /*return to the next instruction after the SWI instruction */
DataAbortHandler: /* Data Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =DataAbortAddr
sub r1, lr, #8
str r1, [r0] /* Stores instruction causing data abort */
bl DataAbortInterrupt /*DataAbortInterrupt :call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #8 /* points to the instruction that caused the Data Abort exception */
PrefetchAbortHandler: /* Prefetch Abort handler */
stmdb sp!,{r0-r3,r12,lr} /* state save from compiled code */
ldr r0, =PrefetchAbortAddr
sub r1, lr, #4
str r1, [r0] /* Stores instruction causing prefetch abort */
bl PrefetchAbortInterrupt /* PrefetchAbortInterrupt: call C function here */
ldmia sp!,{r0-r3,r12,lr} /* state restore from compiled code */
subs pc, lr, #4 /* points to the instruction that caused the Prefetch Abort exception */
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 2,156 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/xil-crt0.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 5.4 pkp 18/12/15 Initialized global constructor for C++ applications
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
.Lsbss_start:
.long __sbss_start
.Lsbss_end:
.long __sbss_end
.Lbss_start:
.long __bss_start__
.Lbss_end:
.long __bss_end__
.Lstack:
.long __stack
.globl _startup
_startup:
mov r0, #0
/* clear sbss */
ldr r1,.Lsbss_start /* calculate beginning of the SBSS */
ldr r2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp r1,r2
bge .Lenclsbss /* If no SBSS, no clearing required */
str r0, [r1], #4
b .Lloop_sbss
.Lenclsbss:
/* clear bss */
ldr r1,.Lbss_start /* calculate beginning of the BSS */
ldr r2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp r1,r2
bge .Lenclbss /* If no BSS, no clearing required */
str r0, [r1], #4
b .Lloop_bss
.Lenclbss:
/* set stack pointer */
ldr r13,.Lstack /* stack address */
/* run global constructors */
bl __libc_init_array
/* Reset and start Triple Timer Counter */
#if defined (SLEEP_TIMER_BASEADDR)
bl XTime_StartTTCTimer
#endif
/* make sure argc and argv are valid */
mov r0, #0
mov r1, #0
bl main /* Jump to main C code */
/* Cleanup global constructors */
bl __libc_fini_array
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _startup,.Lstart-_startup
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,374 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/gcc/cpu_init.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file cpu_init.s
*
* This file contains CPU specific initialization. Invoked from main CRT
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
.text
.global __cpu_init
.align 2
__cpu_init:
/* Clear cp15 regs with unknown reset values */
mov r0, #0x0
mcr p15, 0, r0, c5, c0, 0 /* DFSR */
mcr p15, 0, r0, c5, c0, 1 /* IFSR */
mcr p15, 0, r0, c6, c0, 0 /* DFAR */
mcr p15, 0, r0, c6, c0, 2 /* IFAR */
mcr p15, 0, r0, c9, c13, 2 /* PMXEVCNTR */
mcr p15, 0, r0, c13, c0, 2 /* TPIDRURW */
mcr p15, 0, r0, c13, c0, 3 /* TPIDRURO */
/* Reset and start Cycle Counter */
mov r2, #0x80000000 /* clear overflow */
mcr p15, 0, r2, c9, c12, 3
mov r2, #0xd /* D, C, E */
mcr p15, 0, r2, c9, c12, 0
mov r2, #0x80000000 /* enable cycle counter */
mcr p15, 0, r2, c9, c12, 1
bx lr
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 5,660 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/32bit/platform/ZynqMP/translation_table.S | /******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a53_32_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A53. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq ultrascale+
* architecture. It utilizes short descriptor translation table format with each
* section defining 1MB of memory.
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------|-------------------------|---------------------------------|
*| DDR | 0x00000000 - 0x7FFFFFFF | Normal write-back Cacheable |
*| PL | 0x80000000 - 0xBFFFFFFF | Strongly Ordered |
*| QSPI, lower PCIe| 0xC0000000 - 0xEFFFFFFF | Device Memory |
*| Reserved | 0xF0000000 - 0xF7FFFFFF | Unassigned |
*| STM Coresight | 0xF8000000 - 0xF8FFFFFF | Device Memory |
*| GIC | 0xF9000000 - 0xF90FFFFF | Device memory |
*| Reserved | 0xF9100000 - 0xFCFFFFFF | Unassigned |
*| FPS, LPS slaves | 0xFD000000 - 0xFFBFFFFF | Device memory |
*| CSU, PMU | 0xFFC00000 - 0xFFDFFFFF | Device Memory |
*| TCM, OCM | 0xFFE00000 - 0xFFFFFFFF | Normal write-back cacheable |
*
* @note
*
* For DDR in region 0x00000000 - 0x7FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. In region 0xFFC00000 - 0xFFDFFFFF, it contains CSU
* and PMU memory which are marked as Device since it is less than 1MB and
* falls in a region with device memory.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.2 pkp 28/05/15 First release
* 5.4 pkp 18/12/15 Updated the address map according to proper address map
* 6.0 mus 20/07/16 Added warning for ddrless HW design CR-954977
* </pre>
*
*
******************************************************************************/
#include "xparameters.h"
.globl MMUTable
.section .mmu_tbl,"a"
MMUTable:
/* Each table entry occupies one 32-bit word and there are
* 4096 entries, so the entire table takes up 16KB.
* Each entry covers a 1MB section.
*/
.set SECT, 0
#ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_START, XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_END, XPAR_PSU_DDR_0_S_AXI_HIGHADDR
.set DDR_SIZE, (DDR_END - DDR_START)+1
.if DDR_SIZE > 0x80000000
/* If DDR size is larger than 2GB, truncate to 2GB */
.set DDR_REG, 0x800
.else
.set DDR_REG, DDR_SIZE/0x100000
.endif
#else
.set DDR_REG, 0
#warning "There's no DDR in the HW design. MMU translation table marks 2 GB DDR address space as undefined"
#endif
.set UNDEF_REG, 0x800 - DDR_REG
.rept DDR_REG /* DDR Cacheable */
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept UNDEF_REG /* unassigned/reserved */
/* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0x80000000 - 0x9fffffff (FPGA slave0) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0xA0000000 - 0xbfffffff (FPGA slave1) */
.word SECT + 0xc02 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0200 /* 0xc0000000 - 0xdfffffff (OSPI IOU)*/
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x0100 /* 0xe0000000 - 0xefffffff (Lower PCIe)*/
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x80 /* 0xf0000000 - 0xf7ffffff (unassigned/reserved).
* Generates a translation fault if accessed */
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x10 /* 0xf8000000 - 0xf8ffffff (STM Coresight) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x1 /* 0xf9000000 - 0xf90fffff (RPU_A53_GIC) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x3f /* 0xf9100000 - 0xfcffffff (reserved).*/
.word SECT + 0x0 /* S=b0 TEX=b000 AP=b00, Domain=b0, C=b0, B=b0 */
.set SECT, SECT+0x100000
.endr
.rept 0x10 /* 0xfd000000 - 0xfdffffff (FPS Slaves) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x1C /* 0xfe0000000 - 0xfeffffff (LPS Slaves) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x2 /* 0xffc000000 - 0xffdfffff (CSU and PMU) */
.word SECT + 0xc06 /* S=b0 TEX=b000 AP=b11, Domain=b0, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.rept 0x02 /* 0xffe00000 - 0xffffffff (TCM and OCM Cacheable) */
.word SECT + 0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
.set SECT, SECT+0x100000
.endr
.end
/**
* @} End of "addtogroup a53_32_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 11,334 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/armclang/boot.S | ;/******************************************************************************
;* Copyright (c) 2019 - 2020 Xilinx, Inc. All rights reserved.
;* SPDX-License-Identifier: MIT
;******************************************************************************/
;/*****************************************************************************/
;/**
;* @file boot.S
;*
;* @addtogroup a53_64_boot_code Cortex A53 64bit Processor Boot Code
;* @{
;* <h2> boot.S </h2>
;*
;* The boot code performs minimum configuration which is required for an
;* application. Cortex-A53 starts by checking current exception level. If the
;* current exception level is EL3 and BSP is built for EL3, it will do
;* initialization required for application execution at EL3. Below is a
;* sequence illustrating what all configuration is performed before control
;* reaches to main function for EL3 execution.
;*
;* 1. Program vector table base for exception handling
;* 2. Set reset vector table base address
;* 3. Program stack pointer for EL3
;* 4. Routing of interrupts to EL3
;* 5. Enable ECC protection
;* 6. Program generic counter frequency
;* 7. Invalidate instruction cache, data cache and TLBs
;* 8. Configure MMU registers and program base address of translation table
;* 9. Transfer control to _start which clears BSS sections and runs global
;* constructor before jumping to main application
;*
;* If current exception level is EL1 and BSP is also built for EL1_NONSECURE
;* it will perform initialization required for application execution at EL1
;* non-secure. For all other combination, the execution will go into infinite
;* loop. Below is a sequence illustrating what all configuration is performed
;* before control reaches to main function for EL1 execution.
;*
;* 1. Program vector table base for exception handling
;* 2. Program stack pointer for EL1
;* 3. Invalidate instruction cache, data cache and TLBs
;* 4. Configure MMU registers and program base address of translation table
;* 5. Transfer control to _start which clears BSS sections and runs global
;* constructor before jumping to main application
;*
;* <pre>
;* MODIFICATION HISTORY:
;*
;* Ver Who Date Changes
;* ----- ------- -------- ---------------------------------------------------
;* 7.0 mus 02/26/19 First release
;* 7.2 mus 01/08/19 Added support for versal
;* sd 02/23/20 Clock Init is called
;* sd 03/21/20 Added XCLOCKING flag
;*
;******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
#include "xil_errata.h"
EXPORT _prestart
EXPORT _boot
IMPORT MMUTableL0
IMPORT |Image$$ARM_LIB_STACK$$ZI$$Base|
IMPORT _vector_table
IMPORT __main
#ifdef XCLOCKING
IMPORT Xil_ClockInit
#endif
#ifndef FREERTOS_BSP
IMPORT FPUStatus
#endif
rvbar_base EQU 0xFD5C0040
MODE_EL1 EQU 0x5
DAIF_BIT EQU 0x1C0
TT_S1_FAULT EQU 0x0
TT_S1_TABLE EQU 0x3
AREA |.boot|, CODE
; This initializes the various processor modes
_prestart
_boot
mov x0, #0
mov x1, #0
mov x2, #0
mov x3, #0
mov x4, #0
mov x5, #0
mov x6, #0
mov x7, #0
mov x8, #0
mov x9, #0
mov x10, #0
mov x11, #0
mov x12, #0
mov x13, #0
mov x14, #0
mov x15, #0
mov x16, #0
mov x17, #0
mov x18, #0
mov x19, #0
mov x20, #0
mov x21, #0
mov x22, #0
mov x23, #0
mov x24, #0
mov x25, #0
mov x26, #0
mov x27, #0
mov x28, #0
mov x29, #0
mov x30, #0
OKToRun
mrs x0, currentEL
cmp x0, #0xC
beq InitEL3
cmp x0, #0x4
beq InitEL1
b error ; Go to error if current exception level is neither EL3 nor EL1
InitEL3
#if (EL3 == 1)
ldr x1, =_vector_table ; Set vector table base address
msr VBAR_EL3, x1
mrs x0, MPIDR_EL1 ; Get the CPU ID
and x0, x0, #0xFF
mov w0, w0
ldr w2, =rvbar_base ; Calculate the rvbar base address for particular CPU core
mov w3, #0x8
mul w0, w0, w3
add w2, w2, w0
str x1, [x2] ; Store vector base address to rvbar
ldr x2, =|Image$$ARM_LIB_STACK$$ZI$$Base| ; Define stack pointer for current exception level
mov sp, x2
mov x0, #0 ; Enable Trapping of SIMD/FPU register for standalone BSP
#ifndef FREERTOS_BSP
orr x0, x0, #(0x1 << 10)
#endif
msr CPTR_EL3, x0
isb
;
; Clear FPUStatus variable to make sure that it contains current
; status of FPU i.e. disabled. In case of a warm restart execution
; when bss sections are not cleared, it may contain previously updated
; value which does not hold true now
;
#ifndef FREERTOS_BSP
ldr x0, =FPUStatus
str xzr, [x0]
#endif
; Configure SCR_EL3
mov w1, #0 ; Initial value of register is unknown
orr w1, w1, #(1 << 11) ; Set ST bit (Secure EL1 can access CNTPS_TVAL_EL1, CNTPS_CTL_EL1 & CNTPS_CVAL_EL1)
orr w1, w1, #(1 << 10) ; Set RW bit (EL1 is AArch64, as this is the Secure world)
orr w1, w1, #(1 << 3) ; Set EA bit (SError routed to EL3)
orr w1, w1, #(1 << 2) ; Set FIQ bit (FIQs routed to EL3)
orr w1, w1, #(1 << 1) ; Set IRQ bit (IRQs routed to EL3)
msr SCR_EL3, x1
; Configure cpu auxiliary control register EL1
ldr x0, =0x80CA000 ; L1 Data prefetch control - 5, Enable device split throttle, 2 independent data prefetch streams
#if (CONFIG_ARM_ERRATA_855873)
;
; Set ENDCCASCI bit in CPUACTLR_EL1 register, to execute data
; cache clean operations as data cache clean and invalidate
;
orr x0, x0, #(1 << 44) ; Set ENDCCASCI bit
#endif
msr S3_1_C15_C2_0, x0 ; CPUACTLR_EL1
; Program the counter frequency
#if defined (versal)
ldr x0, =XPAR_CPU_CORTEXA72_0_TIMESTAMP_CLK_FREQ
#else
ldr x0, =XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ
msr CNTFRQ_EL0, x0
#endif
; Enable hardware coherency between cores
mrs x0, S3_1_c15_c2_1 ; Read EL1 CPU Extended Control Register
orr x0, x0, #(1 << 6) ; Set the SMPEN bit
msr S3_1_c15_c2_1, x0 ; Write EL1 CPU Extended Control Register
isb
tlbi ALLE3
ic IALLU ; Invalidate I cache to PoU
bl invalidate_dcaches
dsb sy
isb
ldr x1, =MMUTableL0 ; Get address of level 0 for TTBR0_EL3
msr TTBR0_EL3, x1 ; Set TTBR0_EL3
;
; Set up memory attributes
; This equates to:
; 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
; 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
; 2 = b00000000 = Device-nGnRnE
; 3 = b00000100 = Device-nGnRE
; 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
;
ldr x1, =0x000000BB0400FF44
msr MAIR_EL3, x1
#if defined (versal)
; Set up TCR_EL3
; Physical Address Size PS = 100 -> 44bits 16 TB
; Granual Size TG0 = 00 -> 4KB
; size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
ldr x1,=0x80843514
#else
;
; Set up TCR_EL3
; Physical Address Size PS = 010 -> 40bits 1TB
; Granule Size TG0 = 00 -> 4KB
; Size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
;
ldr x1, =0x80823518
#endif
msr TCR_EL3, x1
isb
; Enable SError Exception for asynchronous abort
mrs x1, DAIF
mov x2, #(0x1<<8)
bic x1, x1, x2
msr DAIF, x1
; Configure SCTLR_EL3
mov x1, #0 ; Most of the SCTLR_EL3 bits are unknown at reset
orr x1, x1, #(1 << 12) ; Enable I cache
orr x1, x1, #(1 << 3) ; Enable SP alignment check
orr x1, x1, #(1 << 2) ; Enable caches
orr x1, x1, #(1 << 0) ; Enable MMU
msr SCTLR_EL3, x1
dsb sy
isb
#ifdef XCLOCKING
b Xil_Clockinit
#endif
b __main ; Jump to start
#else
b error ; Present exception level and selected exception level mismatch
#endif
InitEL1
#if (EL1_NONSECURE == 1)
ldr x1, =_vector_table ; Set vector table base address
msr VBAR_EL1, x1
mrs x0, CPACR_EL1
mov x2, #(0x3 << 0x20)
bic x0, x0, x2
msr CPACR_EL1, x0
isb
;
; Clear FPUStatus variable to make sure that it contains current
; status of FPU i.e. disabled. In case of a warm restart execution
; when bss sections are not cleared, it may contain previously updated
; value which does not hold true now
;
#ifndef FREERTOS_BSP
ldr x0, =FPUStatus
str xzr, [x0]
#endif
ldr x2, =|Image$$ARM_LIB_STACK$$ZI$$Base| ; Define stack pointer for current exception level
mov sp, x2
; Disable MMU
mov x1, #0x0
msr SCTLR_EL1, x1
isb
TLBI VMALLE1
ic IALLU ; Invalidate I cache to PoU
bl invalidate_dcaches
dsb sy
isb
ldr x1, =MMUTableL0 ; Get address of level 0 for TTBR0_EL1
msr TTBR0_EL1, x1 ; Set TTBR0_EL1
;
; Set up memory attributes
; This equates to:
; 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
; 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
; 2 = b00000000 = Device-nGnRnE
; 3 = b00000100 = Device-nGnRE
; 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
;
ldr x1, =0x000000BB0400FF44
msr MAIR_EL1, x1
#if defined (versal)
;
; Set up TCR_EL1
; Physical Address Size PS = 100 -> 44bits 16TB
; Granual Size TG0 = 00 -> 4KB
; size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
;
ldr x1,=0x485800514
#else
;
; Set up TCR_EL1
; Physical Address Size PS = 010 -> 40bits 1TB
; Granule Size TG0 = 00 -> 4KB
; Size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
;
ldr x1, =0x285800518
#endif
msr TCR_EL1, x1
isb
; Enable SError Exception for asynchronous abort
mrs x1,DAIF
mov x2, #(0x1<<8)
bic x1,x1,x2
msr DAIF,x1
; Enable MMU
mov x1,#0x0
orr x1, x1, #(1 << 18) ; Set WFE non trapping
orr x1, x1, #(1 << 17) ; Set WFI non trapping
orr x1, x1, #(1 << 5) ; Set CP15 barrier enabled
orr x1, x1, #(1 << 12) ; Set I bit
orr x1, x1, #(1 << 2) ; Set C bit
orr x1, x1, #(1 << 0) ; Set M bit
msr SCTLR_EL1, x1
isb
bl __main ; Jump to start
#else
b error ; present exception level and selected exception level mismatch
#endif
error
b error
invalidate_dcaches
dmb ISH
mrs x0, CLIDR_EL1 ; x0 = CLIDR
ubfx w2, w0, #24, #3 ; w2 = CLIDR>Loc
cmp w2, #0 ; LoC is 0?
b.eq invalidateCaches_end ; No cleaning required and enable MMU
mov w1, #0 ; w1 = level iterator
invalidateCaches_flush_level
add w3, w1, w1, lsl #1 ; w3 = w1 * 3 (right-shift for cache type)
lsr w3, w0, w3 ; w3 = w0 >> w3
ubfx w3, w3, #0, #3 ; w3 = cache type of this level
cmp w3, #2 ; No cache at this level?
b.lt invalidateCaches_next_level
lsl w4, w1, #1
msr CSSELR_EL1, x4 ; Select current cache level in CSSELR
isb ; ISB required to reflect new CSIDR
mrs x4, CCSIDR_EL1 ; w4 = CSIDR
ubfx w3, w4, #0, #3
add w3, w3, #2 ; w3 = log2(line size)
ubfx w5, w4, #13, #15
ubfx w4, w4, #3, #10 ; w4 = Way number
clz w6, w4 ; w6 = 32 - log2(number of ways)
invalidateCaches_flush_set
mov w8, w4 ; w8 = Way number
invalidateCaches_flush_way
lsl w7, w1, #1 ; Fill level field
lsl w9, w5, w3
orr w7, w7, w9 ; Fill index field
lsl w9, w8, w6
orr w7, w7, w9 ; Fill way field
dc CISW, x7 ; Invalidate by set/way to point of coherency
subs w8, w8, #1 ; Decrement way
b.ge invalidateCaches_flush_way
subs w5, w5, #1 ; Decrement set
b.ge invalidateCaches_flush_set
invalidateCaches_next_level
add w1, w1, #1 ; Next level
cmp w2, w1
b.gt invalidateCaches_flush_level
invalidateCaches_end
ret
END
;
; @} End of "addtogroup a53_64_boot_code"
;
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 7,075 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/armclang/asm_vectors.S | ;/******************************************************************************
;* Copyright (c) 2019 - 2020 Xilinx, Inc. All rights reserved.
;* SPDX-License-Identifier: MIT
;******************************************************************************/
;/*****************************************************************************/
;/**
;* @file asm_vectors.s
;*
;* This file contains the initial vector table for the Cortex A53 processor
;*
;* <pre>
;* MODIFICATION HISTORY:
;*
;* Ver Who Date Changes
;* ----- ------- -------- ---------------------------------------------------
;* 7.0 cjp 02/26/19 First release
;* </pre>
;*
;* @note
;*
;* None.
;*
;******************************************************************************/
#include "bspconfig.h"
EXPORT _vector_table
EXPORT FPUStatus
IMPORT _boot
IMPORT FIQInterrupt
IMPORT IRQInterrupt
IMPORT SErrorInterrupt
IMPORT SynchronousInterrupt
;
; FPUContextSize is the size of the array where floating point registers are
; stored when required. The default size corresponds to the case when there is
; no nested interrupt. If there are nested interrupts in application which are
; using floating point operation, the size of FPUContextSize need to be
; increased as per requirement
;
FPUContextSize EQU 528
MACRO
saveregister
stp X0, X1, [sp,#-0x10]!
stp X2, X3, [sp,#-0x10]!
stp X4, X5, [sp,#-0x10]!
stp X6, X7, [sp,#-0x10]!
stp X8, X9, [sp,#-0x10]!
stp X10, X11, [sp,#-0x10]!
stp X12, X13, [sp,#-0x10]!
stp X14, X15, [sp,#-0x10]!
stp X16, X17, [sp,#-0x10]!
stp X18, X19, [sp,#-0x10]!
stp X29, X30, [sp,#-0x10]!
MEND
MACRO
restoreregister
ldp X29, X30, [sp], #0x10
ldp X18, X19, [sp], #0x10
ldp X16, X17, [sp], #0x10
ldp X14, X15, [sp], #0x10
ldp X12, X13, [sp], #0x10
ldp X10, X11, [sp], #0x10
ldp X8, X9, [sp], #0x10
ldp X6, X7, [sp], #0x10
ldp X4, X5, [sp], #0x10
ldp X2, X3, [sp], #0x10
ldp X0, X1, [sp], #0x10
MEND
MACRO
savefloatregister
ldr x1, =FPUContextBase ; Load the floating point context array address from FPUContextBase
ldr x0, [x1]
stp q0, q1, [x0], #0x20 ; Save all the floating point register to the array
stp q2, q3, [x0], #0x20
stp q4, q5, [x0], #0x20
stp q6, q7, [x0], #0x20
stp q8, q9, [x0], #0x20
stp q10, q11, [x0], #0x20
stp q12, q13, [x0], #0x20
stp q14, q15, [x0], #0x20
stp q16, q17, [x0], #0x20
stp q18, q19, [x0], #0x20
stp q20, q21, [x0], #0x20
stp q22, q23, [x0], #0x20
stp q24, q25, [x0], #0x20
stp q26, q27, [x0], #0x20
stp q28, q29, [x0], #0x20
stp q30, q31, [x0], #0x20
mrs x2, FPCR
mrs x3, FPSR
stp x2, x3, [x0], #0x10
str x0, [x1] ; Save current address of floating point context array to FPUContextBase
MEND
MACRO
restorefloatregister
ldr x1, =FPUContextBase ; Restore the address of floating point context array from FPUContextBase
ldr x0, [x1]
ldp x2, x3, [x0,#-0x10]! ; Restore all the floating point register from the array
msr FPCR, x2
msr FPSR, x3
ldp q30, q31, [x0,#-0x20]!
ldp q28, q29, [x0,#-0x20]!
ldp q26, q27, [x0,#-0x20]!
ldp q24, q25, [x0,#-0x20]!
ldp q22, q23, [x0,#-0x20]!
ldp q20, q21, [x0,#-0x20]!
ldp q18, q19, [x0,#-0x20]!
ldp q16, q17, [x0,#-0x20]!
ldp q14, q15, [x0,#-0x20]!
ldp q12, q13, [x0,#-0x20]!
ldp q10, q11, [x0,#-0x20]!
ldp q8, q9, [x0,#-0x20]!
ldp q6, q7, [x0,#-0x20]!
ldp q4, q5, [x0,#-0x20]!
ldp q2, q3, [x0,#-0x20]!
ldp q0, q1, [x0,#-0x20]!
str x0, [x1] ; Save current address of floating point context array to FPUContextBase
MEND
AREA |.vectors|, CODE
REQUIRE8 {TRUE}
PRESERVE8 {TRUE}
ENTRY ; Define this as an entry point
_vector_table
;
; If application is built for XEN GUEST as EL1 Non-secure following image
; header is required by XEN.
;
#if (HYP_GUEST == 1)
ldr x16, =_boot ; Valid Image header
br x16 ; HW reset vector
DCD 0 ; Text offset
DCD 0 ; Image size
DCD 8 ; Flags
DCD 0 ; RES0
DCD 0
DCD 0
DCD 0x644d5241 ; Magic
DCD 0 ; RES0
#endif
B _boot
ALIGN 512
B SynchronousInterruptHandler
ALIGN 128
B IRQInterruptHandler
ALIGN 128
B FIQInterruptHandler
ALIGN 128
B SErrorInterruptHandler
SynchronousInterruptHandler
saveregister
; Check if the Synchronous abort is occurred due to floating point access
#if (EL3 == 1)
mrs x0, ESR_EL3
#else
mrs x0, ESR_EL1
#endif
and x0, x0, #(0x3F << 26)
mov x1, #(0x7 << 26)
cmp x0, x1
;
; If exception is not due to floating point access go to synchronous
; handler
;
bne synchronoushandler
;
; If exception occurred due to floating point access, Enable the floating point
; access i.e. do not trap floating point instruction
;
#if (EL3 == 1)
mrs x1, CPTR_EL3
mov x2, #(0x1<<10)
bic x1, x1, x2
msr CPTR_EL3, x1
#else
mrs x1, CPACR_EL1
orr x1, x1, #(0x1<<20)
msr CPACR_EL1, x1
#endif
isb
;
; If the floating point access was previously enabled, store FPU context
; registers(storefloat)
;
ldr x0, =FPUStatus
ldrb w1, [x0]
cbnz w1, storefloat
;
; If the floating point access was not enabled previously, save the status of
; floating point accessibility i.e. enabled and store floating point context
; array address(FPUContext) to FPUContextBase
;
mov w1, #0x1
strb w1, [x0]
ldr x0, =FPUContext
ldr x1, =FPUContextBase
str x0, [x1]
b restorecontext
storefloat
savefloatregister
b restorecontext
synchronoushandler
bl SynchronousInterrupt
restorecontext
restoreregister
eret
IRQInterruptHandler
saveregister
; Save the status of SPSR, ELR and CPTR to stack
#if (EL3 == 1)
mrs x0, CPTR_EL3
mrs x1, ELR_EL3
mrs x2, SPSR_EL3
#else
mrs x0, CPACR_EL1
mrs x1, ELR_EL1
mrs x2, SPSR_EL1
#endif
stp x0, x1, [sp,#-0x10]!
str x2, [sp,#-0x10]!
; Trap floating point access
#if (EL3 == 1)
mrs x1, CPTR_EL3
orr x1, x1, #(0x1<<10)
msr CPTR_EL3, x1
#else
mrs x1, CPACR_EL1
mov x2, #(0x1<<20)
bic x1, x1, x2
msr CPACR_EL1, x1
#endif
isb
bl IRQInterrupt
;
; If floating point access is enabled during interrupt handling, restore
; floating point registers
;
#if (EL3 == 1)
mrs x0, CPTR_EL3
ands x0, x0, #(0x1<<10)
bne RestorePrevState
#else
mrs x0, CPACR_EL1
ands x0, x0, #(0x1<<20)
beq RestorePrevState
#endif
restorefloatregister
; Restore the status of SPSR, ELR and CPTR from stack
RestorePrevState
ldr x2, [sp], #0x10
ldp x0, x1, [sp],#0x10
#if (EL3 == 1)
msr CPTR_EL3, x0
msr ELR_EL3, x1
msr SPSR_EL3, x2
#else
msr CPACR_EL1, x0
msr ELR_EL1, x1
msr SPSR_EL1, x2
#endif
restoreregister
eret
FIQInterruptHandler
saveregister
bl FIQInterrupt
restoreregister
eret
SErrorInterruptHandler
saveregister
bl SErrorInterrupt
restoreregister
eret
ALIGN 8
; Array to store floating point registers
FPUContext
SPACE FPUContextSize
; Stores address for floating point context array
FPUContextBase
SPACE 8
FPUStatus
SPACE 4
END
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 1,679 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/xpvxenconsole/hypercall.S | /*
Copyright DornerWorks 2016
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
THIS SOFTWARE IS PROVIDED BY DORNERWORKS FOR USE ON THE CONTRACTED PROJECT, AND ANY EXPRESS OR IMPLIED WARRANTY
IS LIMITED TO THIS USE. FOR ALL OTHER USES THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DORNERWORKS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "xen.h"
.globl HYPERVISOR_console_io;
.align 4;
HYPERVISOR_console_io:
mov x16, __HYPERVISOR_console_io;
hvc 0xEA1;
ret;
.globl HYPERVISOR_hvm_op;
.align 4;
HYPERVISOR_hvm_op:
mov x16, __HYPERVISOR_hvm_op;
hvc 0xEA1;
ret;
.globl HYPERVISOR_memory_op;
.align 4;
HYPERVISOR_memory_op:
mov x16, __HYPERVISOR_memory_op;
hvc 0xEA1;
ret;
.globl HYPERVISOR_event_channel_op;
.align 4;
HYPERVISOR_event_channel_op:
mov x16, __HYPERVISOR_event_channel_op
hvc 0xEA1;
ret;
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 13,827 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/gcc/boot.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file boot.S
*
* @addtogroup a53_64_boot_code Cortex A53 64bit Processor Boot Code
* @{
* <h2> boot.S </h2>
*
* The boot code performs minimum configuration which is required for an
* application. Cortex-A53 starts by checking current exception level. If the
* current exception level is EL3 and BSP is built for EL3, it will do
* initialization required for application execution at EL3. Below is a
* sequence illustrating what all configuration is performed before control
* reaches to main function for EL3 execution.
*
* 1. Program vector table base for exception handling
* 2. Set reset vector table base address
* 3. Program stack pointer for EL3
* 4. Routing of interrupts to EL3
* 5. Enable ECC protection
* 6. Program generic counter frequency
* 7. Invalidate instruction cache, data cache and TLBs
* 8. Configure MMU registers and program base address of translation table
* 9. Transfer control to _start which clears BSS sections and runs global
* constructor before jumping to main application
*
* If the current exception level is EL1 and BSP is also built for EL1_NONSECURE
* it will perform initialization required for application execution at EL1
* non-secure. For all other combination, the execution will go into infinite
* loop. Below is a sequence illustrating what all configuration is performed
* before control reaches to main function for EL1 execution.
*
* 1. Program vector table base for exception handling
* 2. Program stack pointer for EL1
* 3. Invalidate instruction cache, data cache and TLBs
* 4. Configure MMU registers and program base address of translation table
* 5. Transfer control to _start which clears BSS sections and runs global
* constructor before jumping to main application
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 6.00 pkp 07/25/16 Program the counter frequency
* 6.02 pkp 01/22/17 Added support for EL1 non-secure
* 6.02 pkp 01/24/17 Clearing status of FPUStatus variable to ensure it
* holds correct value.
* 6.3 mus 04/20/17 CPU Cache protection bit in the L2CTLR_EL1 will be in
* set state on reset. So, setting that bit through boot
* code is redundant, hence removed the code which sets
* CPU cache protection bit.
* 6.4 mus 08/11/17 Implemented ARM erratum 855873.It fixes
* CR#982209.
* 6.6 mus 01/19/18 Added isb after writing to the cpacr_el1/cptr_el3,
* to ensure floating-point unit is disabled, before
* any subsequent instruction.
* 7.0 mus 03/26/18 Updated TCR_EL3/TCR_EL1 as per versal address map
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
#include "xil_errata.h"
.globl MMUTableL0
.globl MMUTableL1
.globl MMUTableL2
.global _prestart
.global _boot
.global __el3_stack
.global __el2_stack
.global __el1_stack
.global __el0_stack
.global _vector_table
.set EL3_stack, __el3_stack
.set EL2_stack, __el2_stack
.set EL1_stack, __el1_stack
.set EL0_stack, __el0_stack
.set TT_S1_FAULT, 0x0
.set TT_S1_TABLE, 0x3
.set L0Table, MMUTableL0
.set L1Table, MMUTableL1
.set L2Table, MMUTableL2
.set vector_base, _vector_table
.set rvbar_base, 0xFD5C0040
#if defined (versal)
.set counterfreq, XPAR_CPU_CORTEXA72_0_TIMESTAMP_CLK_FREQ
#else
.set counterfreq, XPAR_CPU_CORTEXA53_0_TIMESTAMP_CLK_FREQ
#endif
.set MODE_EL1, 0x5
.set DAIF_BIT, 0x1C0
.section .boot,"ax"
/* this initializes the various processor modes */
_prestart:
_boot:
mov x0, #0
mov x1, #0
mov x2, #0
mov x3, #0
mov x4, #0
mov x5, #0
mov x6, #0
mov x7, #0
mov x8, #0
mov x9, #0
mov x10, #0
mov x11, #0
mov x12, #0
mov x13, #0
mov x14, #0
mov x15, #0
mov x16, #0
mov x17, #0
mov x18, #0
mov x19, #0
mov x20, #0
mov x21, #0
mov x22, #0
mov x23, #0
mov x24, #0
mov x25, #0
mov x26, #0
mov x27, #0
mov x28, #0
mov x29, #0
mov x30, #0
#if 0 //don't put other a53 cpus in wfi
//Which core am I
// ----------------
mrs x0, MPIDR_EL1
and x0, x0, #0xFF //Mask off to leave Aff0
cbz x0, OKToRun //If core 0, run the primary init code
EndlessLoop0:
wfi
b EndlessLoop0
#endif
OKToRun:
mrs x0, currentEL
cmp x0, #0xC
beq InitEL3
cmp x0, #0x4
beq InitEL1
b error // go to error if current exception level is neither EL3 nor EL1
InitEL3:
.if (EL3 == 1)
/*Set vector table base address*/
ldr x1, =vector_base
msr VBAR_EL3,x1
/* Set reset vector address */
/* Get the cpu ID */
mrs x0, MPIDR_EL1
and x0, x0, #0xFF
mov w0, w0
ldr w2, =rvbar_base
/* calculate the rvbar base address for particular CPU core */
mov w3, #0x8
mul w0, w0, w3
add w2, w2, w0
/* store vector base address to RVBAR */
str x1, [x2]
/*Define stack pointer for current exception level*/
ldr x2,=EL3_stack
mov sp,x2
/* Enable Trapping of SIMD/FPU register for standalone BSP */
mov x0, #0
#ifndef FREERTOS_BSP
orr x0, x0, #(0x1 << 10)
#endif
msr CPTR_EL3, x0
isb
/*
* Clear FPUStatus variable to make sure that it contains current
* status of FPU i.e. disabled. In case of a warm restart execution
* when bss sections are not cleared, it may contain previously updated
* value which does not hold true now.
*/
#ifndef FREERTOS_BSP
ldr x0,=FPUStatus
str xzr, [x0]
#endif
/* Configure SCR_EL3 */
mov w1, #0 //; Initial value of register is unknown
orr w1, w1, #(1 << 11) //; Set ST bit (Secure EL1 can access CNTPS_TVAL_EL1, CNTPS_CTL_EL1 & CNTPS_CVAL_EL1)
orr w1, w1, #(1 << 10) //; Set RW bit (EL1 is AArch64, as this is the Secure world)
orr w1, w1, #(1 << 3) //; Set EA bit (SError routed to EL3)
orr w1, w1, #(1 << 2) //; Set FIQ bit (FIQs routed to EL3)
orr w1, w1, #(1 << 1) //; Set IRQ bit (IRQs routed to EL3)
msr SCR_EL3, x1
/*configure cpu auxiliary control register EL1 */
ldr x0,=0x80CA000 // L1 Data prefetch control - 5, Enable device split throttle, 2 independent data prefetch streams
#if CONFIG_ARM_ERRATA_855873
/*
* Set ENDCCASCI bit in CPUACTLR_EL1 register, to execute data
* cache clean operations as data cache clean and invalidate
*
*/
orr x0, x0, #(1 << 44) //; Set ENDCCASCI bit
#endif
msr S3_1_C15_C2_0, x0 //CPUACTLR_EL1
/* program the counter frequency */
ldr x0,=counterfreq
msr CNTFRQ_EL0, x0
/*Enable hardware coherency between cores*/
mrs x0, S3_1_c15_c2_1 //Read EL1 CPU Extended Control Register
orr x0, x0, #(1 << 6) //Set the SMPEN bit
msr S3_1_c15_c2_1, x0 //Write EL1 CPU Extended Control Register
isb
tlbi ALLE3
ic IALLU //; Invalidate I cache to PoU
bl invalidate_dcaches
dsb sy
isb
ldr x1, =L0Table //; Get address of level 0 for TTBR0_EL3
msr TTBR0_EL3, x1 //; Set TTBR0_EL3
/**********************************************
* Set up memory attributes
* This equates to:
* 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
* 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
* 2 = b00000000 = Device-nGnRnE
* 3 = b00000100 = Device-nGnRE
* 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
**********************************************/
ldr x1, =0x000000BB0400FF44
msr MAIR_EL3, x1
#if defined (versal)
/**********************************************
* Set up TCR_EL3
* Physical Address Size PS = 100 -> 44bits 16 TB
* Granual Size TG0 = 00 -> 4KB
* size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
***************************************************/
ldr x1,=0x80843514
#else
/**********************************************
* Set up TCR_EL3
* Physical Address Size PS = 010 -> 40bits 1TB
* Granual Size TG0 = 00 -> 4KB
* size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
***************************************************/
ldr x1,=0x80823518
#endif
msr TCR_EL3, x1
isb
/* Enable SError Exception for asynchronous abort */
mrs x1,DAIF
bic x1,x1,#(0x1<<8)
msr DAIF,x1
/* Configure SCTLR_EL3 */
mov x1, #0 //Most of the SCTLR_EL3 bits are unknown at reset
orr x1, x1, #(1 << 12) //Enable I cache
orr x1, x1, #(1 << 3) //Enable SP alignment check
orr x1, x1, #(1 << 2) //Enable caches
orr x1, x1, #(1 << 0) //Enable MMU
msr SCTLR_EL3, x1
dsb sy
isb
b _startup //jump to start
.else
b error // present exception level and selected exception level mismatch
.endif
InitEL1:
.if (EL1_NONSECURE == 1)
/*Set vector table base address*/
ldr x1, =vector_base
msr VBAR_EL1,x1
mrs x0, CPACR_EL1
bic x0, x0, #(0x3 << 0x20)
msr CPACR_EL1, x0
isb
/*
* Clear FPUStatus variable to make sure that it contains current
* status of FPU i.e. disabled. In case of a warm restart execution
* when bss sections are not cleared, it may contain previously updated
* value which does not hold true now.
*/
#ifndef FREERTOS_BSP
ldr x0,=FPUStatus
str xzr, [x0]
#endif
/*Define stack pointer for current exception level*/
ldr x2,=EL1_stack
mov sp,x2
/* Disable MMU first */
mov x1,#0x0
msr SCTLR_EL1, x1
isb
TLBI VMALLE1
ic IALLU //; Invalidate I cache to PoU
bl invalidate_dcaches
dsb sy
isb
ldr x1, =L0Table //; Get address of level 0 for TTBR0_EL1
msr TTBR0_EL1, x1 //; Set TTBR0_EL1
/**********************************************
* Set up memory attributes
* This equates to:
* 0 = b01000100 = Normal, Inner/Outer Non-Cacheable
* 1 = b11111111 = Normal, Inner/Outer WB/WA/RA
* 2 = b00000000 = Device-nGnRnE
* 3 = b00000100 = Device-nGnRE
* 4 = b10111011 = Normal, Inner/Outer WT/WA/RA
**********************************************/
ldr x1, =0x000000BB0400FF44
msr MAIR_EL1, x1
#if defined (versal)
/**********************************************
* Set up TCR_EL1
* Physical Address Size PS = 100 -> 44bits 16TB
* Granual Size TG0 = 00 -> 4KB
* size offset of the memory region T0SZ = 20 -> (region size 2^(64-20) = 2^44)
***************************************************/
ldr x1,=0x485800514
#else
/**********************************************
* Set up TCR_EL1
* Physical Address Size PS = 010 -> 44bits 16TB
* Granual Size TG0 = 00 -> 4KB
* size offset of the memory region T0SZ = 24 -> (region size 2^(64-24) = 2^40)
***************************************************/
ldr x1,=0x285800518
#endif
msr TCR_EL1, x1
isb
/* Enable SError Exception for asynchronous abort */
mrs x1,DAIF
bic x1,x1,#(0x1<<8)
msr DAIF,x1
//; Enable MMU
mov x1,#0x0
orr x1, x1, #(1 << 18) // ; Set WFE non trapping
orr x1, x1, #(1 << 17) // ; Set WFI non trapping
orr x1, x1, #(1 << 5) // ; Set CP15 barrier enabled
orr x1, x1, #(1 << 12) // ; Set I bit
orr x1, x1, #(1 << 2) // ; Set C bit
orr x1, x1, #(1 << 0) // ; Set M bit
msr SCTLR_EL1, x1
isb
bl _startup //jump to start
.else
b error // present exception level and selected exception level mismatch
.endif
error: b error
invalidate_dcaches:
dmb ISH
mrs x0, CLIDR_EL1 //; x0 = CLIDR
ubfx w2, w0, #24, #3 //; w2 = CLIDR.LoC
cmp w2, #0 //; LoC is 0?
b.eq invalidateCaches_end //; No cleaning required and enable MMU
mov w1, #0 //; w1 = level iterator
invalidateCaches_flush_level:
add w3, w1, w1, lsl #1 //; w3 = w1 * 3 (right-shift for cache type)
lsr w3, w0, w3 //; w3 = w0 >> w3
ubfx w3, w3, #0, #3 //; w3 = cache type of this level
cmp w3, #2 //; No cache at this level?
b.lt invalidateCaches_next_level
lsl w4, w1, #1
msr CSSELR_EL1, x4 //; Select current cache level in CSSELR
isb //; ISB required to reflect new CSIDR
mrs x4, CCSIDR_EL1 //; w4 = CSIDR
ubfx w3, w4, #0, #3
add w3, w3, #2 //; w3 = log2(line size)
ubfx w5, w4, #13, #15
ubfx w4, w4, #3, #10 //; w4 = Way number
clz w6, w4 //; w6 = 32 - log2(number of ways)
invalidateCaches_flush_set:
mov w8, w4 //; w8 = Way number
invalidateCaches_flush_way:
lsl w7, w1, #1 //; Fill level field
lsl w9, w5, w3
orr w7, w7, w9 //; Fill index field
lsl w9, w8, w6
orr w7, w7, w9 //; Fill way field
dc CISW, x7 //; Invalidate by set/way to point of coherency
subs w8, w8, #1 //; Decrement way
b.ge invalidateCaches_flush_way
subs w5, w5, #1 //; Descrement set
b.ge invalidateCaches_flush_set
invalidateCaches_next_level:
add w1, w1, #1 //; Next level
cmp w2, w1
b.gt invalidateCaches_flush_level
invalidateCaches_end:
ret
.end
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 7,362 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/gcc/asm_vectors.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file asm_vectors.s
*
* This file contains the initial vector table for the Cortex A53 processor
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ------- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 6.02 pkp 12/21/16 Added support for floating point access
* 6.02 pkp 01/22/17 Added support for EL1 non-secure and hypervisor
* baremetal guest
* 6.4 mus 06/14/17 Fixed bug in IRQInterruptHandler code snippet,
* which checks for the FPEN bit of CPACR_EL1
* 6.6 mus 01/19/18 Added isb after writing to the cpacr_el1/cptr_el3,
* to ensure enabling/disabling of floating-point unit
* is completed, before any subsequent instruction.
*
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "bspconfig.h"
.org 0
.text
.globl _boot
.globl _vector_table
.globl FIQInterrupt
.globl IRQInterrupt
.globl SErrorInterrupt
.globl SynchronousInterrupt
.globl FPUStatus
/*
* FPUContextSize is the size of the array where floating point registers are
* stored when required. The default size corresponds to the case when there is no
* nested interrupt. If there are nested interrupts in application which are using
* floating point operation, the size of FPUContextSize need to be increased as per
* requirement
*/
.set FPUContextSize, 528
.macro saveregister
stp X0,X1, [sp,#-0x10]!
stp X2,X3, [sp,#-0x10]!
stp X4,X5, [sp,#-0x10]!
stp X6,X7, [sp,#-0x10]!
stp X8,X9, [sp,#-0x10]!
stp X10,X11, [sp,#-0x10]!
stp X12,X13, [sp,#-0x10]!
stp X14,X15, [sp,#-0x10]!
stp X16,X17, [sp,#-0x10]!
stp X18,X19, [sp,#-0x10]!
stp X29,X30, [sp,#-0x10]!
.endm
.macro restoreregister
ldp X29,X30, [sp], #0x10
ldp X18,X19, [sp], #0x10
ldp X16,X17, [sp], #0x10
ldp X14,X15, [sp], #0x10
ldp X12,X13, [sp], #0x10
ldp X10,X11, [sp], #0x10
ldp X8,X9, [sp], #0x10
ldp X6,X7, [sp], #0x10
ldp X4,X5, [sp], #0x10
ldp X2,X3, [sp], #0x10
ldp X0,X1, [sp], #0x10
.endm
.macro savefloatregister
/* Load the floating point context array address from FPUContextBase */
ldr x1,=FPUContextBase
ldr x0, [x1]
/* Save all the floating point register to the array */
stp q0,q1, [x0], #0x20
stp q2,q3, [x0], #0x20
stp q4,q5, [x0], #0x20
stp q6,q7, [x0], #0x20
stp q8,q9, [x0], #0x20
stp q10,q11, [x0], #0x20
stp q12,q13, [x0], #0x20
stp q14,q15, [x0], #0x20
stp q16,q17, [x0], #0x20
stp q18,q19, [x0], #0x20
stp q20,q21, [x0], #0x20
stp q22,q23, [x0], #0x20
stp q24,q25, [x0], #0x20
stp q26,q27, [x0], #0x20
stp q28,q29, [x0], #0x20
stp q30,q31, [x0], #0x20
mrs x2, FPCR
mrs x3, FPSR
stp x2, x3, [x0], #0x10
/* Save current address of floating point context array to FPUContextBase */
str x0, [x1]
.endm
.macro restorefloatregister
/* Restore the address of floating point context array from FPUContextBase */
ldr x1,=FPUContextBase
ldr x0, [x1]
/* Restore all the floating point register from the array */
ldp x2, x3, [x0,#-0x10]!
msr FPCR, x2
msr FPSR, x3
ldp q30,q31, [x0,#-0x20]!
ldp q28,q29, [x0,#-0x20]!
ldp q26,q27, [x0,#-0x20]!
ldp q24,q25, [x0,#-0x20]!
ldp q22,q23, [x0,#-0x20]!
ldp q20,q21, [x0,#-0x20]!
ldp q18,q19, [x0,#-0x20]!
ldp q16,q17, [x0,#-0x20]!
ldp q14,q15, [x0,#-0x20]!
ldp q12,q13, [x0,#-0x20]!
ldp q10,q11, [x0,#-0x20]!
ldp q8,q9, [x0,#-0x20]!
ldp q6,q7, [x0,#-0x20]!
ldp q4,q5, [x0,#-0x20]!
ldp q2,q3, [x0,#-0x20]!
ldp q0,q1, [x0,#-0x20]!
/* Save current address of floating point context array to FPUContextBase */
str x0, [x1]
.endm
.org 0
.section .vectors, "a"
_vector_table:
.set VBAR, _vector_table
.org VBAR
/*
* if application is built for XEN GUEST as EL1 Non-secure following image
* header is required by XEN.
*/
.if (HYP_GUEST == 1)
/* Valid Image header. */
/* HW reset vector. */
ldr x16, =_boot
br x16
/* text offset. */
.dword 0
/* image size. */
.dword 0
/* flags. */
.dword 8
/* RES0 */
.dword 0
.dword 0
.dword 0
/* magic */
.dword 0x644d5241
/* RES0 */
.dword 0
/* End of Image header. */
.endif
b _boot
.org (VBAR + 0x200)
b SynchronousInterruptHandler
.org (VBAR + 0x280)
b IRQInterruptHandler
.org (VBAR + 0x300)
b FIQInterruptHandler
.org (VBAR + 0x380)
b SErrorInterruptHandler
SynchronousInterruptHandler:
saveregister
/* Check if the Synchronous abort is occurred due to floating point access. */
.if (EL3 == 1)
mrs x0, ESR_EL3
.else
mrs x0, ESR_EL1
.endif
and x0, x0, #(0x3F << 26)
mov x1, #(0x7 << 26)
cmp x0, x1
/* If exception is not due to floating point access go to synchronous handler */
bne synchronoushandler
/*
* If excpetion occurred due to floating point access, Enable the floating point
* access i.e. do not trap floating point instruction
*/
.if (EL3 == 1)
mrs x1,CPTR_EL3
bic x1, x1, #(0x1<<10)
msr CPTR_EL3, x1
.else
mrs x1,CPACR_EL1
orr x1, x1, #(0x1<<20)
msr CPACR_EL1, x1
.endif
isb
/* If the floating point access was previously enabled, store FPU context
* registers(storefloat).
*/
ldr x0, =FPUStatus
ldrb w1,[x0]
cbnz w1, storefloat
/*
* If the floating point access was not enabled previously, save the status of
* floating point accessibility i.e. enabled and store floating point context
* array address(FPUContext) to FPUContextBase.
*/
mov w1, #0x1
strb w1, [x0]
ldr x0, =FPUContext
ldr x1, =FPUContextBase
str x0,[x1]
b restorecontext
storefloat:
savefloatregister
b restorecontext
synchronoushandler:
bl SynchronousInterrupt
restorecontext:
restoreregister
eret
IRQInterruptHandler:
saveregister
/* Save the status of SPSR, ELR and CPTR to stack */
.if (EL3 == 1)
mrs x0, CPTR_EL3
mrs x1, ELR_EL3
mrs x2, SPSR_EL3
.else
mrs x0, CPACR_EL1
mrs x1, ELR_EL1
mrs x2, SPSR_EL1
.endif
stp x0, x1, [sp,#-0x10]!
str x2, [sp,#-0x10]!
/* Trap floating point access */
.if (EL3 == 1)
mrs x1,CPTR_EL3
orr x1, x1, #(0x1<<10)
msr CPTR_EL3, x1
.else
mrs x1,CPACR_EL1
bic x1, x1, #(0x1<<20)
msr CPACR_EL1, x1
.endif
isb
bl IRQInterrupt
/*
* If floating point access is enabled during interrupt handling,
* restore floating point registers.
*/
.if (EL3 == 1)
mrs x0, CPTR_EL3
ands x0, x0, #(0x1<<10)
bne RestorePrevState
.else
mrs x0,CPACR_EL1
ands x0, x0, #(0x1<<20)
beq RestorePrevState
.endif
restorefloatregister
/* Restore the status of SPSR, ELR and CPTR from stack */
RestorePrevState:
ldr x2,[sp],0x10
ldp x0, x1, [sp],0x10
.if (EL3 == 1)
msr CPTR_EL3, x0
msr ELR_EL3, x1
msr SPSR_EL3, x2
.else
msr CPACR_EL1, x0
msr ELR_EL1, x1
msr SPSR_EL1, x2
.endif
restoreregister
eret
FIQInterruptHandler:
saveregister
bl FIQInterrupt
restoreregister
eret
SErrorInterruptHandler:
saveregister
bl SErrorInterrupt
restoreregister
eret
.align 8
/* Array to store floating point registers */
FPUContext: .skip FPUContextSize
/* Stores address for floating point context array */
FPUContextBase: .skip 8
FPUStatus: .skip 1
.end
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 3,119 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/gcc/xil-crt0.S | /******************************************************************************
* Copyright (C) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil-crt0.S
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 5.04 pkp 12/18/15 Initialized global constructor for C++ applications
* 5.04 pkp 01/05/16 Set the reset vector register RVBAR equivalent to
* vector table base address
* 6.02 pkp 01/22/17 Added support for EL1 non-secure
* 6.6 srm 10/18/17 Added timer configuration using XTime_StartTTCTimer API.
* Now the TTC instance as specified by the user will be
* started.
* 6.6 mus 01/29/18 Initialized the xen PV console for Cortexa53 64 bit
* EL1 NS BSP.
* 7.2 sd 02/23/20 Clock Init is called
* 7.2 sd 02/23/20 Clock code added under XCLOCKING flag
* </pre>
*
* @note
*
* None.
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.text
.Lsbss_start:
.quad __sbss_start
.Lsbss_end:
.quad __sbss_end
.Lbss_start:
.quad __bss_start__
.Lbss_end:
.quad __bss_end__
.set APU_PWRCTL, 0xFD5C0090
.globl _startup
_startup:
mov x0, #0
.if (EL3 == 1)
/* Check whether the clearing of bss sections shall be skipped */
ldr x10, =APU_PWRCTL /* Load PWRCTRL address */
ldr w11, [x10] /* Read PWRCTRL register */
mrs x2, MPIDR_EL1 /* Read MPIDR_EL1 */
ubfx x2, x2, #0, #8 /* Extract CPU ID (affinity level 0) */
mov w1, #1
lsl w2, w1, w2 /* Shift CPU ID to get one-hot ID */
ands w11, w11, w2 /* Get PWRCTRL bit for this core */
bne .Lenclbss /* Skip BSS and SBSS clearing */
.endif
/* clear sbss */
ldr x1,.Lsbss_start /* calculate beginning of the SBSS */
ldr x2,.Lsbss_end /* calculate end of the SBSS */
.Lloop_sbss:
cmp x1,x2
bge .Lenclsbss /* If no SBSS, no clearing required */
str x0, [x1], #8
b .Lloop_sbss
.Lenclsbss:
/* clear bss */
ldr x1,.Lbss_start /* calculate beginning of the BSS */
ldr x2,.Lbss_end /* calculate end of the BSS */
.Lloop_bss:
cmp x1,x2
bge .Lenclbss /* If no BSS, no clearing required */
str x0, [x1], #8
b .Lloop_bss
.Lenclbss:
/* run global constructors */
bl __libc_init_array
/* Reset and start Triple Timer Counter */
#if defined (SLEEP_TIMER_BASEADDR)
bl XTime_StartTTCTimer
#endif
.if (EL1_NONSECURE == 1 && HYP_GUEST == 1 && \
XEN_USE_PV_CONSOLE == 1)
bl XPVXenConsole_Init
.endif
/* make sure argc and argv are valid */
mov x0, #0
mov x1, #0
#ifdef XCLOCKING
bl Xil_ClockInit
#endif
bl main /* Jump to main C code */
/* Cleanup global constructors */
bl __libc_fini_array
bl exit
.Lexit: /* should never get here */
b .Lexit
.Lstart:
.size _startup,.Lstart-_startup
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,453 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/versal/armclang/translation_table.S | /******************************************************************************
* Copyright (C) 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a72_64_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A72. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for versal
* architecture. It utilizes translation granual size of 4KB with 2MB section
* size for initial 5GB memory and 1GB section size for memory after 5GB.
* The overview of translation table memory attributes is described below.
*
*| Name | Memory Range | Def. in Translation Table |
*|-----------------------|-----------------------------------|-----------------------------|
*| DDR | 0x000_0000_0000 - 0x000_7FFF_FFFF | Normal WB Cacheable |
*| LPD_AFI_FS | 0x000_8000_0000 - 0x000_9FFF_FFFF | Strongly Ordered |
*| Reserved | 0x000_A000_0000 - 0x000_A3FF_FFFF | Unassigned |
*| FPD_AFI_0 | 0x000_A400_0000 - 0x000_AFFF_FFFF | Strongly Ordered |
*| FPD_AFI_1 | 0x000_B000_0000 - 0x000_BFFF_FFFF | Strongly Ordered |
*| QSPI | 0x000_C000_0000 - 0x000_DFFF_FFFF | Strongly Ordered |
*| PCIE region 0 | 0x000_E000_0000 - 0x000_EFFF_FFFF | Strongly Ordered |
*| PMC | 0x000_F000_0000 - 0x000_F7FF_FFFF | Strongly Ordered |
*| STM_CORESIGHT | 0x000_F800_0000 - 0x000_F8FF_FFFF | Strongly Ordered |
*| GIC | 0x000_F900_0000 - 0x000_F90F_FFFF | Strongly Ordered |
*| Reserved | 0x000_F910_0000 - 0x000_FBFF_FFFF | Unassigned |
*| CPM | 0x000_FC00_0000 - 0x000_FCFF_FFFF | Strongly Ordered |
*| FPD slaves | 0x000_FD00_0000 - 0x000_FDFF_FFFF | Strongly Ordered |
*| LPD slaves | 0x000_FE00_0000 - 0x000_FFDF_FFFF | Strongly Ordered |
*| OCM | 0x000_FFE0_0000 - 0xFFF_FFFF_FFFF | Normal WB Cacheable |
*| PMC region 0-3 | 0x001_0000_0000 - 0x001_1FFF_FFFF | Strongly Ordered |
*| Reserved | 0x001_2000_0000 - 0x001_FFFF_FFFF | Unassigned |
*| ME Array 0-3 | 0x002_0000_0000 - 0x002_FFFF_FFFF | Strongly Ordered |
*| Reserved | 0x003_0000_0000 - 0x003_FFFF_FFFF | Unassigned |
*| PL- via PS | 0x004_0000_0000 - 0x005_FFFF_FFFF | Strongly Ordered |
*| PCIe region 1 | 0x006_0000_0000 - 0x007_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x008_0000_0000 - 0x00F_FFFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x010_0000_0000 - 0x03F_FFFF_FFFF | Unassigned |
*| HBM 0-3 | 0x040_0000_0000 - 0x07F_FFFF_FFFF | Strongly Ordered |
*| PCIe region 2 | 0x080_0000_0000 - 0x0BF_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x0C0_0000_0000 - 0x1B7_7FFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF | Unassigned |
*| PL- Via NoC | 0x200_0000_0000 - 0x3FF_FFFF_FFFF | Strongly Ordered |
*| PL- Via PS | 0x400_0000_0000 - 0x4FF_FFFF_FFFF | Strongly Ordered |
*| DDR CH1-CH3 | 0x500_0000_0000 - 0x7FF_FFFF_FFFF | Normal WB Cacheable |
*| PL- Via NoC | 0x800_0000_0000 - 0xFFF_FFFF_FFFF | Strongly Ordered |
*
* @note
*
* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
* because minimum section size in translation table section is 2MB.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 7.2 mus 01/09/20 Initial version
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
EXPORT MMUTableL0
EXPORT MMUTableL1
EXPORT MMUTableL2
GBLA abscnt
GBLA count
GBLA sect
Reserved EQU 0x0 ; Fault
#if EL1_NONSECURE
Memory EQU 0x405:OR:(2:SHL:8):OR:0x0 ; normal writeback write allocate outer shared read write */
#else
Memory EQU 0x405:OR:(3:SHL:8):OR:0x0 ; normal writeback write allocate inner shared read write */
#endif
Device EQU 0x409:OR:(1:SHL:53):OR:(1:SHL:54):OR:0x0 ; strongly ordered read write non executable
AREA |.mmu_tbl0|, CODE, ALIGN=12
MMUTableL0
count SETA 0
WHILE count<0x1f
DCQU MMUTableL1+count*0x1000+0x3 ; 0x0000_0000 - 0x7F_FFFF_FFFF
count SETA count+1
WEND
count SETA 1
WHILE count<0x20
DCQ MMUTableL1+count*0x1000+0x3 ; 0x80_0000_0000 - 0xFFF_FFFF_FFFF
count SETA count+1
WEND
AREA |.mmu_tbl1|, CODE, ALIGN=12
MMUTableL1
DCQU MMUTableL2+0x3 ; 0x0000_0000 - 0x3FFF_FFFF
count SETA 1 ; 0x4000_0000 - 0x1_3FFF_FFFF
WHILE count<5
DCQ MMUTableL2+count*0x1000+0x3 ; 1GB DDR, 512MB LPD_AFI_FS, 448MB FPD_AFI_0, 512MB QSPI,
; 256MB PCIe region 0, PMC 128MB, GIC 1 MB, reserved 47MB,
; 2GB other devices and memory, 512 MB PMC
count SETA count+1
WEND
Fixlocl1 EQU 0x140000000
abscnt SETA 0
count SETA 0
WHILE count<0x3
DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x1_4000_0000 - 0x1_FFFF_FFFF
; 3GB Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x4
DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x2_0000_0000 - 0x2_FFFF_FFFF
; 4GB ME Array 0-3
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x4
DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x3_0000_0000 - 0x3_FFFF_FFFF
; 4GB Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x10
DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x4_0000_0000 - 0x7_FFFF_FFFF
; 8GB PL - via PS, 8GB PCIe region1
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
DDR_1_START EQU XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
DDR_1_END EQU XPAR_AXI_NOC_DDR_LOW_1_HIGHADDR
DDR_1_SIZE EQU (DDR_1_END - DDR_1_START+1)
#if DDR_1_SIZE > 0x800000000
; If DDR size is larger than 32GB, truncate to 32GB
DDR_1_REG EQU 0x20
#else
DDR_1_REG EQU DDR_1_SIZE/0x40000000
#endif
#else
DDR_1_REG EQU 0
#endif
UNDEF_1_REG EQU (0x20 - DDR_1_REG)
; DDR based on size in hw design
count SETA 0
WHILE count<DDR_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0xC0
DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x10_0000_0000 - 0x3F_FFFF_FFFF
; 192GB Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100
DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x40_0000_0000 - 0x7F_FFFF_FFFF
; 256GB HBM 0-3
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100
DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x80_0000_0000 - 0xBF_FFFF_FFFF
; 256GB PCIe 2
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
DDR_2_START EQU XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
DDR_2_END EQU XPAR_AXI_NOC_DDR_LOW_2_HIGHADDR
DDR_2_SIZE EQU (DDR_2_END - DDR_2_START+1)
#if DDR_2_SIZE > 0x4000000000
; If DDR size is larger than 256 GB, truncate to 256GB
DDR_2_REG EQU 0x100
#else
DDR_2_REG EQU DDR_2_SIZE/0x40000000
#endif
#else
DDR_2_REG EQU 0
#endif
UNDEF_2_REG EQU (0x100 - DDR_2_REG)
; DDR based on size in hw design
count SETA 0
WHILE count<DDR_2_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_2_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
DDR_3_START EQU XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
DDR_3_END EQU XPAR_AXI_NOC_DDR_LOW_3_HIGHADDR
DDR_3_SIZE EQU (DDR_3_END - DDR_3_START+1)
#if DDR_3_SIZE > 0xB780000000
; If DDR size is larger than 734 GB, truncate to 734GB
DDR_3_REG EQU 0x2de
#else
DDR_3_REG EQU DDR_3_SIZE/0x40000000
#endif
#else
DDR_3_REG EQU 0
#endif
UNDEF_3_REG EQU (0x2de - DDR_3_REG)
; DDR based on size in hw design
count SETA 0
WHILE count<DDR_3_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_3_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x122
DCQU Fixlocl1+abscnt*0x40000000+Reserved ; 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF
; 290GB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x800
DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x200_0000_0000 - 0x3FF_FFFF_FFFF
; 2TB PL- via NoC
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x400
DCQU Fixlocl1+abscnt*0x40000000+Device ; 0x400_0000_0000 - 0x4FF_FFFF_FFFF
; 1TB PL- via PS
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_AXI_NOC_DDR_CH_1_BASEADDR
DDR_CH_1_START EQU XPAR_AXI_NOC_DDR_CH_1_BASEADDR
DDR_CH_1_END EQU XPAR_AXI_NOC_DDR_CH_1_HIGHADDR
DDR_CH_1_SIZE EQU (DDR_CH_1_END - DDR_CH_1_START + 1)
#if DDR_CH_1_SIZE > 0x010000000000
; If DDR size is larger than 1TB, truncate to 1 TB
DDR_CH_1_REG EQU 0x400 ; 0x500_0000_0000 - 0x5FF_FFFF_FFFF
#else
DDR_CH_1_REG EQU DDR_CH_1_SIZE/0x40000000
#endif
#else
DDR_CH_1_REG EQU 0
#endif
UNDEF_CH_1_REG EQU (0x400 - DDR_CH_1_REG)
; DDR based on size in hw design, Max size 1 TB
count SETA 0
WHILE count<DDR_CH_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_CH_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_AXI_NOC_DDR_CH_2_BASEADDR
DDR_CH_2_START EQU XPAR_AXI_NOC_DDR_CH_2_BASEADDR
DDR_CH_2_END EQU XPAR_AXI_NOC_DDR_CH_2_HIGHADDR
DDR_CH_2_SIZE EQU (DDR_CH_2_END - DDR_CH_2_START + 1)
#if DDR_CH_2_SIZE > 0x010000000000
; If DDR_CH_2 size is larger than 1TB, truncate to 1 TB
DDR_CH_2_REG EQU 0x400 ; 0x600_0000_0000 - 0x6FF_FFFF_FFFF
#else
DDR_CH_2_REG EQU DDR_CH_2_SIZE/0x40000000
#endif
#else
DDR_CH_2_REG EQU 0
#endif
UNDEF_CH_2_REG EQU (0x400 - DDR_CH_2_REG)
; DDR based on size in hw design, Max size 1 TB
count SETA 0
WHILE count<DDR_CH_2_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_CH_2_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_AXI_NOC_DDR_CH_3_BASEADDR
DDR_CH_3_START EQU XPAR_AXI_NOC_DDR_CH_3_BASEADDR
DDR_CH_3_END EQU XPAR_AXI_NOC_DDR_CH_3_HIGHADDR
DDR_CH_3_SIZE EQU (DDR_CH_3_END - DDR_CH_3_START+1)
#if DDR_CH_3_SIZE > 0x010000000000
; If DDR_CH_3 size is larger than 1TB, truncate to 1 TB */
DDR_CH_3_REG EQU 0x400 ; 0x700_0000_0000 - 0x7FF_FFFF_FFFF
#else
DDR_CH_3_REG EQU DDR_CH_3_SIZE/0x40000000
#endif
#else
DDR_CH_3_REG EQU 0
#endif
UNDEF_CH_3_REG EQU (0x400 - DDR_CH_3_REG)
; DDR based on size in hw design, Max size 1 TB
count SETA 0
WHILE count<DDR_CH_3_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_CH_3_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x2000
DCQU Fixlocl1+abscnt*0x40000000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
AREA |.mmu_tbl2|, CODE, ALIGN=12
MMUTableL2
abscnt SETA 0
#ifdef XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
DDR_0_START EQU XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
DDR_0_END EQU XPAR_AXI_NOC_DDR_LOW_0_HIGHADDR
DDR_0_SIZE EQU (DDR_0_END - DDR_0_START+1)
#if DDR_0_SIZE > 0x80000000
; If DDR size is larger than 2GB, truncate to 2GB
.set DDR_0_REG, 0x400
#else
DDR_0_REG EQU DDR_0_SIZE/0x200000
#endif
#else
DDR_0_REG EQU 0
#endif
UNDEF_0_REG EQU (0x400 - DDR_0_REG)
; DDR based on size in hw design
count SETA 0
WHILE count<DDR_0_REG
DCQU abscnt*0x200000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_0_REG
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x20 ; 0xA000_0000 - 0xA3FF_FFFF
DCQU abscnt*0x200000+Device ; 64MB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x60 ; 0xA400_0000 - 0xAFFF_FFFF
DCQU abscnt*0x200000+Device ; 192MB FPD AFI 0
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x80 ; 0xB000_0000 - 0xBFFF_FFFF
DCQU abscnt*0x200000+Device ; 192MB FPD AFI 1
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100 ; 0xC000_0000 - 0xDFFF_FFFF
DCQU abscnt*0x200000+Device ; 512MB QSPI
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x80 ; 0xE000_0000 - 0xEFFF_FFFF
DCQU abscnt*0x200000+Device ; 256MB lower PCIe
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x40 ; 0xF000_0000 - 0xF7FF_FFFF
DCQU abscnt*0x200000+Device ; 128MB PMC
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x8 ; 0xF800_0000 - 0xF8FF_FFFF
DCQU abscnt*0x200000+Device ; 16MB coresight
count SETA count+1
abscnt SETA abscnt+1
WEND
; 1MB GIC is marked for 2MB region as the minimum block size in
; translation table is 2MB and adjacent 47MB reserved region is
; converted to 46MB
DCQU abscnt*0x200000+Device ; 0xF910_0000 - 0xF90F_FFFF
abscnt SETA abscnt+1
; Reserved 46MB 0xF91FFFFF - 0xFBFFFFFF
count SETA 0
WHILE count<0x17 ; 0xF91F_FFFF - 0xFBFF_FFFF
DCQU abscnt*0x200000+Reserved ; 46MB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x1F ; 0xFC00_0000 - 0xFFDF_FFFF
DCQU abscnt*0x200000+Device ; 16MB CPM,16MB FPS, 30MB LPS slaves
count SETA count+1
abscnt SETA abscnt+1
WEND
DCQU abscnt*0x200000+Memory ; 0xFFE0_0000 - 0xFFFF_FFFF
abscnt SETA abscnt+1
count SETA 0
WHILE count<0x100 ; 0x1_0000_0000 - 0x1_1FFF_FFFF
DCQU abscnt*0x200000+Device ; 512MB PMC 0-3
count SETA count+1
abscnt SETA abscnt+1
WEND
count SETA 0
WHILE count<0x100 ; 0x1_2000_0000 - 0x1_3FFF_FFFF
DCQU abscnt*0x200000+Device ; 512MB reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
END
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 15,212 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/versal/gcc/translation_table.S | /******************************************************************************
* Copyright (C) 2018 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a53_64_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A53. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq ultrascale+
* architecture. It utilizes translation granual size of 4KB with 2MB section
* size for initial 5GB memory and 1GB section size for memory after 5GB.
* The overview of translation table memory attributes is described below.
*
*| Name | Memory Range | Def. in Translation Table |
*|-----------------------|-----------------------------------|-----------------------------|
*| DDR | 0x000_0000_0000 - 0x000_7FFF_FFFF | Normal WB Cacheable |
*| LPD_AFI_FS | 0x000_8000_0000 - 0x000_9FFF_FFFF | Strongly Ordered |
*| Reserved | 0x000_A000_0000 - 0x000_A3FF_FFFF | Unassigned |
*| FPD_AFI_0 | 0x000_A400_0000 - 0x000_AFFF_FFFF | Strongly Ordered |
*| FPD_AFI_1 | 0x000_B000_0000 - 0x000_BFFF_FFFF | Strongly Ordered |
*| QSPI | 0x000_C000_0000 - 0x000_DFFF_FFFF | Strongly Ordered |
*| PCIE region 0 | 0x000_E000_0000 - 0x000_EFFF_FFFF | Strongly Ordered |
*| PMC | 0x000_F000_0000 - 0x000_F7FF_FFFF | Strongly Ordered |
*| STM_CORESIGHT | 0x000_F800_0000 - 0x000_F8FF_FFFF | Strongly Ordered |
*| GIC | 0x000_F900_0000 - 0x000_F90F_FFFF | Strongly Ordered |
*| Reserved | 0x000_F910_0000 - 0x000_FBFF_FFFF | Unassigned |
*| CPM | 0x000_FC00_0000 - 0x000_FCFF_FFFF | Strongly Ordered |
*| FPD slaves | 0x000_FD00_0000 - 0x000_FDFF_FFFF | Strongly Ordered |
*| LPD slaves | 0x000_FE00_0000 - 0x000_FFDF_FFFF | Strongly Ordered |
*| OCM | 0x000_FFE0_0000 - 0xFFF_FFFF_FFFF | Normal WB Cacheable |
*| PMC region 0-3 | 0x001_0000_0000 - 0x001_1FFF_FFFF | Strongly Ordered |
*| Reserved | 0x001_2000_0000 - 0x001_FFFF_FFFF | Unassigned |
*| ME Array 0-3 | 0x002_0000_0000 - 0x002_FFFF_FFFF | Strongly Ordered |
*| Reserved | 0x003_0000_0000 - 0x003_FFFF_FFFF | Unassigned |
*| PL- via PS | 0x004_0000_0000 - 0x005_FFFF_FFFF | Strongly Ordered |
*| PCIe region 1 | 0x006_0000_0000 - 0x007_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x008_0000_0000 - 0x00F_FFFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x010_0000_0000 - 0x03F_FFFF_FFFF | Unassigned |
*| HBM 0-3 | 0x040_0000_0000 - 0x07F_FFFF_FFFF | Strongly Ordered |
*| PCIe region 2 | 0x080_0000_0000 - 0x0BF_FFFF_FFFF | Strongly Ordered |
*| DDR | 0x0C0_0000_0000 - 0x1B7_7FFF_FFFF | Normal WB Cacheable |
*| Reserved | 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF | Unassigned |
*| PL- Via NoC | 0x200_0000_0000 - 0x3FF_FFFF_FFFF | Strongly Ordered |
*| PL- Via PS | 0x400_0000_0000 - 0x4FF_FFFF_FFFF | Strongly Ordered |
*| DDR CH1-CH3 | 0x500_0000_0000 - 0x7FF_FFFF_FFFF | Normal WB Cacheable |
*| PL- Via NoC | 0x800_0000_0000 - 0xFFF_FFFF_FFFF | Strongly Ordered |
*
* @note
*
* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
* because minimum section size in translation table section is 2MB. Region
* 0x00FFC00000 - 0x00FFDFFFFF contains CSU and PMU memory which are marked as
* Device since it is less than 1MB and falls in a region with device memory.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 7.00 mus 05/21/14 Initial version
* 7.00 mus 03/16/19 Updated translation table to mark DDR regions as
* memory, based on the DDR size in hdf
* 7.1 mus 08/29/19 Updated translation table entries for DDR_CH_1,
* DDR_CH_2 and DDR_CH_3 based on respective size in hdf
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.globl MMUTableL0
.globl MMUTableL1
.globl MMUTableL2
.set reserved, 0x0 /* Fault*/
#if EL1_NONSECURE
.set Memory, 0x405 | (2 << 8) | (0x0) /* normal writeback write allocate outer shared read write */
#else
.set Memory, 0x405 | (3 << 8) | (0x0) /* normal writeback write allocate inner shared read write */
#endif
.set Device, 0x409 | (1 << 53)| (1 << 54) |(0x0) /* strongly ordered read write non executable*/
.section .mmu_tbl0,"a"
MMUTableL0:
.set SECT, MMUTableL1 /* 0x0000_0000 - 0x7F_FFFF_FFFF */
.8byte SECT + 0x3
.rept 0x1f
.set SECT, SECT + 0x1000 /* 0x80_0000_0000 - 0xFFF_FFFF_FFFF */
.8byte SECT + 0x3
.endr
.section .mmu_tbl1,"a"
MMUTableL1:
.set SECT, MMUTableL2 /* 0x0000_0000 - 0x3FFF_FFFF */
.8byte SECT + 0x3 /* 1GB DDR */
.rept 0x4 /* 0x4000_0000 - 0x1_3FFF_FFFF */
.set SECT, SECT + 0x1000 /*1GB DDR, 512MB LPD_AFI_FS, 448MB FPD_AFI_0, 512MB QSPI,
256MB PCIe region 0, PMC 128MB, GIC 1 MB, reserved 47MB,
2GB other devices and memory, 512 MB PMC */
.8byte SECT + 0x3
.endr
.set SECT,0x140000000
.rept 0x3 /* 0x1_4000_0000 - 0x1_FFFF_FFFF */
.8byte SECT + reserved /* 3GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x4 /* 0x2_0000_0000 - 0x2_FFFF_FFFF */
.8byte SECT + Device /* 4GB ME Array 0-3*/
.set SECT, SECT + 0x40000000
.endr
.rept 0x4 /* 0x3_0000_0000 - 0x3_FFFF_FFFF */
.8byte SECT + reserved /* 4GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x10 /* 0x4_0000_0000 - 0x7_FFFF_FFFF */
.8byte SECT + Device /* 8GB PL - via PS, 8GB PCIe region1 */
.set SECT, SECT + 0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
.set DDR_1_START, XPAR_AXI_NOC_DDR_LOW_1_BASEADDR
.set DDR_1_END, XPAR_AXI_NOC_DDR_LOW_1_HIGHADDR
.set DDR_1_SIZE, (DDR_1_END - DDR_1_START)+1
.if DDR_1_SIZE > 0x800000000
/* If DDR size is larger than 32GB, truncate to 32GB */
.set DDR_1_REG, 0x20
.else
.set DDR_1_REG, DDR_1_SIZE/0x40000000
.endif
#else
.set DDR_1_REG, 0
#warning "There's no DDR_1 in the HW design. MMU translation table marks 32 GB DDR address space as undefined"
#endif
.set UNDEF_1_REG, 0x20 - DDR_1_REG
.rept DDR_1_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_1_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0xC0 /* 0x10_0000_0000 - 0x3F_FFFF_FFFF */
.8byte SECT + reserved /* 192GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x40_0000_0000 - 0x7F_FFFF_FFFF */
.8byte SECT + Device /* 256GB HBM 0-3*/
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x80_0000_0000 - 0xBF_FFFF_FFFF */
.8byte SECT + Device /* 256GB PCIe 2 */
.set SECT, SECT + 0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
.set DDR_2_START, XPAR_AXI_NOC_DDR_LOW_2_BASEADDR
.set DDR_2_END, XPAR_AXI_NOC_DDR_LOW_2_HIGHADDR
.set DDR_2_SIZE, (DDR_2_END - DDR_2_START)+1
.if DDR_2_SIZE > 0x4000000000
/* If DDR size is larger than 256 GB, truncate to 256GB */
.set DDR_2_REG, 0x100
.else
.set DDR_2_REG, DDR_2_SIZE/0x40000000
.endif
#else
.set DDR_2_REG, 0
#warning "There's no DDR_LOW_2 in the HW design. MMU translation table marks 256 GB DDR address space as undefined"
#endif
.set UNDEF_2_REG, 0x100 - DDR_2_REG
.rept DDR_2_REG /* DDR based on size in hdf 0xC0_0000_0000 - 0xFF_FFFF_FFFF */
.8byte SECT + Memory /* Maximum DDR region size - 256GB */
.set SECT, SECT + 0x40000000
.endr
.rept UNDEF_2_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
.set DDR_3_START, XPAR_AXI_NOC_DDR_LOW_3_BASEADDR
.set DDR_3_END, XPAR_AXI_NOC_DDR_LOW_3_HIGHADDR
.set DDR_3_SIZE, (DDR_3_END - DDR_3_START)+1
.if DDR_3_SIZE > 0xB780000000
/* If DDR size is larger than 734 GB, truncate to 734GB */
.set DDR_3_REG, 0x2de
.else
.set DDR_3_REG, DDR_3_SIZE/0x40000000
.endif
#else
.set DDR_3_REG, 0
#warning "There's no DDR_LOW_3 in the HW design. MMU translation table marks 734 GB DDR address space as undefined"
#endif
.set UNDEF_3_REG, 0x2de - DDR_3_REG
.rept DDR_3_REG /* DDR based on size in hdf 0x100_0000_0000 - 0x1B7_7FFF_FFFF */
.8byte SECT + Memory /* Maximum DDR region size - 734GB DDR */
.set SECT, SECT + 0x40000000
.endr
.rept UNDEF_3_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0x122 /* 0x1B7_8000_0000 - 0x1FF_FFFF_FFFF */
.8byte SECT + reserved /* 290GB reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x800 /* 0x200_0000_0000 - 0x3FF_FFFF_FFFF */
.8byte SECT + Device /* 2TB PL- via NoC */
.set SECT, SECT + 0x40000000
.endr
.rept 0x400 /* 0x400_0000_0000 - 0x4FF_FFFF_FFFF */
.8byte SECT + Device /* 1TB PL- via PS */
.set SECT, SECT + 0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_CH_1_BASEADDR
.set DDR_CH_1_START, XPAR_AXI_NOC_DDR_CH_1_BASEADDR
.set DDR_CH_1_END, XPAR_AXI_NOC_DDR_CH_1_HIGHADDR
.set DDR_CH_1_SIZE, (DDR_CH_1_END - DDR_CH_1_START)+1
.if DDR_CH_1_SIZE > 0x010000000000
/* If DDR size is larger than 1TB, truncate to 1 TB */
.set DDR_CH_1_REG, 0x400 /* 0x500_0000_0000 - 0x5FF_FFFF_FFFF */
.else
.set DDR_CH_1_REG, DDR_CH_1_SIZE/0x40000000
.endif
#else
.set DDR_CH_1_REG, 0
#warning "There's no DDR_CH_1 in the HW design. MMU translation table marks 1 TB DDR address space as undefined"
#endif
.set UNDEF_CH_1_REG, 0x400 - DDR_CH_1_REG
.rept DDR_CH_1_REG /* DDR based on size in hdf, Max size 1 TB*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_CH_1_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_CH_2_BASEADDR
.set DDR_CH_2_START, XPAR_AXI_NOC_DDR_CH_2_BASEADDR
.set DDR_CH_2_END, XPAR_AXI_NOC_DDR_CH_2_HIGHADDR
.set DDR_CH_2_SIZE, (DDR_CH_2_END - DDR_CH_2_START)+1
.if DDR_CH_2_SIZE > 0x010000000000
/* If DDR_CH_2 size is larger than 1TB, truncate to 1 TB */
.set DDR_CH_2_REG, 0x400 /* 0x600_0000_0000 - 0x6FF_FFFF_FFFF */
.else
.set DDR_CH_2_REG, DDR_CH_2_SIZE/0x40000000
.endif
#else
.set DDR_CH_2_REG, 0
#warning "There's no DDR_CH_2 in the HW design. MMU translation table marks 1 TB DDR address space as undefined"
#endif
.set UNDEF_CH_2_REG, 0x400 - DDR_CH_2_REG
.rept DDR_CH_2_REG /* DDR based on size in hdf, Max size 1 TB*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_CH_2_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
#ifdef XPAR_AXI_NOC_DDR_CH_3_BASEADDR
.set DDR_CH_3_START, XPAR_AXI_NOC_DDR_CH_3_BASEADDR
.set DDR_CH_3_END, XPAR_AXI_NOC_DDR_CH_3_HIGHADDR
.set DDR_CH_3_SIZE, (DDR_CH_3_END - DDR_CH_3_START)+1
.if DDR_CH_3_SIZE > 0x010000000000
/* If DDR_CH_3 size is larger than 1TB, truncate to 1 TB */
.set DDR_CH_3_REG, 0x400 /* 0x700_0000_0000 - 0x7FF_FFFF_FFFF */
.else
.set DDR_CH_3_REG, DDR_CH_3_SIZE/0x40000000
.endif
#else
.set DDR_CH_3_REG, 0
#warning "There's no DDR_CH_3 in the HW design. MMU translation table marks 1 TB DDR address space as undefined"
#endif
.set UNDEF_CH_3_REG, 0x400 - DDR_CH_3_REG
.rept DDR_CH_3_REG /* DDR based on size in hdf, Max size 1 TB*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_CH_3_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0x2000 /* 0x800_0000_0000 - 0xFFF_FFFF_FFFF */
.8byte SECT + Device /* 8TB PL- via NoC*/
.set SECT, SECT + 0x40000000
.endr
.section .mmu_tbl2,"a"
MMUTableL2:
.set SECT, 0
#ifdef XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
.set DDR_0_START, XPAR_AXI_NOC_DDR_LOW_0_BASEADDR
.set DDR_0_END, XPAR_AXI_NOC_DDR_LOW_0_HIGHADDR
.set DDR_0_SIZE, (DDR_0_END - DDR_0_START)+1
.if DDR_0_SIZE > 0x80000000
/* If DDR size is larger than 2GB, truncate to 2GB */
.set DDR_0_REG, 0x400
.else
.set DDR_0_REG, DDR_0_SIZE/0x200000
.endif
#else
.set DDR_0_REG, 0
#warning "There's no DDR_0 in the HW design. MMU translation table marks 2 GB DDR address space as undefined"
#endif
.set UNDEF_0_REG, 0x400 - DDR_0_REG
.rept DDR_0_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x200000
.endr
.rept UNDEF_0_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x200000
.endr
.rept 0x0100 /* 0x8000_0000 - 0xBFFF_FFFF */
.8byte SECT + Device /* 512MB LPD AFI */
.set SECT, SECT+0x200000
.endr
.rept 0x020 /* 0xA000_0000 - 0xA3FF_FFFF */
.8byte SECT + reserved /* 64MB reserved*/
.set SECT, SECT+0x200000
.endr
.rept 0x60 /* 0xA400_0000 - 0xAFFF_FFFF */
.8byte SECT + Device /* 192MB FPD AFI 0 */
.set SECT, SECT+0x200000
.endr
.rept 0x80 /* 0xB000_0000 - 0xBFFF_FFFF */
.8byte SECT + Device /* 192MB FPD AFI 1 */
.set SECT, SECT+0x200000
.endr
.rept 0x100 /* 0xC000_0000 - 0xDFFF_FFFF */
.8byte SECT + Device /* 512MB QSPI */
.set SECT, SECT+0x200000
.endr
.rept 0x080 /* 0xE000_0000 - 0xEFFF_FFFF */
.8byte SECT + Device /* 256MB lower PCIe */
.set SECT, SECT+0x200000
.endr
.rept 0x040 /* 0xF000_0000 - 0xF7FF_FFFF */
.8byte SECT + Device /* 128MB PMC */
.set SECT, SECT+0x200000
.endr
.rept 0x08 /* 0xF800_0000 - 0xF8FF_FFFF */
.8byte SECT + Device /* 16MB coresight */
.set SECT, SECT+0x200000
.endr
/* 1MB GIC is marked for 2MB region as the minimum block size in
translation table is 2MB and adjacent 47MB reserved region is
converted to 46MB */
.8byte SECT + Device /* 0xF910_0000 - 0xF90F_FFFF */
/* Reserved 46MB 0xF91FFFFF - 0xFBFFFFFF*/
.rept 0x17 /* 0xF91F_FFFF - 0xFBFF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + reserved /* 46MB reserved */
.endr
.rept 0x1F /* 0xFC00_0000 - 0xFFDF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + Device /* 16MB CPM,16MB FPS, 30MB LPS slaves */
.endr
.set SECT, SECT+0x200000 /* 0xFFE0_0000 - 0xFFFF_FFFF*/
.8byte SECT + Memory /*2MB OCM/TCM*/
.rept 0x100 /* 0x1_0000_0000 - 0x1_1FFF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + Device /* 512MB PMC 0-3 */
.endr
.rept 0x100 /* 0x1_2000_0000 - 0x1_3FFF_FFFF */
.set SECT, SECT+0x200000
.8byte SECT + reserved /* 512MB reserved */
.endr
.end
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,548 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/ZynqMP/armclang/translation_table.S | ;/******************************************************************************
;* Copyright (c) 2018 - 2020 Xilinx, Inc. All rights reserved.
;* SPDX-License-Identifier: MIT
;******************************************************************************/
;/*****************************************************************************/
;/**
;* @file translation_table.s
;*
;* @addtogroup a53_64_boot_code
;* @{
;* <h2> translation_table.S </h2>
;* translation_table.S contains a static page table required by MMU for
;* cortex-A53. This translation table is flat mapped (input address = output
;* address) with default memory attributes defined for zynq ultrascale+
;* architecture. It utilizes translation granule size of 4KB with 2MB section
;* size for initial 4GB memory and 1GB section size for memory after 4GB.
;* The overview of translation table memory attributes is described below.
;*
;*| | Memory Range | Definition in Translation Table |
;*|-----------------------|-----------------------------|-----------------------------------|
;*| DDR | 0x0000000000 - 0x007FFFFFFF | Normal write-back Cacheable |
;*| PL | 0x0080000000 - 0x00BFFFFFFF | Strongly Ordered |
;*| QSPI, lower PCIe | 0x00C0000000 - 0x00EFFFFFFF | Strongly Ordered |
;*| Reserved | 0x00F0000000 - 0x00F7FFFFFF | Unassigned |
;*| STM Coresight | 0x00F8000000 - 0x00F8FFFFFF | Strongly Ordered |
;*| GIC | 0x00F9000000 - 0x00F91FFFFF | Strongly Ordered |
;*| Reserved | 0x00F9200000 - 0x00FCFFFFFF | Unassigned |
;*| FPS, LPS slaves | 0x00FD000000 - 0x00FFBFFFFF | Strongly Ordered |
;*| CSU, PMU | 0x00FFC00000 - 0x00FFDFFFFF | Strongly Ordered |
;*| TCM, OCM | 0x00FFE00000 - 0x00FFFFFFFF | Normal inner write-back cacheable |
;*| Reserved | 0x0100000000 - 0x03FFFFFFFF | Unassigned |
;*| PL, PCIe | 0x0400000000 - 0x07FFFFFFFF | Strongly Ordered |
;*| DDR | 0x0800000000 - 0x0FFFFFFFFF | Normal inner write-back cacheable |
;*| PL, PCIe | 0x1000000000 - 0xBFFFFFFFFF | Strongly Ordered |
;*| Reserved | 0xC000000000 - 0xFFFFFFFFFF | Unassigned |
;*
;* @note
;*
;* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
;* 2GB, region after DDR and before PL is marked as undefined/reserved in
;* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
;* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
;* because minimum section size in translation table section is 2MB. Region
;* 0x00FFC00000 - 0x00FFDFFFFF contains CSU and PMU memory which are marked as
;* Device since it is less than 1MB and falls in a region with device memory.
;*
;* <pre>
;* MODIFICATION HISTORY:
;*
;* Ver Who Date Changes
;* ----- ---- -------- ---------------------------------------------------
;* 7.0 cjp 02/26/19 First release
;*
;******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
EXPORT MMUTableL0
EXPORT MMUTableL1
EXPORT MMUTableL2
GBLA abscnt
GBLA count
GBLA sect
; Fault
Reserved EQU 0
#if (EL1_NONSECURE == 1)
Memory EQU 0x405:OR:(2:SHL:8):OR:0x0 ; Normal writeback write allocate outer shared read write
#else
Memory EQU 0x405:OR:(3:SHL:8):OR:0x0 ; Normal writeback write allocate inner shared read write
#endif
Device EQU 0x409:OR:(1:SHL:53):OR:(1:SHL:54):OR:0x0 ; Strongly ordered read write non executable
AREA |.mmu_tbl0|, CODE, ALIGN=12
MMUTableL0
DCQU MMUTableL1+0x3 ; 0x0000_0000 - 0x7F_FFFF_FFFF
DCQU MMUTableL1+0x1000+0x3 ; 0x80_0000_0000 - 0xFF_FFFF_FFFF
AREA |.mmu_tbl1|, CODE, ALIGN=12
MMUTableL1
;
; 0x4000_0000 - 0xFFFF_FFFF
; 1GB DDR, 1GB PL, 2GB other devices n memory
;
count SETA 0
WHILE count<0x4
DCQU MMUTableL2+count*0x1000+0x3
count SETA count+1
WEND
Fixlocl1 EQU 0x100000000
abscnt SETA 0
;
; 0x0001_0000_0000 - 0x0003_FFFF_FFFF
; 12GB Reserved
;
count SETA 0
WHILE count<0xc
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x0004_0000_0000 - 0x0007_FFFF_FFFF
; 8GB PL, 8GB PCIe
;
count SETA 0
WHILE count<0x10
DCQU Fixlocl1+abscnt*0x40000000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
#ifdef XPAR_PSU_DDR_1_S_AXI_BASEADDR
DDR_1_START EQU XPAR_PSU_DDR_1_S_AXI_BASEADDR
DDR_1_END EQU XPAR_PSU_DDR_1_S_AXI_HIGHADDR
DDR_1_SIZE EQU (DDR_1_END - DDR_1_START + 1)
#if (DDR_1_SIZE > 0x800000000)
DDR_1_REG EQU 0x20 ; If DDR size is larger than 32GB, truncate to 32GB
#else
DDR_1_REG EQU DDR_1_SIZE / 0x40000000
#endif
#else
DDR_1_REG EQU 0
#endif
UNDEF_1_REG EQU (0x20 - DDR_1_REG)
; DDR based on size in hdf
count SETA 0
WHILE count<DDR_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_1_REG
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x0010_0000_0000 - 0x007F_FFFF_FFFF
; 448 GB PL
;
count SETA 0
WHILE count<0x1C0
DCQU Fixlocl1 + abscnt * 0x40000000 + Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x0080_0000_0000 - 0x00BF_FFFF_FFFF
; 256GB PCIe
;
count SETA 0
WHILE count<0x100
DCQU Fixlocl1+abscnt*0x40000000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x00C0_0000_0000 - 0x00FF_FFFF_FFFF
; 256GB Reserved
;
count SETA 0
WHILE count<0x100
DCQU Fixlocl1+abscnt*0x40000000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
AREA |.mmu_tbl2|, CODE, ALIGN=12
MMUTableL2
abscnt SETA 0
#ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR
DDR_0_START EQU XPAR_PSU_DDR_0_S_AXI_BASEADDR
DDR_0_END EQU XPAR_PSU_DDR_0_S_AXI_HIGHADDR
DDR_0_SIZE EQU (DDR_0_END - DDR_0_START + 1)
#if (DDR_0_SIZE > 0x80000000)
DDR_0_REG EQU 0x400 ; If DDR size is larger than 2GB, truncate to 2GB
#else
DDR_0_REG EQU DDR_0_SIZE / 0x200000
#endif
#else
DDR_0_REG EQU 0
#endif
UNDEF_0_REG EQU (0x400 - DDR_0_REG)
; DDR based on size in hdf
count SETA 0
WHILE count<DDR_0_REG
DCQU abscnt*0x200000+Memory
count SETA count+1
abscnt SETA abscnt+1
WEND
; Reserved for region where ddr is absent
count SETA 0
WHILE count<UNDEF_0_REG
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0x8000_0000 - 0xBFFF_FFFF
; 1GB lower PL
;
count SETA 0
WHILE count<0x0200
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xC000_0000 - 0xDFFF_FFFF
; 512MB QSPI
;
count SETA 0
WHILE count<0x0100
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xE000_0000 - 0xEFFF_FFFF
; 256MB lower PCIe
;
count SETA 0
WHILE count<0x080
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xF000_0000 - 0xF7FF_FFFF
; 128MB Reserved
;
count SETA 0
WHILE count<0x040
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xF800_0000 - 0xF8FF_FFFF
; 16MB Coresight
;
count SETA 0
WHILE count<0x8
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 1MB RPU LLP is marked for 2MB region as the minimum block size in translation
; table is 2MB and adjacent 63MB reserved region is converted to 62MB
;
;
; 0xF900_0000 - 0xF91F_FFFF
; 2MB RPU low latency port
;
count SETA 0
WHILE count<0x1
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xF920_0000 - 0xFCFF_FFFF
; 62MB Reserved
;
count SETA 0
WHILE count<0x1f
DCQU abscnt*0x200000+Reserved
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xFD00_0000 - 0xFDFF_FFFF
; 16MB FPS
;
count SETA 0
WHILE count<0x8
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xFE00_0000 - 0xFFBF_FFFF
; 28MB LPS
;
count SETA 0
WHILE count<0xE
DCQU abscnt*0x200000+Device
count SETA count+1
abscnt SETA abscnt+1
WEND
;
; 0xFFC0_0000 - 0xFFDF_FFFF
; 2MB PMU/CSU
;
DCQU abscnt*0x200000+Device
abscnt SETA abscnt+1
;
; 0xFFE0_0000 - 0xFFFF_FFFF
; 2MB OCM/TCM
;
DCQU abscnt*0x200000+Memory
END
;
; @} End of "addtogroup a53_64_boot_code"
;
|
PacktPublishing/Architecting-High-Performance-Embedded-Systems | 8,360 | Chapter08/src/oscilloscope-software/design_1_wrapper/microblaze_0/domain_microblaze_0/bsp/microblaze_0/libsrc/standalone_v7_2/src/arm/ARMv8/64bit/platform/ZynqMP/gcc/translation_table.S | /******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file translation_table.s
*
* @addtogroup a53_64_boot_code
* @{
* <h2> translation_table.S </h2>
* translation_table.S contains a static page table required by MMU for
* cortex-A53. This translation table is flat mapped (input address = output
* address) with default memory attributes defined for zynq ultrascale+
* architecture. It utilizes translation granual size of 4KB with 2MB section
* size for initial 4GB memory and 1GB section size for memory after 4GB.
* The overview of translation table memory attributes is described below.
*
*| | Memory Range | Definition in Translation Table |
*|-----------------------|-----------------------------|-----------------------------------|
*| DDR | 0x0000000000 - 0x007FFFFFFF | Normal write-back Cacheable |
*| PL | 0x0080000000 - 0x00BFFFFFFF | Strongly Ordered |
*| QSPI, lower PCIe | 0x00C0000000 - 0x00EFFFFFFF | Strongly Ordere |
*| Reserved | 0x00F0000000 - 0x00F7FFFFFF | Unassigned |
*| STM Coresight | 0x00F8000000 - 0x00F8FFFFFF | Strongly Ordered |
*| GIC | 0x00F9000000 - 0x00F91FFFFF | Strongly Ordered |
*| Reserved | 0x00F9200000 - 0x00FCFFFFFF | Unassigned |
*| FPS, LPS slaves | 0x00FD000000 - 0x00FFBFFFFF | Strongly Ordered |
*| CSU, PMU | 0x00FFC00000 - 0x00FFDFFFFF | Strongly Ordered |
*| TCM, OCM | 0x00FFE00000 - 0x00FFFFFFFF | Normal inner write-back cacheable |
*| Reserved | 0x0100000000 - 0x03FFFFFFFF | Unassigned |
*| PL, PCIe | 0x0400000000 - 0x07FFFFFFFF | Strongly Ordered |
*| DDR | 0x0800000000 - 0x0FFFFFFFFF | Normal inner write-back cacheable |
*| PL, PCIe | 0x1000000000 - 0xBFFFFFFFFF | Strongly Ordered |
*| Reserved | 0xC000000000 - 0xFFFFFFFFFF | Unassigned |
*
* @note
*
* For DDR region 0x0000000000 - 0x007FFFFFFF, a system where DDR is less than
* 2GB, region after DDR and before PL is marked as undefined/reserved in
* translation table. Region 0xF9100000 - 0xF91FFFFF is reserved memory in
* 0x00F9000000 - 0x00F91FFFFF range, but it is marked as strongly ordered
* because minimum section size in translation table section is 2MB. Region
* 0x00FFC00000 - 0x00FFDFFFFF contains CSU and PMU memory which are marked as
* Device since it is less than 1MB and falls in a region with device memory.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 05/21/14 Initial version
* 5.04 pkp 12/18/15 Updated the address map according to proper address map
* 6.0 mus 07/20/16 Added warning for ddrless HW design CR-954977
* 6.2 pkp 12/14/16 DDR memory in 0x800000000 - 0xFFFFFFFFF range is marked
* as normal writeback for the size defined in hdf and rest
* of the memory in that 32GB range is marked as reserved.
* 6.4 mus 08/10/17 Marked memory as a outer shareable for EL1 NS execution,
* to support CCI enabled IP's.
*
*
******************************************************************************/
#include "xparameters.h"
#include "bspconfig.h"
.globl MMUTableL0
.globl MMUTableL1
.globl MMUTableL2
.set reserved, 0x0 /* Fault*/
#if EL1_NONSECURE
.set Memory, 0x405 | (2 << 8) | (0x0) /* normal writeback write allocate outer shared read write */
#else
.set Memory, 0x405 | (3 << 8) | (0x0) /* normal writeback write allocate inner shared read write */
#endif
.set Device, 0x409 | (1 << 53)| (1 << 54) |(0x0) /* strongly ordered read write non executable*/
.section .mmu_tbl0,"a"
MMUTableL0:
.set SECT, MMUTableL1 /* 0x0000_0000 - 0x7F_FFFF_FFFF */
.8byte SECT + 0x3
.set SECT, MMUTableL1+0x1000 /* 0x80_0000_0000 - 0xFF_FFFF_FFFF */
.8byte SECT + 0x3
.section .mmu_tbl1,"a"
MMUTableL1:
.set SECT, MMUTableL2 /* 0x0000_0000 - 0x3FFF_FFFF */
.8byte SECT + 0x3 /* 1GB DDR */
.rept 0x3 /* 0x4000_0000 - 0xFFFF_FFFF */
.set SECT, SECT + 0x1000 /*1GB DDR, 1GB PL, 2GB other devices n memory */
.8byte SECT + 0x3
.endr
.set SECT,0x100000000
.rept 0xC /* 0x0001_0000_0000 - 0x0003_FFFF_FFFF */
.8byte SECT + reserved /* 12GB Reserved */
.set SECT, SECT + 0x40000000
.endr
.rept 0x10 /* 0x0004_0000_0000 - 0x0007_FFFF_FFFF */
.8byte SECT + Device /* 8GB PL, 8GB PCIe */
.set SECT, SECT + 0x40000000
.endr
#ifdef XPAR_PSU_DDR_1_S_AXI_BASEADDR
.set DDR_1_START, XPAR_PSU_DDR_1_S_AXI_BASEADDR
.set DDR_1_END, XPAR_PSU_DDR_1_S_AXI_HIGHADDR
.set DDR_1_SIZE, (DDR_1_END - DDR_1_START)+1
.if DDR_1_SIZE > 0x800000000
/* If DDR size is larger than 32GB, truncate to 32GB */
.set DDR_1_REG, 0x20
.else
.set DDR_1_REG, DDR_1_SIZE/0x40000000
.endif
#else
.set DDR_1_REG, 0
#warning "There's no DDR_1 in the HW design. MMU translation table marks 32 GB DDR address space as undefined"
#endif
.set UNDEF_1_REG, 0x20 - DDR_1_REG
.rept DDR_1_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x40000000
.endr
.rept UNDEF_1_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x40000000
.endr
.rept 0x1C0 /* 0x0010_0000_0000 - 0x007F_FFFF_FFFF */
.8byte SECT + Device /* 448 GB PL */
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x0080_0000_0000 - 0x00BF_FFFF_FFFF */
.8byte SECT + Device /* 256GB PCIe */
.set SECT, SECT + 0x40000000
.endr
.rept 0x100 /* 0x00C0_0000_0000 - 0x00FF_FFFF_FFFF */
.8byte SECT + reserved /* 256GB reserved */
.set SECT, SECT + 0x40000000
.endr
.section .mmu_tbl2,"a"
MMUTableL2:
.set SECT, 0
#ifdef XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_0_START, XPAR_PSU_DDR_0_S_AXI_BASEADDR
.set DDR_0_END, XPAR_PSU_DDR_0_S_AXI_HIGHADDR
.set DDR_0_SIZE, (DDR_0_END - DDR_0_START)+1
.if DDR_0_SIZE > 0x80000000
/* If DDR size is larger than 2GB, truncate to 2GB */
.set DDR_0_REG, 0x400
.else
.set DDR_0_REG, DDR_0_SIZE/0x200000
.endif
#else
.set DDR_0_REG, 0
#warning "There's no DDR_0 in the HW design. MMU translation table marks 2 GB DDR address space as undefined"
#endif
.set UNDEF_0_REG, 0x400 - DDR_0_REG
.rept DDR_0_REG /* DDR based on size in hdf*/
.8byte SECT + Memory
.set SECT, SECT+0x200000
.endr
.rept UNDEF_0_REG /* reserved for region where ddr is absent */
.8byte SECT + reserved
.set SECT, SECT+0x200000
.endr
.rept 0x0200 /* 0x8000_0000 - 0xBFFF_FFFF */
.8byte SECT + Device /* 1GB lower PL */
.set SECT, SECT+0x200000
.endr
.rept 0x0100 /* 0xC000_0000 - 0xDFFF_FFFF */
.8byte SECT + Device /* 512MB QSPI */
.set SECT, SECT+0x200000
.endr
.rept 0x080 /* 0xE000_0000 - 0xEFFF_FFFF */
.8byte SECT + Device /* 256MB lower PCIe */
.set SECT, SECT+0x200000
.endr
.rept 0x040 /* 0xF000_0000 - 0xF7FF_FFFF */
.8byte SECT + reserved /* 128MB Reserved */
.set SECT, SECT+0x200000
.endr
.rept 0x8 /* 0xF800_0000 - 0xF8FF_FFFF */
.8byte SECT + Device /* 16MB coresight */
.set SECT, SECT+0x200000
.endr
/* 1MB RPU LLP is marked for 2MB region as the minimum block size in
translation table is 2MB and adjacent 63MB reserved region is
converted to 62MB */
.rept 0x1 /* 0xF900_0000 - 0xF91F_FFFF */
.8byte SECT + Device /* 2MB RPU low latency port */
.set SECT, SECT+0x200000
.endr
.rept 0x1F /* 0xF920_0000 - 0xFCFF_FFFF */
.8byte SECT + reserved /* 62MB Reserved */
.set SECT, SECT+0x200000
.endr
.rept 0x8 /* 0xFD00_0000 - 0xFDFF_FFFF */
.8byte SECT + Device /* 16MB FPS */
.set SECT, SECT+0x200000
.endr
.rept 0xE /* 0xFE00_0000 - 0xFFBF_FFFF */
.8byte SECT + Device /* 28MB LPS */
.set SECT, SECT+0x200000
.endr
/* 0xFFC0_0000 - 0xFFDF_FFFF */
.8byte SECT + Device /*2MB PMU/CSU */
.set SECT, SECT+0x200000 /* 0xFFE0_0000 - 0xFFFF_FFFF*/
.8byte SECT + Memory /*2MB OCM/TCM*/
.end
/**
* @} End of "addtogroup a53_64_boot_code".
*/
|
paritytech/polkaports | 1,670 | libs/musl/src/setjmp/powerpc64/longjmp.s | .global _longjmp
.global longjmp
.type _longjmp,@function
.type longjmp,@function
_longjmp:
longjmp:
# 0) move old return address into the link register
ld 0, 0*8(3)
mtlr 0
# 1) restore cr
ld 0, 1*8(3)
mtcr 0
# 2) restore SP
ld 1, 2*8(3)
# 3) restore TOC into both r2 and the caller's stack.
# Which location is required depends on whether setjmp was called
# locally or non-locally, but it's always safe to restore to both.
ld 2, 3*8(3)
std 2, 24(1)
# 4) restore r14-r31
ld 14, 4*8(3)
ld 15, 5*8(3)
ld 16, 6*8(3)
ld 17, 7*8(3)
ld 18, 8*8(3)
ld 19, 9*8(3)
ld 20, 10*8(3)
ld 21, 11*8(3)
ld 22, 12*8(3)
ld 23, 13*8(3)
ld 24, 14*8(3)
ld 25, 15*8(3)
ld 26, 16*8(3)
ld 27, 17*8(3)
ld 28, 18*8(3)
ld 29, 19*8(3)
ld 30, 20*8(3)
ld 31, 21*8(3)
# 5) restore floating point registers f14-f31
lfd 14, 22*8(3)
lfd 15, 23*8(3)
lfd 16, 24*8(3)
lfd 17, 25*8(3)
lfd 18, 26*8(3)
lfd 19, 27*8(3)
lfd 20, 28*8(3)
lfd 21, 29*8(3)
lfd 22, 30*8(3)
lfd 23, 31*8(3)
lfd 24, 32*8(3)
lfd 25, 33*8(3)
lfd 26, 34*8(3)
lfd 27, 35*8(3)
lfd 28, 36*8(3)
lfd 29, 37*8(3)
lfd 30, 38*8(3)
lfd 31, 39*8(3)
# 6) restore vector registers v20-v31
addi 3, 3, 40*8
lvx 20, 0, 3 ; addi 3, 3, 16
lvx 21, 0, 3 ; addi 3, 3, 16
lvx 22, 0, 3 ; addi 3, 3, 16
lvx 23, 0, 3 ; addi 3, 3, 16
lvx 24, 0, 3 ; addi 3, 3, 16
lvx 25, 0, 3 ; addi 3, 3, 16
lvx 26, 0, 3 ; addi 3, 3, 16
lvx 27, 0, 3 ; addi 3, 3, 16
lvx 28, 0, 3 ; addi 3, 3, 16
lvx 29, 0, 3 ; addi 3, 3, 16
lvx 30, 0, 3 ; addi 3, 3, 16
lvx 31, 0, 3
# 7) return r4 ? r4 : 1
mr 3, 4
cmpwi cr7, 4, 0
bne cr7, 1f
li 3, 1
1:
blr
|
paritytech/polkaports | 2,032 | libs/musl/src/setjmp/powerpc64/setjmp.s | .global __setjmp
.global _setjmp
.global setjmp
.type __setjmp,@function
.type _setjmp,@function
.type setjmp,@function
__setjmp:
_setjmp:
setjmp:
ld 5, 24(1) # load from the TOC slot in the caller's stack frame
b __setjmp_toc
.localentry __setjmp,.-__setjmp
.localentry _setjmp,.-_setjmp
.localentry setjmp,.-setjmp
mr 5, 2
.global __setjmp_toc
.hidden __setjmp_toc
# same as normal setjmp, except TOC pointer to save is provided in r5.
# r4 would normally be the 2nd parameter, but we're using r5 to simplify calling from sigsetjmp.
# solves the problem of knowing whether to save the TOC pointer from r2 or the caller's stack frame.
__setjmp_toc:
# 0) store IP into 0, then into the jmpbuf pointed to by r3 (first arg)
mflr 0
std 0, 0*8(3)
# 1) store cr
mfcr 0
std 0, 1*8(3)
# 2) store SP and TOC
std 1, 2*8(3)
std 5, 3*8(3)
# 3) store r14-31
std 14, 4*8(3)
std 15, 5*8(3)
std 16, 6*8(3)
std 17, 7*8(3)
std 18, 8*8(3)
std 19, 9*8(3)
std 20, 10*8(3)
std 21, 11*8(3)
std 22, 12*8(3)
std 23, 13*8(3)
std 24, 14*8(3)
std 25, 15*8(3)
std 26, 16*8(3)
std 27, 17*8(3)
std 28, 18*8(3)
std 29, 19*8(3)
std 30, 20*8(3)
std 31, 21*8(3)
# 4) store floating point registers f14-f31
stfd 14, 22*8(3)
stfd 15, 23*8(3)
stfd 16, 24*8(3)
stfd 17, 25*8(3)
stfd 18, 26*8(3)
stfd 19, 27*8(3)
stfd 20, 28*8(3)
stfd 21, 29*8(3)
stfd 22, 30*8(3)
stfd 23, 31*8(3)
stfd 24, 32*8(3)
stfd 25, 33*8(3)
stfd 26, 34*8(3)
stfd 27, 35*8(3)
stfd 28, 36*8(3)
stfd 29, 37*8(3)
stfd 30, 38*8(3)
stfd 31, 39*8(3)
# 5) store vector registers v20-v31
addi 3, 3, 40*8
stvx 20, 0, 3 ; addi 3, 3, 16
stvx 21, 0, 3 ; addi 3, 3, 16
stvx 22, 0, 3 ; addi 3, 3, 16
stvx 23, 0, 3 ; addi 3, 3, 16
stvx 24, 0, 3 ; addi 3, 3, 16
stvx 25, 0, 3 ; addi 3, 3, 16
stvx 26, 0, 3 ; addi 3, 3, 16
stvx 27, 0, 3 ; addi 3, 3, 16
stvx 28, 0, 3 ; addi 3, 3, 16
stvx 29, 0, 3 ; addi 3, 3, 16
stvx 30, 0, 3 ; addi 3, 3, 16
stvx 31, 0, 3
# 6) return 0
li 3, 0
blr
|
paritytech/polkaports | 1,631 | libs/musl/src/setjmp/powerpc/setjmp.S | .global ___setjmp
.hidden ___setjmp
.global __setjmp
.global _setjmp
.global setjmp
.type __setjmp,@function
.type _setjmp,@function
.type setjmp,@function
___setjmp:
__setjmp:
_setjmp:
setjmp:
/* 0) store IP int 0, then into the jmpbuf pointed to by r3 (first arg) */
mflr 0
stw 0, 0(3)
/* 1) store reg1 (SP) */
stw 1, 4(3)
/* 2) store cr */
mfcr 0
stw 0, 8(3)
/* 3) store r14-31 */
stw 14, 12(3)
stw 15, 16(3)
stw 16, 20(3)
stw 17, 24(3)
stw 18, 28(3)
stw 19, 32(3)
stw 20, 36(3)
stw 21, 40(3)
stw 22, 44(3)
stw 23, 48(3)
stw 24, 52(3)
stw 25, 56(3)
stw 26, 60(3)
stw 27, 64(3)
stw 28, 68(3)
stw 29, 72(3)
stw 30, 76(3)
stw 31, 80(3)
#if defined(_SOFT_FLOAT) || defined(__NO_FPRS__)
mflr 0
bl 1f
.hidden __hwcap
.long __hwcap-.
1: mflr 4
lwz 5, 0(4)
lwzx 4, 4, 5
andis. 4, 4, 0x80
beq 1f
.long 0x11c35b21 /* evstdd 14,88(3) */
.long 0x11e36321 /* ... */
.long 0x12036b21
.long 0x12237321
.long 0x12437b21
.long 0x12638321
.long 0x12838b21
.long 0x12a39321
.long 0x12c39b21
.long 0x12e3a321
.long 0x1303ab21
.long 0x1323b321
.long 0x1343bb21
.long 0x1363c321
.long 0x1383cb21
.long 0x13a3d321
.long 0x13c3db21
.long 0x13e3e321 /* evstdd 31,224(3) */
.long 0x11a3eb21 /* evstdd 13,232(3) */
1: mtlr 0
#else
stfd 14,88(3)
stfd 15,96(3)
stfd 16,104(3)
stfd 17,112(3)
stfd 18,120(3)
stfd 19,128(3)
stfd 20,136(3)
stfd 21,144(3)
stfd 22,152(3)
stfd 23,160(3)
stfd 24,168(3)
stfd 25,176(3)
stfd 26,184(3)
stfd 27,192(3)
stfd 28,200(3)
stfd 29,208(3)
stfd 30,216(3)
stfd 31,224(3)
#endif
/* 4) set return value to 0 */
li 3, 0
/* 5) return */
blr
|
paritytech/polkaports | 1,770 | libs/musl/src/setjmp/powerpc/longjmp.S | .global _longjmp
.global longjmp
.type _longjmp,@function
.type longjmp,@function
_longjmp:
longjmp:
/*
* void longjmp(jmp_buf env, int val);
* put val into return register and restore the env saved in setjmp
* if val(r4) is 0, put 1 there.
*/
/* 0) move old return address into r0 */
lwz 0, 0(3)
/* 1) put it into link reg */
mtlr 0
/* 2 ) restore stack ptr */
lwz 1, 4(3)
/* 3) restore control reg */
lwz 0, 8(3)
mtcr 0
/* 4) restore r14-r31 */
lwz 14, 12(3)
lwz 15, 16(3)
lwz 16, 20(3)
lwz 17, 24(3)
lwz 18, 28(3)
lwz 19, 32(3)
lwz 20, 36(3)
lwz 21, 40(3)
lwz 22, 44(3)
lwz 23, 48(3)
lwz 24, 52(3)
lwz 25, 56(3)
lwz 26, 60(3)
lwz 27, 64(3)
lwz 28, 68(3)
lwz 29, 72(3)
lwz 30, 76(3)
lwz 31, 80(3)
#if defined(_SOFT_FLOAT) || defined(__NO_FPRS__)
mflr 0
bl 1f
.hidden __hwcap
.long __hwcap-.
1: mflr 6
lwz 5, 0(6)
lwzx 6, 6, 5
andis. 6, 6, 0x80
beq 1f
.long 0x11c35b01 /* evldd 14,88(3) */
.long 0x11e36301 /* ... */
.long 0x12036b01
.long 0x12237301
.long 0x12437b01
.long 0x12638301
.long 0x12838b01
.long 0x12a39301
.long 0x12c39b01
.long 0x12e3a301
.long 0x1303ab01
.long 0x1323b301
.long 0x1343bb01
.long 0x1363c301
.long 0x1383cb01
.long 0x13a3d301
.long 0x13c3db01
.long 0x13e3e301 /* evldd 31,224(3) */
.long 0x11a3eb01 /* evldd 13,232(3) */
1: mtlr 0
#else
lfd 14,88(3)
lfd 15,96(3)
lfd 16,104(3)
lfd 17,112(3)
lfd 18,120(3)
lfd 19,128(3)
lfd 20,136(3)
lfd 21,144(3)
lfd 22,152(3)
lfd 23,160(3)
lfd 24,168(3)
lfd 25,176(3)
lfd 26,184(3)
lfd 27,192(3)
lfd 28,200(3)
lfd 29,208(3)
lfd 30,216(3)
lfd 31,224(3)
#endif
/* 5) put val into return reg r3 */
mr 3, 4
/* 6) check if return value is 0, make it 1 in that case */
cmpwi cr7, 4, 0
bne cr7, 1f
li 3, 1
1:
blr
|
paritytech/polkaports | 1,069 | libs/musl/src/thread/s390x/clone.s | .text
.global __clone
.hidden __clone
.type __clone, %function
__clone:
# int clone(
# fn, a = r2
# stack, b = r3
# flags, c = r4
# arg, d = r5
# ptid, e = r6
# tls, f = *(r15+160)
# ctid) g = *(r15+168)
#
# pseudo C code:
# tid = syscall(SYS_clone,b,c,e,g,f);
# if (!tid) syscall(SYS_exit, a(d));
# return tid;
# preserve call-saved register used as syscall arg
stg %r6, 48(%r15)
# create initial stack frame for new thread
nill %r3, 0xfff8
aghi %r3, -160
lghi %r0, 0
stg %r0, 0(%r3)
# save fn and arg to child stack
stg %r2, 8(%r3)
stg %r5, 16(%r3)
# shuffle args into correct registers and call SYS_clone
lgr %r2, %r3
lgr %r3, %r4
lgr %r4, %r6
lg %r5, 168(%r15)
lg %r6, 160(%r15)
svc 120
# restore call-saved register
lg %r6, 48(%r15)
# if error or if we're the parent, return
ltgr %r2, %r2
bnzr %r14
# we're the child. call fn(arg)
lg %r1, 8(%r15)
lg %r2, 16(%r15)
basr %r14, %r1
# call SYS_exit. exit code is already in r2 from fn return value
svc 1
|
paritytech/polkaports | 1,717 | libs/musl/src/thread/arm/atomics.s | .syntax unified
.text
.global __a_barrier_dummy
.hidden __a_barrier_dummy
.type __a_barrier_dummy,%function
__a_barrier_dummy:
bx lr
.global __a_barrier_oldkuser
.hidden __a_barrier_oldkuser
.type __a_barrier_oldkuser,%function
__a_barrier_oldkuser:
push {r0,r1,r2,r3,ip,lr}
mov r1,r0
mov r2,sp
ldr ip,=0xffff0fc0
bl 1f
pop {r0,r1,r2,r3,ip,lr}
bx lr
1: bx ip
.global __a_barrier_v6
.hidden __a_barrier_v6
.type __a_barrier_v6,%function
__a_barrier_v6:
.arch armv6t2
mcr p15,0,r0,c7,c10,5
bx lr
.global __a_barrier_v7
.hidden __a_barrier_v7
.type __a_barrier_v7,%function
__a_barrier_v7:
.arch armv7-a
dmb ish
bx lr
.global __a_cas_dummy
.hidden __a_cas_dummy
.type __a_cas_dummy,%function
__a_cas_dummy:
mov r3,r0
ldr r0,[r2]
subs r0,r3,r0
streq r1,[r2]
bx lr
.global __a_cas_v6
.hidden __a_cas_v6
.type __a_cas_v6,%function
__a_cas_v6:
.arch armv6t2
mov r3,r0
mcr p15,0,r0,c7,c10,5
1: ldrex r0,[r2]
subs r0,r3,r0
strexeq r0,r1,[r2]
teqeq r0,#1
beq 1b
mcr p15,0,r0,c7,c10,5
bx lr
.global __a_cas_v7
.hidden __a_cas_v7
.type __a_cas_v7,%function
__a_cas_v7:
.arch armv7-a
mov r3,r0
dmb ish
1: ldrex r0,[r2]
subs r0,r3,r0
strexeq r0,r1,[r2]
teqeq r0,#1
beq 1b
dmb ish
bx lr
.global __a_gettp_cp15
.hidden __a_gettp_cp15
.type __a_gettp_cp15,%function
__a_gettp_cp15:
mrc p15,0,r0,c13,c0,3
bx lr
/* Tag this file with minimum ISA level so as not to affect linking. */
.object_arch armv4t
.eabi_attribute 6,2
.data
.align 2
.global __a_barrier_ptr
.hidden __a_barrier_ptr
__a_barrier_ptr:
.word __a_barrier_dummy
.global __a_cas_ptr
.hidden __a_cas_ptr
__a_cas_ptr:
.word __a_cas_dummy
.global __a_gettp_ptr
.hidden __a_gettp_ptr
__a_gettp_ptr:
.word __a_gettp_cp15
|
paritytech/polkaports | 1,077 | libs/musl/src/thread/sh/atomics.s | /* Contract for all versions is same as cas.l r2,r3,@r0
* pr and r1 are also clobbered (by jsr & r1 as temp).
* r0,r2,r4-r15 must be preserved.
* r3 contains result (==r2 iff cas succeeded). */
.align 2
.global __sh_cas_gusa
.hidden __sh_cas_gusa
__sh_cas_gusa:
mov.l r5,@-r15
mov.l r4,@-r15
mov r0,r4
mova 1f,r0
mov r15,r1
mov #(0f-1f),r15
0: mov.l @r4,r5
cmp/eq r5,r2
bf 1f
mov.l r3,@r4
1: mov r1,r15
mov r5,r3
mov r4,r0
mov.l @r15+,r4
rts
mov.l @r15+,r5
.global __sh_cas_llsc
.hidden __sh_cas_llsc
__sh_cas_llsc:
mov r0,r1
.word 0x00ab /* synco */
0: .word 0x0163 /* movli.l @r1,r0 */
cmp/eq r0,r2
bf 1f
mov r3,r0
.word 0x0173 /* movco.l r0,@r1 */
bf 0b
mov r2,r0
1: .word 0x00ab /* synco */
mov r0,r3
rts
mov r1,r0
.global __sh_cas_imask
.hidden __sh_cas_imask
__sh_cas_imask:
mov r0,r1
stc sr,r0
mov.l r0,@-r15
or #0xf0,r0
ldc r0,sr
mov.l @r1,r0
cmp/eq r0,r2
bf 1f
mov.l r3,@r1
1: ldc.l @r15+,sr
mov r0,r3
rts
mov r1,r0
.global __sh_cas_cas_l
.hidden __sh_cas_cas_l
__sh_cas_cas_l:
rts
.word 0x2323 /* cas.l r2,r3,@r0 */
|
paritytech/polkaports | 1,866 | libs/musl/src/thread/powerpc/syscall_cp.s | .global __cp_begin
.hidden __cp_begin
.global __cp_end
.hidden __cp_end
.global __cp_cancel
.hidden __cp_cancel
.hidden __cancel
.global __syscall_cp_asm
.hidden __syscall_cp_asm
#r0: volatile. may be modified during linkage.
#r1: stack frame: 16 byte alignment.
#r2: tls/thread pointer on pp32
#r3,r4: return values, first args
#r5-r10: args
#r11-r12: volatile. may be modified during linkage
#r13: "small data area" pointer
#r14 - r30: local vars
#r31: local or environment pointer
#r1, r14-31: belong to the caller, must be saved and restored
#r0, r3-r12, ctr, xer: volatile, not preserved
#r0,r11,r12: may be altered by cross-module call,
#"a func cannot depend on that these regs have the values placed by the caller"
#the fields CR2,CR2,CR4 of the cond reg must be preserved
#LR (link reg) shall contain the funcs return address
.text
.type __syscall_cp_asm,%function
__syscall_cp_asm:
# at enter: r3 = pointer to self->cancel, r4: syscall no, r5: first arg, r6: 2nd, r7: 3rd, r8: 4th, r9: 5th, r10: 6th
__cp_begin:
# r3 holds first argument, its a pointer to self->cancel.
# we must compare the dereferenced value with 0 and jump to __cancel if its not
lwz 0, 0(3) #deref pointer into r0
cmpwi cr7, 0, 0 #compare r0 with 0, store result in cr7.
beq+ cr7, 1f #jump to label 1 if r0 was 0
b __cp_cancel #else call cancel
1:
#ok, the cancel flag was not set
# syscall: number goes to r0, the rest 3-8
mr 0, 4 # put the system call number into r0
mr 3, 5 # Shift the arguments: arg1
mr 4, 6 # arg2
mr 5, 7 # arg3
mr 6, 8 # arg4
mr 7, 9 # arg5
mr 8, 10 # arg6
sc
__cp_end:
bnslr+ # return if no summary overflow.
#else negate result.
neg 3, 3
blr
__cp_cancel:
b __cancel
|
paritytech/polkaports | 1,218 | libs/musl/src/thread/powerpc/clone.s | .text
.global __clone
.hidden __clone
.type __clone, %function
__clone:
# int clone(fn, stack, flags, arg, ptid, tls, ctid)
# a b c d e f g
# 3 4 5 6 7 8 9
# pseudo C code:
# tid = syscall(SYS_clone,c,b,e,f,g);
# if (!tid) syscall(SYS_exit, a(d));
# return tid;
# SYS_clone = 120
# SYS_exit = 1
# store non-volatile regs r30, r31 on stack in order to put our
# start func and its arg there
stwu 30, -16(1)
stw 31, 4(1)
# save r3 (func) into r30, and r6(arg) into r31
mr 30, 3
mr 31, 6
# create initial stack frame for new thread
clrrwi 4, 4, 4
li 0, 0
stwu 0, -16(4)
#move c into first arg
mr 3, 5
#mr 4, 4
mr 5, 7
mr 6, 8
mr 7, 9
# move syscall number into r0
li 0, 120
sc
# check for syscall error
bns+ 1f # jump to label 1 if no summary overflow.
#else
neg 3, 3 #negate the result (errno)
1:
# compare sc result with 0
cmpwi cr7, 3, 0
# if not 0, jump to end
bne cr7, 2f
#else: we're the child
#call funcptr: move arg (d) into r3
mr 3, 31
#move r30 (funcptr) into CTR reg
mtctr 30
# call CTR reg
bctrl
# mov SYS_exit into r0 (the exit param is already in r3)
li 0, 1
sc
2:
# restore stack
lwz 30, 0(1)
lwz 31, 4(1)
addi 1, 1, 16
blr
|
paritytech/polkaports | 1,054 | libs/musl/src/ldso/arm/tlsdesc.S | .syntax unified
.text
.global __tlsdesc_static
.hidden __tlsdesc_static
.type __tlsdesc_static,%function
__tlsdesc_static:
ldr r0,[r0]
bx lr
.global __tlsdesc_dynamic
.hidden __tlsdesc_dynamic
.type __tlsdesc_dynamic,%function
__tlsdesc_dynamic:
push {r2,r3,ip,lr}
ldr r1,[r0]
ldr r2,[r1,#4] // r2 = offset
ldr r1,[r1] // r1 = modid
#if ((__ARM_ARCH_6K__ || __ARM_ARCH_6KZ__ || __ARM_ARCH_6ZK__) && !__thumb__) \
|| __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
mrc p15,0,r0,c13,c0,3
#else
ldr r0,1f
add r0,r0,pc
ldr r0,[r0]
2:
#if __ARM_ARCH >= 5
blx r0 // r0 = tp
#else
#if __thumb__
add lr,pc,#1
#else
mov lr,pc
#endif
bx r0
#endif
#endif
ldr r3,[r0,#-4] // r3 = dtv
ldr ip,[r3,r1,LSL #2]
sub r0,ip,r0
add r0,r0,r2 // r0 = r3[r1]-r0+r2
#if __ARM_ARCH >= 5
pop {r2,r3,ip,pc}
#else
pop {r2,r3,ip,lr}
bx lr
#endif
#if ((__ARM_ARCH_6K__ || __ARM_ARCH_6KZ__ || __ARM_ARCH_6ZK__) && !__thumb__) \
|| __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
#else
.align 2
1: .word __a_gettp_ptr - 2b
#endif
|
paritytech/polkaports | 1,435 | libs/musl/src/math/i386/exp_ld.s | .global expm1l
.type expm1l,@function
expm1l:
fldt 4(%esp)
fldl2e
fmulp
mov $0xc2820000,%eax
push %eax
flds (%esp)
pop %eax
fucomp %st(1)
fnstsw %ax
sahf
fld1
jb 1f
# x*log2e < -65, return -1 without underflow
fstp %st(1)
fchs
ret
1: fld %st(1)
fabs
fucom %st(1)
fnstsw %ax
fstp %st(0)
fstp %st(0)
sahf
ja 1f
f2xm1
ret
1: call 1f
fld1
fsubrp
ret
.global exp2l
.global __exp2l
.hidden __exp2l
.type exp2l,@function
exp2l:
__exp2l:
fldt 4(%esp)
1: sub $12,%esp
fld %st(0)
fstpt (%esp)
mov 8(%esp),%ax
and $0x7fff,%ax
cmp $0x3fff+13,%ax
jb 4f # |x| < 8192
cmp $0x3fff+15,%ax
jae 3f # |x| >= 32768
fsts (%esp)
cmpl $0xc67ff800,(%esp)
jb 2f # x > -16382
movl $0x5f000000,(%esp)
flds (%esp) # 0x1p63
fld %st(1)
fsub %st(1)
faddp
fucomp %st(1)
fnstsw
sahf
je 2f # x - 0x1p63 + 0x1p63 == x
movl $1,(%esp)
flds (%esp) # 0x1p-149
fdiv %st(1)
fstps (%esp) # raise underflow
2: fld1
fld %st(1)
frndint
fxch %st(2)
fsub %st(2) # st(0)=x-rint(x), st(1)=1, st(2)=rint(x)
f2xm1
faddp # 2^(x-rint(x))
1: fscale
fstp %st(1)
add $12,%esp
ret
3: xor %eax,%eax
4: cmp $0x3fff-64,%ax
fld1
jb 1b # |x| < 0x1p-64
fstpt (%esp)
fistl 8(%esp)
fildl 8(%esp)
fsubrp %st(1)
addl $0x3fff,8(%esp)
f2xm1
fld1
faddp # 2^(x-rint(x))
fldt (%esp) # 2^rint(x)
fmulp
add $12,%esp
ret
|
paritytech/polkaports | 1,934 | libs/musl/src/math/i386/expl.s | # exp(x) = 2^hi + 2^hi (2^lo - 1)
# where hi+lo = log2e*x with 128bit precision
# exact log2e*x calculation depends on nearest rounding mode
# using the exact multiplication method of Dekker and Veltkamp
.global expl
.type expl,@function
expl:
fldt 4(%esp)
# interesting case: 0x1p-32 <= |x| < 16384
# check if (exponent|0x8000) is in [0xbfff-32, 0xbfff+13]
mov 12(%esp), %ax
or $0x8000, %ax
sub $0xbfdf, %ax
cmp $45, %ax
jbe 2f
test %ax, %ax
fld1
js 1f
# if |x|>=0x1p14 or nan return 2^trunc(x)
fscale
fstp %st(1)
ret
# if |x|<0x1p-32 return 1+x
1: faddp
ret
# should be 0x1.71547652b82fe178p0L == 0x3fff b8aa3b29 5c17f0bc
# it will be wrong on non-nearest rounding mode
2: fldl2e
subl $44, %esp
# hi = log2e_hi*x
# 2^hi = exp2l(hi)
fmul %st(1),%st
fld %st(0)
fstpt (%esp)
fstpt 16(%esp)
fstpt 32(%esp)
.hidden __exp2l
call __exp2l
# if 2^hi == inf return 2^hi
fld %st(0)
fstpt (%esp)
cmpw $0x7fff, 8(%esp)
je 1f
fldt 32(%esp)
fldt 16(%esp)
# fpu stack: 2^hi x hi
# exact mult: x*log2e
fld %st(1)
# c = 0x1p32+1
pushl $0x41f00000
pushl $0x00100000
fldl (%esp)
# xh = x - c*x + c*x
# xl = x - xh
fmulp
fld %st(2)
fsub %st(1), %st
faddp
fld %st(2)
fsub %st(1), %st
# yh = log2e_hi - c*log2e_hi + c*log2e_hi
pushl $0x3ff71547
pushl $0x65200000
fldl (%esp)
# fpu stack: 2^hi x hi xh xl yh
# lo = hi - xh*yh + xl*yh
fld %st(2)
fmul %st(1), %st
fsubp %st, %st(4)
fmul %st(1), %st
faddp %st, %st(3)
# yl = log2e_hi - yh
pushl $0x3de705fc
pushl $0x2f000000
fldl (%esp)
# fpu stack: 2^hi x lo xh xl yl
# lo += xh*yl + xl*yl
fmul %st, %st(2)
fmulp %st, %st(1)
fxch %st(2)
faddp
faddp
# log2e_lo
pushl $0xbfbe
pushl $0x82f0025f
pushl $0x2dc582ee
fldt (%esp)
addl $36,%esp
# fpu stack: 2^hi x lo log2e_lo
# lo += log2e_lo*x
# return 2^hi + 2^hi (2^lo - 1)
fmulp %st, %st(2)
faddp
f2xm1
fmul %st(1), %st
faddp
1: addl $44, %esp
ret
|
paritytech/polkaports | 1,956 | libs/musl/src/math/x86_64/expl.s | # exp(x) = 2^hi + 2^hi (2^lo - 1)
# where hi+lo = log2e*x with 128bit precision
# exact log2e*x calculation depends on nearest rounding mode
# using the exact multiplication method of Dekker and Veltkamp
.global expl
.type expl,@function
expl:
fldt 8(%rsp)
# interesting case: 0x1p-32 <= |x| < 16384
# check if (exponent|0x8000) is in [0xbfff-32, 0xbfff+13]
mov 16(%rsp), %ax
or $0x8000, %ax
sub $0xbfdf, %ax
cmp $45, %ax
jbe 2f
test %ax, %ax
fld1
js 1f
# if |x|>=0x1p14 or nan return 2^trunc(x)
fscale
fstp %st(1)
ret
# if |x|<0x1p-32 return 1+x
1: faddp
ret
# should be 0x1.71547652b82fe178p0L == 0x3fff b8aa3b29 5c17f0bc
# it will be wrong on non-nearest rounding mode
2: fldl2e
subq $48, %rsp
# hi = log2e_hi*x
# 2^hi = exp2l(hi)
fmul %st(1),%st
fld %st(0)
fstpt (%rsp)
fstpt 16(%rsp)
fstpt 32(%rsp)
call exp2l@PLT
# if 2^hi == inf return 2^hi
fld %st(0)
fstpt (%rsp)
cmpw $0x7fff, 8(%rsp)
je 1f
fldt 32(%rsp)
fldt 16(%rsp)
# fpu stack: 2^hi x hi
# exact mult: x*log2e
fld %st(1)
# c = 0x1p32+1
movq $0x41f0000000100000,%rax
pushq %rax
fldl (%rsp)
# xh = x - c*x + c*x
# xl = x - xh
fmulp
fld %st(2)
fsub %st(1), %st
faddp
fld %st(2)
fsub %st(1), %st
# yh = log2e_hi - c*log2e_hi + c*log2e_hi
movq $0x3ff7154765200000,%rax
pushq %rax
fldl (%rsp)
# fpu stack: 2^hi x hi xh xl yh
# lo = hi - xh*yh + xl*yh
fld %st(2)
fmul %st(1), %st
fsubp %st, %st(4)
fmul %st(1), %st
faddp %st, %st(3)
# yl = log2e_hi - yh
movq $0x3de705fc2f000000,%rax
pushq %rax
fldl (%rsp)
# fpu stack: 2^hi x lo xh xl yl
# lo += xh*yl + xl*yl
fmul %st, %st(2)
fmulp %st, %st(1)
fxch %st(2)
faddp
faddp
# log2e_lo
movq $0xbfbe,%rax
pushq %rax
movq $0x82f0025f2dc582ee,%rax
pushq %rax
fldt (%rsp)
addq $40,%rsp
# fpu stack: 2^hi x lo log2e_lo
# lo += log2e_lo*x
# return 2^hi + 2^hi (2^lo - 1)
fmulp %st, %st(2)
faddp
f2xm1
fmul %st(1), %st
faddp
1: addq $48, %rsp
ret
|
paritytech/polkaports | 1,355 | libs/musl/src/math/x86_64/exp2l.s | .global expm1l
.type expm1l,@function
expm1l:
fldt 8(%rsp)
fldl2e
fmulp
movl $0xc2820000,-4(%rsp)
flds -4(%rsp)
fucomip %st(1),%st
fld1
jb 1f
# x*log2e <= -65, return -1 without underflow
fstp %st(1)
fchs
ret
1: fld %st(1)
fabs
fucomip %st(1),%st
fstp %st(0)
ja 1f
f2xm1
ret
1: push %rax
call 1f
pop %rax
fld1
fsubrp
ret
.global exp2l
.type exp2l,@function
exp2l:
fldt 8(%rsp)
1: fld %st(0)
sub $16,%rsp
fstpt (%rsp)
mov 8(%rsp),%ax
and $0x7fff,%ax
cmp $0x3fff+13,%ax
jb 4f # |x| < 8192
cmp $0x3fff+15,%ax
jae 3f # |x| >= 32768
fsts (%rsp)
cmpl $0xc67ff800,(%rsp)
jb 2f # x > -16382
movl $0x5f000000,(%rsp)
flds (%rsp) # 0x1p63
fld %st(1)
fsub %st(1)
faddp
fucomip %st(1),%st
je 2f # x - 0x1p63 + 0x1p63 == x
movl $1,(%rsp)
flds (%rsp) # 0x1p-149
fdiv %st(1)
fstps (%rsp) # raise underflow
2: fld1
fld %st(1)
frndint
fxch %st(2)
fsub %st(2) # st(0)=x-rint(x), st(1)=1, st(2)=rint(x)
f2xm1
faddp # 2^(x-rint(x))
1: fscale
fstp %st(1)
add $16,%rsp
ret
3: xor %eax,%eax
4: cmp $0x3fff-64,%ax
fld1
jb 1b # |x| < 0x1p-64
fstpt (%rsp)
fistl 8(%rsp)
fildl 8(%rsp)
fsubrp %st(1)
addl $0x3fff,8(%rsp)
f2xm1
fld1
faddp # 2^(x-rint(x))
fldt (%rsp) # 2^rint(x)
fmulp
add $16,%rsp
ret
|
paritytech/polkaports | 1,953 | libs/musl/src/math/x32/expl.s | # exp(x) = 2^hi + 2^hi (2^lo - 1)
# where hi+lo = log2e*x with 128bit precision
# exact log2e*x calculation depends on nearest rounding mode
# using the exact multiplication method of Dekker and Veltkamp
.global expl
.type expl,@function
expl:
fldt 8(%esp)
# interesting case: 0x1p-32 <= |x| < 16384
# check if (exponent|0x8000) is in [0xbfff-32, 0xbfff+13]
mov 16(%esp), %ax
or $0x8000, %ax
sub $0xbfdf, %ax
cmp $45, %ax
jbe 2f
test %ax, %ax
fld1
js 1f
# if |x|>=0x1p14 or nan return 2^trunc(x)
fscale
fstp %st(1)
ret
# if |x|<0x1p-32 return 1+x
1: faddp
ret
# should be 0x1.71547652b82fe178p0L == 0x3fff b8aa3b29 5c17f0bc
# it will be wrong on non-nearest rounding mode
2: fldl2e
sub $48, %esp
# hi = log2e_hi*x
# 2^hi = exp2l(hi)
fmul %st(1),%st
fld %st(0)
fstpt (%esp)
fstpt 16(%esp)
fstpt 32(%esp)
call exp2l@PLT
# if 2^hi == inf return 2^hi
fld %st(0)
fstpt (%esp)
cmpw $0x7fff, 8(%esp)
je 1f
fldt 32(%esp)
fldt 16(%esp)
# fpu stack: 2^hi x hi
# exact mult: x*log2e
fld %st(1)
# c = 0x1p32+1
movq $0x41f0000000100000,%rax
pushq %rax
fldl (%esp)
# xh = x - c*x + c*x
# xl = x - xh
fmulp
fld %st(2)
fsub %st(1), %st
faddp
fld %st(2)
fsub %st(1), %st
# yh = log2e_hi - c*log2e_hi + c*log2e_hi
movq $0x3ff7154765200000,%rax
pushq %rax
fldl (%esp)
# fpu stack: 2^hi x hi xh xl yh
# lo = hi - xh*yh + xl*yh
fld %st(2)
fmul %st(1), %st
fsubp %st, %st(4)
fmul %st(1), %st
faddp %st, %st(3)
# yl = log2e_hi - yh
movq $0x3de705fc2f000000,%rax
pushq %rax
fldl (%esp)
# fpu stack: 2^hi x lo xh xl yl
# lo += xh*yl + xl*yl
fmul %st, %st(2)
fmulp %st, %st(1)
fxch %st(2)
faddp
faddp
# log2e_lo
movq $0xbfbe,%rax
pushq %rax
movq $0x82f0025f2dc582ee,%rax
pushq %rax
fldt (%esp)
add $40,%esp
# fpu stack: 2^hi x lo log2e_lo
# lo += log2e_lo*x
# return 2^hi + 2^hi (2^lo - 1)
fmulp %st, %st(2)
faddp
f2xm1
fmul %st(1), %st
faddp
1: add $48, %esp
ret
|
paritytech/polkaports | 1,355 | libs/musl/src/math/x32/exp2l.s | .global expm1l
.type expm1l,@function
expm1l:
fldt 8(%esp)
fldl2e
fmulp
movl $0xc2820000,-4(%esp)
flds -4(%esp)
fucomip %st(1),%st
fld1
jb 1f
# x*log2e <= -65, return -1 without underflow
fstp %st(1)
fchs
ret
1: fld %st(1)
fabs
fucomip %st(1),%st
fstp %st(0)
ja 1f
f2xm1
ret
1: push %rax
call 1f
pop %rax
fld1
fsubrp
ret
.global exp2l
.type exp2l,@function
exp2l:
fldt 8(%esp)
1: fld %st(0)
sub $16,%esp
fstpt (%esp)
mov 8(%esp),%ax
and $0x7fff,%ax
cmp $0x3fff+13,%ax
jb 4f # |x| < 8192
cmp $0x3fff+15,%ax
jae 3f # |x| >= 32768
fsts (%esp)
cmpl $0xc67ff800,(%esp)
jb 2f # x > -16382
movl $0x5f000000,(%esp)
flds (%esp) # 0x1p63
fld %st(1)
fsub %st(1)
faddp
fucomip %st(1),%st
je 2f # x - 0x1p63 + 0x1p63 == x
movl $1,(%esp)
flds (%esp) # 0x1p-149
fdiv %st(1)
fstps (%esp) # raise underflow
2: fld1
fld %st(1)
frndint
fxch %st(2)
fsub %st(2) # st(0)=x-rint(x), st(1)=1, st(2)=rint(x)
f2xm1
faddp # 2^(x-rint(x))
1: fscale
fstp %st(1)
add $16,%esp
ret
3: xor %eax,%eax
4: cmp $0x3fff-64,%ax
fld1
jb 1b # |x| < 0x1p-64
fstpt (%esp)
fistl 8(%esp)
fildl 8(%esp)
fsubrp %st(1)
addl $0x3fff,8(%esp)
f2xm1
fld1
faddp # 2^(x-rint(x))
fldt (%esp) # 2^rint(x)
fmulp
add $16,%esp
ret
|
paritytech/polkaports | 4,437 | libs/musl/src/string/aarch64/memcpy.S | /*
* memcpy - copy memory area
*
* Copyright (c) 2012-2020, Arm Limited.
* SPDX-License-Identifier: MIT
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses.
*
*/
#define dstin x0
#define src x1
#define count x2
#define dst x3
#define srcend x4
#define dstend x5
#define A_l x6
#define A_lw w6
#define A_h x7
#define B_l x8
#define B_lw w8
#define B_h x9
#define C_l x10
#define C_lw w10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l x14
#define E_h x15
#define F_l x16
#define F_h x17
#define G_l count
#define G_h dst
#define H_l src
#define H_h srcend
#define tmp1 x14
/* This implementation of memcpy uses unaligned accesses and branchless
sequences to keep the code small, simple and improve performance.
Copies are split into 3 main cases: small copies of up to 32 bytes, medium
copies of up to 128 bytes, and large copies. The overhead of the overlap
check is negligible since it is only required for large copies.
Large copies use a software pipelined loop processing 64 bytes per iteration.
The destination pointer is 16-byte aligned to minimize unaligned accesses.
The loop tail is handled by always copying 64 bytes from the end.
*/
.global memcpy
.type memcpy,%function
memcpy:
add srcend, src, count
add dstend, dstin, count
cmp count, 128
b.hi .Lcopy_long
cmp count, 32
b.hi .Lcopy32_128
/* Small copies: 0..32 bytes. */
cmp count, 16
b.lo .Lcopy16
ldp A_l, A_h, [src]
ldp D_l, D_h, [srcend, -16]
stp A_l, A_h, [dstin]
stp D_l, D_h, [dstend, -16]
ret
/* Copy 8-15 bytes. */
.Lcopy16:
tbz count, 3, .Lcopy8
ldr A_l, [src]
ldr A_h, [srcend, -8]
str A_l, [dstin]
str A_h, [dstend, -8]
ret
.p2align 3
/* Copy 4-7 bytes. */
.Lcopy8:
tbz count, 2, .Lcopy4
ldr A_lw, [src]
ldr B_lw, [srcend, -4]
str A_lw, [dstin]
str B_lw, [dstend, -4]
ret
/* Copy 0..3 bytes using a branchless sequence. */
.Lcopy4:
cbz count, .Lcopy0
lsr tmp1, count, 1
ldrb A_lw, [src]
ldrb C_lw, [srcend, -1]
ldrb B_lw, [src, tmp1]
strb A_lw, [dstin]
strb B_lw, [dstin, tmp1]
strb C_lw, [dstend, -1]
.Lcopy0:
ret
.p2align 4
/* Medium copies: 33..128 bytes. */
.Lcopy32_128:
ldp A_l, A_h, [src]
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [srcend, -32]
ldp D_l, D_h, [srcend, -16]
cmp count, 64
b.hi .Lcopy128
stp A_l, A_h, [dstin]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstend, -32]
stp D_l, D_h, [dstend, -16]
ret
.p2align 4
/* Copy 65..128 bytes. */
.Lcopy128:
ldp E_l, E_h, [src, 32]
ldp F_l, F_h, [src, 48]
cmp count, 96
b.ls .Lcopy96
ldp G_l, G_h, [srcend, -64]
ldp H_l, H_h, [srcend, -48]
stp G_l, G_h, [dstend, -64]
stp H_l, H_h, [dstend, -48]
.Lcopy96:
stp A_l, A_h, [dstin]
stp B_l, B_h, [dstin, 16]
stp E_l, E_h, [dstin, 32]
stp F_l, F_h, [dstin, 48]
stp C_l, C_h, [dstend, -32]
stp D_l, D_h, [dstend, -16]
ret
.p2align 4
/* Copy more than 128 bytes. */
.Lcopy_long:
/* Copy 16 bytes and then align dst to 16-byte alignment. */
ldp D_l, D_h, [src]
and tmp1, dstin, 15
bic dst, dstin, 15
sub src, src, tmp1
add count, count, tmp1 /* Count is now 16 too large. */
ldp A_l, A_h, [src, 16]
stp D_l, D_h, [dstin]
ldp B_l, B_h, [src, 32]
ldp C_l, C_h, [src, 48]
ldp D_l, D_h, [src, 64]!
subs count, count, 128 + 16 /* Test and readjust count. */
b.ls .Lcopy64_from_end
.Lloop64:
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [src, 16]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [src, 32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [src, 48]
stp D_l, D_h, [dst, 64]!
ldp D_l, D_h, [src, 64]!
subs count, count, 64
b.hi .Lloop64
/* Write the last iteration and copy 64 bytes from the end. */
.Lcopy64_from_end:
ldp E_l, E_h, [srcend, -64]
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [srcend, -48]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [srcend, -32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [srcend, -16]
stp D_l, D_h, [dst, 64]
stp E_l, E_h, [dstend, -64]
stp A_l, A_h, [dstend, -48]
stp B_l, B_h, [dstend, -32]
stp C_l, C_h, [dstend, -16]
ret
.size memcpy,.-memcpy
|
paritytech/polkaports | 2,386 | libs/musl/src/string/aarch64/memset.S | /*
* memset - fill memory with a constant byte
*
* Copyright (c) 2012-2020, Arm Limited.
* SPDX-License-Identifier: MIT
*/
/* Assumptions:
*
* ARMv8-a, AArch64, Advanced SIMD, unaligned accesses.
*
*/
#define dstin x0
#define val x1
#define valw w1
#define count x2
#define dst x3
#define dstend x4
#define zva_val x5
.global memset
.type memset,%function
memset:
dup v0.16B, valw
add dstend, dstin, count
cmp count, 96
b.hi .Lset_long
cmp count, 16
b.hs .Lset_medium
mov val, v0.D[0]
/* Set 0..15 bytes. */
tbz count, 3, 1f
str val, [dstin]
str val, [dstend, -8]
ret
nop
1: tbz count, 2, 2f
str valw, [dstin]
str valw, [dstend, -4]
ret
2: cbz count, 3f
strb valw, [dstin]
tbz count, 1, 3f
strh valw, [dstend, -2]
3: ret
/* Set 17..96 bytes. */
.Lset_medium:
str q0, [dstin]
tbnz count, 6, .Lset96
str q0, [dstend, -16]
tbz count, 5, 1f
str q0, [dstin, 16]
str q0, [dstend, -32]
1: ret
.p2align 4
/* Set 64..96 bytes. Write 64 bytes from the start and
32 bytes from the end. */
.Lset96:
str q0, [dstin, 16]
stp q0, q0, [dstin, 32]
stp q0, q0, [dstend, -32]
ret
.p2align 4
.Lset_long:
and valw, valw, 255
bic dst, dstin, 15
str q0, [dstin]
cmp count, 160
ccmp valw, 0, 0, hs
b.ne .Lno_zva
#ifndef SKIP_ZVA_CHECK
mrs zva_val, dczid_el0
and zva_val, zva_val, 31
cmp zva_val, 4 /* ZVA size is 64 bytes. */
b.ne .Lno_zva
#endif
str q0, [dst, 16]
stp q0, q0, [dst, 32]
bic dst, dst, 63
sub count, dstend, dst /* Count is now 64 too large. */
sub count, count, 128 /* Adjust count and bias for loop. */
.p2align 4
.Lzva_loop:
add dst, dst, 64
dc zva, dst
subs count, count, 64
b.hi .Lzva_loop
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.Lno_zva:
sub count, dstend, dst /* Count is 16 too large. */
sub dst, dst, 16 /* Dst is biased by -32. */
sub count, count, 64 + 16 /* Adjust count and bias for loop. */
.Lno_zva_loop:
stp q0, q0, [dst, 32]
stp q0, q0, [dst, 64]!
subs count, count, 64
b.hi .Lno_zva_loop
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.size memset,.-memset
|
paritytech/polkaports | 1,166 | libs/musl/src/string/i386/memset.s | .global memset
.type memset,@function
memset:
mov 12(%esp),%ecx
cmp $62,%ecx
ja 2f
mov 8(%esp),%dl
mov 4(%esp),%eax
test %ecx,%ecx
jz 1f
mov %dl,%dh
mov %dl,(%eax)
mov %dl,-1(%eax,%ecx)
cmp $2,%ecx
jbe 1f
mov %dx,1(%eax)
mov %dx,(-1-2)(%eax,%ecx)
cmp $6,%ecx
jbe 1f
shl $16,%edx
mov 8(%esp),%dl
mov 8(%esp),%dh
mov %edx,(1+2)(%eax)
mov %edx,(-1-2-4)(%eax,%ecx)
cmp $14,%ecx
jbe 1f
mov %edx,(1+2+4)(%eax)
mov %edx,(1+2+4+4)(%eax)
mov %edx,(-1-2-4-8)(%eax,%ecx)
mov %edx,(-1-2-4-4)(%eax,%ecx)
cmp $30,%ecx
jbe 1f
mov %edx,(1+2+4+8)(%eax)
mov %edx,(1+2+4+8+4)(%eax)
mov %edx,(1+2+4+8+8)(%eax)
mov %edx,(1+2+4+8+12)(%eax)
mov %edx,(-1-2-4-8-16)(%eax,%ecx)
mov %edx,(-1-2-4-8-12)(%eax,%ecx)
mov %edx,(-1-2-4-8-8)(%eax,%ecx)
mov %edx,(-1-2-4-8-4)(%eax,%ecx)
1: ret
2: movzbl 8(%esp),%eax
mov %edi,12(%esp)
imul $0x1010101,%eax
mov 4(%esp),%edi
test $15,%edi
mov %eax,-4(%edi,%ecx)
jnz 2f
1: shr $2, %ecx
rep
stosl
mov 4(%esp),%eax
mov 12(%esp),%edi
ret
2: xor %edx,%edx
sub %edi,%edx
and $15,%edx
mov %eax,(%edi)
mov %eax,4(%edi)
mov %eax,8(%edi)
mov %eax,12(%edi)
sub %edx,%ecx
add %edx,%edi
jmp 1b
|
paritytech/polkaports | 1,128 | libs/musl/src/string/x86_64/memset.s | .global memset
.type memset,@function
memset:
movzbq %sil,%rax
mov $0x101010101010101,%r8
imul %r8,%rax
cmp $126,%rdx
ja 2f
test %edx,%edx
jz 1f
mov %sil,(%rdi)
mov %sil,-1(%rdi,%rdx)
cmp $2,%edx
jbe 1f
mov %ax,1(%rdi)
mov %ax,(-1-2)(%rdi,%rdx)
cmp $6,%edx
jbe 1f
mov %eax,(1+2)(%rdi)
mov %eax,(-1-2-4)(%rdi,%rdx)
cmp $14,%edx
jbe 1f
mov %rax,(1+2+4)(%rdi)
mov %rax,(-1-2-4-8)(%rdi,%rdx)
cmp $30,%edx
jbe 1f
mov %rax,(1+2+4+8)(%rdi)
mov %rax,(1+2+4+8+8)(%rdi)
mov %rax,(-1-2-4-8-16)(%rdi,%rdx)
mov %rax,(-1-2-4-8-8)(%rdi,%rdx)
cmp $62,%edx
jbe 1f
mov %rax,(1+2+4+8+16)(%rdi)
mov %rax,(1+2+4+8+16+8)(%rdi)
mov %rax,(1+2+4+8+16+16)(%rdi)
mov %rax,(1+2+4+8+16+24)(%rdi)
mov %rax,(-1-2-4-8-16-32)(%rdi,%rdx)
mov %rax,(-1-2-4-8-16-24)(%rdi,%rdx)
mov %rax,(-1-2-4-8-16-16)(%rdi,%rdx)
mov %rax,(-1-2-4-8-16-8)(%rdi,%rdx)
1: mov %rdi,%rax
ret
2: test $15,%edi
mov %rdi,%r8
mov %rax,-8(%rdi,%rdx)
mov %rdx,%rcx
jnz 2f
1: shr $3,%rcx
rep
stosq
mov %r8,%rax
ret
2: xor %edx,%edx
sub %edi,%edx
and $15,%edx
mov %rax,(%rdi)
mov %rax,8(%rdi)
sub %rdx,%rcx
add %rdx,%rdi
jmp 1b
|
paritytech/polkaports | 13,178 | libs/musl/src/string/arm/memcpy.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Optimized memcpy() for ARM.
*
* note that memcpy() always returns the destination pointer,
* so we have to preserve R0.
*/
/*
* This file has been modified from the original for use in musl libc.
* The main changes are: addition of .type memcpy,%function to make the
* code safely callable from thumb mode, adjusting the return
* instructions to be compatible with pre-thumb ARM cpus, removal of
* prefetch code that is not compatible with older cpus and support for
* building as thumb 2 and big-endian.
*/
.syntax unified
.global memcpy
.type memcpy,%function
memcpy:
/* The stack must always be 64-bits aligned to be compliant with the
* ARM ABI. Since we have to save R0, we might as well save R4
* which we can use for better pipelining of the reads below
*/
.fnstart
.save {r0, r4, lr}
stmfd sp!, {r0, r4, lr}
/* Making room for r5-r11 which will be spilled later */
.pad #28
sub sp, sp, #28
/* it simplifies things to take care of len<4 early */
cmp r2, #4
blo copy_last_3_and_return
/* compute the offset to align the source
* offset = (4-(src&3))&3 = -src & 3
*/
rsb r3, r1, #0
ands r3, r3, #3
beq src_aligned
/* align source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
* stall 2 cycles.
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrbmi r3, [r1], #1
ldrbcs r4, [r1], #1
ldrbcs r12,[r1], #1
strbmi r3, [r0], #1
strbcs r4, [r0], #1
strbcs r12,[r0], #1
src_aligned:
/* see if src and dst are aligned together (congruent) */
eor r12, r0, r1
tst r12, #3
bne non_congruent
/* Use post-incriment mode for stm to spill r5-r11 to reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
/* align the destination to a cache-line */
rsb r3, r0, #0
ands r3, r3, #0x1C
beq congruent_aligned32
cmp r3, r2
andhi r3, r2, #0x1C
/* conditionnaly copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28
ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmi r1!, {r8, r9} /* 8 bytes */
stmcs r0!, {r4, r5, r6, r7}
stmmi r0!, {r8, r9}
tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4
sub r2, r2, r3
congruent_aligned32:
/*
* here source is aligned to 32 bytes.
*/
cached_aligned32:
subs r2, r2, #32
blo less_than_32_left
/*
* We preload a cache-line up to 64 bytes ahead. On the 926, this will
* stall only until the requested world is fetched, but the linefill
* continues in the the background.
* While the linefill is going, we write our previous cache-line
* into the write-buffer (which should have some free space).
* When the linefill is done, the writebuffer will
* start dumping its content into memory
*
* While all this is going, we then load a full cache line into
* 8 registers, this cache line should be in the cache by now
* (or partly in the cache).
*
* This code should work well regardless of the source/dest alignment.
*
*/
/* Align the preload register to a cache-line because the cpu does
* "critical word first" (the first word requested is loaded first).
*/
@ bic r12, r1, #0x1F
@ add r12, r12, #64
1: ldmia r1!, { r4-r11 }
subs r2, r2, #32
/*
* NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
* for ARM9 preload will not be safely guarded by the preceding subs.
* When it is safely guarded the only possibility to have SIGSEGV here
* is because the caller overstates the length.
*/
@ ldrhi r3, [r12], #32 /* cheap ARM9 preload */
stmia r0!, { r4-r11 }
bhs 1b
add r2, r2, #32
less_than_32_left:
/*
* less than 32 bytes left at this point (length in r2)
*/
/* skip all this if there is nothing to do, which should
* be a common case (if not executed the code below takes
* about 16 cycles)
*/
tst r2, #0x1F
beq 1f
/* conditionnaly copies 0 to 31 bytes */
movs r12, r2, lsl #28
ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmi r1!, {r8, r9} /* 8 bytes */
stmcs r0!, {r4, r5, r6, r7}
stmmi r0!, {r8, r9}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrhmi r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strhmi r4, [r0], #2
tst r2, #0x1
ldrbne r3, [r1] /* last byte */
strbne r3, [r0]
/* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, lr}
bx lr
/********************************************************************/
non_congruent:
/*
* here source is aligned to 4 bytes
* but destination is not.
*
* in the code below r2 is the number of bytes read
* (the number of bytes written is always smaller, because we have
* partial words in the shift queue)
*/
cmp r2, #4
blo copy_last_3_and_return
/* Use post-incriment mode for stm to spill r5-r11 to reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
/* compute shifts needed to align src to dest */
rsb r5, r0, #0
and r5, r5, #3 /* r5 = # bytes in partial words */
mov r12, r5, lsl #3 /* r12 = right */
rsb lr, r12, #32 /* lr = left */
/* read the first word */
ldr r3, [r1], #4
sub r2, r2, #4
/* write a partial word (0 to 3 bytes), such that destination
* becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
*/
movs r5, r5, lsl #31
#if __ARMEB__
movmi r3, r3, ror #24
strbmi r3, [r0], #1
movcs r3, r3, ror #24
strbcs r3, [r0], #1
movcs r3, r3, ror #24
strbcs r3, [r0], #1
#else
strbmi r3, [r0], #1
movmi r3, r3, lsr #8
strbcs r3, [r0], #1
movcs r3, r3, lsr #8
strbcs r3, [r0], #1
movcs r3, r3, lsr #8
#endif
cmp r2, #4
blo partial_word_tail
#if __ARMEB__
mov r3, r3, lsr r12
mov r3, r3, lsl r12
#endif
/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
beq 2f
ldr r5, [r1], #4
sub r2, r2, #4
#if __ARMEB__
mov r4, r5, lsr lr
orr r4, r4, r3
mov r3, r5, lsl r12
#else
mov r4, r5, lsl lr
orr r4, r4, r3
mov r3, r5, lsr r12
#endif
str r4, [r0], #4
cmp r2, #4
bhs 1b
blo partial_word_tail
/* copy 32 bytes at a time */
2: subs r2, r2, #32
blo less_than_thirtytwo
/* Use immediate mode for the shifts, because there is an extra cycle
* for register shifts, which could account for up to 50% of
* performance hit.
*/
cmp r12, #24
beq loop24
cmp r12, #8
beq loop8
loop16:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
subs r2, r2, #32
ldrhs r12, [r1], #4
#if __ARMEB__
orr r3, r3, r4, lsr #16
mov r4, r4, lsl #16
orr r4, r4, r5, lsr #16
mov r5, r5, lsl #16
orr r5, r5, r6, lsr #16
mov r6, r6, lsl #16
orr r6, r6, r7, lsr #16
mov r7, r7, lsl #16
orr r7, r7, r8, lsr #16
mov r8, r8, lsl #16
orr r8, r8, r9, lsr #16
mov r9, r9, lsl #16
orr r9, r9, r10, lsr #16
mov r10, r10, lsl #16
orr r10, r10, r11, lsr #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsl #16
#else
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, r8, lsl #16
mov r8, r8, lsr #16
orr r8, r8, r9, lsl #16
mov r9, r9, lsr #16
orr r9, r9, r10, lsl #16
mov r10, r10, lsr #16
orr r10, r10, r11, lsl #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
#endif
bhs 1b
b less_than_thirtytwo
loop8:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
subs r2, r2, #32
ldrhs r12, [r1], #4
#if __ARMEB__
orr r3, r3, r4, lsr #24
mov r4, r4, lsl #8
orr r4, r4, r5, lsr #24
mov r5, r5, lsl #8
orr r5, r5, r6, lsr #24
mov r6, r6, lsl #8
orr r6, r6, r7, lsr #24
mov r7, r7, lsl #8
orr r7, r7, r8, lsr #24
mov r8, r8, lsl #8
orr r8, r8, r9, lsr #24
mov r9, r9, lsl #8
orr r9, r9, r10, lsr #24
mov r10, r10, lsl #8
orr r10, r10, r11, lsr #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsl #8
#else
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, r8, lsl #24
mov r8, r8, lsr #8
orr r8, r8, r9, lsl #24
mov r9, r9, lsr #8
orr r9, r9, r10, lsl #24
mov r10, r10, lsr #8
orr r10, r10, r11, lsl #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
#endif
bhs 1b
b less_than_thirtytwo
loop24:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
subs r2, r2, #32
ldrhs r12, [r1], #4
#if __ARMEB__
orr r3, r3, r4, lsr #8
mov r4, r4, lsl #24
orr r4, r4, r5, lsr #8
mov r5, r5, lsl #24
orr r5, r5, r6, lsr #8
mov r6, r6, lsl #24
orr r6, r6, r7, lsr #8
mov r7, r7, lsl #24
orr r7, r7, r8, lsr #8
mov r8, r8, lsl #24
orr r8, r8, r9, lsr #8
mov r9, r9, lsl #24
orr r9, r9, r10, lsr #8
mov r10, r10, lsl #24
orr r10, r10, r11, lsr #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsl #24
#else
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, r8, lsl #8
mov r8, r8, lsr #24
orr r8, r8, r9, lsl #8
mov r9, r9, lsr #24
orr r9, r9, r10, lsl #8
mov r10, r10, lsr #24
orr r10, r10, r11, lsl #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #24
#endif
bhs 1b
less_than_thirtytwo:
/* copy the last 0 to 31 bytes of the source */
rsb r12, lr, #32 /* we corrupted r12, recompute it */
add r2, r2, #32
cmp r2, #4
blo partial_word_tail
1: ldr r5, [r1], #4
sub r2, r2, #4
#if __ARMEB__
mov r4, r5, lsr lr
orr r4, r4, r3
mov r3, r5, lsl r12
#else
mov r4, r5, lsl lr
orr r4, r4, r3
mov r3, r5, lsr r12
#endif
str r4, [r0], #4
cmp r2, #4
bhs 1b
partial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
#if __ARMEB__
movmi r3, r3, ror #24
strbmi r3, [r0], #1
movcs r3, r3, ror #24
strbcs r3, [r0], #1
movcs r3, r3, ror #24
strbcs r3, [r0], #1
#else
strbmi r3, [r0], #1
movmi r3, r3, lsr #8
strbcs r3, [r0], #1
movcs r3, r3, lsr #8
strbcs r3, [r0], #1
#endif
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
copy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrbmi r2, [r1], #1
ldrbcs r3, [r1], #1
ldrbcs r12,[r1]
strbmi r2, [r0], #1
strbcs r3, [r0], #1
strbcs r12,[r0]
/* we're done! restore sp and spilled registers and return */
add sp, sp, #28
ldmfd sp!, {r0, r4, lr}
bx lr
|
paritytech/polkaports | 2,715 | libs/musl/src/fenv/i386/fenv.s | .hidden __hwcap
.global feclearexcept
.type feclearexcept,@function
feclearexcept:
mov 4(%esp),%ecx
and $0x3f,%ecx
fnstsw %ax
# consider sse fenv as well if the cpu has XMM capability
call 1f
1: addl $__hwcap-1b,(%esp)
pop %edx
testl $0x02000000,(%edx)
jz 2f
# maintain exceptions in the sse mxcsr, clear x87 exceptions
test %eax,%ecx
jz 1f
fnclex
1: push %edx
stmxcsr (%esp)
pop %edx
and $0x3f,%eax
or %eax,%edx
test %edx,%ecx
jz 1f
not %ecx
and %ecx,%edx
push %edx
ldmxcsr (%esp)
pop %edx
1: xor %eax,%eax
ret
# only do the expensive x87 fenv load/store when needed
2: test %eax,%ecx
jz 1b
not %ecx
and %ecx,%eax
test $0x3f,%eax
jz 1f
fnclex
jmp 1b
1: sub $32,%esp
fnstenv (%esp)
mov %al,4(%esp)
fldenv (%esp)
add $32,%esp
xor %eax,%eax
ret
.global feraiseexcept
.type feraiseexcept,@function
feraiseexcept:
mov 4(%esp),%eax
and $0x3f,%eax
sub $32,%esp
fnstenv (%esp)
or %al,4(%esp)
fldenv (%esp)
add $32,%esp
xor %eax,%eax
ret
.global __fesetround
.hidden __fesetround
.type __fesetround,@function
__fesetround:
mov 4(%esp),%ecx
push %eax
xor %eax,%eax
fnstcw (%esp)
andb $0xf3,1(%esp)
or %ch,1(%esp)
fldcw (%esp)
# consider sse fenv as well if the cpu has XMM capability
call 1f
1: addl $__hwcap-1b,(%esp)
pop %edx
testl $0x02000000,(%edx)
jz 1f
stmxcsr (%esp)
shl $3,%ch
andb $0x9f,1(%esp)
or %ch,1(%esp)
ldmxcsr (%esp)
1: pop %ecx
ret
.global fegetround
.type fegetround,@function
fegetround:
push %eax
fnstcw (%esp)
pop %eax
and $0xc00,%eax
ret
.global fegetenv
.type fegetenv,@function
fegetenv:
mov 4(%esp),%ecx
xor %eax,%eax
fnstenv (%ecx)
# consider sse fenv as well if the cpu has XMM capability
call 1f
1: addl $__hwcap-1b,(%esp)
pop %edx
testl $0x02000000,(%edx)
jz 1f
push %eax
stmxcsr (%esp)
pop %edx
and $0x3f,%edx
or %edx,4(%ecx)
1: ret
.global fesetenv
.type fesetenv,@function
fesetenv:
mov 4(%esp),%ecx
xor %eax,%eax
inc %ecx
jz 1f
fldenv -1(%ecx)
movl -1(%ecx),%ecx
jmp 2f
1: push %eax
push %eax
push %eax
push %eax
pushl $0xffff
push %eax
pushl $0x37f
fldenv (%esp)
add $28,%esp
# consider sse fenv as well if the cpu has XMM capability
2: call 1f
1: addl $__hwcap-1b,(%esp)
pop %edx
testl $0x02000000,(%edx)
jz 1f
# mxcsr := same rounding mode, cleared exceptions, default mask
and $0xc00,%ecx
shl $3,%ecx
or $0x1f80,%ecx
mov %ecx,4(%esp)
ldmxcsr 4(%esp)
1: ret
.global fetestexcept
.type fetestexcept,@function
fetestexcept:
mov 4(%esp),%ecx
and $0x3f,%ecx
fnstsw %ax
# consider sse fenv as well if the cpu has XMM capability
call 1f
1: addl $__hwcap-1b,(%esp)
pop %edx
testl $0x02000000,(%edx)
jz 1f
stmxcsr 4(%esp)
or 4(%esp),%eax
1: and %ecx,%eax
ret
|
paritytech/polkaports | 1,444 | libs/musl/src/fenv/x86_64/fenv.s | .global feclearexcept
.type feclearexcept,@function
feclearexcept:
# maintain exceptions in the sse mxcsr, clear x87 exceptions
mov %edi,%ecx
and $0x3f,%ecx
fnstsw %ax
test %eax,%ecx
jz 1f
fnclex
1: stmxcsr -8(%rsp)
and $0x3f,%eax
or %eax,-8(%rsp)
test %ecx,-8(%rsp)
jz 1f
not %ecx
and %ecx,-8(%rsp)
ldmxcsr -8(%rsp)
1: xor %eax,%eax
ret
.global feraiseexcept
.type feraiseexcept,@function
feraiseexcept:
and $0x3f,%edi
stmxcsr -8(%rsp)
or %edi,-8(%rsp)
ldmxcsr -8(%rsp)
xor %eax,%eax
ret
.global __fesetround
.hidden __fesetround
.type __fesetround,@function
__fesetround:
push %rax
xor %eax,%eax
mov %edi,%ecx
fnstcw (%rsp)
andb $0xf3,1(%rsp)
or %ch,1(%rsp)
fldcw (%rsp)
stmxcsr (%rsp)
shl $3,%ch
andb $0x9f,1(%rsp)
or %ch,1(%rsp)
ldmxcsr (%rsp)
pop %rcx
ret
.global fegetround
.type fegetround,@function
fegetround:
push %rax
stmxcsr (%rsp)
pop %rax
shr $3,%eax
and $0xc00,%eax
ret
.global fegetenv
.type fegetenv,@function
fegetenv:
xor %eax,%eax
fnstenv (%rdi)
stmxcsr 28(%rdi)
ret
.global fesetenv
.type fesetenv,@function
fesetenv:
xor %eax,%eax
inc %rdi
jz 1f
fldenv -1(%rdi)
ldmxcsr 27(%rdi)
ret
1: push %rax
push %rax
pushq $0xffff
pushq $0x37f
fldenv (%rsp)
pushq $0x1f80
ldmxcsr (%rsp)
add $40,%rsp
ret
.global fetestexcept
.type fetestexcept,@function
fetestexcept:
and $0x3f,%edi
push %rax
stmxcsr (%rsp)
pop %rsi
fnstsw %ax
or %esi,%eax
and %edi,%eax
ret
|
paritytech/polkaports | 1,233 | libs/musl/src/fenv/sh/fenv.S | #if __SH_FPU_ANY__ || __SH4__
.global fegetround
.type fegetround, @function
fegetround:
sts fpscr, r0
rts
and #3, r0
.global __fesetround
.hidden __fesetround
.type __fesetround, @function
__fesetround:
sts fpscr, r0
mov #-4, r1
and r1, r0
or r4, r0
lds r0, fpscr
rts
mov #0, r0
.global fetestexcept
.type fetestexcept, @function
fetestexcept:
sts fpscr, r0
and r4, r0
rts
and #0x7c, r0
.global feclearexcept
.type feclearexcept, @function
feclearexcept:
mov r4, r0
and #0x7c, r0
not r0, r4
sts fpscr, r0
and r4, r0
lds r0, fpscr
rts
mov #0, r0
.global feraiseexcept
.type feraiseexcept, @function
feraiseexcept:
mov r4, r0
and #0x7c, r0
sts fpscr, r4
or r4, r0
lds r0, fpscr
rts
mov #0, r0
.global fegetenv
.type fegetenv, @function
fegetenv:
sts fpscr, r0
mov.l r0, @r4
rts
mov #0, r0
.global fesetenv
.type fesetenv, @function
fesetenv:
mov r4, r0
cmp/eq #-1, r0
bf 1f
! the default environment is complicated by the fact that we need to
! preserve the current precision bit, which we do not know a priori
sts fpscr, r0
mov #8, r1
swap.w r1, r1
bra 2f
and r1, r0
1: mov.l @r4, r0 ! non-default environment
2: lds r0, fpscr
rts
mov #0, r0
#endif
|
paritytech/polkaports | 1,388 | libs/musl/src/fenv/loongarch64/fenv.S | #ifndef __loongarch_soft_float
#ifdef BROKEN_LOONGARCH_FCSR_ASM
#define FCSR $r0
#else
#define FCSR $fcsr0
#endif
.global feclearexcept
.type feclearexcept,@function
feclearexcept:
li.w $t0, 0x1f0000
and $a0, $a0, $t0
movfcsr2gr $t1, FCSR
andn $t1, $t1, $a0
movgr2fcsr FCSR, $t1
li.w $a0, 0
jr $ra
.global feraiseexcept
.type feraiseexcept,@function
feraiseexcept:
li.w $t0, 0x1f0000
and $a0, $a0, $t0
movfcsr2gr $t1, FCSR
or $t1, $t1, $a0
movgr2fcsr FCSR, $t1
li.w $a0, 0
jr $ra
.global fetestexcept
.type fetestexcept,@function
fetestexcept:
li.w $t0, 0x1f0000
and $a0, $a0, $t0
movfcsr2gr $t1, FCSR
and $a0, $t1, $a0
jr $ra
.global fegetround
.type fegetround,@function
fegetround:
movfcsr2gr $t0, FCSR
andi $a0, $t0, 0x300
jr $ra
.global __fesetround
.hidden __fesetround
.type __fesetround,@function
__fesetround:
li.w $t0, 0x300
and $a0, $a0, $t0
movfcsr2gr $t1, FCSR
andn $t1, $t1, $t0
or $t1, $t1, $a0
movgr2fcsr FCSR, $t1
li.w $a0, 0
jr $ra
.global fegetenv
.type fegetenv,@function
fegetenv:
movfcsr2gr $t0, FCSR
st.w $t0, $a0, 0
li.w $a0, 0
jr $ra
.global fesetenv
.type fesetenv,@function
fesetenv:
addi.d $t0, $a0, 1
beq $t0, $r0, 1f
ld.w $t0, $a0, 0
1: movgr2fcsr FCSR, $t0
li.w $a0, 0
jr $ra
#endif
|
paritytech/polkaports | 1,444 | libs/musl/src/fenv/x32/fenv.s | .global feclearexcept
.type feclearexcept,@function
feclearexcept:
# maintain exceptions in the sse mxcsr, clear x87 exceptions
mov %edi,%ecx
and $0x3f,%ecx
fnstsw %ax
test %eax,%ecx
jz 1f
fnclex
1: stmxcsr -8(%esp)
and $0x3f,%eax
or %eax,-8(%esp)
test %ecx,-8(%esp)
jz 1f
not %ecx
and %ecx,-8(%esp)
ldmxcsr -8(%esp)
1: xor %eax,%eax
ret
.global feraiseexcept
.type feraiseexcept,@function
feraiseexcept:
and $0x3f,%edi
stmxcsr -8(%esp)
or %edi,-8(%esp)
ldmxcsr -8(%esp)
xor %eax,%eax
ret
.global __fesetround
.hidden __fesetround
.type __fesetround,@function
__fesetround:
push %rax
xor %eax,%eax
mov %edi,%ecx
fnstcw (%esp)
andb $0xf3,1(%esp)
or %ch,1(%esp)
fldcw (%esp)
stmxcsr (%esp)
shl $3,%ch
andb $0x9f,1(%esp)
or %ch,1(%esp)
ldmxcsr (%esp)
pop %rcx
ret
.global fegetround
.type fegetround,@function
fegetround:
push %rax
stmxcsr (%esp)
pop %rax
shr $3,%eax
and $0xc00,%eax
ret
.global fegetenv
.type fegetenv,@function
fegetenv:
xor %eax,%eax
fnstenv (%edi)
stmxcsr 28(%edi)
ret
.global fesetenv
.type fesetenv,@function
fesetenv:
xor %eax,%eax
inc %edi
jz 1f
fldenv -1(%edi)
ldmxcsr 27(%edi)
ret
1: push %rax
push %rax
pushq $0xffff
pushq $0x37f
fldenv (%esp)
pushq $0x1f80
ldmxcsr (%esp)
add $40,%esp
ret
.global fetestexcept
.type fetestexcept,@function
fetestexcept:
and $0x3f,%edi
push %rax
stmxcsr (%esp)
pop %rsi
fnstsw %ax
or %esi,%eax
and %edi,%eax
ret
|
paritytech/polkaports | 1,150 | libs/musl/src/fenv/mips/fenv.S | #ifndef __mips_soft_float
.set noreorder
.global feclearexcept
.type feclearexcept,@function
feclearexcept:
and $4, $4, 0x7c
cfc1 $5, $31
or $5, $5, $4
xor $5, $5, $4
ctc1 $5, $31
jr $ra
li $2, 0
.global feraiseexcept
.type feraiseexcept,@function
feraiseexcept:
and $4, $4, 0x7c
cfc1 $5, $31
or $5, $5, $4
ctc1 $5, $31
jr $ra
li $2, 0
.global fetestexcept
.type fetestexcept,@function
fetestexcept:
and $4, $4, 0x7c
cfc1 $2, $31
jr $ra
and $2, $2, $4
.global fegetround
.type fegetround,@function
fegetround:
cfc1 $2, $31
jr $ra
andi $2, $2, 3
.global __fesetround
.hidden __fesetround
.type __fesetround,@function
__fesetround:
cfc1 $5, $31
li $6, -4
and $5, $5, $6
or $5, $5, $4
ctc1 $5, $31
jr $ra
li $2, 0
.global fegetenv
.type fegetenv,@function
fegetenv:
cfc1 $5, $31
sw $5, 0($4)
jr $ra
li $2, 0
.global fesetenv
.type fesetenv,@function
fesetenv:
addiu $5, $4, 1
beq $5, $0, 1f
nop
lw $5, 0($4)
1: ctc1 $5, $31
jr $ra
li $2, 0
#endif
|
paritytech/polkaports | 1,933 | libs/musl/src/fenv/powerpc/fenv.S | #if !defined(_SOFT_FLOAT) && !defined(__NO_FPRS__)
.global feclearexcept
.type feclearexcept,@function
feclearexcept:
andis. 3,3,0x3e00
/* if (r3 & FE_INVALID) r3 |= all_invalid_flags */
andis. 0,3,0x2000
stwu 1,-16(1)
beq- 0,1f
oris 3,3,0x01f8
ori 3,3,0x0700
1:
/*
* note: fpscr contains various fpu status and control
* flags and we dont check if r3 may alter other flags
* than the exception related ones
* ufpscr &= ~r3
*/
mffs 0
stfd 0,8(1)
lwz 9,12(1)
andc 9,9,3
stw 9,12(1)
lfd 0,8(1)
mtfsf 255,0
/* return 0 */
li 3,0
addi 1,1,16
blr
.global feraiseexcept
.type feraiseexcept,@function
feraiseexcept:
andis. 3,3,0x3e00
/* if (r3 & FE_INVALID) r3 |= software_invalid_flag */
andis. 0,3,0x2000
stwu 1,-16(1)
beq- 0,1f
ori 3,3,0x0400
1:
/* fpscr |= r3 */
mffs 0
stfd 0,8(1)
lwz 9,12(1)
or 9,9,3
stw 9,12(1)
lfd 0,8(1)
mtfsf 255,0
/* return 0 */
li 3,0
addi 1,1,16
blr
.global fetestexcept
.type fetestexcept,@function
fetestexcept:
andis. 3,3,0x3e00
/* return r3 & fpscr */
stwu 1,-16(1)
mffs 0
stfd 0,8(1)
lwz 9,12(1)
addi 1,1,16
and 3,3,9
blr
.global fegetround
.type fegetround,@function
fegetround:
/* return fpscr & 3 */
stwu 1,-16(1)
mffs 0
stfd 0,8(1)
lwz 3,12(1)
addi 1,1,16
clrlwi 3,3,30
blr
.global __fesetround
.hidden __fesetround
.type __fesetround,@function
__fesetround:
/*
* note: invalid input is not checked, r3 < 4 must hold
* fpscr = (fpscr & -4U) | r3
*/
stwu 1,-16(1)
mffs 0
stfd 0,8(1)
lwz 9,12(1)
clrrwi 9,9,2
or 9,9,3
stw 9,12(1)
lfd 0,8(1)
mtfsf 255,0
/* return 0 */
li 3,0
addi 1,1,16
blr
.global fegetenv
.type fegetenv,@function
fegetenv:
/* *r3 = fpscr */
mffs 0
stfd 0,0(3)
/* return 0 */
li 3,0
blr
.global fesetenv
.type fesetenv,@function
fesetenv:
cmpwi 3, -1
bne 1f
mflr 4
bl 2f
.zero 8
2: mflr 3
mtlr 4
1: /* fpscr = *r3 */
lfd 0,0(3)
mtfsf 255,0
/* return 0 */
li 3,0
blr
#endif
|
paritytech/polkaports | 7,188 | apps/quake/r_aliasa.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// r_aliasa.s
// x86 assembly-language Alias model transform and project code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
.data
Lfloat_1: .single 1.0
Ltemp: .long 0
Ltempbx: .long 0
Lcoords: .long 0, 0, 0
.text
#define fv 12+4
#define pstverts 12+8
.globl C(R_AliasTransformAndProjectFinalVerts)
C(R_AliasTransformAndProjectFinalVerts):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
// this is odd, but quake crashes on pushl/popl
movl %ebx, Ltempbx
// int i, temp;
// float lightcos, *plightnormal, zi;
// trivertx_t *pverts;
// pverts = r_apverts;
movl C(r_oldapverts),%esi
movl C(r_apverts),%ebx
// for (i=0 ; i<r_anumverts ; i++, fv++, pverts++, pstverts++)
// {
movl pstverts(%esp),%ebp
movl fv(%esp),%edi
movl C(r_anumverts),%ecx
subl %edx,%edx
Lloop2:
// // transform and project
// zi = 1.0 / (DotProduct(pverts->v, aliastransform[2]) +
// aliastransform[2][3]);
movb (%esi),%dl
movb %dl,Lcoords
fildl Lcoords
movb (%ebx),%dl
movb %dl,Lcoords
fildl Lcoords
fsub %st(1),%st(0)
fmuls C(r_framelerp)
faddp %st(0),%st(1) // v[0]
movb 1(%esi),%dl
movb %dl,Lcoords+4
fildl Lcoords+4
movb 1(%ebx),%dl
movb %dl,Lcoords+4
fildl Lcoords+4
fsub %st(1),%st(0)
fmuls C(r_framelerp)
faddp %st(0),%st(1) // v[1] | v[0]
movb 2(%esi),%dl
movb %dl,Lcoords+8
fildl Lcoords+8
movb 2(%ebx),%dl
movb %dl,Lcoords+8
fildl Lcoords+8
fsub %st(1),%st(0)
fmuls C(r_framelerp)
faddp %st(0),%st(1) // v[2] | v[1] | v[0]
fld %st(2) // v[0] | v[2] | v[1] | v[0]
fmuls C(aliastransform)+32 // accum | v[2] | v[1] | v[0]
fld %st(2) // v[1] | accum | v[2] | v[1] | v[0]
fmuls C(aliastransform)+36 // accum2 | accum | v[2] | v[1] | v[0]
fxch %st(1) // accum | accum2 | v[2] | v[1] | v[0]
fadds C(aliastransform)+44 // accum | accum2 | v[2] | v[1] | v[0]
fld %st(2) // v[2] | accum | accum2 | v[2] | v[1] | v[0]
fmuls C(aliastransform)+40 // accum3 | accum | accum2 | v[2] | v[1] |
// v[0]
fxch %st(1) // accum | accum3 | accum2 | v[2] | v[1] | v[0]
faddp %st(0),%st(2) // accum3 | accum | v[2] | v[1] | v[0]
subl %edx,%edx
movb tv_lightnormalindex(%esi),%dl
movl stv_s(%ebp),%eax
movl %eax,fv_v+8(%edi)
faddp %st(0),%st(1) // z | v[2] | v[1] | v[0]
movl stv_t(%ebp),%eax
movl %eax,fv_v+12(%edi)
fdivrs Lfloat_1 // zi | v[2] | v[1] | v[0]
// fv->v[2] = pstverts->s;
// fv->v[3] = pstverts->t;
// fv->flags = pstverts->onseam;
movl stv_onseam(%ebp),%eax
movl %eax,fv_flags(%edi)
// movl fv_size(%edi),%eax
// movl stv_size(%ebp),%eax
// movl 4(%esi),%eax
fxch %st(3) // v[0] | v[2] | v[1] | zi
fld %st(0) // v[0] | v[0] | v[2] | v[1] | zi
fmuls C(aliastransform)+0 // xaccum | v[0] | v[2] | v[1] | zi
// lightcos = DotProduct (plightnormal, r_plightvec);
movl C(r_shadedots_quant), %eax,
shll $12,%eax // r_avertexnormal_dots[r_shadedots_quant][pverts->lightnormalindex]
flds C(r_avertexnormal_dots)(,%edx,4)
flds %st(0)
movb tv_lightnormalindex(%ebx),%dl
flds C(r_avertexnormal_dots)(,%edx,4)
fsubp %st(0),%st(1)
fmuls C(r_framelerp)
faddp %st(0),%st(1)
fmuls C(r_shadelight)
// laccum | xaccum | v[0] | v[2] | v[1] | zi
movl C(r_ambientlight),%eax
// FIXME: fast float->int conversion?
fistpl Ltemp
subl Ltemp,%eax
// // clamp; because we limited the minimum ambient and shading light, we
// // don't have to clamp low light, just bright
// if (fv->v[4] < 0)
// fv->v[4] = 0;
jns Lp2
subl %eax,%eax
// }
Lp2:
// fv->v[4] = temp;
//
// // x, y, and z are scaled down by 1/2**31 in the transform, so 1/z is
// // scaled up by 1/2**31, and the scaling cancels out for x and y in the
// // projection
// fv->v[0] = ((DotProduct(pverts->v, aliastransform[0]) +
// aliastransform[0][3]) * zi) + aliasxcenter;
// fv->v[1] = ((DotProduct(pverts->v, aliastransform[1]) +
// aliastransform[1][3]) * zi) + aliasycenter;
// fv->v[5] = zi;
fxch %st(1) // v[0] | xaccum | v[2] | v[1] | zi
fmuls C(aliastransform)+16 // yaccum | xaccum | v[2] | v[1] | zi
fxch %st(3) // v[1] | xaccum | v[2] | yaccum | zi
fld %st(0) // v[1] | v[1] | xaccum | v[2] | yaccum | zi
fmuls C(aliastransform)+4 // xaccum2 | v[1] | xaccum | v[2] | yaccum |zi
fxch %st(1) // v[1] | xaccum2 | xaccum | v[2] | yaccum |zi
movl %eax,fv_v+16(%edi)
fmuls C(aliastransform)+20 // yaccum2 | xaccum2 | xaccum | v[2] | yaccum|
// zi
fxch %st(2) // xaccum | xaccum2 | yaccum2 | v[2] | yaccum|
// zi
fadds C(aliastransform)+12 // xaccum | xaccum2 | yaccum2 | v[2] | yaccum|
// zi
fxch %st(4) // yaccum | xaccum2 | yaccum2 | v[2] | xaccum|
// zi
fadds C(aliastransform)+28 // yaccum | xaccum2 | yaccum2 | v[2] | xaccum|
// zi
fxch %st(3) // v[2] | xaccum2 | yaccum2 | yaccum | xaccum|
// zi
fld %st(0) // v[2] | v[2] | xaccum2 | yaccum2 | yaccum |
// xaccum | zi
fmuls C(aliastransform)+8 // xaccum3 | v[2] | xaccum2 | yaccum2 |yaccum|
// xaccum | zi
fxch %st(1) // v[2] | xaccum3 | xaccum2 | yaccum2 |yaccum|
// xaccum | zi
fmuls C(aliastransform)+24 // yaccum3 | xaccum3 | xaccum2 | yaccum2 |
// yaccum | xaccum | zi
fxch %st(5) // xaccum | xaccum3 | xaccum2 | yaccum2 |
// yaccum | yaccum3 | zi
faddp %st(0),%st(2) // xaccum3 | xaccum | yaccum2 | yaccum |
// yaccum3 | zi
fxch %st(3) // yaccum | xaccum | yaccum2 | xaccum3 |
// yaccum3 | zi
faddp %st(0),%st(2) // xaccum | yaccum | xaccum3 | yaccum3 | zi
addl $(tv_size),%esi
addl $(tv_size),%ebx
faddp %st(0),%st(2) // yaccum | x | yaccum3 | zi
faddp %st(0),%st(2) // x | y | zi
addl $(stv_size),%ebp
fmul %st(2),%st(0) // x/z | y | zi
fxch %st(1) // y | x/z | zi
fmul %st(2),%st(0) // y/z | x/z | zi
fxch %st(1) // x/z | y/z | zi
fadds C(aliasxcenter) // u | y/z | zi
fxch %st(1) // y/z | u | zi
fadds C(aliasycenter) // v | u | zi
fxch %st(2) // zi | u | v
// FIXME: fast float->int conversion?
fistpl fv_v+20(%edi) // u | v
fistpl fv_v+0(%edi) // v
fistpl fv_v+4(%edi)
// }
addl $(fv_size),%edi
decl %ecx
jnz Lloop2
// this is odd, but quake crashes on pushl/popl
movl %ebx, Ltempbx
popl %esi // restore register variables
popl %edi
popl %ebp // restore the caller's stack frame
ret
#endif // id386
|
paritytech/polkaports | 25,939 | apps/quake/d_rast.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_rast.s
// x86 assembly-language horizontal 8-bpp span-drawing code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
.data
.text
#define in 4
#define out 8
.align 2
.globl C(TransformVector)
C(TransformVector):
movl in(%esp),%eax
movl out(%esp),%edx
flds (%eax) // in[0]
fmuls C(vright) // in[0]*vright[0]
flds (%eax) // in[0] | in[0]*vright[0]
fmuls C(vup) // in[0]*vup[0] | in[0]*vright[0]
flds (%eax) // in[0] | in[0]*vup[0] | in[0]*vright[0]
fmuls C(vpn) // in[0]*vpn[0] | in[0]*vup[0] | in[0]*vright[0]
flds 4(%eax) // in[1] | ...
fmuls C(vright)+4 // in[1]*vright[1] | ...
flds 4(%eax) // in[1] | in[1]*vright[1] | ...
fmuls C(vup)+4 // in[1]*vup[1] | in[1]*vright[1] | ...
flds 4(%eax) // in[1] | in[1]*vup[1] | in[1]*vright[1] | ...
fmuls C(vpn)+4 // in[1]*vpn[1] | in[1]*vup[1] | in[1]*vright[1] | ...
fxch %st(2) // in[1]*vright[1] | in[1]*vup[1] | in[1]*vpn[1] | ...
faddp %st(0),%st(5) // in[1]*vup[1] | in[1]*vpn[1] | ...
faddp %st(0),%st(3) // in[1]*vpn[1] | ...
faddp %st(0),%st(1) // vpn_accum | vup_accum | vright_accum
flds 8(%eax) // in[2] | ...
fmuls C(vright)+8 // in[2]*vright[2] | ...
flds 8(%eax) // in[2] | in[2]*vright[2] | ...
fmuls C(vup)+8 // in[2]*vup[2] | in[2]*vright[2] | ...
flds 8(%eax) // in[2] | in[2]*vup[2] | in[2]*vright[2] | ...
fmuls C(vpn)+8 // in[2]*vpn[2] | in[2]*vup[2] | in[2]*vright[2] | ...
fxch %st(2) // in[2]*vright[2] | in[2]*vup[2] | in[2]*vpn[2] | ...
faddp %st(0),%st(5) // in[2]*vup[2] | in[2]*vpn[2] | ...
faddp %st(0),%st(3) // in[2]*vpn[2] | ...
faddp %st(0),%st(1) // vpn_accum | vup_accum | vright_accum
fstps 8(%edx) // out[2]
fstps 4(%edx) // out[1]
fstps (%edx) // out[0]
ret
//----------------------------------------------------------------------
// 8-bpp horizontal span drawing code for polygons, with no transparency.
//
// Assumes there is at least one span in pspans, and that every span
// contains at least one pixel
//----------------------------------------------------------------------
// out-of-line, rarely-needed clamping code
LClampHigh0:
movl C(bbextents),%esi
jmp LClampReentry0
LClampHighOrLow0:
jg LClampHigh0
xorl %esi,%esi
jmp LClampReentry0
LClampHigh1:
movl C(bbextentt),%edx
jmp LClampReentry1
LClampHighOrLow1:
jg LClampHigh1
xorl %edx,%edx
jmp LClampReentry1
LClampLow2:
movl $2048,%ebp
jmp LClampReentry2
LClampHigh2:
movl C(bbextents),%ebp
jmp LClampReentry2
LClampLow3:
movl $2048,%ecx
jmp LClampReentry3
LClampHigh3:
movl C(bbextentt),%ecx
jmp LClampReentry3
LClampLow4:
movl $2048,%eax
jmp LClampReentry4
LClampHigh4:
movl C(bbextents),%eax
jmp LClampReentry4
LClampLow5:
movl $2048,%ebx
jmp LClampReentry5
LClampHigh5:
movl C(bbextentt),%ebx
jmp LClampReentry5
#define pspans 4+16
.align 4
.globl C(D_DrawSpans8)
C(D_DrawSpans8):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
//
// set up scaled-by-8 steps, for 8-long segments; also set up cacheblock
// and span list pointers
//
// TODO: any overlap from rearranging?
flds C(d_sdivzstepu)
fmuls fp_8
movl C(cacheblock),%edx
flds C(d_tdivzstepu)
fmuls fp_8
movl pspans(%esp),%ebx // point to the first span descriptor
flds C(d_zistepu)
fmuls fp_8
movl %edx,pbase // pbase = cacheblock
fstps zi8stepu
fstps tdivz8stepu
fstps sdivz8stepu
LSpanLoop:
//
// set up the initial s/z, t/z, and 1/z on the FP stack, and generate the
// initial s and t values
//
// FIXME: pipeline FILD?
fildl espan_t_v(%ebx)
fildl espan_t_u(%ebx)
fld %st(1) // dv | du | dv
fmuls C(d_sdivzstepv) // dv*d_sdivzstepv | du | dv
fld %st(1) // du | dv*d_sdivzstepv | du | dv
fmuls C(d_sdivzstepu) // du*d_sdivzstepu | dv*d_sdivzstepv | du | dv
fld %st(2) // du | du*d_sdivzstepu | dv*d_sdivzstepv | du | dv
fmuls C(d_tdivzstepu) // du*d_tdivzstepu | du*d_sdivzstepu |
// dv*d_sdivzstepv | du | dv
fxch %st(1) // du*d_sdivzstepu | du*d_tdivzstepu |
// dv*d_sdivzstepv | du | dv
faddp %st(0),%st(2) // du*d_tdivzstepu |
// du*d_sdivzstepu + dv*d_sdivzstepv | du | dv
fxch %st(1) // du*d_sdivzstepu + dv*d_sdivzstepv |
// du*d_tdivzstepu | du | dv
fld %st(3) // dv | du*d_sdivzstepu + dv*d_sdivzstepv |
// du*d_tdivzstepu | du | dv
fmuls C(d_tdivzstepv) // dv*d_tdivzstepv |
// du*d_sdivzstepu + dv*d_sdivzstepv |
// du*d_tdivzstepu | du | dv
fxch %st(1) // du*d_sdivzstepu + dv*d_sdivzstepv |
// dv*d_tdivzstepv | du*d_tdivzstepu | du | dv
fadds C(d_sdivzorigin) // sdivz = d_sdivzorigin + dv*d_sdivzstepv +
// du*d_sdivzstepu; stays in %st(2) at end
fxch %st(4) // dv | dv*d_tdivzstepv | du*d_tdivzstepu | du |
// s/z
fmuls C(d_zistepv) // dv*d_zistepv | dv*d_tdivzstepv |
// du*d_tdivzstepu | du | s/z
fxch %st(1) // dv*d_tdivzstepv | dv*d_zistepv |
// du*d_tdivzstepu | du | s/z
faddp %st(0),%st(2) // dv*d_zistepv |
// dv*d_tdivzstepv + du*d_tdivzstepu | du | s/z
fxch %st(2) // du | dv*d_tdivzstepv + du*d_tdivzstepu |
// dv*d_zistepv | s/z
fmuls C(d_zistepu) // du*d_zistepu |
// dv*d_tdivzstepv + du*d_tdivzstepu |
// dv*d_zistepv | s/z
fxch %st(1) // dv*d_tdivzstepv + du*d_tdivzstepu |
// du*d_zistepu | dv*d_zistepv | s/z
fadds C(d_tdivzorigin) // tdivz = d_tdivzorigin + dv*d_tdivzstepv +
// du*d_tdivzstepu; stays in %st(1) at end
fxch %st(2) // dv*d_zistepv | du*d_zistepu | t/z | s/z
faddp %st(0),%st(1) // dv*d_zistepv + du*d_zistepu | t/z | s/z
flds fp_64k // fp_64k | dv*d_zistepv + du*d_zistepu | t/z | s/z
fxch %st(1) // dv*d_zistepv + du*d_zistepu | fp_64k | t/z | s/z
fadds C(d_ziorigin) // zi = d_ziorigin + dv*d_zistepv +
// du*d_zistepu; stays in %st(0) at end
// 1/z | fp_64k | t/z | s/z
//
// calculate and clamp s & t
//
fdivr %st(0),%st(1) // 1/z | z*64k | t/z | s/z
//
// point %edi to the first pixel in the span
//
movl C(d_viewbuffer),%ecx
movl espan_t_v(%ebx),%eax
movl %ebx,pspantemp // preserve spans pointer
movl C(tadjust),%edx
movl C(sadjust),%esi
movl C(d_scantable)(,%eax,4),%edi // v * screenwidth
addl %ecx,%edi
movl espan_t_u(%ebx),%ecx
addl %ecx,%edi // pdest = &pdestspan[scans->u];
movl espan_t_count(%ebx),%ecx
//
// now start the FDIV for the end of the span
//
cmpl $8,%ecx
ja LSetupNotLast1
decl %ecx
jz LCleanup1 // if only one pixel, no need to start an FDIV
movl %ecx,spancountminus1
// finish up the s and t calcs
fxch %st(1) // z*64k | 1/z | t/z | s/z
fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
fxch %st(1) // z*64k | s | 1/z | t/z | s/z
fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
fxch %st(1) // s | t | 1/z | t/z | s/z
fistpl s // 1/z | t | t/z | s/z
fistpl t // 1/z | t/z | s/z
fildl spancountminus1
flds C(d_tdivzstepu) // C(d_tdivzstepu) | spancountminus1
flds C(d_zistepu) // C(d_zistepu) | C(d_tdivzstepu) | spancountminus1
fmul %st(2),%st(0) // C(d_zistepu)*scm1 | C(d_tdivzstepu) | scm1
fxch %st(1) // C(d_tdivzstepu) | C(d_zistepu)*scm1 | scm1
fmul %st(2),%st(0) // C(d_tdivzstepu)*scm1 | C(d_zistepu)*scm1 | scm1
fxch %st(2) // scm1 | C(d_zistepu)*scm1 | C(d_tdivzstepu)*scm1
fmuls C(d_sdivzstepu) // C(d_sdivzstepu)*scm1 | C(d_zistepu)*scm1 |
// C(d_tdivzstepu)*scm1
fxch %st(1) // C(d_zistepu)*scm1 | C(d_sdivzstepu)*scm1 |
// C(d_tdivzstepu)*scm1
faddp %st(0),%st(3) // C(d_sdivzstepu)*scm1 | C(d_tdivzstepu)*scm1
fxch %st(1) // C(d_tdivzstepu)*scm1 | C(d_sdivzstepu)*scm1
faddp %st(0),%st(3) // C(d_sdivzstepu)*scm1
faddp %st(0),%st(3)
flds fp_64k
fdiv %st(1),%st(0) // this is what we've gone to all this trouble to
// overlap
jmp LFDIVInFlight1
LCleanup1:
// finish up the s and t calcs
fxch %st(1) // z*64k | 1/z | t/z | s/z
fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
fxch %st(1) // z*64k | s | 1/z | t/z | s/z
fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
fxch %st(1) // s | t | 1/z | t/z | s/z
fistpl s // 1/z | t | t/z | s/z
fistpl t // 1/z | t/z | s/z
jmp LFDIVInFlight1
.align 4
LSetupNotLast1:
// finish up the s and t calcs
fxch %st(1) // z*64k | 1/z | t/z | s/z
fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
fxch %st(1) // z*64k | s | 1/z | t/z | s/z
fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
fxch %st(1) // s | t | 1/z | t/z | s/z
fistpl s // 1/z | t | t/z | s/z
fistpl t // 1/z | t/z | s/z
fadds zi8stepu
fxch %st(2)
fadds sdivz8stepu
fxch %st(2)
flds tdivz8stepu
faddp %st(0),%st(2)
flds fp_64k
fdiv %st(1),%st(0) // z = 1/1/z
// this is what we've gone to all this trouble to
// overlap
LFDIVInFlight1:
addl s,%esi
addl t,%edx
movl C(bbextents),%ebx
movl C(bbextentt),%ebp
cmpl %ebx,%esi
ja LClampHighOrLow0
LClampReentry0:
movl %esi,s
movl pbase,%ebx
shll $16,%esi
cmpl %ebp,%edx
movl %esi,sfracf
ja LClampHighOrLow1
LClampReentry1:
movl %edx,t
movl s,%esi // sfrac = scans->sfrac;
shll $16,%edx
movl t,%eax // tfrac = scans->tfrac;
sarl $16,%esi
movl %edx,tfracf
//
// calculate the texture starting address
//
sarl $16,%eax
movl C(cachewidth),%edx
imull %edx,%eax // (tfrac >> 16) * cachewidth
addl %ebx,%esi
addl %eax,%esi // psource = pbase + (sfrac >> 16) +
// ((tfrac >> 16) * cachewidth);
//
// determine whether last span or not
//
cmpl $8,%ecx
jna LLastSegment
//
// not the last segment; do full 8-wide segment
//
LNotLastSegment:
//
// advance s/z, t/z, and 1/z, and calculate s & t at end of span and steps to
// get there
//
// pick up after the FDIV that was left in flight previously
fld %st(0) // duplicate it
fmul %st(4),%st(0) // s = s/z * z
fxch %st(1)
fmul %st(3),%st(0) // t = t/z * z
fxch %st(1)
fistpl snext
fistpl tnext
movl snext,%eax
movl tnext,%edx
movb (%esi),%bl // get first source texel
subl $8,%ecx // count off this segments' pixels
movl C(sadjust),%ebp
movl %ecx,counttemp // remember count of remaining pixels
movl C(tadjust),%ecx
movb %bl,(%edi) // store first dest pixel
addl %eax,%ebp
addl %edx,%ecx
movl C(bbextents),%eax
movl C(bbextentt),%edx
cmpl $2048,%ebp
jl LClampLow2
cmpl %eax,%ebp
ja LClampHigh2
LClampReentry2:
cmpl $2048,%ecx
jl LClampLow3
cmpl %edx,%ecx
ja LClampHigh3
LClampReentry3:
movl %ebp,snext
movl %ecx,tnext
subl s,%ebp
subl t,%ecx
//
// set up advancetable
//
movl %ecx,%eax
movl %ebp,%edx
sarl $19,%eax // tstep >>= 16;
jz LZero
sarl $19,%edx // sstep >>= 16;
movl C(cachewidth),%ebx
imull %ebx,%eax
jmp LSetUp1
LZero:
sarl $19,%edx // sstep >>= 16;
movl C(cachewidth),%ebx
LSetUp1:
addl %edx,%eax // add in sstep
// (tstep >> 16) * cachewidth + (sstep >> 16);
movl tfracf,%edx
movl %eax,advancetable+4 // advance base in t
addl %ebx,%eax // ((tstep >> 16) + 1) * cachewidth +
// (sstep >> 16);
shll $13,%ebp // left-justify sstep fractional part
movl sfracf,%ebx
shll $13,%ecx // left-justify tstep fractional part
movl %eax,advancetable // advance extra in t
movl %ecx,tstep
addl %ecx,%edx // advance tfrac fractional part by tstep frac
sbbl %ecx,%ecx // turn tstep carry into -1 (0 if none)
addl %ebp,%ebx // advance sfrac fractional part by sstep frac
adcl advancetable+4(,%ecx,4),%esi // point to next source texel
addl tstep,%edx
sbbl %ecx,%ecx
movb (%esi),%al
addl %ebp,%ebx
movb %al,1(%edi)
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
sbbl %ecx,%ecx
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
sbbl %ecx,%ecx
movb %al,2(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
sbbl %ecx,%ecx
movb %al,3(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
//
// start FDIV for end of next segment in flight, so it can overlap
//
movl counttemp,%ecx
cmpl $8,%ecx // more than one segment after this?
ja LSetupNotLast2 // yes
decl %ecx
jz LFDIVInFlight2 // if only one pixel, no need to start an FDIV
movl %ecx,spancountminus1
fildl spancountminus1
flds C(d_zistepu) // C(d_zistepu) | spancountminus1
fmul %st(1),%st(0) // C(d_zistepu)*scm1 | scm1
flds C(d_tdivzstepu) // C(d_tdivzstepu) | C(d_zistepu)*scm1 | scm1
fmul %st(2),%st(0) // C(d_tdivzstepu)*scm1 | C(d_zistepu)*scm1 | scm1
fxch %st(1) // C(d_zistepu)*scm1 | C(d_tdivzstepu)*scm1 | scm1
faddp %st(0),%st(3) // C(d_tdivzstepu)*scm1 | scm1
fxch %st(1) // scm1 | C(d_tdivzstepu)*scm1
fmuls C(d_sdivzstepu) // C(d_sdivzstepu)*scm1 | C(d_tdivzstepu)*scm1
fxch %st(1) // C(d_tdivzstepu)*scm1 | C(d_sdivzstepu)*scm1
faddp %st(0),%st(3) // C(d_sdivzstepu)*scm1
flds fp_64k // 64k | C(d_sdivzstepu)*scm1
fxch %st(1) // C(d_sdivzstepu)*scm1 | 64k
faddp %st(0),%st(4) // 64k
fdiv %st(1),%st(0) // this is what we've gone to all this trouble to
// overlap
jmp LFDIVInFlight2
.align 4
LSetupNotLast2:
fadds zi8stepu
fxch %st(2)
fadds sdivz8stepu
fxch %st(2)
flds tdivz8stepu
faddp %st(0),%st(2)
flds fp_64k
fdiv %st(1),%st(0) // z = 1/1/z
// this is what we've gone to all this trouble to
// overlap
LFDIVInFlight2:
movl %ecx,counttemp
addl tstep,%edx
sbbl %ecx,%ecx
movb %al,4(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
sbbl %ecx,%ecx
movb %al,5(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
sbbl %ecx,%ecx
movb %al,6(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl $8,%edi
movl %edx,tfracf
movl snext,%edx
movl %ebx,sfracf
movl tnext,%ebx
movl %edx,s
movl %ebx,t
movl counttemp,%ecx // retrieve count
//
// determine whether last span or not
//
cmpl $8,%ecx // are there multiple segments remaining?
movb %al,-1(%edi)
ja LNotLastSegment // yes
//
// last segment of scan
//
LLastSegment:
//
// advance s/z, t/z, and 1/z, and calculate s & t at end of span and steps to
// get there. The number of pixels left is variable, and we want to land on the
// last pixel, not step one past it, so we can't run into arithmetic problems
//
testl %ecx,%ecx
jz LNoSteps // just draw the last pixel and we're done
// pick up after the FDIV that was left in flight previously
fld %st(0) // duplicate it
fmul %st(4),%st(0) // s = s/z * z
fxch %st(1)
fmul %st(3),%st(0) // t = t/z * z
fxch %st(1)
fistpl snext
fistpl tnext
movb (%esi),%al // load first texel in segment
movl C(tadjust),%ebx
movb %al,(%edi) // store first pixel in segment
movl C(sadjust),%eax
addl snext,%eax
addl tnext,%ebx
movl C(bbextents),%ebp
movl C(bbextentt),%edx
cmpl $2048,%eax
jl LClampLow4
cmpl %ebp,%eax
ja LClampHigh4
LClampReentry4:
movl %eax,snext
cmpl $2048,%ebx
jl LClampLow5
cmpl %edx,%ebx
ja LClampHigh5
LClampReentry5:
cmpl $1,%ecx // don't bother
je LOnlyOneStep // if two pixels in segment, there's only one step,
// of the segment length
subl s,%eax
subl t,%ebx
addl %eax,%eax // convert to 15.17 format so multiply by 1.31
addl %ebx,%ebx // reciprocal yields 16.48
imull reciprocal_table-8(,%ecx,4) // sstep = (snext - s) / (spancount-1)
movl %edx,%ebp
movl %ebx,%eax
imull reciprocal_table-8(,%ecx,4) // tstep = (tnext - t) / (spancount-1)
LSetEntryvec:
//
// set up advancetable
//
movl entryvec_table(,%ecx,4),%ebx
movl %edx,%eax
movl %ebx,jumptemp // entry point into code for RET later
movl %ebp,%ecx
sarl $16,%edx // tstep >>= 16;
movl C(cachewidth),%ebx
sarl $16,%ecx // sstep >>= 16;
imull %ebx,%edx
addl %ecx,%edx // add in sstep
// (tstep >> 16) * cachewidth + (sstep >> 16);
movl tfracf,%ecx
movl %edx,advancetable+4 // advance base in t
addl %ebx,%edx // ((tstep >> 16) + 1) * cachewidth +
// (sstep >> 16);
shll $16,%ebp // left-justify sstep fractional part
movl sfracf,%ebx
shll $16,%eax // left-justify tstep fractional part
movl %edx,advancetable // advance extra in t
movl %eax,tstep
movl %ecx,%edx
addl %eax,%edx
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
jmp *jumptemp // jump to the number-of-pixels handler
//----------------------------------------
LNoSteps:
movb (%esi),%al // load first texel in segment
subl $7,%edi // adjust for hardwired offset
jmp LEndSpan
LOnlyOneStep:
subl s,%eax
subl t,%ebx
movl %eax,%ebp
movl %ebx,%edx
jmp LSetEntryvec
//----------------------------------------
.globl Entry2_8
Entry2_8:
subl $6,%edi // adjust for hardwired offsets
movb (%esi),%al
jmp LLEntry2_8
//----------------------------------------
.globl Entry3_8
Entry3_8:
subl $5,%edi // adjust for hardwired offsets
addl %eax,%edx
movb (%esi),%al
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
jmp LLEntry3_8
//----------------------------------------
.globl Entry4_8
Entry4_8:
subl $4,%edi // adjust for hardwired offsets
addl %eax,%edx
movb (%esi),%al
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
jmp LLEntry4_8
//----------------------------------------
.globl Entry5_8
Entry5_8:
subl $3,%edi // adjust for hardwired offsets
addl %eax,%edx
movb (%esi),%al
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
jmp LLEntry5_8
//----------------------------------------
.globl Entry6_8
Entry6_8:
subl $2,%edi // adjust for hardwired offsets
addl %eax,%edx
movb (%esi),%al
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
jmp LLEntry6_8
//----------------------------------------
.globl Entry7_8
Entry7_8:
decl %edi // adjust for hardwired offsets
addl %eax,%edx
movb (%esi),%al
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
jmp LLEntry7_8
//----------------------------------------
.globl Entry8_8
Entry8_8:
addl %eax,%edx
movb (%esi),%al
sbbl %ecx,%ecx
addl %ebp,%ebx
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
sbbl %ecx,%ecx
movb %al,1(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
LLEntry7_8:
sbbl %ecx,%ecx
movb %al,2(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
LLEntry6_8:
sbbl %ecx,%ecx
movb %al,3(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
LLEntry5_8:
sbbl %ecx,%ecx
movb %al,4(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
addl tstep,%edx
LLEntry4_8:
sbbl %ecx,%ecx
movb %al,5(%edi)
addl %ebp,%ebx
movb (%esi),%al
adcl advancetable+4(,%ecx,4),%esi
LLEntry3_8:
movb %al,6(%edi)
movb (%esi),%al
LLEntry2_8:
LEndSpan:
//
// clear s/z, t/z, 1/z from FP stack
//
fstp %st(0)
fstp %st(0)
fstp %st(0)
movl pspantemp,%ebx // restore spans pointer
movl espan_t_pnext(%ebx),%ebx // point to next span
testl %ebx,%ebx // any more spans?
movb %al,7(%edi)
jnz LSpanLoop // more spans
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
//----------------------------------------------------------------------
// 8-bpp horizontal span z drawing codefor polygons, with no transparency.
//
// Assumes there is at least one span in pzspans, and that every span
// contains at least one pixel
//----------------------------------------------------------------------
.text
// z-clamp on a non-negative gradient span
LClamp:
movl $0x40000000,%edx
xorl %ebx,%ebx
fstp %st(0)
jmp LZDraw
// z-clamp on a negative gradient span
LClampNeg:
movl $0x40000000,%edx
xorl %ebx,%ebx
fstp %st(0)
jmp LZDrawNeg
#define pzspans 4+16
.globl C(D_DrawZSpans)
C(D_DrawZSpans):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
flds C(d_zistepu)
movl C(d_zistepu),%eax
movl pzspans(%esp),%esi
testl %eax,%eax
jz LFNegSpan
fmuls Float2ToThe31nd
fistpl izistep // note: we are relying on FP exceptions being turned
// off here to avoid range problems
movl izistep,%ebx // remains loaded for all spans
LFSpanLoop:
// set up the initial 1/z value
fildl espan_t_v(%esi)
fildl espan_t_u(%esi)
movl espan_t_v(%esi),%ecx
movl C(d_pzbuffer),%edi
fmuls C(d_zistepu)
fxch %st(1)
fmuls C(d_zistepv)
fxch %st(1)
fadds C(d_ziorigin)
imull C(d_zrowbytes),%ecx
faddp %st(0),%st(1)
// clamp if z is nearer than 2 (1/z > 0.5)
fcoms float_point5
addl %ecx,%edi
movl espan_t_u(%esi),%edx
addl %edx,%edx // word count
movl espan_t_count(%esi),%ecx
addl %edx,%edi // pdest = &pdestspan[scans->u];
pushl %esi // preserve spans pointer
fnstsw %ax
testb $0x45,%ah
jz LClamp
fmuls Float2ToThe31nd
fistpl izi // note: we are relying on FP exceptions being turned
// off here to avoid problems when the span is closer
// than 1/(2**31)
movl izi,%edx
// at this point:
// %ebx = izistep
// %ecx = count
// %edx = izi
// %edi = pdest
LZDraw:
// do a single pixel up front, if necessary to dword align the destination
testl $2,%edi
jz LFMiddle
movl %edx,%eax
addl %ebx,%edx
shrl $16,%eax
decl %ecx
movw %ax,(%edi)
addl $2,%edi
// do middle a pair of aligned dwords at a time
LFMiddle:
pushl %ecx
shrl $1,%ecx // count / 2
jz LFLast // no aligned dwords to do
shrl $1,%ecx // (count / 2) / 2
jnc LFMiddleLoop // even number of aligned dwords to do
movl %edx,%eax
addl %ebx,%edx
shrl $16,%eax
movl %edx,%esi
addl %ebx,%edx
andl $0xFFFF0000,%esi
orl %esi,%eax
movl %eax,(%edi)
addl $4,%edi
andl %ecx,%ecx
jz LFLast
LFMiddleLoop:
movl %edx,%eax
addl %ebx,%edx
shrl $16,%eax
movl %edx,%esi
addl %ebx,%edx
andl $0xFFFF0000,%esi
orl %esi,%eax
movl %edx,%ebp
movl %eax,(%edi)
addl %ebx,%edx
shrl $16,%ebp
movl %edx,%esi
addl %ebx,%edx
andl $0xFFFF0000,%esi
orl %esi,%ebp
movl %ebp,4(%edi) // FIXME: eliminate register contention
addl $8,%edi
decl %ecx
jnz LFMiddleLoop
LFLast:
popl %ecx // retrieve count
popl %esi // retrieve span pointer
// do the last, unaligned pixel, if there is one
andl $1,%ecx // is there an odd pixel left to do?
jz LFSpanDone // no
shrl $16,%edx
movw %dx,(%edi) // do the final pixel's z
LFSpanDone:
movl espan_t_pnext(%esi),%esi
testl %esi,%esi
jnz LFSpanLoop
jmp LFDone
LFNegSpan:
fmuls FloatMinus2ToThe31nd
fistpl izistep // note: we are relying on FP exceptions being turned
// off here to avoid range problems
movl izistep,%ebx // remains loaded for all spans
LFNegSpanLoop:
// set up the initial 1/z value
fildl espan_t_v(%esi)
fildl espan_t_u(%esi)
movl espan_t_v(%esi),%ecx
movl C(d_pzbuffer),%edi
fmuls C(d_zistepu)
fxch %st(1)
fmuls C(d_zistepv)
fxch %st(1)
fadds C(d_ziorigin)
imull C(d_zrowbytes),%ecx
faddp %st(0),%st(1)
// clamp if z is nearer than 2 (1/z > 0.5)
fcoms float_point5
addl %ecx,%edi
movl espan_t_u(%esi),%edx
addl %edx,%edx // word count
movl espan_t_count(%esi),%ecx
addl %edx,%edi // pdest = &pdestspan[scans->u];
pushl %esi // preserve spans pointer
fnstsw %ax
testb $0x45,%ah
jz LClampNeg
fmuls Float2ToThe31nd
fistpl izi // note: we are relying on FP exceptions being turned
// off here to avoid problems when the span is closer
// than 1/(2**31)
movl izi,%edx
// at this point:
// %ebx = izistep
// %ecx = count
// %edx = izi
// %edi = pdest
LZDrawNeg:
// do a single pixel up front, if necessary to dword align the destination
testl $2,%edi
jz LFNegMiddle
movl %edx,%eax
subl %ebx,%edx
shrl $16,%eax
decl %ecx
movw %ax,(%edi)
addl $2,%edi
// do middle a pair of aligned dwords at a time
LFNegMiddle:
pushl %ecx
shrl $1,%ecx // count / 2
jz LFNegLast // no aligned dwords to do
shrl $1,%ecx // (count / 2) / 2
jnc LFNegMiddleLoop // even number of aligned dwords to do
movl %edx,%eax
subl %ebx,%edx
shrl $16,%eax
movl %edx,%esi
subl %ebx,%edx
andl $0xFFFF0000,%esi
orl %esi,%eax
movl %eax,(%edi)
addl $4,%edi
andl %ecx,%ecx
jz LFNegLast
LFNegMiddleLoop:
movl %edx,%eax
subl %ebx,%edx
shrl $16,%eax
movl %edx,%esi
subl %ebx,%edx
andl $0xFFFF0000,%esi
orl %esi,%eax
movl %edx,%ebp
movl %eax,(%edi)
subl %ebx,%edx
shrl $16,%ebp
movl %edx,%esi
subl %ebx,%edx
andl $0xFFFF0000,%esi
orl %esi,%ebp
movl %ebp,4(%edi) // FIXME: eliminate register contention
addl $8,%edi
decl %ecx
jnz LFNegMiddleLoop
LFNegLast:
popl %ecx // retrieve count
popl %esi // retrieve span pointer
// do the last, unaligned pixel, if there is one
andl $1,%ecx // is there an odd pixel left to do?
jz LFNegSpanDone // no
shrl $16,%edx
movw %dx,(%edi) // do the final pixel's z
LFNegSpanDone:
movl espan_t_pnext(%esi),%esi
testl %esi,%esi
jnz LFNegSpanLoop
LFDone:
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
#endif // id386
|
paritytech/polkaports | 9,059 | apps/quake/math.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// math.s
// x86 assembly-language math routines.
#include "asm_i386.h"
#include "quakeasm.h"
#if id386
.data
.align 4
Ljmptab: .long Lcase0, Lcase1, Lcase2, Lcase3
.long Lcase4, Lcase5, Lcase6, Lcase7
.text
#define EMINS 4+4
#define EMAXS 4+8
#define P 4+12
.align 2
.globl C(BoxOnPlaneSide)
C(BoxOnPlaneSide):
pushl %ebx
movl P(%esp),%edx
movl EMINS(%esp),%ecx
xorl %eax,%eax
movl EMAXS(%esp),%ebx
movb pl_signbits(%edx),%al
cmpb $8,%al
jge Lerror
flds pl_normal(%edx) // p->normal[0]
fld %st(0) // p->normal[0] | p->normal[0]
jmp Ljmptab(,%eax,4)
//dist1= p->normal[0]*emaxs[0] + p->normal[1]*emaxs[1] + p->normal[2]*emaxs[2];
//dist2= p->normal[0]*emins[0] + p->normal[1]*emins[1] + p->normal[2]*emins[2];
Lcase0:
fmuls (%ebx) // p->normal[0]*emaxs[0] | p->normal[0]
flds pl_normal+4(%edx) // p->normal[1] | p->normal[0]*emaxs[0] |
// p->normal[0]
fxch %st(2) // p->normal[0] | p->normal[0]*emaxs[0] |
// p->normal[1]
fmuls (%ecx) // p->normal[0]*emins[0] |
// p->normal[0]*emaxs[0] | p->normal[1]
fxch %st(2) // p->normal[1] | p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fld %st(0) // p->normal[1] | p->normal[1] |
// p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fmuls 4(%ebx) // p->normal[1]*emaxs[1] | p->normal[1] |
// p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
flds pl_normal+8(%edx) // p->normal[2] | p->normal[1]*emaxs[1] |
// p->normal[1] | p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fxch %st(2) // p->normal[1] | p->normal[1]*emaxs[1] |
// p->normal[2] | p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fmuls 4(%ecx) // p->normal[1]*emins[1] |
// p->normal[1]*emaxs[1] |
// p->normal[2] | p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fxch %st(2) // p->normal[2] | p->normal[1]*emaxs[1] |
// p->normal[1]*emins[1] |
// p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fld %st(0) // p->normal[2] | p->normal[2] |
// p->normal[1]*emaxs[1] |
// p->normal[1]*emins[1] |
// p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fmuls 8(%ebx) // p->normal[2]*emaxs[2] |
// p->normal[2] |
// p->normal[1]*emaxs[1] |
// p->normal[1]*emins[1] |
// p->normal[0]*emaxs[0] |
// p->normal[0]*emins[0]
fxch %st(5) // p->normal[0]*emins[0] |
// p->normal[2] |
// p->normal[1]*emaxs[1] |
// p->normal[1]*emins[1] |
// p->normal[0]*emaxs[0] |
// p->normal[2]*emaxs[2]
faddp %st(0),%st(3) //p->normal[2] |
// p->normal[1]*emaxs[1] |
// p->normal[1]*emins[1]+p->normal[0]*emins[0]|
// p->normal[0]*emaxs[0] |
// p->normal[2]*emaxs[2]
fmuls 8(%ecx) //p->normal[2]*emins[2] |
// p->normal[1]*emaxs[1] |
// p->normal[1]*emins[1]+p->normal[0]*emins[0]|
// p->normal[0]*emaxs[0] |
// p->normal[2]*emaxs[2]
fxch %st(1) //p->normal[1]*emaxs[1] |
// p->normal[2]*emins[2] |
// p->normal[1]*emins[1]+p->normal[0]*emins[0]|
// p->normal[0]*emaxs[0] |
// p->normal[2]*emaxs[2]
faddp %st(0),%st(3) //p->normal[2]*emins[2] |
// p->normal[1]*emins[1]+p->normal[0]*emins[0]|
// p->normal[0]*emaxs[0]+p->normal[1]*emaxs[1]|
// p->normal[2]*emaxs[2]
fxch %st(3) //p->normal[2]*emaxs[2] +
// p->normal[1]*emins[1]+p->normal[0]*emins[0]|
// p->normal[0]*emaxs[0]+p->normal[1]*emaxs[1]|
// p->normal[2]*emins[2]
faddp %st(0),%st(2) //p->normal[1]*emins[1]+p->normal[0]*emins[0]|
// dist1 | p->normal[2]*emins[2]
jmp LSetSides
//dist1= p->normal[0]*emins[0] + p->normal[1]*emaxs[1] + p->normal[2]*emaxs[2];
//dist2= p->normal[0]*emaxs[0] + p->normal[1]*emins[1] + p->normal[2]*emins[2];
Lcase1:
fmuls (%ecx) // emins[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ebx) // emaxs[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ebx) // emaxs[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ecx) // emins[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ebx) // emaxs[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ecx) // emins[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
jmp LSetSides
//dist1= p->normal[0]*emaxs[0] + p->normal[1]*emins[1] + p->normal[2]*emaxs[2];
//dist2= p->normal[0]*emins[0] + p->normal[1]*emaxs[1] + p->normal[2]*emins[2];
Lcase2:
fmuls (%ebx) // emaxs[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ecx) // emins[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ecx) // emins[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ebx) // emaxs[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ebx) // emaxs[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ecx) // emins[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
jmp LSetSides
//dist1= p->normal[0]*emins[0] + p->normal[1]*emins[1] + p->normal[2]*emaxs[2];
//dist2= p->normal[0]*emaxs[0] + p->normal[1]*emaxs[1] + p->normal[2]*emins[2];
Lcase3:
fmuls (%ecx) // emins[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ebx) // emaxs[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ecx) // emins[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ebx) // emaxs[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ebx) // emaxs[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ecx) // emins[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
jmp LSetSides
//dist1= p->normal[0]*emaxs[0] + p->normal[1]*emaxs[1] + p->normal[2]*emins[2];
//dist2= p->normal[0]*emins[0] + p->normal[1]*emins[1] + p->normal[2]*emaxs[2];
Lcase4:
fmuls (%ebx) // emaxs[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ecx) // emins[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ebx) // emaxs[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ecx) // emins[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ecx) // emins[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ebx) // emaxs[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
jmp LSetSides
//dist1= p->normal[0]*emins[0] + p->normal[1]*emaxs[1] + p->normal[2]*emins[2];
//dist2= p->normal[0]*emaxs[0] + p->normal[1]*emins[1] + p->normal[2]*emaxs[2];
Lcase5:
fmuls (%ecx) // emins[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ebx) // emaxs[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ebx) // emaxs[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ecx) // emins[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ecx) // emins[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ebx) // emaxs[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
jmp LSetSides
//dist1= p->normal[0]*emaxs[0] + p->normal[1]*emins[1] + p->normal[2]*emins[2];
//dist2= p->normal[0]*emins[0] + p->normal[1]*emaxs[1] + p->normal[2]*emaxs[2];
Lcase6:
fmuls (%ebx) // emaxs[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ecx) // emins[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ecx) // emins[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ebx) // emaxs[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ecx) // emins[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ebx) // emaxs[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
jmp LSetSides
//dist1= p->normal[0]*emins[0] + p->normal[1]*emins[1] + p->normal[2]*emins[2];
//dist2= p->normal[0]*emaxs[0] + p->normal[1]*emaxs[1] + p->normal[2]*emaxs[2];
Lcase7:
fmuls (%ecx) // emins[0]
flds pl_normal+4(%edx)
fxch %st(2)
fmuls (%ebx) // emaxs[0]
fxch %st(2)
fld %st(0)
fmuls 4(%ecx) // emins[1]
flds pl_normal+8(%edx)
fxch %st(2)
fmuls 4(%ebx) // emaxs[1]
fxch %st(2)
fld %st(0)
fmuls 8(%ecx) // emins[2]
fxch %st(5)
faddp %st(0),%st(3)
fmuls 8(%ebx) // emaxs[2]
fxch %st(1)
faddp %st(0),%st(3)
fxch %st(3)
faddp %st(0),%st(2)
LSetSides:
// sides = 0;
// if (dist1 >= p->dist)
// sides = 1;
// if (dist2 < p->dist)
// sides |= 2;
faddp %st(0),%st(2) // dist1 | dist2
fcomps pl_dist(%edx)
xorl %ecx,%ecx
fnstsw %ax
fcomps pl_dist(%edx)
andb $1,%ah
xorb $1,%ah
addb %ah,%cl
fnstsw %ax
andb $1,%ah
addb %ah,%ah
addb %ah,%cl
// return sides;
popl %ebx
movl %ecx,%eax // return status
ret
Lerror:
call C(BOPS_Error)
#endif // id386
|
paritytech/polkaports | 4,033 | apps/quake/d_varsa.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_varsa.s
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
.data
//-------------------------------------------------------
// global refresh variables
//-------------------------------------------------------
// FIXME: put all refresh variables into one contiguous block. Make into one
// big structure, like cl or sv?
.align 4
.globl C(d_sdivzstepu)
.globl C(d_tdivzstepu)
.globl C(d_zistepu)
.globl C(d_sdivzstepv)
.globl C(d_tdivzstepv)
.globl C(d_zistepv)
.globl C(d_sdivzorigin)
.globl C(d_tdivzorigin)
.globl C(d_ziorigin)
C(d_sdivzstepu): .single 0
C(d_tdivzstepu): .single 0
C(d_zistepu): .single 0
C(d_sdivzstepv): .single 0
C(d_tdivzstepv): .single 0
C(d_zistepv): .single 0
C(d_sdivzorigin): .single 0
C(d_tdivzorigin): .single 0
C(d_ziorigin): .single 0
.globl C(sadjust)
.globl C(tadjust)
.globl C(bbextents)
.globl C(bbextentt)
C(sadjust): .long 0
C(tadjust): .long 0
C(bbextents): .long 0
C(bbextentt): .long 0
.globl C(cacheblock)
.globl C(d_viewbuffer)
.globl C(cachewidth)
.globl C(d_pzbuffer)
.globl C(d_zrowbytes)
.globl C(d_zwidth)
C(cacheblock): .long 0
C(cachewidth): .long 0
C(d_viewbuffer): .long 0
C(d_pzbuffer): .long 0
C(d_zrowbytes): .long 0
C(d_zwidth): .long 0
//-------------------------------------------------------
// ASM-only variables
//-------------------------------------------------------
.globl izi
izi: .long 0
.globl pbase, s, t, sfracf, tfracf, snext, tnext
.globl spancountminus1
.globl zi8stepu, sdivz8stepu, tdivz8stepu, pz
s: .long 0
t: .long 0
snext: .long 0
tnext: .long 0
sfracf: .long 0
tfracf: .long 0
pbase: .long 0
zi8stepu: .long 0
sdivz8stepu: .long 0
tdivz8stepu: .long 0
spancountminus1: .long 0
pz: .long 0
.globl izistep
izistep: .long 0
//-------------------------------------------------------
// local variables for d_parta.s
//-------------------------------------------------------
.globl DP_Count, DP_u, DP_v, DP_32768, DP_Color, DP_Pix, DP_EntryTable
DP_Count: .long 0
DP_u: .long 0
DP_v: .long 0
DP_32768: .single 32768.0
DP_Color: .long 0
DP_Pix: .long 0
.extern DP_1x1
.extern DP_2x2
.extern DP_3x3
.extern DP_4x4
DP_EntryTable: .long DP_1x1, DP_2x2, DP_3x3, DP_4x4
//
// advancetable is 8 bytes, but points to the middle of that range so negative
// offsets will work
//
.globl advancetable, sstep, tstep, pspantemp, counttemp, jumptemp
advancetable: .long 0, 0
sstep: .long 0
tstep: .long 0
pspantemp: .long 0
counttemp: .long 0
jumptemp: .long 0
// 1/2, 1/3, 1/4, 1/5, 1/6, and 1/7 in 0.32 form
.globl reciprocal_table, entryvec_table
reciprocal_table: .long 0x40000000, 0x2aaaaaaa, 0x20000000
.long 0x19999999, 0x15555555, 0x12492492
.extern Entry2_8
.extern Entry3_8
.extern Entry4_8
.extern Entry5_8
.extern Entry6_8
.extern Entry7_8
.extern Entry8_8
entryvec_table: .long 0, Entry2_8, Entry3_8, Entry4_8
.long Entry5_8, Entry6_8, Entry7_8, Entry8_8
.extern Spr8Entry2_8
.extern Spr8Entry3_8
.extern Spr8Entry4_8
.extern Spr8Entry5_8
.extern Spr8Entry6_8
.extern Spr8Entry7_8
.extern Spr8Entry8_8
.globl spr8entryvec_table
spr8entryvec_table: .long 0, Spr8Entry2_8, Spr8Entry3_8, Spr8Entry4_8
.long Spr8Entry5_8, Spr8Entry6_8, Spr8Entry7_8, Spr8Entry8_8
#endif // id386
|
paritytech/polkaports | 2,610 | apps/quake/d_copy.s | //
// d_copy.s
// x86 assembly-language screen copying code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
.data
LCopyWidth: .long 0
LBlockSrcStep: .long 0
LBlockDestStep: .long 0
LSrcDelta: .long 0
LDestDelta: .long 0
#define bufptr 4+16
// copies 16 rows per plane at a pop; idea is that 16*512 = 8k, and since
// no Mode X mode is wider than 360, all the data should fit in the cache for
// the passes for the next 3 planes
.text
.globl C(VGA_UpdatePlanarScreen)
C(VGA_UpdatePlanarScreen):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
movl C(VGA_bufferrowbytes),%eax
shll $1,%eax
movl %eax,LBlockSrcStep
movl C(VGA_rowbytes),%eax
shll $1,%eax
movl %eax,LBlockDestStep
movl $0x3C4,%edx
movb $2,%al
outb %al,%dx // point the SC to the Map Mask
incl %edx
movl bufptr(%esp),%esi
movl C(VGA_pagebase),%edi
movl C(VGA_height),%ebp
shrl $1,%ebp
movl C(VGA_width),%ecx
movl C(VGA_bufferrowbytes),%eax
subl %ecx,%eax
movl %eax,LSrcDelta
movl C(VGA_rowbytes),%eax
shll $2,%eax
subl %ecx,%eax
movl %eax,LDestDelta
shrl $4,%ecx
movl %ecx,LCopyWidth
LRowLoop:
movb $1,%al
LPlaneLoop:
outb %al,%dx
movb $2,%ah
pushl %esi
pushl %edi
LRowSetLoop:
movl LCopyWidth,%ecx
LColumnLoop:
movb 12(%esi),%bh
movb 8(%esi),%bl
shll $16,%ebx
movb 4(%esi),%bh
movb (%esi),%bl
movl %ebx,(%edi)
addl $16,%esi
addl $4,%edi
decl %ecx
jnz LColumnLoop
addl LDestDelta,%edi
addl LSrcDelta,%esi
decb %ah
jnz LRowSetLoop
popl %edi
popl %esi
incl %esi
shlb $1,%al
cmpb $16,%al
jnz LPlaneLoop
subl $4,%esi
addl LBlockSrcStep,%esi
addl LBlockDestStep,%edi
decl %ebp
jnz LRowLoop
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
#define srcptr 4+16
#define destptr 8+16
#define width 12+16
#define height 16+16
#define srcrowbytes 20+16
#define destrowbytes 24+16
.globl C(VGA_UpdateLinearScreen)
C(VGA_UpdateLinearScreen):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
cld
movl srcptr(%esp),%esi
movl destptr(%esp),%edi
movl width(%esp),%ebx
movl srcrowbytes(%esp),%eax
subl %ebx,%eax
movl destrowbytes(%esp),%edx
subl %ebx,%edx
shrl $2,%ebx
movl height(%esp),%ebp
LLRowLoop:
movl %ebx,%ecx
rep/movsl (%esi),(%edi)
addl %eax,%esi
addl %edx,%edi
decl %ebp
jnz LLRowLoop
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
|
paritytech/polkaports | 5,538 | apps/quake/r_aclipa.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// r_aliasa.s
// x86 assembly-language Alias model transform and project code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
.data
Ltemp0: .long 0
Ltemp1: .long 0
.text
#define pfv0 8+4
#define pfv1 8+8
#define out 8+12
.globl C(R_Alias_clip_bottom)
C(R_Alias_clip_bottom):
pushl %esi
pushl %edi
movl pfv0(%esp),%esi
movl pfv1(%esp),%edi
movl C(r_oldrefdef)+rd_aliasvrectbottom,%eax
LDoForwardOrBackward:
movl fv_v+4(%esi),%edx
movl fv_v+4(%edi),%ecx
cmpl %ecx,%edx
jl LDoForward
movl fv_v+4(%esi),%ecx
movl fv_v+4(%edi),%edx
movl pfv0(%esp),%edi
movl pfv1(%esp),%esi
LDoForward:
subl %edx,%ecx
subl %edx,%eax
movl %ecx,Ltemp1
movl %eax,Ltemp0
fildl Ltemp1
fildl Ltemp0
movl out(%esp),%edx
movl $2,%eax
fdivp %st(0),%st(1) // scale
LDo3Forward:
fildl fv_v+0(%esi) // fv0v0 | scale
fildl fv_v+0(%edi) // fv1v0 | fv0v0 | scale
fildl fv_v+4(%esi) // fv0v1 | fv1v0 | fv0v0 | scale
fildl fv_v+4(%edi) // fv1v1 | fv0v1 | fv1v0 | fv0v0 | scale
fildl fv_v+8(%esi) // fv0v2 | fv1v1 | fv0v1 | fv1v0 | fv0v0 | scale
fildl fv_v+8(%edi) // fv1v2 | fv0v2 | fv1v1 | fv0v1 | fv1v0 | fv0v0 |
// scale
fxch %st(5) // fv0v0 | fv0v2 | fv1v1 | fv0v1 | fv1v0 | fv1v2 |
// scale
fsubr %st(0),%st(4) // fv0v0 | fv0v2 | fv1v1 | fv0v1 | fv1v0-fv0v0 |
// fv1v2 | scale
fxch %st(3) // fv0v1 | fv0v2 | fv1v1 | fv0v0 | fv1v0-fv0v0 |
// fv1v2 | scale
fsubr %st(0),%st(2) // fv0v1 | fv0v2 | fv1v1-fv0v1 | fv0v0 |
// fv1v0-fv0v0 | fv1v2 | scale
fxch %st(1) // fv0v2 | fv0v1 | fv1v1-fv0v1 | fv0v0 |
// fv1v0-fv0v0 | fv1v2 | scale
fsubr %st(0),%st(5) // fv0v2 | fv0v1 | fv1v1-fv0v1 | fv0v0 |
// fv1v0-fv0v0 | fv1v2-fv0v2 | scale
fxch %st(6) // scale | fv0v1 | fv1v1-fv0v1 | fv0v0 |
// fv1v0-fv0v0 | fv1v2-fv0v2 | fv0v2
fmul %st(0),%st(4) // scale | fv0v1 | fv1v1-fv0v1 | fv0v0 |
// (fv1v0-fv0v0)*scale | fv1v2-fv0v2 | fv0v2
addl $12,%edi
fmul %st(0),%st(2) // scale | fv0v1 | (fv1v1-fv0v1)*scale | fv0v0 |
// (fv1v0-fv0v0)*scale | fv1v2-fv0v2 | fv0v2
addl $12,%esi
addl $12,%edx
fmul %st(0),%st(5) // scale | fv0v1 | (fv1v1-fv0v1)*scale | fv0v0 |
// (fv1v0-fv0v0)*scale | (fv1v2-fv0v2)*scale |
// fv0v2
fxch %st(3) // fv0v0 | fv0v1 | (fv1v1-fv0v1)*scale | scale |
// (fv1v0-fv0v0)*scale | (fv1v2-fv0v2)*scale |
// fv0v2
faddp %st(0),%st(4) // fv0v1 | (fv1v1-fv0v1)*scale | scale |
// fv0v0+(fv1v0-fv0v0)*scale |
// (fv1v2-fv0v2)*scale | fv0v2
faddp %st(0),%st(1) // fv0v1+(fv1v1-fv0v1)*scale | scale |
// fv0v0+(fv1v0-fv0v0)*scale |
// (fv1v2-fv0v2)*scale | fv0v2
fxch %st(4) // fv0v2 | scale | fv0v0+(fv1v0-fv0v0)*scale |
// (fv1v2-fv0v2)*scale | fv0v1+(fv1v1-fv0v1)*scale
faddp %st(0),%st(3) // scale | fv0v0+(fv1v0-fv0v0)*scale |
// fv0v2+(fv1v2-fv0v2)*scale |
// fv0v1+(fv1v1-fv0v1)*scale
fxch %st(1) // fv0v0+(fv1v0-fv0v0)*scale | scale |
// fv0v2+(fv1v2-fv0v2)*scale |
// fv0v1+(fv1v1-fv0v1)*scale
fadds float_point5
fxch %st(3) // fv0v1+(fv1v1-fv0v1)*scale | scale |
// fv0v2+(fv1v2-fv0v2)*scale |
// fv0v0+(fv1v0-fv0v0)*scale
fadds float_point5
fxch %st(2) // fv0v2+(fv1v2-fv0v2)*scale | scale |
// fv0v1+(fv1v1-fv0v1)*scale |
// fv0v0+(fv1v0-fv0v0)*scale
fadds float_point5
fxch %st(3) // fv0v0+(fv1v0-fv0v0)*scale | scale |
// fv0v1+(fv1v1-fv0v1)*scale |
// fv0v2+(fv1v2-fv0v2)*scale
fistpl fv_v+0-12(%edx) // scale | fv0v1+(fv1v1-fv0v1)*scale |
// fv0v2+(fv1v2-fv0v2)*scale
fxch %st(1) // fv0v1+(fv1v1-fv0v1)*scale | scale |
// fv0v2+(fv1v2-fv0v2)*scale | scale
fistpl fv_v+4-12(%edx) // scale | fv0v2+(fv1v2-fv0v2)*scale
fxch %st(1) // fv0v2+(fv1v2-fv0v2)*sc | scale
fistpl fv_v+8-12(%edx) // scale
decl %eax
jnz LDo3Forward
fstp %st(0)
popl %edi
popl %esi
ret
.globl C(R_Alias_clip_top)
C(R_Alias_clip_top):
pushl %esi
pushl %edi
movl pfv0(%esp),%esi
movl pfv1(%esp),%edi
movl C(r_oldrefdef)+rd_aliasvrect+4,%eax
jmp LDoForwardOrBackward
.globl C(R_Alias_clip_right)
C(R_Alias_clip_right):
pushl %esi
pushl %edi
movl pfv0(%esp),%esi
movl pfv1(%esp),%edi
movl C(r_oldrefdef)+rd_aliasvrectright,%eax
LRightLeftEntry:
movl fv_v+4(%esi),%edx
movl fv_v+4(%edi),%ecx
cmpl %ecx,%edx
movl fv_v+0(%esi),%edx
movl fv_v+0(%edi),%ecx
jl LDoForward2
movl fv_v+0(%esi),%ecx
movl fv_v+0(%edi),%edx
movl pfv0(%esp),%edi
movl pfv1(%esp),%esi
LDoForward2:
jmp LDoForward
.globl C(R_Alias_clip_left)
C(R_Alias_clip_left):
pushl %esi
pushl %edi
movl pfv0(%esp),%esi
movl pfv1(%esp),%edi
movl C(r_oldrefdef)+rd_aliasvrect+0,%eax
jmp LRightLeftEntry
#endif // id386
|
paritytech/polkaports | 10,168 | apps/quake/d_parta.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_parta.s
// x86 assembly-language 8-bpp particle-drawing code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "d_ifacea.h"
#include "asm_draw.h"
#if id386
//----------------------------------------------------------------------
// 8-bpp particle drawing code.
//----------------------------------------------------------------------
//FIXME: comments, full optimization
//----------------------------------------------------------------------
// 8-bpp particle queueing code.
//----------------------------------------------------------------------
.text
#define P 12+4
.align 4
.globl C(D_DrawParticle)
C(D_DrawParticle):
pushl %ebp // preserve caller's stack frame
pushl %edi // preserve register variables
pushl %ebx
movl P(%esp),%edi
// FIXME: better FP overlap in general here
// transform point
// VectorSubtract (p->org, r_origin, local);
flds C(r_origin)
fsubrs pt_org(%edi)
flds pt_org+4(%edi)
fsubs C(r_origin)+4
flds pt_org+8(%edi)
fsubs C(r_origin)+8
fxch %st(2) // local[0] | local[1] | local[2]
// transformed[2] = DotProduct(local, r_ppn);
flds C(r_ppn) // r_ppn[0] | local[0] | local[1] | local[2]
fmul %st(1),%st(0) // dot0 | local[0] | local[1] | local[2]
flds C(r_ppn)+4 // r_ppn[1] | dot0 | local[0] | local[1] | local[2]
fmul %st(3),%st(0) // dot1 | dot0 | local[0] | local[1] | local[2]
flds C(r_ppn)+8 // r_ppn[2] | dot1 | dot0 | local[0] |
// local[1] | local[2]
fmul %st(5),%st(0) // dot2 | dot1 | dot0 | local[0] | local[1] | local[2]
fxch %st(2) // dot0 | dot1 | dot2 | local[0] | local[1] | local[2]
faddp %st(0),%st(1) // dot0 + dot1 | dot2 | local[0] | local[1] |
// local[2]
faddp %st(0),%st(1) // z | local[0] | local[1] | local[2]
fld %st(0) // z | z | local[0] | local[1] |
// local[2]
fdivrs float_1 // 1/z | z | local[0] | local[1] | local[2]
fxch %st(1) // z | 1/z | local[0] | local[1] | local[2]
// if (transformed[2] < PARTICLE_Z_CLIP)
// return;
fcomps float_particle_z_clip // 1/z | local[0] | local[1] | local[2]
fxch %st(3) // local[2] | local[0] | local[1] | 1/z
flds C(r_pup) // r_pup[0] | local[2] | local[0] | local[1] | 1/z
fmul %st(2),%st(0) // dot0 | local[2] | local[0] | local[1] | 1/z
flds C(r_pup)+4 // r_pup[1] | dot0 | local[2] | local[0] |
// local[1] | 1/z
fnstsw %ax
testb $1,%ah
jnz LPop6AndDone
// transformed[1] = DotProduct(local, r_pup);
fmul %st(4),%st(0) // dot1 | dot0 | local[2] | local[0] | local[1] | 1/z
flds C(r_pup)+8 // r_pup[2] | dot1 | dot0 | local[2] |
// local[0] | local[1] | 1/z
fmul %st(3),%st(0) // dot2 | dot1 | dot0 | local[2] | local[0] |
// local[1] | 1/z
fxch %st(2) // dot0 | dot1 | dot2 | local[2] | local[0] |
// local[1] | 1/z
faddp %st(0),%st(1) // dot0 + dot1 | dot2 | local[2] | local[0] |
// local[1] | 1/z
faddp %st(0),%st(1) // y | local[2] | local[0] | local[1] | 1/z
fxch %st(3) // local[1] | local[2] | local[0] | y | 1/z
// transformed[0] = DotProduct(local, r_pright);
fmuls C(r_pright)+4 // dot1 | local[2] | local[0] | y | 1/z
fxch %st(2) // local[0] | local[2] | dot1 | y | 1/z
fmuls C(r_pright) // dot0 | local[2] | dot1 | y | 1/z
fxch %st(1) // local[2] | dot0 | dot1 | y | 1/z
fmuls C(r_pright)+8 // dot2 | dot0 | dot1 | y | 1/z
fxch %st(2) // dot1 | dot0 | dot2 | y | 1/z
faddp %st(0),%st(1) // dot1 + dot0 | dot2 | y | 1/z
faddp %st(0),%st(1) // x | y | 1/z
fxch %st(1) // y | x | 1/z
// project the point
fmul %st(2),%st(0) // y/z | x | 1/z
fxch %st(1) // x | y/z | 1/z
fmul %st(2),%st(0) // x/z | y/z | 1/z
fxch %st(1) // y/z | x/z | 1/z
fsubrs C(ycenter) // v | x/z | 1/z
fxch %st(1) // x/z | v | 1/z
fadds C(xcenter) // u | v | 1/z
// FIXME: preadjust xcenter and ycenter
fxch %st(1) // v | u | 1/z
fadds float_point5 // v | u | 1/z
fxch %st(1) // u | v | 1/z
fadds float_point5 // u | v | 1/z
fxch %st(2) // 1/z | v | u
fmuls DP_32768 // 1/z * 0x8000 | v | u
fxch %st(2) // u | v | 1/z * 0x8000
// FIXME: use Terje's fp->int trick here?
// FIXME: check we're getting proper rounding here
fistpl DP_u // v | 1/z * 0x8000
fistpl DP_v // 1/z * 0x8000
movl DP_u,%eax
movl DP_v,%edx
// if ((v > d_vrectbottom_particle) ||
// (u > d_vrectright_particle) ||
// (v < d_vrecty) ||
// (u < d_vrectx))
// {
// continue;
// }
movl C(d_vrectbottom_particle),%ebx
movl C(d_vrectright_particle),%ecx
cmpl %ebx,%edx
jg LPop1AndDone
cmpl %ecx,%eax
jg LPop1AndDone
movl C(d_vrecty),%ebx
movl C(d_vrectx),%ecx
cmpl %ebx,%edx
jl LPop1AndDone
cmpl %ecx,%eax
jl LPop1AndDone
fildl pt_color(%edi) // color | 1/z * 0x8000
// FIXME: use Terje's fast fp->int trick?
fistpl DP_Color // 1/z * 0x8000
movl C(d_viewbuffer),%ebx
addl %eax,%ebx
movl C(d_scantable)(,%edx,4),%edi // point to the pixel
imull C(d_zrowbytes),%edx // point to the z pixel
leal (%edx,%eax,2),%edx
movl C(d_pzbuffer),%eax
fistpl izi
addl %ebx,%edi
addl %eax,%edx
// pix = izi >> d_pix_shift;
movl izi,%eax
movl C(d_pix_shift),%ecx
shrl %cl,%eax
movl izi,%ebp
// if (pix < d_pix_min)
// pix = d_pix_min;
// else if (pix > d_pix_max)
// pix = d_pix_max;
movl C(d_pix_min),%ebx
movl C(d_pix_max),%ecx
cmpl %ebx,%eax
jnl LTestPixMax
movl %ebx,%eax
jmp LTestDone
LTestPixMax:
cmpl %ecx,%eax
jng LTestDone
movl %ecx,%eax
LTestDone:
movb DP_Color,%ch
movl C(d_y_aspect_shift),%ebx
testl %ebx,%ebx
jnz LDefault
cmpl $4,%eax
ja LDefault
jmp DP_EntryTable-4(,%eax,4)
// 1x1
.globl DP_1x1
DP_1x1:
cmpw %bp,(%edx) // just one pixel to do
jg LDone
movw %bp,(%edx)
movb %ch,(%edi)
jmp LDone
// 2x2
.globl DP_2x2
DP_2x2:
pushl %esi
movl C(screenwidth),%ebx
movl C(d_zrowbytes),%esi
cmpw %bp,(%edx)
jg L2x2_1
movw %bp,(%edx)
movb %ch,(%edi)
L2x2_1:
cmpw %bp,2(%edx)
jg L2x2_2
movw %bp,2(%edx)
movb %ch,1(%edi)
L2x2_2:
cmpw %bp,(%edx,%esi,1)
jg L2x2_3
movw %bp,(%edx,%esi,1)
movb %ch,(%edi,%ebx,1)
L2x2_3:
cmpw %bp,2(%edx,%esi,1)
jg L2x2_4
movw %bp,2(%edx,%esi,1)
movb %ch,1(%edi,%ebx,1)
L2x2_4:
popl %esi
jmp LDone
// 3x3
.globl DP_3x3
DP_3x3:
pushl %esi
movl C(screenwidth),%ebx
movl C(d_zrowbytes),%esi
cmpw %bp,(%edx)
jg L3x3_1
movw %bp,(%edx)
movb %ch,(%edi)
L3x3_1:
cmpw %bp,2(%edx)
jg L3x3_2
movw %bp,2(%edx)
movb %ch,1(%edi)
L3x3_2:
cmpw %bp,4(%edx)
jg L3x3_3
movw %bp,4(%edx)
movb %ch,2(%edi)
L3x3_3:
cmpw %bp,(%edx,%esi,1)
jg L3x3_4
movw %bp,(%edx,%esi,1)
movb %ch,(%edi,%ebx,1)
L3x3_4:
cmpw %bp,2(%edx,%esi,1)
jg L3x3_5
movw %bp,2(%edx,%esi,1)
movb %ch,1(%edi,%ebx,1)
L3x3_5:
cmpw %bp,4(%edx,%esi,1)
jg L3x3_6
movw %bp,4(%edx,%esi,1)
movb %ch,2(%edi,%ebx,1)
L3x3_6:
cmpw %bp,(%edx,%esi,2)
jg L3x3_7
movw %bp,(%edx,%esi,2)
movb %ch,(%edi,%ebx,2)
L3x3_7:
cmpw %bp,2(%edx,%esi,2)
jg L3x3_8
movw %bp,2(%edx,%esi,2)
movb %ch,1(%edi,%ebx,2)
L3x3_8:
cmpw %bp,4(%edx,%esi,2)
jg L3x3_9
movw %bp,4(%edx,%esi,2)
movb %ch,2(%edi,%ebx,2)
L3x3_9:
popl %esi
jmp LDone
// 4x4
.globl DP_4x4
DP_4x4:
pushl %esi
movl C(screenwidth),%ebx
movl C(d_zrowbytes),%esi
cmpw %bp,(%edx)
jg L4x4_1
movw %bp,(%edx)
movb %ch,(%edi)
L4x4_1:
cmpw %bp,2(%edx)
jg L4x4_2
movw %bp,2(%edx)
movb %ch,1(%edi)
L4x4_2:
cmpw %bp,4(%edx)
jg L4x4_3
movw %bp,4(%edx)
movb %ch,2(%edi)
L4x4_3:
cmpw %bp,6(%edx)
jg L4x4_4
movw %bp,6(%edx)
movb %ch,3(%edi)
L4x4_4:
cmpw %bp,(%edx,%esi,1)
jg L4x4_5
movw %bp,(%edx,%esi,1)
movb %ch,(%edi,%ebx,1)
L4x4_5:
cmpw %bp,2(%edx,%esi,1)
jg L4x4_6
movw %bp,2(%edx,%esi,1)
movb %ch,1(%edi,%ebx,1)
L4x4_6:
cmpw %bp,4(%edx,%esi,1)
jg L4x4_7
movw %bp,4(%edx,%esi,1)
movb %ch,2(%edi,%ebx,1)
L4x4_7:
cmpw %bp,6(%edx,%esi,1)
jg L4x4_8
movw %bp,6(%edx,%esi,1)
movb %ch,3(%edi,%ebx,1)
L4x4_8:
leal (%edx,%esi,2),%edx
leal (%edi,%ebx,2),%edi
cmpw %bp,(%edx)
jg L4x4_9
movw %bp,(%edx)
movb %ch,(%edi)
L4x4_9:
cmpw %bp,2(%edx)
jg L4x4_10
movw %bp,2(%edx)
movb %ch,1(%edi)
L4x4_10:
cmpw %bp,4(%edx)
jg L4x4_11
movw %bp,4(%edx)
movb %ch,2(%edi)
L4x4_11:
cmpw %bp,6(%edx)
jg L4x4_12
movw %bp,6(%edx)
movb %ch,3(%edi)
L4x4_12:
cmpw %bp,(%edx,%esi,1)
jg L4x4_13
movw %bp,(%edx,%esi,1)
movb %ch,(%edi,%ebx,1)
L4x4_13:
cmpw %bp,2(%edx,%esi,1)
jg L4x4_14
movw %bp,2(%edx,%esi,1)
movb %ch,1(%edi,%ebx,1)
L4x4_14:
cmpw %bp,4(%edx,%esi,1)
jg L4x4_15
movw %bp,4(%edx,%esi,1)
movb %ch,2(%edi,%ebx,1)
L4x4_15:
cmpw %bp,6(%edx,%esi,1)
jg L4x4_16
movw %bp,6(%edx,%esi,1)
movb %ch,3(%edi,%ebx,1)
L4x4_16:
popl %esi
jmp LDone
// default case, handling any size particle
LDefault:
// count = pix << d_y_aspect_shift;
movl %eax,%ebx
movl %eax,DP_Pix
movb C(d_y_aspect_shift),%cl
shll %cl,%ebx
// for ( ; count ; count--, pz += d_zwidth, pdest += screenwidth)
// {
// for (i=0 ; i<pix ; i++)
// {
// if (pz[i] <= izi)
// {
// pz[i] = izi;
// pdest[i] = color;
// }
// }
// }
LGenRowLoop:
movl DP_Pix,%eax
LGenColLoop:
cmpw %bp,-2(%edx,%eax,2)
jg LGSkip
movw %bp,-2(%edx,%eax,2)
movb %ch,-1(%edi,%eax,1)
LGSkip:
decl %eax // --pix
jnz LGenColLoop
addl C(d_zrowbytes),%edx
addl C(screenwidth),%edi
decl %ebx // --count
jnz LGenRowLoop
LDone:
popl %ebx // restore register variables
popl %edi
popl %ebp // restore the caller's stack frame
ret
LPop6AndDone:
fstp %st(0)
fstp %st(0)
fstp %st(0)
fstp %st(0)
fstp %st(0)
LPop1AndDone:
fstp %st(0)
jmp LDone
#endif // id386
|
paritytech/polkaports | 18,978 | apps/quake/r_rasta.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// r_rasta.s
// x86 assembly-language edge clipping and emission code
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
// !!! if these are changed, they must be changed in r_draw.c too !!!
#define FULLY_CLIPPED_CACHED 0x80000000
#define FRAMECOUNT_MASK 0x7FFFFFFF
.data
Ld0: .single 0.0
Ld1: .single 0.0
Lstack: .long 0
Lfp_near_clip: .single NEAR_CLIP
Lceilv0: .long 0
Lv: .long 0
Lu0: .long 0
Lv0: .long 0
Lzi0: .long 0
.text
//----------------------------------------------------------------------
// edge clipping code
//----------------------------------------------------------------------
#define pv0 4+12
#define pv1 8+12
#define clip 12+12
.align 4
.globl C(R_ClipEdge)
C(R_ClipEdge):
pushl %esi // preserve register variables
pushl %edi
pushl %ebx
movl %esp,Lstack // for clearing the stack later
// float d0, d1, f;
// mvertex_t clipvert;
movl clip(%esp),%ebx
movl pv0(%esp),%esi
movl pv1(%esp),%edx
// if (clip)
// {
testl %ebx,%ebx
jz Lemit
// do
// {
Lcliploop:
// d0 = DotProduct (pv0->position, clip->normal) - clip->dist;
// d1 = DotProduct (pv1->position, clip->normal) - clip->dist;
flds mv_position+0(%esi)
fmuls cp_normal+0(%ebx)
flds mv_position+4(%esi)
fmuls cp_normal+4(%ebx)
flds mv_position+8(%esi)
fmuls cp_normal+8(%ebx)
fxch %st(1)
faddp %st(0),%st(2) // d0mul2 | d0add0
flds mv_position+0(%edx)
fmuls cp_normal+0(%ebx)
flds mv_position+4(%edx)
fmuls cp_normal+4(%ebx)
flds mv_position+8(%edx)
fmuls cp_normal+8(%ebx)
fxch %st(1)
faddp %st(0),%st(2) // d1mul2 | d1add0 | d0mul2 | d0add0
fxch %st(3) // d0add0 | d1add0 | d0mul2 | d1mul2
faddp %st(0),%st(2) // d1add0 | dot0 | d1mul2
faddp %st(0),%st(2) // dot0 | dot1
fsubs cp_dist(%ebx) // d0 | dot1
fxch %st(1) // dot1 | d0
fsubs cp_dist(%ebx) // d1 | d0
fxch %st(1)
fstps Ld0
fstps Ld1
// if (d0 >= 0)
// {
movl Ld0,%eax
movl Ld1,%ecx
orl %eax,%ecx
js Lp2
// both points are unclipped
Lcontinue:
//
// R_ClipEdge (&clipvert, pv1, clip->next);
// return;
// }
// } while ((clip = clip->next) != NULL);
movl cp_next(%ebx),%ebx
testl %ebx,%ebx
jnz Lcliploop
// }
//// add the edge
// R_EmitEdge (pv0, pv1);
Lemit:
//
// set integer rounding to ceil mode, set to single precision
//
// FIXME: do away with by manually extracting integers from floats?
// FIXME: set less often
fldcw ceil_cw
// edge_t *edge, *pcheck;
// int u_check;
// float u, u_step;
// vec3_t local, transformed;
// float *world;
// int v, v2, ceilv0;
// float scale, lzi0, u0, v0;
// int side;
// if (r_lastvertvalid)
// {
cmpl $0,C(r_lastvertvalid)
jz LCalcFirst
// u0 = r_u1;
// v0 = r_v1;
// lzi0 = r_lzi1;
// ceilv0 = r_ceilv1;
movl C(r_lzi1),%eax
movl C(r_u1),%ecx
movl %eax,Lzi0
movl %ecx,Lu0
movl C(r_v1),%ecx
movl C(r_ceilv1),%eax
movl %ecx,Lv0
movl %eax,Lceilv0
jmp LCalcSecond
// }
LCalcFirst:
// else
// {
// world = &pv0->position[0];
call LTransformAndProject // v0 | lzi0 | u0
fsts Lv0
fxch %st(2) // u0 | lzi0 | v0
fstps Lu0 // lzi0 | v0
fstps Lzi0 // v0
// ceilv0 = (int)(v0 - 2000) + 2000; // ceil(v0);
fistpl Lceilv0
// }
LCalcSecond:
// world = &pv1->position[0];
movl %edx,%esi
call LTransformAndProject // v1 | lzi1 | u1
flds Lu0 // u0 | v1 | lzi1 | u1
fxch %st(3) // u1 | v1 | lzi1 | u0
flds Lzi0 // lzi0 | u1 | v1 | lzi1 | u0
fxch %st(3) // lzi1 | u1 | v1 | lzi0 | u0
flds Lv0 // v0 | lzi1 | u1 | v1 | lzi0 | u0
fxch %st(3) // v1 | lzi1 | u1 | v0 | lzi0 | u0
// r_ceilv1 = (int)(r_v1 - 2000) + 2000; // ceil(r_v1);
fistl C(r_ceilv1)
fldcw single_cw // put back normal floating-point state
fsts C(r_v1)
fxch %st(4) // lzi0 | lzi1 | u1 | v0 | v1 | u0
// if (r_lzi1 > lzi0)
// lzi0 = r_lzi1;
fcom %st(1)
fnstsw %ax
testb $1,%ah
jz LP0
fstp %st(0)
fld %st(0)
LP0:
fxch %st(1) // lzi1 | lzi0 | u1 | v0 | v1 | u0
fstps C(r_lzi1) // lzi0 | u1 | v0 | v1 | u0
fxch %st(1)
fsts C(r_u1)
fxch %st(1)
// if (lzi0 > r_nearzi) // for mipmap finding
// r_nearzi = lzi0;
fcoms C(r_nearzi)
fnstsw %ax
testb $0x45,%ah
jnz LP1
fsts C(r_nearzi)
LP1:
// // for right edges, all we want is the effect on 1/z
// if (r_nearzionly)
// return;
movl C(r_nearzionly),%eax
testl %eax,%eax
jz LP2
LPop5AndDone:
movl C(cacheoffset),%eax
movl C(r_framecount),%edx
cmpl $0x7FFFFFFF,%eax
jz LDoPop
andl $(FRAMECOUNT_MASK),%edx
orl $(FULLY_CLIPPED_CACHED),%edx
movl %edx,C(cacheoffset)
LDoPop:
fstp %st(0) // u1 | v0 | v1 | u0
fstp %st(0) // v0 | v1 | u0
fstp %st(0) // v1 | u0
fstp %st(0) // u0
fstp %st(0)
jmp Ldone
LP2:
// // create the edge
// if (ceilv0 == r_ceilv1)
// return; // horizontal edge
movl Lceilv0,%ebx
movl C(edge_p),%edi
movl C(r_ceilv1),%ecx
movl %edi,%edx
movl C(r_pedge),%esi
addl $(et_size),%edx
cmpl %ecx,%ebx
jz LPop5AndDone
movl C(r_pedge),%eax
movl %eax,et_owner(%edi)
// side = ceilv0 > r_ceilv1;
//
// edge->nearzi = lzi0;
fstps et_nearzi(%edi) // u1 | v0 | v1 | u0
// if (side == 1)
// {
jc LSide0
LSide1:
// // leading edge (go from p2 to p1)
// u_step = ((u0 - r_u1) / (v0 - r_v1));
fsubrp %st(0),%st(3) // v0 | v1 | u0-u1
fsub %st(1),%st(0) // v0-v1 | v1 | u0-u1
fdivrp %st(0),%st(2) // v1 | ustep
// r_emitted = 1;
movl $1,C(r_emitted)
// edge = edge_p++;
movl %edx,C(edge_p)
// pretouch next edge
movl (%edx),%eax
// v2 = ceilv0 - 1;
// v = r_ceilv1;
movl %ecx,%eax
leal -1(%ebx),%ecx
movl %eax,%ebx
// edge->surfs[0] = 0;
// edge->surfs[1] = surface_p - surfaces;
movl C(surface_p),%eax
movl C(surfaces),%esi
subl %edx,%edx
subl %esi,%eax
shrl $(SURF_T_SHIFT),%eax
movl %edx,et_surfs(%edi)
movl %eax,et_surfs+2(%edi)
subl %esi,%esi
// u = r_u1 + ((float)v - r_v1) * u_step;
movl %ebx,Lv
fildl Lv // v | v1 | ustep
fsubp %st(0),%st(1) // v-v1 | ustep
fmul %st(1),%st(0) // (v-v1)*ustep | ustep
fadds C(r_u1) // u | ustep
jmp LSideDone
// }
LSide0:
// else
// {
// // trailing edge (go from p1 to p2)
// u_step = ((r_u1 - u0) / (r_v1 - v0));
fsub %st(3),%st(0) // u1-u0 | v0 | v1 | u0
fxch %st(2) // v1 | v0 | u1-u0 | u0
fsub %st(1),%st(0) // v1-v0 | v0 | u1-u0 | u0
fdivrp %st(0),%st(2) // v0 | ustep | u0
// r_emitted = 1;
movl $1,C(r_emitted)
// edge = edge_p++;
movl %edx,C(edge_p)
// pretouch next edge
movl (%edx),%eax
// v = ceilv0;
// v2 = r_ceilv1 - 1;
decl %ecx
// edge->surfs[0] = surface_p - surfaces;
// edge->surfs[1] = 0;
movl C(surface_p),%eax
movl C(surfaces),%esi
subl %edx,%edx
subl %esi,%eax
shrl $(SURF_T_SHIFT),%eax
movl %edx,et_surfs+2(%edi)
movl %eax,et_surfs(%edi)
movl $1,%esi
// u = u0 + ((float)v - v0) * u_step;
movl %ebx,Lv
fildl Lv // v | v0 | ustep | u0
fsubp %st(0),%st(1) // v-v0 | ustep | u0
fmul %st(1),%st(0) // (v-v0)*ustep | ustep | u0
faddp %st(0),%st(2) // ustep | u
fxch %st(1) // u | ustep
// }
LSideDone:
// edge->u_step = u_step*0x100000;
// edge->u = u*0x100000 + 0xFFFFF;
fmuls fp_1m // u*0x100000 | ustep
fxch %st(1) // ustep | u*0x100000
fmuls fp_1m // ustep*0x100000 | u*0x100000
fxch %st(1) // u*0x100000 | ustep*0x100000
fadds fp_1m_minus_1 // u*0x100000 + 0xFFFFF | ustep*0x100000
fxch %st(1) // ustep*0x100000 | u*0x100000 + 0xFFFFF
fistpl et_u_step(%edi) // u*0x100000 + 0xFFFFF
fistpl et_u(%edi)
// // we need to do this to avoid stepping off the edges if a very nearly
// // horizontal edge is less than epsilon above a scan, and numeric error
// // causes it to incorrectly extend to the scan, and the extension of the
// // line goes off the edge of the screen
// // FIXME: is this actually needed?
// if (edge->u < r_oldrefdef.vrect_x_adj_shift20)
// edge->u = r_oldrefdef.vrect_x_adj_shift20;
// if (edge->u > r_oldrefdef.vrectright_adj_shift20)
// edge->u = r_oldrefdef.vrectright_adj_shift20;
movl et_u(%edi),%eax
movl C(r_oldrefdef)+rd_vrect_x_adj_shift20,%edx
cmpl %edx,%eax
jl LP4
movl C(r_oldrefdef)+rd_vrectright_adj_shift20,%edx
cmpl %edx,%eax
jng LP5
LP4:
movl %edx,et_u(%edi)
movl %edx,%eax
LP5:
// // sort the edge in normally
// u_check = edge->u;
//
// if (edge->surfs[0])
// u_check++; // sort trailers after leaders
addl %esi,%eax
// if (!newedges[v] || newedges[v]->u >= u_check)
// {
movl C(newedges)(,%ebx,4),%esi
testl %esi,%esi
jz LDoFirst
cmpl %eax,et_u(%esi)
jl LNotFirst
LDoFirst:
// edge->next = newedges[v];
// newedges[v] = edge;
movl %esi,et_next(%edi)
movl %edi,C(newedges)(,%ebx,4)
jmp LSetRemove
// }
LNotFirst:
// else
// {
// pcheck = newedges[v];
//
// while (pcheck->next && pcheck->next->u < u_check)
// pcheck = pcheck->next;
LFindInsertLoop:
movl %esi,%edx
movl et_next(%esi),%esi
testl %esi,%esi
jz LInsertFound
cmpl %eax,et_u(%esi)
jl LFindInsertLoop
LInsertFound:
// edge->next = pcheck->next;
// pcheck->next = edge;
movl %esi,et_next(%edi)
movl %edi,et_next(%edx)
// }
LSetRemove:
// edge->nextremove = removeedges[v2];
// removeedges[v2] = edge;
movl C(removeedges)(,%ecx,4),%eax
movl %edi,C(removeedges)(,%ecx,4)
movl %eax,et_nextremove(%edi)
Ldone:
movl Lstack,%esp // clear temporary variables from stack
popl %ebx // restore register variables
popl %edi
popl %esi
ret
// at least one point is clipped
Lp2:
testl %eax,%eax
jns Lp1
// else
// {
// // point 0 is clipped
// if (d1 < 0)
// {
movl Ld1,%eax
testl %eax,%eax
jns Lp3
// // both points are clipped
// // we do cache fully clipped edges
// if (!leftclipped)
movl C(r_leftclipped),%eax
movl C(r_pedge),%ecx
testl %eax,%eax
jnz Ldone
// r_pedge->framecount = r_framecount;
movl C(r_framecount),%eax
andl $(FRAMECOUNT_MASK),%eax
orl $(FULLY_CLIPPED_CACHED),%eax
movl %eax,C(cacheoffset)
// return;
jmp Ldone
// }
Lp1:
// // point 0 is unclipped
// if (d1 >= 0)
// {
// // both points are unclipped
// continue;
// // only point 1 is clipped
// f = d0 / (d0 - d1);
flds Ld0
flds Ld1
fsubr %st(1),%st(0)
// // we don't cache partially clipped edges
movl $0x7FFFFFFF,C(cacheoffset)
fdivrp %st(0),%st(1)
subl $(mv_size),%esp // allocate space for clipvert
// clipvert.position[0] = pv0->position[0] +
// f * (pv1->position[0] - pv0->position[0]);
// clipvert.position[1] = pv0->position[1] +
// f * (pv1->position[1] - pv0->position[1]);
// clipvert.position[2] = pv0->position[2] +
// f * (pv1->position[2] - pv0->position[2]);
flds mv_position+8(%edx)
fsubs mv_position+8(%esi)
flds mv_position+4(%edx)
fsubs mv_position+4(%esi)
flds mv_position+0(%edx)
fsubs mv_position+0(%esi) // 0 | 1 | 2
// replace pv1 with the clip point
movl %esp,%edx
movl cp_leftedge(%ebx),%eax
testb %al,%al
fmul %st(3),%st(0)
fxch %st(1) // 1 | 0 | 2
fmul %st(3),%st(0)
fxch %st(2) // 2 | 0 | 1
fmulp %st(0),%st(3) // 0 | 1 | 2
fadds mv_position+0(%esi)
fxch %st(1) // 1 | 0 | 2
fadds mv_position+4(%esi)
fxch %st(2) // 2 | 0 | 1
fadds mv_position+8(%esi)
fxch %st(1) // 0 | 2 | 1
fstps mv_position+0(%esp) // 2 | 1
fstps mv_position+8(%esp) // 1
fstps mv_position+4(%esp)
// if (clip->leftedge)
// {
jz Ltestright
// r_leftclipped = true;
// r_leftexit = clipvert;
movl $1,C(r_leftclipped)
movl mv_position+0(%esp),%eax
movl %eax,C(r_leftexit)+mv_position+0
movl mv_position+4(%esp),%eax
movl %eax,C(r_leftexit)+mv_position+4
movl mv_position+8(%esp),%eax
movl %eax,C(r_leftexit)+mv_position+8
jmp Lcontinue
// }
Ltestright:
// else if (clip->rightedge)
// {
testb %ah,%ah
jz Lcontinue
// r_rightclipped = true;
// r_rightexit = clipvert;
movl $1,C(r_rightclipped)
movl mv_position+0(%esp),%eax
movl %eax,C(r_rightexit)+mv_position+0
movl mv_position+4(%esp),%eax
movl %eax,C(r_rightexit)+mv_position+4
movl mv_position+8(%esp),%eax
movl %eax,C(r_rightexit)+mv_position+8
// }
//
// R_ClipEdge (pv0, &clipvert, clip->next);
// return;
// }
jmp Lcontinue
// }
Lp3:
// // only point 0 is clipped
// r_lastvertvalid = false;
movl $0,C(r_lastvertvalid)
// f = d0 / (d0 - d1);
flds Ld0
flds Ld1
fsubr %st(1),%st(0)
// // we don't cache partially clipped edges
movl $0x7FFFFFFF,C(cacheoffset)
fdivrp %st(0),%st(1)
subl $(mv_size),%esp // allocate space for clipvert
// clipvert.position[0] = pv0->position[0] +
// f * (pv1->position[0] - pv0->position[0]);
// clipvert.position[1] = pv0->position[1] +
// f * (pv1->position[1] - pv0->position[1]);
// clipvert.position[2] = pv0->position[2] +
// f * (pv1->position[2] - pv0->position[2]);
flds mv_position+8(%edx)
fsubs mv_position+8(%esi)
flds mv_position+4(%edx)
fsubs mv_position+4(%esi)
flds mv_position+0(%edx)
fsubs mv_position+0(%esi) // 0 | 1 | 2
movl cp_leftedge(%ebx),%eax
testb %al,%al
fmul %st(3),%st(0)
fxch %st(1) // 1 | 0 | 2
fmul %st(3),%st(0)
fxch %st(2) // 2 | 0 | 1
fmulp %st(0),%st(3) // 0 | 1 | 2
fadds mv_position+0(%esi)
fxch %st(1) // 1 | 0 | 2
fadds mv_position+4(%esi)
fxch %st(2) // 2 | 0 | 1
fadds mv_position+8(%esi)
fxch %st(1) // 0 | 2 | 1
fstps mv_position+0(%esp) // 2 | 1
fstps mv_position+8(%esp) // 1
fstps mv_position+4(%esp)
// replace pv0 with the clip point
movl %esp,%esi
// if (clip->leftedge)
// {
jz Ltestright2
// r_leftclipped = true;
// r_leftenter = clipvert;
movl $1,C(r_leftclipped)
movl mv_position+0(%esp),%eax
movl %eax,C(r_leftenter)+mv_position+0
movl mv_position+4(%esp),%eax
movl %eax,C(r_leftenter)+mv_position+4
movl mv_position+8(%esp),%eax
movl %eax,C(r_leftenter)+mv_position+8
jmp Lcontinue
// }
Ltestright2:
// else if (clip->rightedge)
// {
testb %ah,%ah
jz Lcontinue
// r_rightclipped = true;
// r_rightenter = clipvert;
movl $1,C(r_rightclipped)
movl mv_position+0(%esp),%eax
movl %eax,C(r_rightenter)+mv_position+0
movl mv_position+4(%esp),%eax
movl %eax,C(r_rightenter)+mv_position+4
movl mv_position+8(%esp),%eax
movl %eax,C(r_rightenter)+mv_position+8
// }
jmp Lcontinue
// %esi = vec3_t point to transform and project
// %edx preserved
LTransformAndProject:
// // transform and project
// VectorSubtract (world, modelorg, local);
flds mv_position+0(%esi)
fsubs C(modelorg)+0
flds mv_position+4(%esi)
fsubs C(modelorg)+4
flds mv_position+8(%esi)
fsubs C(modelorg)+8
fxch %st(2) // local[0] | local[1] | local[2]
// TransformVector (local, transformed);
//
// if (transformed[2] < NEAR_CLIP)
// transformed[2] = NEAR_CLIP;
//
// lzi0 = 1.0 / transformed[2];
fld %st(0) // local[0] | local[0] | local[1] | local[2]
fmuls C(vpn)+0 // zm0 | local[0] | local[1] | local[2]
fld %st(1) // local[0] | zm0 | local[0] | local[1] |
// local[2]
fmuls C(vright)+0 // xm0 | zm0 | local[0] | local[1] | local[2]
fxch %st(2) // local[0] | zm0 | xm0 | local[1] | local[2]
fmuls C(vup)+0 // ym0 | zm0 | xm0 | local[1] | local[2]
fld %st(3) // local[1] | ym0 | zm0 | xm0 | local[1] |
// local[2]
fmuls C(vpn)+4 // zm1 | ym0 | zm0 | xm0 | local[1] |
// local[2]
fld %st(4) // local[1] | zm1 | ym0 | zm0 | xm0 |
// local[1] | local[2]
fmuls C(vright)+4 // xm1 | zm1 | ym0 | zm0 | xm0 |
// local[1] | local[2]
fxch %st(5) // local[1] | zm1 | ym0 | zm0 | xm0 |
// xm1 | local[2]
fmuls C(vup)+4 // ym1 | zm1 | ym0 | zm0 | xm0 |
// xm1 | local[2]
fxch %st(1) // zm1 | ym1 | ym0 | zm0 | xm0 |
// xm1 | local[2]
faddp %st(0),%st(3) // ym1 | ym0 | zm2 | xm0 | xm1 | local[2]
fxch %st(3) // xm0 | ym0 | zm2 | ym1 | xm1 | local[2]
faddp %st(0),%st(4) // ym0 | zm2 | ym1 | xm2 | local[2]
faddp %st(0),%st(2) // zm2 | ym2 | xm2 | local[2]
fld %st(3) // local[2] | zm2 | ym2 | xm2 | local[2]
fmuls C(vpn)+8 // zm3 | zm2 | ym2 | xm2 | local[2]
fld %st(4) // local[2] | zm3 | zm2 | ym2 | xm2 | local[2]
fmuls C(vright)+8 // xm3 | zm3 | zm2 | ym2 | xm2 | local[2]
fxch %st(5) // local[2] | zm3 | zm2 | ym2 | xm2 | xm3
fmuls C(vup)+8 // ym3 | zm3 | zm2 | ym2 | xm2 | xm3
fxch %st(1) // zm3 | ym3 | zm2 | ym2 | xm2 | xm3
faddp %st(0),%st(2) // ym3 | zm4 | ym2 | xm2 | xm3
fxch %st(4) // xm3 | zm4 | ym2 | xm2 | ym3
faddp %st(0),%st(3) // zm4 | ym2 | xm4 | ym3
fxch %st(1) // ym2 | zm4 | xm4 | ym3
faddp %st(0),%st(3) // zm4 | xm4 | ym4
fcoms Lfp_near_clip
fnstsw %ax
testb $1,%ah
jz LNoClip
fstp %st(0)
flds Lfp_near_clip
LNoClip:
fdivrs float_1 // lzi0 | x | y
fxch %st(1) // x | lzi0 | y
// // FIXME: build x/yscale into transform?
// scale = xscale * lzi0;
// u0 = (xcenter + scale*transformed[0]);
flds C(xscale) // xscale | x | lzi0 | y
fmul %st(2),%st(0) // scale | x | lzi0 | y
fmulp %st(0),%st(1) // scale*x | lzi0 | y
fadds C(xcenter) // u0 | lzi0 | y
// if (u0 < r_oldrefdef.fvrectx_adj)
// u0 = r_oldrefdef.fvrectx_adj;
// if (u0 > r_oldrefdef.fvrectright_adj)
// u0 = r_oldrefdef.fvrectright_adj;
// FIXME: use integer compares of floats?
fcoms C(r_oldrefdef)+rd_fvrectx_adj
fnstsw %ax
testb $1,%ah
jz LClampP0
fstp %st(0)
flds C(r_oldrefdef)+rd_fvrectx_adj
LClampP0:
fcoms C(r_oldrefdef)+rd_fvrectright_adj
fnstsw %ax
testb $0x45,%ah
jnz LClampP1
fstp %st(0)
flds C(r_oldrefdef)+rd_fvrectright_adj
LClampP1:
fld %st(1) // lzi0 | u0 | lzi0 | y
// scale = yscale * lzi0;
// v0 = (ycenter - scale*transformed[1]);
fmuls C(yscale) // scale | u0 | lzi0 | y
fmulp %st(0),%st(3) // u0 | lzi0 | scale*y
fxch %st(2) // scale*y | lzi0 | u0
fsubrs C(ycenter) // v0 | lzi0 | u0
// if (v0 < r_oldrefdef.fvrecty_adj)
// v0 = r_oldrefdef.fvrecty_adj;
// if (v0 > r_oldrefdef.fvrectbottom_adj)
// v0 = r_oldrefdef.fvrectbottom_adj;
// FIXME: use integer compares of floats?
fcoms C(r_oldrefdef)+rd_fvrecty_adj
fnstsw %ax
testb $1,%ah
jz LClampP2
fstp %st(0)
flds C(r_oldrefdef)+rd_fvrecty_adj
LClampP2:
fcoms C(r_oldrefdef)+rd_fvrectbottom_adj
fnstsw %ax
testb $0x45,%ah
jnz LClampP3
fstp %st(0)
flds C(r_oldrefdef)+rd_fvrectbottom_adj
LClampP3:
ret
#endif // id386
|
paritytech/polkaports | 2,068 | apps/quake/d_scana.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_scana.s
// x86 assembly-language turbulent texture mapping code
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
.data
.text
//----------------------------------------------------------------------
// turbulent texture mapping code
//----------------------------------------------------------------------
.align 4
.globl C(D_DrawTurbulent8Span)
C(D_DrawTurbulent8Span):
pushl %ebp // preserve caller's stack frame pointer
pushl %esi // preserve register variables
pushl %edi
pushl %ebx
movl C(r_turb_s),%esi
movl C(r_turb_t),%ecx
movl C(r_turb_pdest),%edi
movl C(r_turb_spancount),%ebx
Llp:
movl %ecx,%eax
movl %esi,%edx
sarl $16,%eax
movl C(r_turb_turb),%ebp
sarl $16,%edx
andl $(CYCLE-1),%eax
andl $(CYCLE-1),%edx
movl (%ebp,%eax,4),%eax
movl (%ebp,%edx,4),%edx
addl %esi,%eax
sarl $16,%eax
addl %ecx,%edx
sarl $16,%edx
andl $(TURB_TEX_SIZE-1),%eax
andl $(TURB_TEX_SIZE-1),%edx
shll $6,%edx
movl C(r_turb_pbase),%ebp
addl %eax,%edx
incl %edi
addl C(r_turb_sstep),%esi
addl C(r_turb_tstep),%ecx
movb (%ebp,%edx,1),%dl
decl %ebx
movb %dl,-1(%edi)
jnz Llp
movl %edi,C(r_turb_pdest)
popl %ebx // restore register variables
popl %edi
popl %esi
popl %ebp // restore caller's stack frame pointer
ret
#endif // id386
|
paritytech/polkaports | 1,653 | apps/quake/r_varsa.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// r_varsa.s
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
.data
//-------------------------------------------------------
// ASM-only variables
//-------------------------------------------------------
.globl float_1, float_particle_z_clip, float_point5
.globl float_minus_1, float_0
float_0: .single 0.0
float_1: .single 1.0
float_minus_1: .single -1.0
float_particle_z_clip: .single PARTICLE_Z_CLIP
float_point5: .single 0.5
.globl fp_16, fp_64k, fp_1m, fp_64kx64k
.globl fp_1m_minus_1
.globl fp_8
fp_1m: .single 1048576.0
fp_1m_minus_1: .single 1048575.0
fp_64k: .single 65536.0
fp_8: .single 8.0
fp_16: .single 16.0
fp_64kx64k: .long 0x4f000000 // (float)0x8000*0x10000
.globl FloatZero, Float2ToThe31nd, FloatMinus2ToThe31nd
FloatZero: .long 0
Float2ToThe31nd: .long 0x4f000000
FloatMinus2ToThe31nd: .long 0xcf000000
#endif // id386
|
paritytech/polkaports | 17,773 | apps/quake/r_edgea.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// r_edgea.s
// x86 assembly-language edge-processing code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#if id386
.data
Ltemp: .long 0
float_1_div_0100000h: .long 0x35800000 // 1.0/(float)0x100000
float_point_999: .single 0.999
float_1_point_001: .single 1.001
.text
//--------------------------------------------------------------------
#define edgestoadd 4+8 // note odd stack offsets because of interleaving
#define edgelist 8+12 // with pushes
.globl C(R_EdgeCodeStart)
C(R_EdgeCodeStart):
.globl C(R_InsertNewEdges)
C(R_InsertNewEdges):
pushl %edi
pushl %esi // preserve register variables
movl edgestoadd(%esp),%edx
pushl %ebx
movl edgelist(%esp),%ecx
LDoNextEdge:
movl et_u(%edx),%eax
movl %edx,%edi
LContinueSearch:
movl et_u(%ecx),%ebx
movl et_next(%ecx),%esi
cmpl %ebx,%eax
jle LAddedge
movl et_u(%esi),%ebx
movl et_next(%esi),%ecx
cmpl %ebx,%eax
jle LAddedge2
movl et_u(%ecx),%ebx
movl et_next(%ecx),%esi
cmpl %ebx,%eax
jle LAddedge
movl et_u(%esi),%ebx
movl et_next(%esi),%ecx
cmpl %ebx,%eax
jg LContinueSearch
LAddedge2:
movl et_next(%edx),%edx
movl et_prev(%esi),%ebx
movl %esi,et_next(%edi)
movl %ebx,et_prev(%edi)
movl %edi,et_next(%ebx)
movl %edi,et_prev(%esi)
movl %esi,%ecx
cmpl $0,%edx
jnz LDoNextEdge
jmp LDone
.align 4
LAddedge:
movl et_next(%edx),%edx
movl et_prev(%ecx),%ebx
movl %ecx,et_next(%edi)
movl %ebx,et_prev(%edi)
movl %edi,et_next(%ebx)
movl %edi,et_prev(%ecx)
cmpl $0,%edx
jnz LDoNextEdge
LDone:
popl %ebx // restore register variables
popl %esi
popl %edi
ret
//--------------------------------------------------------------------
#define predge 4+4
.globl C(R_RemoveEdges)
C(R_RemoveEdges):
pushl %ebx
movl predge(%esp),%eax
Lre_loop:
movl et_next(%eax),%ecx
movl et_nextremove(%eax),%ebx
movl et_prev(%eax),%edx
testl %ebx,%ebx
movl %edx,et_prev(%ecx)
jz Lre_done
movl %ecx,et_next(%edx)
movl et_next(%ebx),%ecx
movl et_prev(%ebx),%edx
movl et_nextremove(%ebx),%eax
movl %edx,et_prev(%ecx)
testl %eax,%eax
movl %ecx,et_next(%edx)
jnz Lre_loop
popl %ebx
ret
Lre_done:
movl %ecx,et_next(%edx)
popl %ebx
ret
//--------------------------------------------------------------------
#define pedgelist 4+4 // note odd stack offset because of interleaving
// with pushes
.globl C(R_StepActiveU)
C(R_StepActiveU):
pushl %edi
movl pedgelist(%esp),%edx
pushl %esi // preserve register variables
pushl %ebx
movl et_prev(%edx),%esi
LNewEdge:
movl et_u(%esi),%edi
LNextEdge:
movl et_u(%edx),%eax
movl et_u_step(%edx),%ebx
addl %ebx,%eax
movl et_next(%edx),%esi
movl %eax,et_u(%edx)
cmpl %edi,%eax
jl LPushBack
movl et_u(%esi),%edi
movl et_u_step(%esi),%ebx
addl %ebx,%edi
movl et_next(%esi),%edx
movl %edi,et_u(%esi)
cmpl %eax,%edi
jl LPushBack2
movl et_u(%edx),%eax
movl et_u_step(%edx),%ebx
addl %ebx,%eax
movl et_next(%edx),%esi
movl %eax,et_u(%edx)
cmpl %edi,%eax
jl LPushBack
movl et_u(%esi),%edi
movl et_u_step(%esi),%ebx
addl %ebx,%edi
movl et_next(%esi),%edx
movl %edi,et_u(%esi)
cmpl %eax,%edi
jnl LNextEdge
LPushBack2:
movl %edx,%ebx
movl %edi,%eax
movl %esi,%edx
movl %ebx,%esi
LPushBack:
// push it back to keep it sorted
movl et_prev(%edx),%ecx
movl et_next(%edx),%ebx
// done if the -1 in edge_aftertail triggered this
cmpl $(C(edge_aftertail)),%edx
jz LUDone
// pull the edge out of the edge list
movl et_prev(%ecx),%edi
movl %ecx,et_prev(%esi)
movl %ebx,et_next(%ecx)
// find out where the edge goes in the edge list
LPushBackLoop:
movl et_prev(%edi),%ecx
movl et_u(%edi),%ebx
cmpl %ebx,%eax
jnl LPushBackFound
movl et_prev(%ecx),%edi
movl et_u(%ecx),%ebx
cmpl %ebx,%eax
jl LPushBackLoop
movl %ecx,%edi
// put the edge back into the edge list
LPushBackFound:
movl et_next(%edi),%ebx
movl %edi,et_prev(%edx)
movl %ebx,et_next(%edx)
movl %edx,et_next(%edi)
movl %edx,et_prev(%ebx)
movl %esi,%edx
movl et_prev(%esi),%esi
cmpl $(C(edge_tail)),%edx
jnz LNewEdge
LUDone:
popl %ebx // restore register variables
popl %esi
popl %edi
ret
//--------------------------------------------------------------------
#define surf 4 // note this is loaded before any pushes
.align 4
TrailingEdge:
movl st_spanstate(%esi),%eax // check for edge inversion
decl %eax
jnz LInverted
movl %eax,st_spanstate(%esi)
movl st_insubmodel(%esi),%ecx
movl 0x12345678,%edx // surfaces[1].st_next
LPatch0:
cmpl %esi,%edx
jnz LNoEmit // surface isn't on top, just remove
// emit a span (current top going away)
movl et_u(%ebx),%eax
shrl $20,%eax // iu = integral pixel u
movl st_last_u(%esi),%edx
movl st_next(%esi),%ecx
cmpl %edx,%eax
jle LNoEmit2 // iu <= surf->last_u, so nothing to emit
movl %eax,st_last_u(%ecx) // surf->next->last_u = iu;
subl %edx,%eax
movl %edx,espan_t_u(%ebp) // span->u = surf->last_u;
movl %eax,espan_t_count(%ebp) // span->count = iu - span->u;
movl C(current_iv),%eax
movl %eax,espan_t_v(%ebp) // span->v = current_iv;
movl st_spans(%esi),%eax
movl %eax,espan_t_pnext(%ebp) // span->pnext = surf->spans;
movl %ebp,st_spans(%esi) // surf->spans = span;
addl $(espan_t_size),%ebp
movl st_next(%esi),%edx // remove the surface from the surface
movl st_prev(%esi),%esi // stack
movl %edx,st_next(%esi)
movl %esi,st_prev(%edx)
ret
LNoEmit2:
movl %eax,st_last_u(%ecx) // surf->next->last_u = iu;
movl st_next(%esi),%edx // remove the surface from the surface
movl st_prev(%esi),%esi // stack
movl %edx,st_next(%esi)
movl %esi,st_prev(%edx)
ret
LNoEmit:
movl st_next(%esi),%edx // remove the surface from the surface
movl st_prev(%esi),%esi // stack
movl %edx,st_next(%esi)
movl %esi,st_prev(%edx)
ret
LInverted:
movl %eax,st_spanstate(%esi)
ret
//--------------------------------------------------------------------
// trailing edge only
Lgs_trailing:
pushl $Lgs_nextedge
jmp TrailingEdge
.globl C(R_GenerateSpans)
C(R_GenerateSpans):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
// clear active surfaces to just the background surface
movl C(surfaces),%eax
movl C(edge_head_u_shift20),%edx
addl $(st_size),%eax
// %ebp = span_p throughout
movl C(span_p),%ebp
movl %eax,st_next(%eax)
movl %eax,st_prev(%eax)
movl %edx,st_last_u(%eax)
movl C(edge_head)+et_next,%ebx // edge=edge_head.next
// generate spans
cmpl $(C(edge_tail)),%ebx // done if empty list
jz Lgs_lastspan
Lgs_edgeloop:
movl et_surfs(%ebx),%edi
movl C(surfaces),%eax
movl %edi,%esi
andl $0xFFFF0000,%edi
andl $0xFFFF,%esi
jz Lgs_leading // not a trailing edge
// it has a left surface, so a surface is going away for this span
shll $(SURF_T_SHIFT),%esi
addl %eax,%esi
testl %edi,%edi
jz Lgs_trailing
// both leading and trailing
call TrailingEdge
movl C(surfaces),%eax
// ---------------------------------------------------------------
// handle a leading edge
// ---------------------------------------------------------------
Lgs_leading:
shrl $16-SURF_T_SHIFT,%edi
movl C(surfaces),%eax
addl %eax,%edi
movl 0x12345678,%esi // surf2 = surfaces[1].next;
LPatch2:
movl st_spanstate(%edi),%edx
movl st_insubmodel(%edi),%eax
testl %eax,%eax
jnz Lbmodel_leading
// handle a leading non-bmodel edge
// don't start a span if this is an inverted span, with the end edge preceding
// the start edge (that is, we've already seen the end edge)
testl %edx,%edx
jnz Lxl_done
// if (surf->key < surf2->key)
// goto newtop;
incl %edx
movl st_key(%edi),%eax
movl %edx,st_spanstate(%edi)
movl st_key(%esi),%ecx
cmpl %ecx,%eax
jl Lnewtop
// main sorting loop to search through surface stack until insertion point
// found. Always terminates because background surface is sentinel
// do
// {
// surf2 = surf2->next;
// } while (surf->key >= surf2->key);
Lsortloopnb:
movl st_next(%esi),%esi
movl st_key(%esi),%ecx
cmpl %ecx,%eax
jge Lsortloopnb
jmp LInsertAndExit
// handle a leading bmodel edge
.align 4
Lbmodel_leading:
// don't start a span if this is an inverted span, with the end edge preceding
// the start edge (that is, we've already seen the end edge)
testl %edx,%edx
jnz Lxl_done
incl %edx
movl %edx,st_spanstate(%edi)
// if (surf->key < surf2->key)
// goto newtop;
movl st_key(%edi),%eax
movl st_key(%esi),%ecx
cmpl %ecx,%eax
jl Lnewtop
// if ((surf->key == surf2->key) && surf->insubmodel)
// {
jz Lzcheck_for_newtop
// main sorting loop to search through surface stack until insertion point
// found. Always terminates because background surface is sentinel
// do
// {
// surf2 = surf2->next;
// } while (surf->key > surf2->key);
Lsortloop:
movl st_next(%esi),%esi
movl st_key(%esi),%ecx
cmpl %ecx,%eax
jg Lsortloop
jne LInsertAndExit
// Do 1/z sorting to see if we've arrived in the right position
movl et_u(%ebx),%eax
subl $0xFFFFF,%eax
movl %eax,Ltemp
fildl Ltemp
fmuls float_1_div_0100000h // fu = (float)(edge->u - 0xFFFFF) *
// (1.0 / 0x100000);
fld %st(0) // fu | fu
fmuls st_d_zistepu(%edi) // fu*surf->d_zistepu | fu
flds C(fv) // fv | fu*surf->d_zistepu | fu
fmuls st_d_zistepv(%edi) // fv*surf->d_zistepv | fu*surf->d_zistepu | fu
fxch %st(1) // fu*surf->d_zistepu | fv*surf->d_zistepv | fu
fadds st_d_ziorigin(%edi) // fu*surf->d_zistepu + surf->d_ziorigin |
// fv*surf->d_zistepv | fu
flds st_d_zistepu(%esi) // surf2->d_zistepu |
// fu*surf->d_zistepu + surf->d_ziorigin |
// fv*surf->d_zistepv | fu
fmul %st(3),%st(0) // fu*surf2->d_zistepu |
// fu*surf->d_zistepu + surf->d_ziorigin |
// fv*surf->d_zistepv | fu
fxch %st(1) // fu*surf->d_zistepu + surf->d_ziorigin |
// fu*surf2->d_zistepu |
// fv*surf->d_zistepv | fu
faddp %st(0),%st(2) // fu*surf2->d_zistepu | newzi | fu
flds C(fv) // fv | fu*surf2->d_zistepu | newzi | fu
fmuls st_d_zistepv(%esi) // fv*surf2->d_zistepv |
// fu*surf2->d_zistepu | newzi | fu
fld %st(2) // newzi | fv*surf2->d_zistepv |
// fu*surf2->d_zistepu | newzi | fu
fmuls float_point_999 // newzibottom | fv*surf2->d_zistepv |
// fu*surf2->d_zistepu | newzi | fu
fxch %st(2) // fu*surf2->d_zistepu | fv*surf2->d_zistepv |
// newzibottom | newzi | fu
fadds st_d_ziorigin(%esi) // fu*surf2->d_zistepu + surf2->d_ziorigin |
// fv*surf2->d_zistepv | newzibottom | newzi |
// fu
faddp %st(0),%st(1) // testzi | newzibottom | newzi | fu
fxch %st(1) // newzibottom | testzi | newzi | fu
// if (newzibottom >= testzi)
// goto Lgotposition;
fcomp %st(1) // testzi | newzi | fu
fxch %st(1) // newzi | testzi | fu
fmuls float_1_point_001 // newzitop | testzi | fu
fxch %st(1) // testzi | newzitop | fu
fnstsw %ax
testb $0x01,%ah
jz Lgotposition_fpop3
// if (newzitop >= testzi)
// {
fcomp %st(1) // newzitop | fu
fnstsw %ax
testb $0x45,%ah
jz Lsortloop_fpop2
// if (surf->d_zistepu >= surf2->d_zistepu)
// goto newtop;
flds st_d_zistepu(%edi) // surf->d_zistepu | newzitop| fu
fcomps st_d_zistepu(%esi) // newzitop | fu
fnstsw %ax
testb $0x01,%ah
jz Lgotposition_fpop2
fstp %st(0) // clear the FPstack
fstp %st(0)
movl st_key(%edi),%eax
jmp Lsortloop
Lgotposition_fpop3:
fstp %st(0)
Lgotposition_fpop2:
fstp %st(0)
fstp %st(0)
jmp LInsertAndExit
// emit a span (obscures current top)
Lnewtop_fpop3:
fstp %st(0)
Lnewtop_fpop2:
fstp %st(0)
fstp %st(0)
movl st_key(%edi),%eax // reload the sorting key
Lnewtop:
movl et_u(%ebx),%eax
movl st_last_u(%esi),%edx
shrl $20,%eax // iu = integral pixel u
movl %eax,st_last_u(%edi) // surf->last_u = iu;
cmpl %edx,%eax
jle LInsertAndExit // iu <= surf->last_u, so nothing to emit
subl %edx,%eax
movl %edx,espan_t_u(%ebp) // span->u = surf->last_u;
movl %eax,espan_t_count(%ebp) // span->count = iu - span->u;
movl C(current_iv),%eax
movl %eax,espan_t_v(%ebp) // span->v = current_iv;
movl st_spans(%esi),%eax
movl %eax,espan_t_pnext(%ebp) // span->pnext = surf->spans;
movl %ebp,st_spans(%esi) // surf->spans = span;
addl $(espan_t_size),%ebp
LInsertAndExit:
// insert before surf2
movl %esi,st_next(%edi) // surf->next = surf2;
movl st_prev(%esi),%eax
movl %eax,st_prev(%edi) // surf->prev = surf2->prev;
movl %edi,st_prev(%esi) // surf2->prev = surf;
movl %edi,st_next(%eax) // surf2->prev->next = surf;
// ---------------------------------------------------------------
// leading edge done
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// see if there are any more edges
// ---------------------------------------------------------------
Lgs_nextedge:
movl et_next(%ebx),%ebx
cmpl $(C(edge_tail)),%ebx
jnz Lgs_edgeloop
// clean up at the right edge
Lgs_lastspan:
// now that we've reached the right edge of the screen, we're done with any
// unfinished surfaces, so emit a span for whatever's on top
movl 0x12345678,%esi // surfaces[1].st_next
LPatch3:
movl C(edge_tail_u_shift20),%eax
xorl %ecx,%ecx
movl st_last_u(%esi),%edx
subl %edx,%eax
jle Lgs_resetspanstate
movl %edx,espan_t_u(%ebp)
movl %eax,espan_t_count(%ebp)
movl C(current_iv),%eax
movl %eax,espan_t_v(%ebp)
movl st_spans(%esi),%eax
movl %eax,espan_t_pnext(%ebp)
movl %ebp,st_spans(%esi)
addl $(espan_t_size),%ebp
// reset spanstate for all surfaces in the surface stack
Lgs_resetspanstate:
movl %ecx,st_spanstate(%esi)
movl st_next(%esi),%esi
cmpl $0x12345678,%esi // &surfaces[1]
LPatch4:
jnz Lgs_resetspanstate
// store the final span_p
movl %ebp,C(span_p)
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
// ---------------------------------------------------------------
// 1/z sorting for bmodels in the same leaf
// ---------------------------------------------------------------
.align 4
Lxl_done:
incl %edx
movl %edx,st_spanstate(%edi)
jmp Lgs_nextedge
.align 4
Lzcheck_for_newtop:
movl et_u(%ebx),%eax
subl $0xFFFFF,%eax
movl %eax,Ltemp
fildl Ltemp
fmuls float_1_div_0100000h // fu = (float)(edge->u - 0xFFFFF) *
// (1.0 / 0x100000);
fld %st(0) // fu | fu
fmuls st_d_zistepu(%edi) // fu*surf->d_zistepu | fu
flds C(fv) // fv | fu*surf->d_zistepu | fu
fmuls st_d_zistepv(%edi) // fv*surf->d_zistepv | fu*surf->d_zistepu | fu
fxch %st(1) // fu*surf->d_zistepu | fv*surf->d_zistepv | fu
fadds st_d_ziorigin(%edi) // fu*surf->d_zistepu + surf->d_ziorigin |
// fv*surf->d_zistepv | fu
flds st_d_zistepu(%esi) // surf2->d_zistepu |
// fu*surf->d_zistepu + surf->d_ziorigin |
// fv*surf->d_zistepv | fu
fmul %st(3),%st(0) // fu*surf2->d_zistepu |
// fu*surf->d_zistepu + surf->d_ziorigin |
// fv*surf->d_zistepv | fu
fxch %st(1) // fu*surf->d_zistepu + surf->d_ziorigin |
// fu*surf2->d_zistepu |
// fv*surf->d_zistepv | fu
faddp %st(0),%st(2) // fu*surf2->d_zistepu | newzi | fu
flds C(fv) // fv | fu*surf2->d_zistepu | newzi | fu
fmuls st_d_zistepv(%esi) // fv*surf2->d_zistepv |
// fu*surf2->d_zistepu | newzi | fu
fld %st(2) // newzi | fv*surf2->d_zistepv |
// fu*surf2->d_zistepu | newzi | fu
fmuls float_point_999 // newzibottom | fv*surf2->d_zistepv |
// fu*surf2->d_zistepu | newzi | fu
fxch %st(2) // fu*surf2->d_zistepu | fv*surf2->d_zistepv |
// newzibottom | newzi | fu
fadds st_d_ziorigin(%esi) // fu*surf2->d_zistepu + surf2->d_ziorigin |
// fv*surf2->d_zistepv | newzibottom | newzi |
// fu
faddp %st(0),%st(1) // testzi | newzibottom | newzi | fu
fxch %st(1) // newzibottom | testzi | newzi | fu
// if (newzibottom >= testzi)
// goto newtop;
fcomp %st(1) // testzi | newzi | fu
fxch %st(1) // newzi | testzi | fu
fmuls float_1_point_001 // newzitop | testzi | fu
fxch %st(1) // testzi | newzitop | fu
fnstsw %ax
testb $0x01,%ah
jz Lnewtop_fpop3
// if (newzitop >= testzi)
// {
fcomp %st(1) // newzitop | fu
fnstsw %ax
testb $0x45,%ah
jz Lsortloop_fpop2
// if (surf->d_zistepu >= surf2->d_zistepu)
// goto newtop;
flds st_d_zistepu(%edi) // surf->d_zistepu | newzitop | fu
fcomps st_d_zistepu(%esi) // newzitop | fu
fnstsw %ax
testb $0x01,%ah
jz Lnewtop_fpop2
Lsortloop_fpop2:
fstp %st(0) // clear the FP stack
fstp %st(0)
movl st_key(%edi),%eax
jmp Lsortloop
.globl C(R_EdgeCodeEnd)
C(R_EdgeCodeEnd):
//----------------------------------------------------------------------
// Surface array address code patching routine
//----------------------------------------------------------------------
.align 4
.globl C(R_SurfacePatch)
C(R_SurfacePatch):
movl C(surfaces),%eax
addl $(st_size),%eax
movl %eax,LPatch4-4
addl $(st_next),%eax
movl %eax,LPatch0-4
movl %eax,LPatch2-4
movl %eax,LPatch3-4
ret
#endif // id386
|
paritytech/polkaports | 20,683 | apps/quake/d_spr8.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_spr8.s
// x86 assembly-language horizontal 8-bpp transparent span-drawing code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#if id386
//----------------------------------------------------------------------
// 8-bpp horizontal span drawing code for polygons, with transparency.
//----------------------------------------------------------------------
.text
// out-of-line, rarely-needed clamping code
LClampHigh0:
movl C(bbextents),%esi
jmp LClampReentry0
LClampHighOrLow0:
jg LClampHigh0
xorl %esi,%esi
jmp LClampReentry0
LClampHigh1:
movl C(bbextentt),%edx
jmp LClampReentry1
LClampHighOrLow1:
jg LClampHigh1
xorl %edx,%edx
jmp LClampReentry1
LClampLow2:
movl $2048,%ebp
jmp LClampReentry2
LClampHigh2:
movl C(bbextents),%ebp
jmp LClampReentry2
LClampLow3:
movl $2048,%ecx
jmp LClampReentry3
LClampHigh3:
movl C(bbextentt),%ecx
jmp LClampReentry3
LClampLow4:
movl $2048,%eax
jmp LClampReentry4
LClampHigh4:
movl C(bbextents),%eax
jmp LClampReentry4
LClampLow5:
movl $2048,%ebx
jmp LClampReentry5
LClampHigh5:
movl C(bbextentt),%ebx
jmp LClampReentry5
#define pspans 4+16
.align 4
.globl C(D_SpriteDrawSpans)
C(D_SpriteDrawSpans):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
//
// set up scaled-by-8 steps, for 8-long segments; also set up cacheblock
// and span list pointers, and 1/z step in 0.32 fixed-point
//
// FIXME: any overlap from rearranging?
flds C(d_sdivzstepu)
fmuls fp_8
movl C(cacheblock),%edx
flds C(d_tdivzstepu)
fmuls fp_8
movl pspans(%esp),%ebx // point to the first span descriptor
flds C(d_zistepu)
fmuls fp_8
movl %edx,pbase // pbase = cacheblock
flds C(d_zistepu)
fmuls fp_64kx64k
fxch %st(3)
fstps sdivz8stepu
fstps zi8stepu
fstps tdivz8stepu
fistpl izistep
movl izistep,%eax
rorl $16,%eax // put upper 16 bits in low word
movl sspan_t_count(%ebx),%ecx
movl %eax,izistep
cmpl $0,%ecx
jle LNextSpan
LSpanLoop:
//
// set up the initial s/z, t/z, and 1/z on the FP stack, and generate the
// initial s and t values
//
// FIXME: pipeline FILD?
fildl sspan_t_v(%ebx)
fildl sspan_t_u(%ebx)
fld %st(1) // dv | du | dv
fmuls C(d_sdivzstepv) // dv*d_sdivzstepv | du | dv
fld %st(1) // du | dv*d_sdivzstepv | du | dv
fmuls C(d_sdivzstepu) // du*d_sdivzstepu | dv*d_sdivzstepv | du | dv
fld %st(2) // du | du*d_sdivzstepu | dv*d_sdivzstepv | du | dv
fmuls C(d_tdivzstepu) // du*d_tdivzstepu | du*d_sdivzstepu |
// dv*d_sdivzstepv | du | dv
fxch %st(1) // du*d_sdivzstepu | du*d_tdivzstepu |
// dv*d_sdivzstepv | du | dv
faddp %st(0),%st(2) // du*d_tdivzstepu |
// du*d_sdivzstepu + dv*d_sdivzstepv | du | dv
fxch %st(1) // du*d_sdivzstepu + dv*d_sdivzstepv |
// du*d_tdivzstepu | du | dv
fld %st(3) // dv | du*d_sdivzstepu + dv*d_sdivzstepv |
// du*d_tdivzstepu | du | dv
fmuls C(d_tdivzstepv) // dv*d_tdivzstepv |
// du*d_sdivzstepu + dv*d_sdivzstepv |
// du*d_tdivzstepu | du | dv
fxch %st(1) // du*d_sdivzstepu + dv*d_sdivzstepv |
// dv*d_tdivzstepv | du*d_tdivzstepu | du | dv
fadds C(d_sdivzorigin) // sdivz = d_sdivzorigin + dv*d_sdivzstepv +
// du*d_sdivzstepu; stays in %st(2) at end
fxch %st(4) // dv | dv*d_tdivzstepv | du*d_tdivzstepu | du |
// s/z
fmuls C(d_zistepv) // dv*d_zistepv | dv*d_tdivzstepv |
// du*d_tdivzstepu | du | s/z
fxch %st(1) // dv*d_tdivzstepv | dv*d_zistepv |
// du*d_tdivzstepu | du | s/z
faddp %st(0),%st(2) // dv*d_zistepv |
// dv*d_tdivzstepv + du*d_tdivzstepu | du | s/z
fxch %st(2) // du | dv*d_tdivzstepv + du*d_tdivzstepu |
// dv*d_zistepv | s/z
fmuls C(d_zistepu) // du*d_zistepu |
// dv*d_tdivzstepv + du*d_tdivzstepu |
// dv*d_zistepv | s/z
fxch %st(1) // dv*d_tdivzstepv + du*d_tdivzstepu |
// du*d_zistepu | dv*d_zistepv | s/z
fadds C(d_tdivzorigin) // tdivz = d_tdivzorigin + dv*d_tdivzstepv +
// du*d_tdivzstepu; stays in %st(1) at end
fxch %st(2) // dv*d_zistepv | du*d_zistepu | t/z | s/z
faddp %st(0),%st(1) // dv*d_zistepv + du*d_zistepu | t/z | s/z
flds fp_64k // fp_64k | dv*d_zistepv + du*d_zistepu | t/z | s/z
fxch %st(1) // dv*d_zistepv + du*d_zistepu | fp_64k | t/z | s/z
fadds C(d_ziorigin) // zi = d_ziorigin + dv*d_zistepv +
// du*d_zistepu; stays in %st(0) at end
// 1/z | fp_64k | t/z | s/z
fld %st(0) // FIXME: get rid of stall on FMUL?
fmuls fp_64kx64k
fxch %st(1)
//
// calculate and clamp s & t
//
fdivr %st(0),%st(2) // 1/z | z*64k | t/z | s/z
fxch %st(1)
fistpl izi // 0.32 fixed-point 1/z
movl izi,%ebp
//
// set pz to point to the first z-buffer pixel in the span
//
rorl $16,%ebp // put upper 16 bits in low word
movl sspan_t_v(%ebx),%eax
movl %ebp,izi
movl sspan_t_u(%ebx),%ebp
imull C(d_zrowbytes)
shll $1,%ebp // a word per pixel
addl C(d_pzbuffer),%eax
addl %ebp,%eax
movl %eax,pz
//
// point %edi to the first pixel in the span
//
movl C(d_viewbuffer),%ebp
movl sspan_t_v(%ebx),%eax
pushl %ebx // preserve spans pointer
movl C(tadjust),%edx
movl C(sadjust),%esi
movl C(d_scantable)(,%eax,4),%edi // v * screenwidth
addl %ebp,%edi
movl sspan_t_u(%ebx),%ebp
addl %ebp,%edi // pdest = &pdestspan[scans->u];
//
// now start the FDIV for the end of the span
//
cmpl $8,%ecx
ja LSetupNotLast1
decl %ecx
jz LCleanup1 // if only one pixel, no need to start an FDIV
movl %ecx,spancountminus1
// finish up the s and t calcs
fxch %st(1) // z*64k | 1/z | t/z | s/z
fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
fxch %st(1) // z*64k | s | 1/z | t/z | s/z
fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
fxch %st(1) // s | t | 1/z | t/z | s/z
fistpl s // 1/z | t | t/z | s/z
fistpl t // 1/z | t/z | s/z
fildl spancountminus1
flds C(d_tdivzstepu) // _d_tdivzstepu | spancountminus1
flds C(d_zistepu) // _d_zistepu | _d_tdivzstepu | spancountminus1
fmul %st(2),%st(0) // _d_zistepu*scm1 | _d_tdivzstepu | scm1
fxch %st(1) // _d_tdivzstepu | _d_zistepu*scm1 | scm1
fmul %st(2),%st(0) // _d_tdivzstepu*scm1 | _d_zistepu*scm1 | scm1
fxch %st(2) // scm1 | _d_zistepu*scm1 | _d_tdivzstepu*scm1
fmuls C(d_sdivzstepu) // _d_sdivzstepu*scm1 | _d_zistepu*scm1 |
// _d_tdivzstepu*scm1
fxch %st(1) // _d_zistepu*scm1 | _d_sdivzstepu*scm1 |
// _d_tdivzstepu*scm1
faddp %st(0),%st(3) // _d_sdivzstepu*scm1 | _d_tdivzstepu*scm1
fxch %st(1) // _d_tdivzstepu*scm1 | _d_sdivzstepu*scm1
faddp %st(0),%st(3) // _d_sdivzstepu*scm1
faddp %st(0),%st(3)
flds fp_64k
fdiv %st(1),%st(0) // this is what we've gone to all this trouble to
// overlap
jmp LFDIVInFlight1
LCleanup1:
// finish up the s and t calcs
fxch %st(1) // z*64k | 1/z | t/z | s/z
fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
fxch %st(1) // z*64k | s | 1/z | t/z | s/z
fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
fxch %st(1) // s | t | 1/z | t/z | s/z
fistpl s // 1/z | t | t/z | s/z
fistpl t // 1/z | t/z | s/z
jmp LFDIVInFlight1
.align 4
LSetupNotLast1:
// finish up the s and t calcs
fxch %st(1) // z*64k | 1/z | t/z | s/z
fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
fxch %st(1) // z*64k | s | 1/z | t/z | s/z
fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
fxch %st(1) // s | t | 1/z | t/z | s/z
fistpl s // 1/z | t | t/z | s/z
fistpl t // 1/z | t/z | s/z
fadds zi8stepu
fxch %st(2)
fadds sdivz8stepu
fxch %st(2)
flds tdivz8stepu
faddp %st(0),%st(2)
flds fp_64k
fdiv %st(1),%st(0) // z = 1/1/z
// this is what we've gone to all this trouble to
// overlap
LFDIVInFlight1:
addl s,%esi
addl t,%edx
movl C(bbextents),%ebx
movl C(bbextentt),%ebp
cmpl %ebx,%esi
ja LClampHighOrLow0
LClampReentry0:
movl %esi,s
movl pbase,%ebx
shll $16,%esi
cmpl %ebp,%edx
movl %esi,sfracf
ja LClampHighOrLow1
LClampReentry1:
movl %edx,t
movl s,%esi // sfrac = scans->sfrac;
shll $16,%edx
movl t,%eax // tfrac = scans->tfrac;
sarl $16,%esi
movl %edx,tfracf
//
// calculate the texture starting address
//
sarl $16,%eax
addl %ebx,%esi
imull C(cachewidth),%eax // (tfrac >> 16) * cachewidth
addl %eax,%esi // psource = pbase + (sfrac >> 16) +
// ((tfrac >> 16) * cachewidth);
//
// determine whether last span or not
//
cmpl $8,%ecx
jna LLastSegment
//
// not the last segment; do full 8-wide segment
//
LNotLastSegment:
//
// advance s/z, t/z, and 1/z, and calculate s & t at end of span and steps to
// get there
//
// pick up after the FDIV that was left in flight previously
fld %st(0) // duplicate it
fmul %st(4),%st(0) // s = s/z * z
fxch %st(1)
fmul %st(3),%st(0) // t = t/z * z
fxch %st(1)
fistpl snext
fistpl tnext
movl snext,%eax
movl tnext,%edx
subl $8,%ecx // count off this segments' pixels
movl C(sadjust),%ebp
pushl %ecx // remember count of remaining pixels
movl C(tadjust),%ecx
addl %eax,%ebp
addl %edx,%ecx
movl C(bbextents),%eax
movl C(bbextentt),%edx
cmpl $2048,%ebp
jl LClampLow2
cmpl %eax,%ebp
ja LClampHigh2
LClampReentry2:
cmpl $2048,%ecx
jl LClampLow3
cmpl %edx,%ecx
ja LClampHigh3
LClampReentry3:
movl %ebp,snext
movl %ecx,tnext
subl s,%ebp
subl t,%ecx
//
// set up advancetable
//
movl %ecx,%eax
movl %ebp,%edx
sarl $19,%edx // sstep >>= 16;
movl C(cachewidth),%ebx
sarl $19,%eax // tstep >>= 16;
jz LIsZero
imull %ebx,%eax // (tstep >> 16) * cachewidth;
LIsZero:
addl %edx,%eax // add in sstep
// (tstep >> 16) * cachewidth + (sstep >> 16);
movl tfracf,%edx
movl %eax,advancetable+4 // advance base in t
addl %ebx,%eax // ((tstep >> 16) + 1) * cachewidth +
// (sstep >> 16);
shll $13,%ebp // left-justify sstep fractional part
movl %ebp,sstep
movl sfracf,%ebx
shll $13,%ecx // left-justify tstep fractional part
movl %eax,advancetable // advance extra in t
movl %ecx,tstep
movl pz,%ecx
movl izi,%ebp
cmpw (%ecx),%bp
jl Lp1
movb (%esi),%al // get first source texel
cmpb $(TRANSPARENT_COLOR),%al
jz Lp1
movw %bp,(%ecx)
movb %al,(%edi) // store first dest pixel
Lp1:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx // advance tfrac fractional part by tstep frac
sbbl %eax,%eax // turn tstep carry into -1 (0 if none)
addl sstep,%ebx // advance sfrac fractional part by sstep frac
adcl advancetable+4(,%eax,4),%esi // point to next source texel
cmpw 2(%ecx),%bp
jl Lp2
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp2
movw %bp,2(%ecx)
movb %al,1(%edi)
Lp2:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
cmpw 4(%ecx),%bp
jl Lp3
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp3
movw %bp,4(%ecx)
movb %al,2(%edi)
Lp3:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
cmpw 6(%ecx),%bp
jl Lp4
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp4
movw %bp,6(%ecx)
movb %al,3(%edi)
Lp4:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
cmpw 8(%ecx),%bp
jl Lp5
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp5
movw %bp,8(%ecx)
movb %al,4(%edi)
Lp5:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
//
// start FDIV for end of next segment in flight, so it can overlap
//
popl %eax
cmpl $8,%eax // more than one segment after this?
ja LSetupNotLast2 // yes
decl %eax
jz LFDIVInFlight2 // if only one pixel, no need to start an FDIV
movl %eax,spancountminus1
fildl spancountminus1
flds C(d_zistepu) // _d_zistepu | spancountminus1
fmul %st(1),%st(0) // _d_zistepu*scm1 | scm1
flds C(d_tdivzstepu) // _d_tdivzstepu | _d_zistepu*scm1 | scm1
fmul %st(2),%st(0) // _d_tdivzstepu*scm1 | _d_zistepu*scm1 | scm1
fxch %st(1) // _d_zistepu*scm1 | _d_tdivzstepu*scm1 | scm1
faddp %st(0),%st(3) // _d_tdivzstepu*scm1 | scm1
fxch %st(1) // scm1 | _d_tdivzstepu*scm1
fmuls C(d_sdivzstepu) // _d_sdivzstepu*scm1 | _d_tdivzstepu*scm1
fxch %st(1) // _d_tdivzstepu*scm1 | _d_sdivzstepu*scm1
faddp %st(0),%st(3) // _d_sdivzstepu*scm1
flds fp_64k // 64k | _d_sdivzstepu*scm1
fxch %st(1) // _d_sdivzstepu*scm1 | 64k
faddp %st(0),%st(4) // 64k
fdiv %st(1),%st(0) // this is what we've gone to all this trouble to
// overlap
jmp LFDIVInFlight2
.align 4
LSetupNotLast2:
fadds zi8stepu
fxch %st(2)
fadds sdivz8stepu
fxch %st(2)
flds tdivz8stepu
faddp %st(0),%st(2)
flds fp_64k
fdiv %st(1),%st(0) // z = 1/1/z
// this is what we've gone to all this trouble to
// overlap
LFDIVInFlight2:
pushl %eax
cmpw 10(%ecx),%bp
jl Lp6
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp6
movw %bp,10(%ecx)
movb %al,5(%edi)
Lp6:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
cmpw 12(%ecx),%bp
jl Lp7
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp7
movw %bp,12(%ecx)
movb %al,6(%edi)
Lp7:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
cmpw 14(%ecx),%bp
jl Lp8
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp8
movw %bp,14(%ecx)
movb %al,7(%edi)
Lp8:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
addl $8,%edi
addl $16,%ecx
movl %edx,tfracf
movl snext,%edx
movl %ebx,sfracf
movl tnext,%ebx
movl %edx,s
movl %ebx,t
movl %ecx,pz
movl %ebp,izi
popl %ecx // retrieve count
//
// determine whether last span or not
//
cmpl $8,%ecx // are there multiple segments remaining?
ja LNotLastSegment // yes
//
// last segment of scan
//
LLastSegment:
//
// advance s/z, t/z, and 1/z, and calculate s & t at end of span and steps to
// get there. The number of pixels left is variable, and we want to land on the
// last pixel, not step one past it, so we can't run into arithmetic problems
//
testl %ecx,%ecx
jz LNoSteps // just draw the last pixel and we're done
// pick up after the FDIV that was left in flight previously
fld %st(0) // duplicate it
fmul %st(4),%st(0) // s = s/z * z
fxch %st(1)
fmul %st(3),%st(0) // t = t/z * z
fxch %st(1)
fistpl snext
fistpl tnext
movl C(tadjust),%ebx
movl C(sadjust),%eax
addl snext,%eax
addl tnext,%ebx
movl C(bbextents),%ebp
movl C(bbextentt),%edx
cmpl $2048,%eax
jl LClampLow4
cmpl %ebp,%eax
ja LClampHigh4
LClampReentry4:
movl %eax,snext
cmpl $2048,%ebx
jl LClampLow5
cmpl %edx,%ebx
ja LClampHigh5
LClampReentry5:
cmpl $1,%ecx // don't bother
je LOnlyOneStep // if two pixels in segment, there's only one step,
// of the segment length
subl s,%eax
subl t,%ebx
addl %eax,%eax // convert to 15.17 format so multiply by 1.31
addl %ebx,%ebx // reciprocal yields 16.48
imull reciprocal_table-8(,%ecx,4) // sstep = (snext - s) / (spancount-1)
movl %edx,%ebp
movl %ebx,%eax
imull reciprocal_table-8(,%ecx,4) // tstep = (tnext - t) / (spancount-1)
LSetEntryvec:
//
// set up advancetable
//
movl spr8entryvec_table(,%ecx,4),%ebx
movl %edx,%eax
pushl %ebx // entry point into code for RET later
movl %ebp,%ecx
sarl $16,%ecx // sstep >>= 16;
movl C(cachewidth),%ebx
sarl $16,%edx // tstep >>= 16;
jz LIsZeroLast
imull %ebx,%edx // (tstep >> 16) * cachewidth;
LIsZeroLast:
addl %ecx,%edx // add in sstep
// (tstep >> 16) * cachewidth + (sstep >> 16);
movl tfracf,%ecx
movl %edx,advancetable+4 // advance base in t
addl %ebx,%edx // ((tstep >> 16) + 1) * cachewidth +
// (sstep >> 16);
shll $16,%ebp // left-justify sstep fractional part
movl sfracf,%ebx
shll $16,%eax // left-justify tstep fractional part
movl %edx,advancetable // advance extra in t
movl %eax,tstep
movl %ebp,sstep
movl %ecx,%edx
movl pz,%ecx
movl izi,%ebp
ret // jump to the number-of-pixels handler
//----------------------------------------
LNoSteps:
movl pz,%ecx
subl $7,%edi // adjust for hardwired offset
subl $14,%ecx
jmp LEndSpan
LOnlyOneStep:
subl s,%eax
subl t,%ebx
movl %eax,%ebp
movl %ebx,%edx
jmp LSetEntryvec
//----------------------------------------
.globl Spr8Entry2_8
Spr8Entry2_8:
subl $6,%edi // adjust for hardwired offsets
subl $12,%ecx
movb (%esi),%al
jmp LLEntry2_8
//----------------------------------------
.globl Spr8Entry3_8
Spr8Entry3_8:
subl $5,%edi // adjust for hardwired offsets
subl $10,%ecx
jmp LLEntry3_8
//----------------------------------------
.globl Spr8Entry4_8
Spr8Entry4_8:
subl $4,%edi // adjust for hardwired offsets
subl $8,%ecx
jmp LLEntry4_8
//----------------------------------------
.globl Spr8Entry5_8
Spr8Entry5_8:
subl $3,%edi // adjust for hardwired offsets
subl $6,%ecx
jmp LLEntry5_8
//----------------------------------------
.globl Spr8Entry6_8
Spr8Entry6_8:
subl $2,%edi // adjust for hardwired offsets
subl $4,%ecx
jmp LLEntry6_8
//----------------------------------------
.globl Spr8Entry7_8
Spr8Entry7_8:
decl %edi // adjust for hardwired offsets
subl $2,%ecx
jmp LLEntry7_8
//----------------------------------------
.globl Spr8Entry8_8
Spr8Entry8_8:
cmpw (%ecx),%bp
jl Lp9
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp9
movw %bp,(%ecx)
movb %al,(%edi)
Lp9:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LLEntry7_8:
cmpw 2(%ecx),%bp
jl Lp10
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp10
movw %bp,2(%ecx)
movb %al,1(%edi)
Lp10:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LLEntry6_8:
cmpw 4(%ecx),%bp
jl Lp11
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp11
movw %bp,4(%ecx)
movb %al,2(%edi)
Lp11:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LLEntry5_8:
cmpw 6(%ecx),%bp
jl Lp12
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp12
movw %bp,6(%ecx)
movb %al,3(%edi)
Lp12:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LLEntry4_8:
cmpw 8(%ecx),%bp
jl Lp13
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp13
movw %bp,8(%ecx)
movb %al,4(%edi)
Lp13:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LLEntry3_8:
cmpw 10(%ecx),%bp
jl Lp14
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp14
movw %bp,10(%ecx)
movb %al,5(%edi)
Lp14:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LLEntry2_8:
cmpw 12(%ecx),%bp
jl Lp15
movb (%esi),%al
cmpb $(TRANSPARENT_COLOR),%al
jz Lp15
movw %bp,12(%ecx)
movb %al,6(%edi)
Lp15:
addl izistep,%ebp
adcl $0,%ebp
addl tstep,%edx
sbbl %eax,%eax
addl sstep,%ebx
adcl advancetable+4(,%eax,4),%esi
LEndSpan:
cmpw 14(%ecx),%bp
jl Lp16
movb (%esi),%al // load first texel in segment
cmpb $(TRANSPARENT_COLOR),%al
jz Lp16
movw %bp,14(%ecx)
movb %al,7(%edi)
Lp16:
//
// clear s/z, t/z, 1/z from FP stack
//
fstp %st(0)
fstp %st(0)
fstp %st(0)
popl %ebx // restore spans pointer
LNextSpan:
addl $(sspan_t_size),%ebx // point to next span
movl sspan_t_count(%ebx),%ecx
cmpl $0,%ecx // any more spans?
jg LSpanLoop // yes
jz LNextSpan // yes, but this one's empty
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
#endif // id386
|
paritytech/polkaports | 35,434 | apps/quake/d_polysa.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_polysa.s
// x86 assembly-language polygon model drawing code
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386
// !!! if this is changed, it must be changed in d_polyse.c too !!!
#define DPS_MAXSPANS MAXHEIGHT+1
// 1 extra for spanpackage that marks end
//#define SPAN_SIZE (((DPS_MAXSPANS + 1 + ((CACHE_SIZE - 1) / spanpackage_t_size)) + 1) * spanpackage_t_size)
#define SPAN_SIZE (1024+1+1+1)*32
.data
.align 4
p10_minus_p20: .single 0
p01_minus_p21: .single 0
temp0: .single 0
temp1: .single 0
Ltemp: .single 0
aff8entryvec_table: .long LDraw8, LDraw7, LDraw6, LDraw5
.long LDraw4, LDraw3, LDraw2, LDraw1
lzistepx: .long 0
.text
.extern C(D_PolysetSetEdgeTable)
.extern C(D_RasterizeAliasPolySmooth)
//----------------------------------------------------------------------
// affine triangle gradient calculation code
//----------------------------------------------------------------------
#define skinwidth 4+0
.globl C(D_PolysetCalcGradients)
C(D_PolysetCalcGradients):
// p00_minus_p20 = r_p0[0] - r_p2[0];
// p01_minus_p21 = r_p0[1] - r_p2[1];
// p10_minus_p20 = r_p1[0] - r_p2[0];
// p11_minus_p21 = r_p1[1] - r_p2[1];
//
// xstepdenominv = 1.0 / (p10_minus_p20 * p01_minus_p21 -
// p00_minus_p20 * p11_minus_p21);
//
// ystepdenominv = -xstepdenominv;
fildl C(r_p0)+0 // r_p0[0]
fildl C(r_p2)+0 // r_p2[0] | r_p0[0]
fildl C(r_p0)+4 // r_p0[1] | r_p2[0] | r_p0[0]
fildl C(r_p2)+4 // r_p2[1] | r_p0[1] | r_p2[0] | r_p0[0]
fildl C(r_p1)+0 // r_p1[0] | r_p2[1] | r_p0[1] | r_p2[0] | r_p0[0]
fildl C(r_p1)+4 // r_p1[1] | r_p1[0] | r_p2[1] | r_p0[1] |
// r_p2[0] | r_p0[0]
fxch %st(3) // r_p0[1] | r_p1[0] | r_p2[1] | r_p1[1] |
// r_p2[0] | r_p0[0]
fsub %st(2),%st(0) // p01_minus_p21 | r_p1[0] | r_p2[1] | r_p1[1] |
// r_p2[0] | r_p0[0]
fxch %st(1) // r_p1[0] | p01_minus_p21 | r_p2[1] | r_p1[1] |
// r_p2[0] | r_p0[0]
fsub %st(4),%st(0) // p10_minus_p20 | p01_minus_p21 | r_p2[1] |
// r_p1[1] | r_p2[0] | r_p0[0]
fxch %st(5) // r_p0[0] | p01_minus_p21 | r_p2[1] |
// r_p1[1] | r_p2[0] | p10_minus_p20
fsubp %st(0),%st(4) // p01_minus_p21 | r_p2[1] | r_p1[1] |
// p00_minus_p20 | p10_minus_p20
fxch %st(2) // r_p1[1] | r_p2[1] | p01_minus_p21 |
// p00_minus_p20 | p10_minus_p20
fsubp %st(0),%st(1) // p11_minus_p21 | p01_minus_p21 |
// p00_minus_p20 | p10_minus_p20
fxch %st(1) // p01_minus_p21 | p11_minus_p21 |
// p00_minus_p20 | p10_minus_p20
flds C(d_xdenom) // d_xdenom | p01_minus_p21 | p11_minus_p21 |
// p00_minus_p20 | p10_minus_p20
fxch %st(4) // p10_minus_p20 | p01_minus_p21 | p11_minus_p21 |
// p00_minus_p20 | d_xdenom
fstps p10_minus_p20 // p01_minus_p21 | p11_minus_p21 |
// p00_minus_p20 | d_xdenom
fstps p01_minus_p21 // p11_minus_p21 | p00_minus_p20 | xstepdenominv
fxch %st(2) // xstepdenominv | p00_minus_p20 | p11_minus_p21
//// ceil () for light so positive steps are exaggerated, negative steps
//// diminished, pushing us away from underflow toward overflow. Underflow is
//// very visible, overflow is very unlikely, because of ambient lighting
// t0 = r_p0[4] - r_p2[4];
// t1 = r_p1[4] - r_p2[4];
fildl C(r_p2)+16 // r_p2[4] | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fildl C(r_p0)+16 // r_p0[4] | r_p2[4] | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fildl C(r_p1)+16 // r_p1[4] | r_p0[4] | r_p2[4] | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fxch %st(2) // r_p2[4] | r_p0[4] | r_p1[4] | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fld %st(0) // r_p2[4] | r_p2[4] | r_p0[4] | r_p1[4] |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fsubrp %st(0),%st(2) // r_p2[4] | t0 | r_p1[4] | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fsubrp %st(0),%st(2) // t0 | t1 | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
// r_lstepx = (int)
// ceil((t1 * p01_minus_p21 - t0 * p11_minus_p21) * xstepdenominv);
// r_lstepy = (int)
// ceil((t1 * p00_minus_p20 - t0 * p10_minus_p20) * ystepdenominv);
fld %st(0) // t0 | t0 | t1 | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmul %st(5),%st(0) // t0*p11_minus_p21 | t0 | t1 | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fxch %st(2) // t1 | t0 | t0*p11_minus_p21 | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fld %st(0) // t1 | t1 | t0 | t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmuls p01_minus_p21 // t1*p01_minus_p21 | t1 | t0 | t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // t0 | t1 | t1*p01_minus_p21 | t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmuls p10_minus_p20 // t0*p10_minus_p20 | t1 | t1*p01_minus_p21 |
// t0*p11_minus_p21 | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fxch %st(1) // t1 | t0*p10_minus_p20 | t1*p01_minus_p21 |
// t0*p11_minus_p21 | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fmul %st(5),%st(0) // t1*p00_minus_p20 | t0*p10_minus_p20 |
// t1*p01_minus_p21 | t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // t1*p01_minus_p21 | t0*p10_minus_p20 |
// t1*p00_minus_p20 | t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fsubp %st(0),%st(3) // t0*p10_minus_p20 | t1*p00_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fsubrp %st(0),%st(1) // t1*p00_minus_p20 - t0*p10_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fld %st(2) // xstepdenominv |
// t1*p00_minus_p20 - t0*p10_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmuls float_minus_1 // ystepdenominv |
// t1*p00_minus_p20 - t0*p10_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // t1*p01_minus_p21 - t0*p11_minus_p21 |
// t1*p00_minus_p20 - t0*p10_minus_p20 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmul %st(3),%st(0) // (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv |
// t1*p00_minus_p20 - t0*p10_minus_p20 |
// | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fxch %st(1) // t1*p00_minus_p20 - t0*p10_minus_p20 |
// (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmul %st(2),%st(0) // (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv |
// (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fldcw ceil_cw
fistpl C(r_lstepy) // r_lstepx | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fistpl C(r_lstepx) // ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fldcw single_cw
// t0 = r_p0[2] - r_p2[2];
// t1 = r_p1[2] - r_p2[2];
fildl C(r_p2)+8 // r_p2[2] | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fildl C(r_p0)+8 // r_p0[2] | r_p2[2] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fildl C(r_p1)+8 // r_p1[2] | r_p0[2] | r_p2[2] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // r_p2[2] | r_p0[2] | r_p1[2] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fld %st(0) // r_p2[2] | r_p2[2] | r_p0[2] | r_p1[2] |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubrp %st(0),%st(2) // r_p2[2] | t0 | r_p1[2] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fsubrp %st(0),%st(2) // t0 | t1 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
// r_sstepx = (int)((t1 * p01_minus_p21 - t0 * p11_minus_p21) *
// xstepdenominv);
// r_sstepy = (int)((t1 * p00_minus_p20 - t0 * p10_minus_p20) *
// ystepdenominv);
fld %st(0) // t0 | t0 | t1 | ystepdenominv | xstepdenominv
fmul %st(6),%st(0) // t0*p11_minus_p21 | t0 | t1 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // t1 | t0 | t0*p11_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fld %st(0) // t1 | t1 | t0 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmuls p01_minus_p21 // t1*p01_minus_p21 | t1 | t0 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fxch %st(2) // t0 | t1 | t1*p01_minus_p21 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmuls p10_minus_p20 // t0*p10_minus_p20 | t1 | t1*p01_minus_p21 |
// t0*p11_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(1) // t1 | t0*p10_minus_p20 | t1*p01_minus_p21 |
// t0*p11_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmul %st(6),%st(0) // t1*p00_minus_p20 | t0*p10_minus_p20 |
// t1*p01_minus_p21 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fxch %st(2) // t1*p01_minus_p21 | t0*p10_minus_p20 |
// t1*p00_minus_p20 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubp %st(0),%st(3) // t0*p10_minus_p20 | t1*p00_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubrp %st(0),%st(1) // t1*p00_minus_p20 - t0*p10_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmul %st(2),%st(0) // (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fxch %st(1) // t1*p01_minus_p21 - t0*p11_minus_p21 |
// (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmul %st(3),%st(0) // (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv |
// (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(1) // (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv |
// (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fistpl C(r_sstepy) // r_sstepx | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fistpl C(r_sstepx) // ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
// t0 = r_p0[3] - r_p2[3];
// t1 = r_p1[3] - r_p2[3];
fildl C(r_p2)+12 // r_p2[3] | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fildl C(r_p0)+12 // r_p0[3] | r_p2[3] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fildl C(r_p1)+12 // r_p1[3] | r_p0[3] | r_p2[3] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // r_p2[3] | r_p0[3] | r_p1[3] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fld %st(0) // r_p2[3] | r_p2[3] | r_p0[3] | r_p1[3] |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubrp %st(0),%st(2) // r_p2[3] | t0 | r_p1[3] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fsubrp %st(0),%st(2) // t0 | t1 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
// r_tstepx = (int)((t1 * p01_minus_p21 - t0 * p11_minus_p21) *
// xstepdenominv);
// r_tstepy = (int)((t1 * p00_minus_p20 - t0 * p10_minus_p20) *
// ystepdenominv);
fld %st(0) // t0 | t0 | t1 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fmul %st(6),%st(0) // t0*p11_minus_p21 | t0 | t1 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // t1 | t0 | t0*p11_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fld %st(0) // t1 | t1 | t0 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmuls p01_minus_p21 // t1*p01_minus_p21 | t1 | t0 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fxch %st(2) // t0 | t1 | t1*p01_minus_p21 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmuls p10_minus_p20 // t0*p10_minus_p20 | t1 | t1*p01_minus_p21 |
// t0*p11_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(1) // t1 | t0*p10_minus_p20 | t1*p01_minus_p21 |
// t0*p11_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmul %st(6),%st(0) // t1*p00_minus_p20 | t0*p10_minus_p20 |
// t1*p01_minus_p21 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fxch %st(2) // t1*p01_minus_p21 | t0*p10_minus_p20 |
// t1*p00_minus_p20 | t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubp %st(0),%st(3) // t0*p10_minus_p20 | t1*p00_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubrp %st(0),%st(1) // t1*p00_minus_p20 - t0*p10_minus_p20 |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fmul %st(2),%st(0) // (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fxch %st(1) // t1*p01_minus_p21 - t0*p11_minus_p21 |
// (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fmul %st(3),%st(0) // (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv |
// (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(1) // (t1*p00_minus_p20 - t0*p10_minus_p20)*
// ystepdenominv |
// (t1*p01_minus_p21 - t0*p11_minus_p21)*
// xstepdenominv | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fistpl C(r_tstepy) // r_tstepx | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fistpl C(r_tstepx) // ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
// t0 = r_p0[5] - r_p2[5];
// t1 = r_p1[5] - r_p2[5];
fildl C(r_p2)+20 // r_p2[5] | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fildl C(r_p0)+20 // r_p0[5] | r_p2[5] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fildl C(r_p1)+20 // r_p1[5] | r_p0[5] | r_p2[5] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fxch %st(2) // r_p2[5] | r_p0[5] | r_p1[5] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fld %st(0) // r_p2[5] | r_p2[5] | r_p0[5] | r_p1[5] |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// p11_minus_p21
fsubrp %st(0),%st(2) // r_p2[5] | t0 | r_p1[5] | ystepdenominv |
// xstepdenominv | p00_minus_p20 | p11_minus_p21
fsubrp %st(0),%st(2) // t0 | t1 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
// r_zistepx = (int)((t1 * p01_minus_p21 - t0 * p11_minus_p21) *
// xstepdenominv);
// r_zistepy = (int)((t1 * p00_minus_p20 - t0 * p10_minus_p20) *
// ystepdenominv);
fld %st(0) // t0 | t0 | t1 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | p11_minus_p21
fmulp %st(0),%st(6) // t0 | t1 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | t0*p11_minus_p21
fxch %st(1) // t1 | t0 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | t0*p11_minus_p21
fld %st(0) // t1 | t1 | t0 | ystepdenominv | xstepdenominv |
// p00_minus_p20 | t0*p11_minus_p21
fmuls p01_minus_p21 // t1*p01_minus_p21 | t1 | t0 | ystepdenominv |
// xstepdenominv | p00_minus_p20 |
// t0*p11_minus_p21
fxch %st(2) // t0 | t1 | t1*p01_minus_p21 | ystepdenominv |
// xstepdenominv | p00_minus_p20 |
// t0*p11_minus_p21
fmuls p10_minus_p20 // t0*p10_minus_p20 | t1 | t1*p01_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// t0*p11_minus_p21
fxch %st(1) // t1 | t0*p10_minus_p20 | t1*p01_minus_p21 |
// ystepdenominv | xstepdenominv | p00_minus_p20 |
// t0*p11_minus_p21
fmulp %st(0),%st(5) // t0*p10_minus_p20 | t1*p01_minus_p21 |
// ystepdenominv | xstepdenominv |
// t1*p00_minus_p20 | t0*p11_minus_p21
fxch %st(5) // t0*p11_minus_p21 | t1*p01_minus_p21 |
// ystepdenominv | xstepdenominv |
// t1*p00_minus_p20 | t0*p10_minus_p20
fsubrp %st(0),%st(1) // t1*p01_minus_p21 - t0*p11_minus_p21 |
// ystepdenominv | xstepdenominv |
// t1*p00_minus_p20 | t0*p10_minus_p20
fxch %st(3) // t1*p00_minus_p20 | ystepdenominv |
// xstepdenominv |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// t0*p10_minus_p20
fsubp %st(0),%st(4) // ystepdenominv | xstepdenominv |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// t1*p00_minus_p20 - t0*p10_minus_p20
fxch %st(1) // xstepdenominv | ystepdenominv |
// t1*p01_minus_p21 - t0*p11_minus_p21 |
// t1*p00_minus_p20 - t0*p10_minus_p20
fmulp %st(0),%st(2) // ystepdenominv |
// (t1*p01_minus_p21 - t0*p11_minus_p21) *
// xstepdenominv |
// t1*p00_minus_p20 - t0*p10_minus_p20
fmulp %st(0),%st(2) // (t1*p01_minus_p21 - t0*p11_minus_p21) *
// xstepdenominv |
// (t1*p00_minus_p20 - t0*p10_minus_p20) *
// ystepdenominv
fistpl C(r_zistepx) // (t1*p00_minus_p20 - t0*p10_minus_p20) *
// ystepdenominv
fistpl C(r_zistepy)
// a_sstepxfrac = r_sstepx << 16;
// a_tstepxfrac = r_tstepx << 16;
//
// a_ststepxwhole = r_affinetridesc.skinwidth * (r_tstepx >> 16) +
// (r_sstepx >> 16);
movl C(r_sstepx),%eax
movl C(r_tstepx),%edx
shll $16,%eax
shll $16,%edx
movl %eax,C(a_sstepxfrac)
movl %edx,C(a_tstepxfrac)
movl C(r_sstepx),%ecx
movl C(r_tstepx),%eax
sarl $16,%ecx
sarl $16,%eax
imull skinwidth(%esp)
addl %ecx,%eax
movl %eax,C(a_ststepxwhole)
ret
//----------------------------------------------------------------------
// 8-bpp horizontal span drawing code for affine polygons, with smooth
// shading and no transparency
//----------------------------------------------------------------------
#define pspans 4+8
.globl C(D_PolysetAff8Start)
C(D_PolysetAff8Start):
.globl C(D_PolysetDrawSpans8)
C(D_PolysetDrawSpans8):
pushl %esi // preserve register variables
pushl %ebx
movl pspans(%esp),%esi // point to the first span descriptor
movl C(r_zistepx),%ecx
pushl %ebp // preserve caller's stack frame
pushl %edi
rorl $16,%ecx // put high 16 bits of 1/z step in low word
movl spanpackage_t_count(%esi),%edx
movl %ecx,lzistepx
LSpanLoop:
// lcount = d_aspancount - pspanpackage->count;
//
// errorterm += erroradjustup;
// if (errorterm >= 0)
// {
// d_aspancount += d_countextrastep;
// errorterm -= erroradjustdown;
// }
// else
// {
// d_aspancount += ubasestep;
// }
movl C(d_aspancount),%eax
subl %edx,%eax
movl C(erroradjustup),%edx
movl C(errorterm),%ebx
addl %edx,%ebx
js LNoTurnover
movl C(erroradjustdown),%edx
movl C(d_countextrastep),%edi
subl %edx,%ebx
movl C(d_aspancount),%ebp
movl %ebx,C(errorterm)
addl %edi,%ebp
movl %ebp,C(d_aspancount)
jmp LRightEdgeStepped
LNoTurnover:
movl C(d_aspancount),%edi
movl C(ubasestep),%edx
movl %ebx,C(errorterm)
addl %edx,%edi
movl %edi,C(d_aspancount)
LRightEdgeStepped:
cmpl $1,%eax
jl LNextSpan
jz LExactlyOneLong
//
// set up advancetable
//
movl C(a_ststepxwhole),%ecx
movl C(r_affinetridesc)+atd_skinwidth,%edx
movl %ecx,advancetable+4 // advance base in t
addl %edx,%ecx
movl %ecx,advancetable // advance extra in t
movl C(a_tstepxfrac),%ecx
movw C(r_lstepx),%cx
movl %eax,%edx // count
movl %ecx,tstep
addl $7,%edx
shrl $3,%edx // count of full and partial loops
movl spanpackage_t_sfrac(%esi),%ebx
movw %dx,%bx
movl spanpackage_t_pz(%esi),%ecx
negl %eax
movl spanpackage_t_pdest(%esi),%edi
andl $7,%eax // 0->0, 1->7, 2->6, ... , 7->1
subl %eax,%edi // compensate for hardwired offsets
subl %eax,%ecx
subl %eax,%ecx
movl spanpackage_t_tfrac(%esi),%edx
movw spanpackage_t_light(%esi),%dx
movl spanpackage_t_zi(%esi),%ebp
rorl $16,%ebp // put high 16 bits of 1/z in low word
pushl %esi
movl spanpackage_t_ptex(%esi),%esi
jmp aff8entryvec_table(,%eax,4)
// %bx = count of full and partial loops
// %ebx high word = sfrac
// %ecx = pz
// %dx = light
// %edx high word = tfrac
// %esi = ptex
// %edi = pdest
// %ebp = 1/z
// tstep low word = C(r_lstepx)
// tstep high word = C(a_tstepxfrac)
// C(a_sstepxfrac) low word = 0
// C(a_sstepxfrac) high word = C(a_sstepxfrac)
LDrawLoop:
// FIXME: do we need to clamp light? We may need at least a buffer bit to
// keep it from poking into tfrac and causing problems
LDraw8:
cmpw (%ecx),%bp
jl Lp1
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,(%ecx)
movb 0x12345678(%eax),%al
LPatch8:
movb %al,(%edi)
Lp1:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw7:
cmpw 2(%ecx),%bp
jl Lp2
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,2(%ecx)
movb 0x12345678(%eax),%al
LPatch7:
movb %al,1(%edi)
Lp2:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw6:
cmpw 4(%ecx),%bp
jl Lp3
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,4(%ecx)
movb 0x12345678(%eax),%al
LPatch6:
movb %al,2(%edi)
Lp3:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw5:
cmpw 6(%ecx),%bp
jl Lp4
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,6(%ecx)
movb 0x12345678(%eax),%al
LPatch5:
movb %al,3(%edi)
Lp4:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw4:
cmpw 8(%ecx),%bp
jl Lp5
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,8(%ecx)
movb 0x12345678(%eax),%al
LPatch4:
movb %al,4(%edi)
Lp5:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw3:
cmpw 10(%ecx),%bp
jl Lp6
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,10(%ecx)
movb 0x12345678(%eax),%al
LPatch3:
movb %al,5(%edi)
Lp6:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw2:
cmpw 12(%ecx),%bp
jl Lp7
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,12(%ecx)
movb 0x12345678(%eax),%al
LPatch2:
movb %al,6(%edi)
Lp7:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
LDraw1:
cmpw 14(%ecx),%bp
jl Lp8
xorl %eax,%eax
movb %dh,%ah
movb (%esi),%al
movw %bp,14(%ecx)
movb 0x12345678(%eax),%al
LPatch1:
movb %al,7(%edi)
Lp8:
addl tstep,%edx
sbbl %eax,%eax
addl lzistepx,%ebp
adcl $0,%ebp
addl C(a_sstepxfrac),%ebx
adcl advancetable+4(,%eax,4),%esi
addl $8,%edi
addl $16,%ecx
decw %bx
jnz LDrawLoop
popl %esi // restore spans pointer
LNextSpan:
addl $(spanpackage_t_size),%esi // point to next span
LNextSpanESISet:
movl spanpackage_t_count(%esi),%edx
cmpl $-999999,%edx // any more spans?
jnz LSpanLoop // yes
popl %edi
popl %ebp // restore the caller's stack frame
popl %ebx // restore register variables
popl %esi
ret
// draw a one-long span
LExactlyOneLong:
movl spanpackage_t_pz(%esi),%ecx
movl spanpackage_t_zi(%esi),%ebp
rorl $16,%ebp // put high 16 bits of 1/z in low word
movl spanpackage_t_ptex(%esi),%ebx
cmpw (%ecx),%bp
jl LNextSpan
xorl %eax,%eax
movl spanpackage_t_pdest(%esi),%edi
movb spanpackage_t_light+1(%esi),%ah
addl $(spanpackage_t_size),%esi // point to next span
movb (%ebx),%al
movw %bp,(%ecx)
movb 0x12345678(%eax),%al
LPatch9:
movb %al,(%edi)
jmp LNextSpanESISet
.globl C(D_PolysetAff8End)
C(D_PolysetAff8End):
#define pcolormap 4
.globl C(D_Aff8Patch)
C(D_Aff8Patch):
movl pcolormap(%esp),%eax
movl %eax,LPatch1-4
movl %eax,LPatch2-4
movl %eax,LPatch3-4
movl %eax,LPatch4-4
movl %eax,LPatch5-4
movl %eax,LPatch6-4
movl %eax,LPatch7-4
movl %eax,LPatch8-4
movl %eax,LPatch9-4
ret
//----------------------------------------------------------------------
// Alias model polygon dispatching code, combined with subdivided affine
// triangle drawing code
//----------------------------------------------------------------------
.globl C(D_PolysetDraw)
C(D_PolysetDraw):
// spanpackage_t spans[DPS_MAXSPANS + 1 +
// ((CACHE_SIZE - 1) / sizeof(spanpackage_t)) + 1];
// // one extra because of cache line pretouching
//
// a_spans = (spanpackage_t *)
// (((long)&spans[0] + CACHE_SIZE - 1) & ~(CACHE_SIZE - 1));
subl $(SPAN_SIZE),%esp
movl %esp,%eax
addl $(CACHE_SIZE - 1),%eax
andl $(~(CACHE_SIZE - 1)),%eax
movl %eax,C(a_spans)
// D_DrawNonSubdiv ();
jmp C(D_DrawNonSubdiv)
ret
//----------------------------------------------------------------------
// Alias model triangle left-edge scanning code
//----------------------------------------------------------------------
#define height 4+16
.globl C(D_PolysetScanLeftEdge)
C(D_PolysetScanLeftEdge):
pushl %ebp // preserve caller stack frame pointer
pushl %esi // preserve register variables
pushl %edi
pushl %ebx
movl height(%esp),%eax
movl C(d_sfrac),%ecx
andl $0xFFFF,%eax
movl C(d_ptex),%ebx
orl %eax,%ecx
movl C(d_pedgespanpackage),%esi
movl C(d_tfrac),%edx
movl C(d_light),%edi
movl C(d_zi),%ebp
// %eax: scratch
// %ebx: d_ptex
// %ecx: d_sfrac in high word, count in low word
// %edx: d_tfrac
// %esi: d_pedgespanpackage, errorterm, scratch alternately
// %edi: d_light
// %ebp: d_zi
// do
// {
LScanLoop:
// d_pedgespanpackage->ptex = ptex;
// d_pedgespanpackage->pdest = d_pdest;
// d_pedgespanpackage->pz = d_pz;
// d_pedgespanpackage->count = d_aspancount;
// d_pedgespanpackage->light = d_light;
// d_pedgespanpackage->zi = d_zi;
// d_pedgespanpackage->sfrac = d_sfrac << 16;
// d_pedgespanpackage->tfrac = d_tfrac << 16;
movl %ebx,spanpackage_t_ptex(%esi)
movl C(d_pdest),%eax
movl %eax,spanpackage_t_pdest(%esi)
movl C(d_pz),%eax
movl %eax,spanpackage_t_pz(%esi)
movl C(d_aspancount),%eax
movl %eax,spanpackage_t_count(%esi)
movl %edi,spanpackage_t_light(%esi)
movl %ebp,spanpackage_t_zi(%esi)
movl %ecx,spanpackage_t_sfrac(%esi)
movl %edx,spanpackage_t_tfrac(%esi)
// pretouch the next cache line
movb spanpackage_t_size(%esi),%al
// d_pedgespanpackage++;
addl $(spanpackage_t_size),%esi
movl C(erroradjustup),%eax
movl %esi,C(d_pedgespanpackage)
// errorterm += erroradjustup;
movl C(errorterm),%esi
addl %eax,%esi
movl C(d_pdest),%eax
// if (errorterm >= 0)
// {
js LNoLeftEdgeTurnover
// errorterm -= erroradjustdown;
// d_pdest += d_pdestextrastep;
subl C(erroradjustdown),%esi
addl C(d_pdestextrastep),%eax
movl %esi,C(errorterm)
movl %eax,C(d_pdest)
// d_pz += d_pzextrastep;
// d_aspancount += d_countextrastep;
// d_ptex += d_ptexextrastep;
// d_sfrac += d_sfracextrastep;
// d_ptex += d_sfrac >> 16;
// d_sfrac &= 0xFFFF;
// d_tfrac += d_tfracextrastep;
movl C(d_pz),%eax
movl C(d_aspancount),%esi
addl C(d_pzextrastep),%eax
addl C(d_sfracextrastep),%ecx
adcl C(d_ptexextrastep),%ebx
addl C(d_countextrastep),%esi
movl %eax,C(d_pz)
movl C(d_tfracextrastep),%eax
movl %esi,C(d_aspancount)
addl %eax,%edx
// if (d_tfrac & 0x10000)
// {
jnc LSkip1
// d_ptex += r_affinetridesc.skinwidth;
// d_tfrac &= 0xFFFF;
addl C(r_affinetridesc)+atd_skinwidth,%ebx
// }
LSkip1:
// d_light += d_lightextrastep;
// d_zi += d_ziextrastep;
addl C(d_lightextrastep),%edi
addl C(d_ziextrastep),%ebp
// }
movl C(d_pedgespanpackage),%esi
decl %ecx
testl $0xFFFF,%ecx
jnz LScanLoop
popl %ebx
popl %edi
popl %esi
popl %ebp
ret
// else
// {
LNoLeftEdgeTurnover:
movl %esi,C(errorterm)
// d_pdest += d_pdestbasestep;
addl C(d_pdestbasestep),%eax
movl %eax,C(d_pdest)
// d_pz += d_pzbasestep;
// d_aspancount += ubasestep;
// d_ptex += d_ptexbasestep;
// d_sfrac += d_sfracbasestep;
// d_ptex += d_sfrac >> 16;
// d_sfrac &= 0xFFFF;
movl C(d_pz),%eax
movl C(d_aspancount),%esi
addl C(d_pzbasestep),%eax
addl C(d_sfracbasestep),%ecx
adcl C(d_ptexbasestep),%ebx
addl C(ubasestep),%esi
movl %eax,C(d_pz)
movl %esi,C(d_aspancount)
// d_tfrac += d_tfracbasestep;
movl C(d_tfracbasestep),%esi
addl %esi,%edx
// if (d_tfrac & 0x10000)
// {
jnc LSkip2
// d_ptex += r_affinetridesc.skinwidth;
// d_tfrac &= 0xFFFF;
addl C(r_affinetridesc)+atd_skinwidth,%ebx
// }
LSkip2:
// d_light += d_lightbasestep;
// d_zi += d_zibasestep;
addl C(d_lightbasestep),%edi
addl C(d_zibasestep),%ebp
// }
// } while (--height);
movl C(d_pedgespanpackage),%esi
decl %ecx
testl $0xFFFF,%ecx
jnz LScanLoop
popl %ebx
popl %edi
popl %esi
popl %ebp
ret
//----------------------------------------------------------------------
// Alias model non-subdivided polygon dispatching code
//
// not C-callable because of stack buffer cleanup
//----------------------------------------------------------------------
.globl C(D_DrawNonSubdiv)
C(D_DrawNonSubdiv):
pushl %ebp // preserve caller stack frame pointer
movl C(r_affinetridesc)+atd_numtriangles,%ebp
pushl %ebx
shll $(mtri_shift),%ebp
pushl %esi // preserve register variables
movl C(r_affinetridesc)+atd_ptriangles,%esi
pushl %edi
// mtriangle_t *ptri;
// finalvert_t *pfv, *index0, *index1, *index2;
// int i;
// int lnumtriangles;
// pfv = r_affinetridesc.pfinalverts;
// ptri = r_affinetridesc.ptriangles;
// lnumtriangles = r_affinetridesc.numtriangles;
LNDLoop:
// for (i=0 ; i<lnumtriangles ; i++, ptri++)
// {
// index0 = pfv + ptri->vertindex[0];
// index1 = pfv + ptri->vertindex[1];
// index2 = pfv + ptri->vertindex[2];
movl C(r_affinetridesc)+atd_pfinalverts,%edi
movl mtri_vertindex+0-mtri_size(%esi,%ebp,1),%ecx
shll $(fv_shift),%ecx
movl mtri_vertindex+4-mtri_size(%esi,%ebp,1),%edx
shll $(fv_shift),%edx
movl mtri_vertindex+8-mtri_size(%esi,%ebp,1),%ebx
shll $(fv_shift),%ebx
addl %edi,%ecx
addl %edi,%edx
addl %edi,%ebx
// d_xdenom = (index0->v[1]-index1->v[1]) *
// (index0->v[0]-index2->v[0]) -
// (index0->v[0]-index1->v[0])*(index0->v[1]-index2->v[1]);
movl fv_v+4(%ecx),%eax
movl fv_v+0(%ecx),%esi
subl fv_v+4(%edx),%eax
subl fv_v+0(%ebx),%esi
imull %esi,%eax
movl fv_v+0(%ecx),%esi
movl fv_v+4(%ecx),%edi
subl fv_v+0(%edx),%esi
subl fv_v+4(%ebx),%edi
imull %esi,%edi
subl %edi,%eax
// if (d_xdenom >= 0)
// {
// continue;
jns LNextTri
// }
movl %eax,C(d_xdenom)
fildl C(d_xdenom)
// r_p0[0] = index0->v[0]; // u
// r_p0[1] = index0->v[1]; // v
// r_p0[2] = index0->v[2]; // s
// r_p0[3] = index0->v[3]; // t
// r_p0[4] = index0->v[4]; // light
// r_p0[5] = index0->v[5]; // iz
movl fv_v+0(%ecx),%eax
movl fv_v+4(%ecx),%esi
movl %eax,C(r_p0)+0
movl %esi,C(r_p0)+4
movl fv_v+8(%ecx),%eax
movl fv_v+12(%ecx),%esi
movl %eax,C(r_p0)+8
movl %esi,C(r_p0)+12
movl fv_v+16(%ecx),%eax
movl fv_v+20(%ecx),%esi
movl %eax,C(r_p0)+16
movl %esi,C(r_p0)+20
fdivrs float_1
// r_p1[0] = index1->v[0];
// r_p1[1] = index1->v[1];
// r_p1[2] = index1->v[2];
// r_p1[3] = index1->v[3];
// r_p1[4] = index1->v[4];
// r_p1[5] = index1->v[5];
movl fv_v+0(%edx),%eax
movl fv_v+4(%edx),%esi
movl %eax,C(r_p1)+0
movl %esi,C(r_p1)+4
movl fv_v+8(%edx),%eax
movl fv_v+12(%edx),%esi
movl %eax,C(r_p1)+8
movl %esi,C(r_p1)+12
movl fv_v+16(%edx),%eax
movl fv_v+20(%edx),%esi
movl %eax,C(r_p1)+16
movl %esi,C(r_p1)+20
// r_p2[0] = index2->v[0];
// r_p2[1] = index2->v[1];
// r_p2[2] = index2->v[2];
// r_p2[3] = index2->v[3];
// r_p2[4] = index2->v[4];
// r_p2[5] = index2->v[5];
movl fv_v+0(%ebx),%eax
movl fv_v+4(%ebx),%esi
movl %eax,C(r_p2)+0
movl %esi,C(r_p2)+4
movl fv_v+8(%ebx),%eax
movl fv_v+12(%ebx),%esi
movl %eax,C(r_p2)+8
movl %esi,C(r_p2)+12
movl fv_v+16(%ebx),%eax
movl fv_v+20(%ebx),%esi
movl %eax,C(r_p2)+16
movl C(r_affinetridesc)+atd_ptriangles,%edi
movl %esi,C(r_p2)+20
movl mtri_facesfront-mtri_size(%edi,%ebp,1),%eax
// if (!ptri->facesfront)
// {
testl %eax,%eax
jnz LFacesFront
// if (index0->flags & ALIAS_ONSEAM)
// r_p0[2] += r_affinetridesc.seamfixupX16;
movl fv_flags(%ecx),%eax
movl fv_flags(%edx),%esi
movl fv_flags(%ebx),%edi
testl $(ALIAS_ONSEAM),%eax
movl C(r_affinetridesc)+atd_seamfixupX16,%eax
jz LOnseamDone0
addl %eax,C(r_p0)+8
LOnseamDone0:
// if (index1->flags & ALIAS_ONSEAM)
// r_p1[2] += r_affinetridesc.seamfixupX16;
testl $(ALIAS_ONSEAM),%esi
jz LOnseamDone1
addl %eax,C(r_p1)+8
LOnseamDone1:
// if (index2->flags & ALIAS_ONSEAM)
// r_p2[2] += r_affinetridesc.seamfixupX16;
testl $(ALIAS_ONSEAM),%edi
jz LOnseamDone2
addl %eax,C(r_p2)+8
LOnseamDone2:
// }
LFacesFront:
fstps C(d_xdenom)
// D_PolysetSetEdgeTable ();
// D_RasterizeAliasPolySmooth ();
call C(D_PolysetSetEdgeTable)
call C(D_RasterizeAliasPolySmooth)
LNextTri:
movl C(r_affinetridesc)+atd_ptriangles,%esi
subl $16,%ebp
jnz LNDLoop
// }
popl %edi
popl %esi
popl %ebx
popl %ebp
addl $(SPAN_SIZE),%esp
ret
#endif // id386
|
paritytech/polkaports | 13,915 | apps/quake/r_surf8.s | /*
Copyright (C) 1996-1997 Id Software, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// r_surf8.s
// x86 assembly-language 8 bpp surface block drawing code.
//
#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#if id386
.data
sb_v: .long 0
.text
.align 4
.globl C(R_Surf8Start)
C(R_Surf8Start):
//----------------------------------------------------------------------
// Surface block drawer for mip level 0
//----------------------------------------------------------------------
.align 4
.globl C(R_DrawSurfaceBlock8_mip0)
C(R_DrawSurfaceBlock8_mip0):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
// for (v=0 ; v<numvblocks ; v++)
// {
movl C(r_lightptr),%ebx
movl C(r_numvblocks),%eax
movl %eax,sb_v
movl C(prowdestbase),%edi
movl C(pbasesource),%esi
Lv_loop_mip0:
// lightleft = lightptr[0];
// lightright = lightptr[1];
// lightdelta = (lightleft - lightright) & 0xFFFFF;
movl (%ebx),%eax // lightleft
movl 4(%ebx),%edx // lightright
movl %eax,%ebp
movl C(r_lightwidth),%ecx
movl %edx,C(lightright)
subl %edx,%ebp
andl $0xFFFFF,%ebp
leal (%ebx,%ecx,4),%ebx
// lightptr += lightwidth;
movl %ebx,C(r_lightptr)
// lightleftstep = (lightptr[0] - lightleft) >> blockdivshift;
// lightrightstep = (lightptr[1] - lightright) >> blockdivshift;
// lightdeltastep = ((lightleftstep - lightrightstep) & 0xFFFFF) |
// 0xF0000000;
movl 4(%ebx),%ecx // lightptr[1]
movl (%ebx),%ebx // lightptr[0]
subl %eax,%ebx
subl %edx,%ecx
sarl $4,%ecx
orl $0xF0000000,%ebp
sarl $4,%ebx
movl %ecx,C(lightrightstep)
subl %ecx,%ebx
andl $0xFFFFF,%ebx
orl $0xF0000000,%ebx
subl %ecx,%ecx // high word must be 0 in loop for addressing
movl %ebx,C(lightdeltastep)
subl %ebx,%ebx // high word must be 0 in loop for addressing
Lblockloop8_mip0:
movl %ebp,C(lightdelta)
movb 14(%esi),%cl
sarl $4,%ebp
movb %dh,%bh
movb 15(%esi),%bl
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch0:
movb 13(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch1:
movb 12(%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch2:
movb 11(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch3:
movb 10(%esi),%cl
movl %eax,12(%edi)
movb %dh,%bh
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch4:
movb 9(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch5:
movb 8(%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch6:
movb 7(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch7:
movb 6(%esi),%cl
movl %eax,8(%edi)
movb %dh,%bh
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch8:
movb 5(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch9:
movb 4(%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch10:
movb 3(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch11:
movb 2(%esi),%cl
movl %eax,4(%edi)
movb %dh,%bh
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch12:
movb 1(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch13:
movb (%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
movb 0x12345678(%ebx),%ah
LBPatch14:
movl C(lightright),%edx
movb 0x12345678(%ecx),%al
LBPatch15:
movl C(lightdelta),%ebp
movl %eax,(%edi)
addl C(sourcetstep),%esi
addl C(surfrowbytes),%edi
addl C(lightrightstep),%edx
addl C(lightdeltastep),%ebp
movl %edx,C(lightright)
jc Lblockloop8_mip0
// if (pbasesource >= r_sourcemax)
// pbasesource -= stepback;
cmpl C(r_sourcemax),%esi
jb LSkip_mip0
subl C(r_stepback),%esi
LSkip_mip0:
movl C(r_lightptr),%ebx
decl sb_v
jnz Lv_loop_mip0
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
//----------------------------------------------------------------------
// Surface block drawer for mip level 1
//----------------------------------------------------------------------
.align 4
.globl C(R_DrawSurfaceBlock8_mip1)
C(R_DrawSurfaceBlock8_mip1):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
// for (v=0 ; v<numvblocks ; v++)
// {
movl C(r_lightptr),%ebx
movl C(r_numvblocks),%eax
movl %eax,sb_v
movl C(prowdestbase),%edi
movl C(pbasesource),%esi
Lv_loop_mip1:
// lightleft = lightptr[0];
// lightright = lightptr[1];
// lightdelta = (lightleft - lightright) & 0xFFFFF;
movl (%ebx),%eax // lightleft
movl 4(%ebx),%edx // lightright
movl %eax,%ebp
movl C(r_lightwidth),%ecx
movl %edx,C(lightright)
subl %edx,%ebp
andl $0xFFFFF,%ebp
leal (%ebx,%ecx,4),%ebx
// lightptr += lightwidth;
movl %ebx,C(r_lightptr)
// lightleftstep = (lightptr[0] - lightleft) >> blockdivshift;
// lightrightstep = (lightptr[1] - lightright) >> blockdivshift;
// lightdeltastep = ((lightleftstep - lightrightstep) & 0xFFFFF) |
// 0xF0000000;
movl 4(%ebx),%ecx // lightptr[1]
movl (%ebx),%ebx // lightptr[0]
subl %eax,%ebx
subl %edx,%ecx
sarl $3,%ecx
orl $0x70000000,%ebp
sarl $3,%ebx
movl %ecx,C(lightrightstep)
subl %ecx,%ebx
andl $0xFFFFF,%ebx
orl $0xF0000000,%ebx
subl %ecx,%ecx // high word must be 0 in loop for addressing
movl %ebx,C(lightdeltastep)
subl %ebx,%ebx // high word must be 0 in loop for addressing
Lblockloop8_mip1:
movl %ebp,C(lightdelta)
movb 6(%esi),%cl
sarl $3,%ebp
movb %dh,%bh
movb 7(%esi),%bl
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch22:
movb 5(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch23:
movb 4(%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch24:
movb 3(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch25:
movb 2(%esi),%cl
movl %eax,4(%edi)
movb %dh,%bh
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch26:
movb 1(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch27:
movb (%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
movb 0x12345678(%ebx),%ah
LBPatch28:
movl C(lightright),%edx
movb 0x12345678(%ecx),%al
LBPatch29:
movl C(lightdelta),%ebp
movl %eax,(%edi)
movl C(sourcetstep),%eax
addl %eax,%esi
movl C(surfrowbytes),%eax
addl %eax,%edi
movl C(lightrightstep),%eax
addl %eax,%edx
movl C(lightdeltastep),%eax
addl %eax,%ebp
movl %edx,C(lightright)
jc Lblockloop8_mip1
// if (pbasesource >= r_sourcemax)
// pbasesource -= stepback;
cmpl C(r_sourcemax),%esi
jb LSkip_mip1
subl C(r_stepback),%esi
LSkip_mip1:
movl C(r_lightptr),%ebx
decl sb_v
jnz Lv_loop_mip1
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
//----------------------------------------------------------------------
// Surface block drawer for mip level 2
//----------------------------------------------------------------------
.align 4
.globl C(R_DrawSurfaceBlock8_mip2)
C(R_DrawSurfaceBlock8_mip2):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
// for (v=0 ; v<numvblocks ; v++)
// {
movl C(r_lightptr),%ebx
movl C(r_numvblocks),%eax
movl %eax,sb_v
movl C(prowdestbase),%edi
movl C(pbasesource),%esi
Lv_loop_mip2:
// lightleft = lightptr[0];
// lightright = lightptr[1];
// lightdelta = (lightleft - lightright) & 0xFFFFF;
movl (%ebx),%eax // lightleft
movl 4(%ebx),%edx // lightright
movl %eax,%ebp
movl C(r_lightwidth),%ecx
movl %edx,C(lightright)
subl %edx,%ebp
andl $0xFFFFF,%ebp
leal (%ebx,%ecx,4),%ebx
// lightptr += lightwidth;
movl %ebx,C(r_lightptr)
// lightleftstep = (lightptr[0] - lightleft) >> blockdivshift;
// lightrightstep = (lightptr[1] - lightright) >> blockdivshift;
// lightdeltastep = ((lightleftstep - lightrightstep) & 0xFFFFF) |
// 0xF0000000;
movl 4(%ebx),%ecx // lightptr[1]
movl (%ebx),%ebx // lightptr[0]
subl %eax,%ebx
subl %edx,%ecx
sarl $2,%ecx
orl $0x30000000,%ebp
sarl $2,%ebx
movl %ecx,C(lightrightstep)
subl %ecx,%ebx
andl $0xFFFFF,%ebx
orl $0xF0000000,%ebx
subl %ecx,%ecx // high word must be 0 in loop for addressing
movl %ebx,C(lightdeltastep)
subl %ebx,%ebx // high word must be 0 in loop for addressing
Lblockloop8_mip2:
movl %ebp,C(lightdelta)
movb 2(%esi),%cl
sarl $2,%ebp
movb %dh,%bh
movb 3(%esi),%bl
addl %ebp,%edx
movb %dh,%ch
addl %ebp,%edx
movb 0x12345678(%ebx),%ah
LBPatch18:
movb 1(%esi),%bl
movb 0x12345678(%ecx),%al
LBPatch19:
movb (%esi),%cl
movb %dh,%bh
addl %ebp,%edx
rorl $16,%eax
movb %dh,%ch
movb 0x12345678(%ebx),%ah
LBPatch20:
movl C(lightright),%edx
movb 0x12345678(%ecx),%al
LBPatch21:
movl C(lightdelta),%ebp
movl %eax,(%edi)
movl C(sourcetstep),%eax
addl %eax,%esi
movl C(surfrowbytes),%eax
addl %eax,%edi
movl C(lightrightstep),%eax
addl %eax,%edx
movl C(lightdeltastep),%eax
addl %eax,%ebp
movl %edx,C(lightright)
jc Lblockloop8_mip2
// if (pbasesource >= r_sourcemax)
// pbasesource -= stepback;
cmpl C(r_sourcemax),%esi
jb LSkip_mip2
subl C(r_stepback),%esi
LSkip_mip2:
movl C(r_lightptr),%ebx
decl sb_v
jnz Lv_loop_mip2
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
//----------------------------------------------------------------------
// Surface block drawer for mip level 3
//----------------------------------------------------------------------
.align 4
.globl C(R_DrawSurfaceBlock8_mip3)
C(R_DrawSurfaceBlock8_mip3):
pushl %ebp // preserve caller's stack frame
pushl %edi
pushl %esi // preserve register variables
pushl %ebx
// for (v=0 ; v<numvblocks ; v++)
// {
movl C(r_lightptr),%ebx
movl C(r_numvblocks),%eax
movl %eax,sb_v
movl C(prowdestbase),%edi
movl C(pbasesource),%esi
Lv_loop_mip3:
// lightleft = lightptr[0];
// lightright = lightptr[1];
// lightdelta = (lightleft - lightright) & 0xFFFFF;
movl (%ebx),%eax // lightleft
movl 4(%ebx),%edx // lightright
movl %eax,%ebp
movl C(r_lightwidth),%ecx
movl %edx,C(lightright)
subl %edx,%ebp
andl $0xFFFFF,%ebp
leal (%ebx,%ecx,4),%ebx
movl %ebp,C(lightdelta)
// lightptr += lightwidth;
movl %ebx,C(r_lightptr)
// lightleftstep = (lightptr[0] - lightleft) >> blockdivshift;
// lightrightstep = (lightptr[1] - lightright) >> blockdivshift;
// lightdeltastep = ((lightleftstep - lightrightstep) & 0xFFFFF) |
// 0xF0000000;
movl 4(%ebx),%ecx // lightptr[1]
movl (%ebx),%ebx // lightptr[0]
subl %eax,%ebx
subl %edx,%ecx
sarl $1,%ecx
sarl $1,%ebx
movl %ecx,C(lightrightstep)
subl %ecx,%ebx
andl $0xFFFFF,%ebx
sarl $1,%ebp
orl $0xF0000000,%ebx
movl %ebx,C(lightdeltastep)
subl %ebx,%ebx // high word must be 0 in loop for addressing
movb 1(%esi),%bl
subl %ecx,%ecx // high word must be 0 in loop for addressing
movb %dh,%bh
movb (%esi),%cl
addl %ebp,%edx
movb %dh,%ch
movb 0x12345678(%ebx),%al
LBPatch16:
movl C(lightright),%edx
movb %al,1(%edi)
movb 0x12345678(%ecx),%al
LBPatch17:
movb %al,(%edi)
movl C(sourcetstep),%eax
addl %eax,%esi
movl C(surfrowbytes),%eax
addl %eax,%edi
movl C(lightdeltastep),%eax
movl C(lightdelta),%ebp
movb (%esi),%cl
addl %eax,%ebp
movl C(lightrightstep),%eax
sarl $1,%ebp
addl %eax,%edx
movb %dh,%bh
movb 1(%esi),%bl
addl %ebp,%edx
movb %dh,%ch
movb 0x12345678(%ebx),%al
LBPatch30:
movl C(sourcetstep),%edx
movb %al,1(%edi)
movb 0x12345678(%ecx),%al
LBPatch31:
movb %al,(%edi)
movl C(surfrowbytes),%ebp
addl %edx,%esi
addl %ebp,%edi
// if (pbasesource >= r_sourcemax)
// pbasesource -= stepback;
cmpl C(r_sourcemax),%esi
jb LSkip_mip3
subl C(r_stepback),%esi
LSkip_mip3:
movl C(r_lightptr),%ebx
decl sb_v
jnz Lv_loop_mip3
popl %ebx // restore register variables
popl %esi
popl %edi
popl %ebp // restore the caller's stack frame
ret
.globl C(R_Surf8End)
C(R_Surf8End):
//----------------------------------------------------------------------
// Code patching routines
//----------------------------------------------------------------------
.data
.align 4
LPatchTable8:
.long LBPatch0-4
.long LBPatch1-4
.long LBPatch2-4
.long LBPatch3-4
.long LBPatch4-4
.long LBPatch5-4
.long LBPatch6-4
.long LBPatch7-4
.long LBPatch8-4
.long LBPatch9-4
.long LBPatch10-4
.long LBPatch11-4
.long LBPatch12-4
.long LBPatch13-4
.long LBPatch14-4
.long LBPatch15-4
.long LBPatch16-4
.long LBPatch17-4
.long LBPatch18-4
.long LBPatch19-4
.long LBPatch20-4
.long LBPatch21-4
.long LBPatch22-4
.long LBPatch23-4
.long LBPatch24-4
.long LBPatch25-4
.long LBPatch26-4
.long LBPatch27-4
.long LBPatch28-4
.long LBPatch29-4
.long LBPatch30-4
.long LBPatch31-4
.text
.align 4
.globl C(R_Surf8Patch)
C(R_Surf8Patch):
pushl %ebx
movl C(colormap),%eax
movl $LPatchTable8,%ebx
movl $32,%ecx
LPatchLoop8:
movl (%ebx),%edx
addl $4,%ebx
movl %eax,(%edx)
decl %ecx
jnz LPatchLoop8
popl %ebx
ret
#endif // id386
|
ParrotSec/car-hacking-tools | 3,032 | CANBus-Triple/avr/bootloaders/caterina/LUFA-111009/LUFA/Platform/UC3/Exception.S | /*
LUFA Library
Copyright (C) Dean Camera, 2011.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2011 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaim all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
#include <avr32/io.h>
.section .exception_handlers, "ax", @progbits
// ================= EXCEPTION TABLE ================
.balign 0x200
.global EVBA_Table
EVBA_Table:
.org 0x000
Exception_Unrecoverable_Exception:
rjmp $
.org 0x004
Exception_TLB_Multiple_Hit:
rjmp $
.org 0x008
Exception_Bus_Error_Data_Fetch:
rjmp $
.org 0x00C
Exception_Bus_Error_Instruction_Fetch:
rjmp $
.org 0x010
Exception_NMI:
rjmp $
.org 0x014
Exception_Instruction_Address:
rjmp $
.org 0x018
Exception_ITLB_Protection:
rjmp $
.org 0x01C
Exception_OCD_Breakpoint:
rjmp $
.org 0x020
Exception_Illegal_Opcode:
rjmp $
.org 0x024
Exception_Unimplemented_Instruction:
rjmp $
.org 0x028
Exception_Privilege_Violation:
rjmp $
.org 0x02C
Exception_Floating_Point:
rjmp $
.org 0x030
Exception_Coprocessor_Absent:
rjmp $
.org 0x034
Exception_Data_Address_Read:
rjmp $
.org 0x038
Exception_Data_Address_Write:
rjmp $
.org 0x03C
Exception_DTLB_Protection_Read:
rjmp $
.org 0x040
Exception_DTLB_Protection_Write:
rjmp $
.org 0x044
Exception_DTLB_Modified:
rjmp $
.org 0x050
Exception_ITLB_Miss:
rjmp $
.org 0x060
Exception_DTLB_Miss_Read:
rjmp $
.org 0x070
Exception_DTLB_Miss_Write:
rjmp $
.org 0x100
Exception_Supervisor_Call:
rjmp $
// ============== END OF EXCEPTION TABLE =============
// ============= GENERAL INTERRUPT HANDLER ===========
.balign 4
.irp Level, 0, 1, 2, 3
Exception_INT\Level:
mov r12, \Level
call INTC_GetInterruptHandler
mov pc, r12
.endr
// ========= END OF GENERAL INTERRUPT HANDLER ========
// ====== GENERAL INTERRUPT HANDLER OFFSET TABLE ======
.balign 4
.global Autovector_Table
Autovector_Table:
.irp Level, 0, 1, 2, 3
.word ((AVR32_INTC_INT0 + \Level) << AVR32_INTC_IPR_INTLEVEL_OFFSET) | (Exception_INT\Level - EVBA_Table)
.endr
// === END OF GENERAL INTERRUPT HANDLER OFFSET TABLE ===
|
ParrotSec/car-hacking-tools | 6,022 | CANBus-Triple/avr/cores/arduino/wiring_pulse.S | /*
wiring_pulse.s - pulseInASM() function in different flavours
Part of Arduino - http://www.arduino.cc/
Copyright (c) 2014 Martino Facchin
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with this library; if not, write to the
Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
*/
/*
* The following routine was generated by avr-gcc 4.8.3 with the following parameters
* -gstabs -Wa,-ahlmsd=output.lst -dp -fverbose-asm -O2
* on the original C function
*
* unsigned long pulseInSimpl(volatile uint8_t *port, uint8_t bit, uint8_t stateMask, unsigned long maxloops)
* {
* unsigned long width = 0;
* // wait for any previous pulse to end
* while ((*port & bit) == stateMask)
* if (--maxloops == 0)
* return 0;
*
* // wait for the pulse to start
* while ((*port & bit) != stateMask)
* if (--maxloops == 0)
* return 0;
*
* // wait for the pulse to stop
* while ((*port & bit) == stateMask) {
* if (++width == maxloops)
* return 0;
* }
* return width;
* }
*
* some compiler outputs were removed but the rest of the code is untouched
*/
#include <avr/io.h>
.section .text
.global countPulseASM
countPulseASM:
.LM0:
.LFBB1:
push r12 ; ; 130 pushqi1/1 [length = 1]
push r13 ; ; 131 pushqi1/1 [length = 1]
push r14 ; ; 132 pushqi1/1 [length = 1]
push r15 ; ; 133 pushqi1/1 [length = 1]
push r16 ; ; 134 pushqi1/1 [length = 1]
push r17 ; ; 135 pushqi1/1 [length = 1]
/* prologue: function */
/* frame size = 0 */
/* stack size = 6 */
.L__stack_usage = 6
mov r30,r24 ; port, port ; 2 *movhi/1 [length = 2]
mov r31,r25 ; port, port
/* unsigned long width = 0;
*** // wait for any previous pulse to end
*** while ((*port & bit) == stateMask)
*/
.LM1:
rjmp .L2 ; ; 181 jump [length = 1]
.L4:
/* if (--maxloops == 0) */
.LM2:
subi r16,1 ; maxloops, ; 17 addsi3/2 [length = 4]
sbc r17, r1 ; maxloops
sbc r18, r1 ; maxloops
sbc r19, r1 ; maxloops
breq .L13 ; , ; 19 branch [length = 1]
.L2:
/* if (--maxloops == 0) */
.LM3:
ld r25,Z ; D.1554, *port_7(D) ; 22 movqi_insn/4 [length = 1]
and r25,r22 ; D.1554, bit ; 24 andqi3/1 [length = 1]
cp r25,r20 ; D.1554, stateMask ; 25 *cmpqi/2 [length = 1]
breq .L4 ; , ; 26 branch [length = 1]
rjmp .L6 ; ; 184 jump [length = 1]
.L7:
/* return 0;
***
*** // wait for the pulse to start
*** while ((*port & bit) != stateMask)
*** if (--maxloops == 0)
*/
.LM4:
subi r16,1 ; maxloops, ; 31 addsi3/2 [length = 4]
sbc r17, r1 ; maxloops
sbc r18, r1 ; maxloops
sbc r19, r1 ; maxloops
breq .L13 ; , ; 33 branch [length = 1]
.L6:
/* if (--maxloops == 0) */
.LM5:
ld r25,Z ; D.1554, *port_7(D) ; 41 movqi_insn/4 [length = 1]
and r25,r22 ; D.1554, bit ; 43 andqi3/1 [length = 1]
cpse r25,r20 ; D.1554, stateMask ; 44 enable_interrupt-3 [length = 1]
rjmp .L7 ;
mov r12, r1 ; width ; 7 *movsi/2 [length = 4]
mov r13, r1 ; width
mov r14, r1 ; width
mov r15, r1 ; width
rjmp .L9 ; ; 186 jump [length = 1]
.L10:
/* return 0;
***
*** // wait for the pulse to stop
*** while ((*port & bit) == stateMask) {
*** if (++width == maxloops)
*/
.LM6:
ldi r24,-1 ; , ; 50 addsi3/3 [length = 5]
sub r12,r24 ; width,
sbc r13,r24 ; width,
sbc r14,r24 ; width,
sbc r15,r24 ; width,
cp r16,r12 ; maxloops, width ; 51 *cmpsi/2 [length = 4]
cpc r17,r13 ; maxloops, width
cpc r18,r14 ; maxloops, width
cpc r19,r15 ; maxloops, width
breq .L13 ; , ; 52 branch [length = 1]
.L9:
/* if (++width == maxloops) */
.LM7:
ld r24,Z ; D.1554, *port_7(D) ; 60 movqi_insn/4 [length = 1]
and r24,r22 ; D.1554, bit ; 62 andqi3/1 [length = 1]
cp r24,r20 ; D.1554, stateMask ; 63 *cmpqi/2 [length = 1]
breq .L10 ; , ; 64 branch [length = 1]
/* return 0;
*** }
*** return width;
*/
.LM8:
mov r22,r12 ; D.1553, width ; 108 movqi_insn/1 [length = 1]
mov r23,r13 ; D.1553, width ; 109 movqi_insn/1 [length = 1]
mov r24,r14 ; D.1553, width ; 110 movqi_insn/1 [length = 1]
mov r25,r15 ; D.1553, width ; 111 movqi_insn/1 [length = 1]
/* epilogue start */
.LM9:
pop r17 ; ; 171 popqi [length = 1]
pop r16 ; ; 172 popqi [length = 1]
pop r15 ; ; 173 popqi [length = 1]
pop r14 ; ; 174 popqi [length = 1]
pop r13 ; ; 175 popqi [length = 1]
pop r12 ; ; 176 popqi [length = 1]
ret ; 177 return_from_epilogue [length = 1]
.L13:
.LM10:
ldi r22,0 ; D.1553 ; 120 movqi_insn/1 [length = 1]
ldi r23,0 ; D.1553 ; 121 movqi_insn/1 [length = 1]
ldi r24,0 ; D.1553 ; 122 movqi_insn/1 [length = 1]
ldi r25,0 ; D.1553 ; 123 movqi_insn/1 [length = 1]
/* epilogue start */
.LM11:
pop r17 ; ; 138 popqi [length = 1]
pop r16 ; ; 139 popqi [length = 1]
pop r15 ; ; 140 popqi [length = 1]
pop r14 ; ; 141 popqi [length = 1]
pop r13 ; ; 142 popqi [length = 1]
pop r12 ; ; 143 popqi [length = 1]
ret ; 144 return_from_epilogue [length = 1]
|
patex-ecosystem/patex-chain | 7,684 | crypto/blake2b/blake2b_amd64.s | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v6, t1; \
PUNPCKLQDQ v6, t2; \
PUNPCKHQDQ v7, v6; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ v7, t2; \
MOVO t1, v7; \
MOVO v2, t1; \
PUNPCKHQDQ t2, v7; \
PUNPCKLQDQ v3, t2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v3
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v2, t1; \
PUNPCKLQDQ v2, t2; \
PUNPCKHQDQ v3, v2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ v3, t2; \
MOVO t1, v3; \
MOVO v6, t1; \
PUNPCKHQDQ t2, v3; \
PUNPCKLQDQ v7, t2; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v7
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
PADDQ m0, v0; \
PADDQ m1, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFD $0xB1, v6, v6; \
PSHUFD $0xB1, v7, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
PSHUFB c40, v2; \
PSHUFB c40, v3; \
PADDQ m2, v0; \
PADDQ m3, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFB c48, v6; \
PSHUFB c48, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
MOVOU v2, t0; \
PADDQ v2, t0; \
PSRLQ $63, v2; \
PXOR t0, v2; \
MOVOU v3, t0; \
PADDQ v3, t0; \
PSRLQ $63, v3; \
PXOR t0, v3
#define LOAD_MSG(m0, m1, m2, m3, i0, i1, i2, i3, i4, i5, i6, i7) \
MOVQ i0*8(SI), m0; \
PINSRQ $1, i1*8(SI), m0; \
MOVQ i2*8(SI), m1; \
PINSRQ $1, i3*8(SI), m1; \
MOVQ i4*8(SI), m2; \
PINSRQ $1, i5*8(SI), m2; \
MOVQ i6*8(SI), m3; \
PINSRQ $1, i7*8(SI), m3
// func fSSE4(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
TEXT ·fSSE4(SB), 4, $24-48 // frame size = 8 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
MOVQ SP, BP
MOVQ SP, R10
ADDQ $15, R10
ANDQ $~15, R10
MOVQ R10, SP
MOVOU ·iv3<>(SB), X0
MOVO X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)
MOVOU ·c40<>(SB), X13
MOVOU ·c48<>(SB), X14
MOVOU 0(AX), X12
MOVOU 16(AX), X15
MOVQ R8, X8
PINSRQ $1, R9, X8
MOVO X12, X0
MOVO X15, X1
MOVOU 32(AX), X2
MOVOU 48(AX), X3
MOVOU ·iv0<>(SB), X4
MOVOU ·iv1<>(SB), X5
MOVOU ·iv2<>(SB), X6
PXOR X8, X6
MOVO 0(SP), X7
loop:
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 0, 2, 4, 6, 1, 3, 5, 7)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 8, 10, 12, 14, 9, 11, 13, 15)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 14, 4, 9, 13, 10, 8, 15, 6)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 1, 0, 11, 5, 12, 2, 7, 3)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 11, 12, 5, 15, 8, 0, 2, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 2, 5, 4, 15, 6, 10, 0, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 9, 5, 2, 10, 0, 7, 4, 15)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 2, 6, 0, 8, 12, 10, 11, 3)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 0, 6, 9, 8, 7, 3, 2, 11)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 5, 15, 8, 2, 0, 4, 6, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 6, 14, 11, 0, 15, 9, 3, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 12, 13, 1, 10, 2, 7, 4, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 15, 9, 3, 13, 11, 14, 12, 0)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
JMP loop
done:
MOVOU 32(AX), X10
MOVOU 48(AX), X11
PXOR X0, X12
PXOR X1, X15
PXOR X2, X10
PXOR X3, X11
PXOR X4, X12
PXOR X5, X15
PXOR X6, X10
PXOR X7, X11
MOVOU X10, 32(AX)
MOVOU X11, 48(AX)
MOVOU X12, 0(AX)
MOVOU X15, 16(AX)
MOVQ BP, SP
RET
|
patex-ecosystem/patex-chain | 23,301 | crypto/blake2b/blake2bAVX2_amd64.s | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
VPADDQ m0, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m1, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y1_Y1; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y3_Y3; \
VPADDQ m2, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m3, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y3_Y3; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y1_Y1
#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y12, Y12
// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
VMOVQ_SI_X13(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X13(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y13, Y13
// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
VMOVQ_SI_X14(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X14(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y14, Y14
// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
VMOVQ_SI_X15(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X15(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X11(6*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
VMOVQ_SI_X11(11*8); \
VPSHUFD $0x4E, 0*8(SI), X14; \
VPINSRQ_1_SI_X11(5*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
VMOVQ_SI_X11(5*8); \
VMOVDQU 11*8(SI), X12; \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y12, Y12; \
VMOVQ_SI_X13(8*8); \
VMOVQ_SI_X11(2*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X11(13*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
VMOVQ_SI_X15(6*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X15(10*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X13(7*8); \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
VMOVQ_SI_X14_0; \
VPSHUFD $0x4E, 8*8(SI), X11; \
VPINSRQ_1_SI_X14(6*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
VMOVQ_SI_X15_0; \
VMOVQ_SI_X11(6*8); \
VPINSRQ_1_SI_X15(4*8); \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
VMOVQ_SI_X12(6*8); \
VMOVQ_SI_X11(11*8); \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
VMOVQ_SI_X11(1*8); \
VMOVDQU 12*8(SI), X14; \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y14, Y14; \
VMOVQ_SI_X15(2*8); \
VMOVDQU 4*8(SI), X11; \
VPINSRQ_1_SI_X15(7*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
VMOVQ_SI_X13(2*8); \
VPSHUFD $0x4E, 5*8(SI), X11; \
VPINSRQ_1_SI_X13(4*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
VMOVQ_SI_X15(11*8); \
VMOVQ_SI_X11(12*8); \
VPINSRQ_1_SI_X15(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y15, Y15
// func fAVX2(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
TEXT ·fAVX2(SB), 4, $64-48 // frame size = 32 + 32 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
MOVQ SP, DX
MOVQ SP, R10
ADDQ $31, R10
ANDQ $~31, R10
MOVQ R10, SP
MOVQ CX, 16(SP)
XORQ CX, CX
MOVQ CX, 24(SP)
VMOVDQU ·AVX2_c40<>(SB), Y4
VMOVDQU ·AVX2_c48<>(SB), Y5
VMOVDQU 0(AX), Y8
VMOVDQU 32(AX), Y9
VMOVDQU ·AVX2_iv0<>(SB), Y6
VMOVDQU ·AVX2_iv1<>(SB), Y7
MOVQ R8, 0(SP)
MOVQ R9, 8(SP)
VMOVDQA Y8, Y0
VMOVDQA Y9, Y1
VMOVDQA Y6, Y2
VPXOR 0(SP), Y7, Y3
loop:
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
JMP loop
done:
VPXOR Y0, Y8, Y8
VPXOR Y1, Y9, Y9
VPXOR Y2, Y8, Y8
VPXOR Y3, Y9, Y9
VMOVDQU Y8, 0(AX)
VMOVDQU Y9, 32(AX)
VZEROUPPER
MOVQ DX, SP
RET
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
#define SHUFFLE_AVX() \
VMOVDQA X6, X13; \
VMOVDQA X2, X14; \
VMOVDQA X4, X6; \
VPUNPCKLQDQ_X13_X13_X15; \
VMOVDQA X5, X4; \
VMOVDQA X6, X5; \
VPUNPCKHQDQ_X15_X7_X6; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X13_X7; \
VPUNPCKLQDQ_X3_X3_X15; \
VPUNPCKHQDQ_X15_X2_X2; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X3_X3; \
#define SHUFFLE_AVX_INV() \
VMOVDQA X2, X13; \
VMOVDQA X4, X14; \
VPUNPCKLQDQ_X2_X2_X15; \
VMOVDQA X5, X4; \
VPUNPCKHQDQ_X15_X3_X2; \
VMOVDQA X14, X5; \
VPUNPCKLQDQ_X3_X3_X15; \
VMOVDQA X6, X14; \
VPUNPCKHQDQ_X15_X13_X3; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X6_X6; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X7_X7; \
#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
VPADDQ m0, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m1, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFD $-79, v6, v6; \
VPSHUFD $-79, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPSHUFB c40, v2, v2; \
VPSHUFB c40, v3, v3; \
VPADDQ m2, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m3, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFB c48, v6, v6; \
VPSHUFB c48, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPADDQ v2, v2, t0; \
VPSRLQ $63, v2, v2; \
VPXOR t0, v2, v2; \
VPADDQ v3, v3, t0; \
VPSRLQ $63, v3, v3; \
VPXOR t0, v3, v3
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X13(i2*8); \
VMOVQ_SI_X14(i4*8); \
VMOVQ_SI_X15(i6*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X13(i3*8); \
VPINSRQ_1_SI_X14(i5*8); \
VPINSRQ_1_SI_X15(i7*8)
// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(1*8); \
VMOVQ_SI_X15(5*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X13(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(7*8)
// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
VPSHUFD $0x4E, 0*8(SI), X12; \
VMOVQ_SI_X13(11*8); \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(7*8); \
VPINSRQ_1_SI_X13(5*8); \
VPINSRQ_1_SI_X14(2*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
VMOVDQU 11*8(SI), X12; \
VMOVQ_SI_X13(5*8); \
VMOVQ_SI_X14(8*8); \
VMOVQ_SI_X15(2*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14_0; \
VPINSRQ_1_SI_X15(13*8)
// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(6*8); \
VMOVQ_SI_X15_0; \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
VMOVQ_SI_X12(9*8); \
VMOVQ_SI_X13(2*8); \
VMOVQ_SI_X14_0; \
VMOVQ_SI_X15(4*8); \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VPINSRQ_1_SI_X15(15*8)
// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(11*8); \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X13(8*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
MOVQ 0*8(SI), X12; \
VPSHUFD $0x4E, 8*8(SI), X13; \
MOVQ 7*8(SI), X14; \
MOVQ 2*8(SI), X15; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(11*8)
// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
MOVQ 6*8(SI), X12; \
MOVQ 11*8(SI), X13; \
MOVQ 15*8(SI), X14; \
MOVQ 3*8(SI), X15; \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X14(9*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
MOVQ 5*8(SI), X12; \
MOVQ 8*8(SI), X13; \
MOVQ 0*8(SI), X14; \
MOVQ 6*8(SI), X15; \
VPINSRQ_1_SI_X12(15*8); \
VPINSRQ_1_SI_X13(2*8); \
VPINSRQ_1_SI_X14(4*8); \
VPINSRQ_1_SI_X15(10*8)
// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
VMOVDQU 12*8(SI), X12; \
MOVQ 1*8(SI), X13; \
MOVQ 2*8(SI), X14; \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VMOVDQU 4*8(SI), X15
// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
MOVQ 15*8(SI), X12; \
MOVQ 3*8(SI), X13; \
MOVQ 11*8(SI), X14; \
MOVQ 12*8(SI), X15; \
VPINSRQ_1_SI_X12(9*8); \
VPINSRQ_1_SI_X13(13*8); \
VPINSRQ_1_SI_X14(14*8); \
VPINSRQ_1_SI_X15_0
// func fAVX(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
TEXT ·fAVX(SB), 4, $24-48 // frame size = 8 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
MOVQ SP, BP
MOVQ SP, R10
ADDQ $15, R10
ANDQ $~15, R10
MOVQ R10, SP
VMOVDQU ·AVX_c40<>(SB), X0
VMOVDQU ·AVX_c48<>(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9
VMOVDQU ·AVX_iv3<>(SB), X0
VMOVDQA X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
VMOVDQU 0(AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3
VMOVQ_R8_X15
VPINSRQ_1_R9_X15
VMOVDQA X10, X0
VMOVDQA X11, X1
VMOVDQU ·AVX_iv0<>(SB), X4
VMOVDQU ·AVX_iv1<>(SB), X5
VMOVDQU ·AVX_iv2<>(SB), X6
VPXOR X15, X6, X6
VMOVDQA 0(SP), X7
loop:
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
JMP loop
done:
VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
VPXOR X1, X11, X11
VPXOR X2, X14, X14
VPXOR X3, X15, X15
VPXOR X4, X10, X10
VPXOR X5, X11, X11
VPXOR X6, X14, X2
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)
VMOVDQU X10, 0(AX)
VMOVDQU X11, 16(AX)
VZEROUPPER
MOVQ BP, SP
RET
|
patex-ecosystem/patex-chain | 31,125 | crypto/bls12381/arithmetic_x86.s | // +build amd64,blsasm amd64,blsadx
#include "textflag.h"
// addition w/ modular reduction
// a = (a + b) % p
TEXT ·addAssign(SB), NOSPLIT, $0-16
// |
MOVQ a+0(FP), DI
MOVQ b+8(FP), SI
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
// |
ADDQ (SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ 32(SI), R12
ADCQ 40(SI), R13
// |
MOVQ R8, R14
MOVQ R9, R15
MOVQ R10, CX
MOVQ R11, DX
MOVQ R12, SI
MOVQ R13, BX
MOVQ $0xb9feffffffffaaab, AX
SUBQ AX, R14
MOVQ $0x1eabfffeb153ffff, AX
SBBQ AX, R15
MOVQ $0x6730d2a0f6b0f624, AX
SBBQ AX, CX
MOVQ $0x64774b84f38512bf, AX
SBBQ AX, DX
MOVQ $0x4b1ba7b6434bacd7, AX
SBBQ AX, SI
MOVQ $0x1a0111ea397fe69a, AX
SBBQ AX, BX
CMOVQCC R14, R8
CMOVQCC R15, R9
CMOVQCC CX, R10
CMOVQCC DX, R11
CMOVQCC SI, R12
CMOVQCC BX, R13
// |
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// addition w/ modular reduction
// c = (a + b) % p
TEXT ·add(SB), NOSPLIT, $0-24
// |
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
// |
ADDQ (SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ 32(SI), R12
ADCQ 40(SI), R13
// |
MOVQ R8, R14
MOVQ R9, R15
MOVQ R10, CX
MOVQ R11, DX
MOVQ R12, SI
MOVQ R13, BX
MOVQ $0xb9feffffffffaaab, DI
SUBQ DI, R14
MOVQ $0x1eabfffeb153ffff, DI
SBBQ DI, R15
MOVQ $0x6730d2a0f6b0f624, DI
SBBQ DI, CX
MOVQ $0x64774b84f38512bf, DI
SBBQ DI, DX
MOVQ $0x4b1ba7b6434bacd7, DI
SBBQ DI, SI
MOVQ $0x1a0111ea397fe69a, DI
SBBQ DI, BX
CMOVQCC R14, R8
CMOVQCC R15, R9
CMOVQCC CX, R10
CMOVQCC DX, R11
CMOVQCC SI, R12
CMOVQCC BX, R13
// |
MOVQ c+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// addition w/o reduction check
// c = (a + b)
TEXT ·ladd(SB), NOSPLIT, $0-24
// |
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
// |
ADDQ (SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ 32(SI), R12
ADCQ 40(SI), R13
// |
MOVQ c+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// addition w/o reduction check
// a = a + b
TEXT ·laddAssign(SB), NOSPLIT, $0-16
// |
MOVQ a+0(FP), DI
MOVQ b+8(FP), SI
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
// |
ADDQ (SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ 32(SI), R12
ADCQ 40(SI), R13
// |
MOVQ a+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// subtraction w/ modular reduction
// c = (a - b) % p
TEXT ·sub(SB), NOSPLIT, $0-24
// |
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
XORQ AX, AX
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
SUBQ (SI), R8
SBBQ 8(SI), R9
SBBQ 16(SI), R10
SBBQ 24(SI), R11
SBBQ 32(SI), R12
SBBQ 40(SI), R13
// |
MOVQ $0xb9feffffffffaaab, R14
MOVQ $0x1eabfffeb153ffff, R15
MOVQ $0x6730d2a0f6b0f624, CX
MOVQ $0x64774b84f38512bf, DX
MOVQ $0x4b1ba7b6434bacd7, SI
MOVQ $0x1a0111ea397fe69a, BX
CMOVQCC AX, R14
CMOVQCC AX, R15
CMOVQCC AX, CX
CMOVQCC AX, DX
CMOVQCC AX, SI
CMOVQCC AX, BX
ADDQ R14, R8
ADCQ R15, R9
ADCQ CX, R10
ADCQ DX, R11
ADCQ SI, R12
ADCQ BX, R13
// |
MOVQ c+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// subtraction w/ modular reduction
// a = (a - b) % p
TEXT ·subAssign(SB), NOSPLIT, $0-16
// |
MOVQ a+0(FP), DI
MOVQ b+8(FP), SI
XORQ AX, AX
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
SUBQ (SI), R8
SBBQ 8(SI), R9
SBBQ 16(SI), R10
SBBQ 24(SI), R11
SBBQ 32(SI), R12
SBBQ 40(SI), R13
// |
MOVQ $0xb9feffffffffaaab, R14
MOVQ $0x1eabfffeb153ffff, R15
MOVQ $0x6730d2a0f6b0f624, CX
MOVQ $0x64774b84f38512bf, DX
MOVQ $0x4b1ba7b6434bacd7, SI
MOVQ $0x1a0111ea397fe69a, BX
CMOVQCC AX, R14
CMOVQCC AX, R15
CMOVQCC AX, CX
CMOVQCC AX, DX
CMOVQCC AX, SI
CMOVQCC AX, BX
ADDQ R14, R8
ADCQ R15, R9
ADCQ CX, R10
ADCQ DX, R11
ADCQ SI, R12
ADCQ BX, R13
// |
MOVQ a+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// subtraction w/o reduction check
// a = (a - b)
TEXT ·lsubAssign(SB), NOSPLIT, $0-16
// |
MOVQ a+0(FP), DI
MOVQ b+8(FP), SI
// |
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
SUBQ (SI), R8
SBBQ 8(SI), R9
SBBQ 16(SI), R10
SBBQ 24(SI), R11
SBBQ 32(SI), R12
SBBQ 40(SI), R13
// |
MOVQ a+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// doubling w/ reduction
// c = (2 * a) % p
TEXT ·double(SB), NOSPLIT, $0-16
// |
MOVQ a+8(FP), DI
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
ADDQ R8, R8
ADCQ R9, R9
ADCQ R10, R10
ADCQ R11, R11
ADCQ R12, R12
ADCQ R13, R13
// |
MOVQ R8, R14
MOVQ R9, R15
MOVQ R10, CX
MOVQ R11, DX
MOVQ R12, SI
MOVQ R13, BX
MOVQ $0xb9feffffffffaaab, DI
SUBQ DI, R14
MOVQ $0x1eabfffeb153ffff, DI
SBBQ DI, R15
MOVQ $0x6730d2a0f6b0f624, DI
SBBQ DI, CX
MOVQ $0x64774b84f38512bf, DI
SBBQ DI, DX
MOVQ $0x4b1ba7b6434bacd7, DI
SBBQ DI, SI
MOVQ $0x1a0111ea397fe69a, DI
SBBQ DI, BX
CMOVQCC R14, R8
CMOVQCC R15, R9
CMOVQCC CX, R10
CMOVQCC DX, R11
CMOVQCC SI, R12
CMOVQCC BX, R13
// |
MOVQ c+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// doubling w/ reduction
// a = (2 * a) % p
TEXT ·doubleAssign(SB), NOSPLIT, $0-8
// |
MOVQ a+0(FP), DI
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
ADDQ R8, R8
ADCQ R9, R9
ADCQ R10, R10
ADCQ R11, R11
ADCQ R12, R12
ADCQ R13, R13
// |
MOVQ R8, R14
MOVQ R9, R15
MOVQ R10, CX
MOVQ R11, DX
MOVQ R12, SI
MOVQ R13, BX
MOVQ $0xb9feffffffffaaab, AX
SUBQ AX, R14
MOVQ $0x1eabfffeb153ffff, AX
SBBQ AX, R15
MOVQ $0x6730d2a0f6b0f624, AX
SBBQ AX, CX
MOVQ $0x64774b84f38512bf, AX
SBBQ AX, DX
MOVQ $0x4b1ba7b6434bacd7, AX
SBBQ AX, SI
MOVQ $0x1a0111ea397fe69a, AX
SBBQ AX, BX
CMOVQCC R14, R8
CMOVQCC R15, R9
CMOVQCC CX, R10
CMOVQCC DX, R11
CMOVQCC SI, R12
CMOVQCC BX, R13
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// doubling w/o reduction
// c = 2 * a
TEXT ·ldouble(SB), NOSPLIT, $0-16
// |
MOVQ a+8(FP), DI
MOVQ (DI), R8
MOVQ 8(DI), R9
MOVQ 16(DI), R10
MOVQ 24(DI), R11
MOVQ 32(DI), R12
MOVQ 40(DI), R13
// |
ADDQ R8, R8
ADCQ R9, R9
ADCQ R10, R10
ADCQ R11, R11
ADCQ R12, R12
ADCQ R13, R13
// |
MOVQ c+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
TEXT ·_neg(SB), NOSPLIT, $0-16
// |
MOVQ a+8(FP), DI
// |
MOVQ $0xb9feffffffffaaab, R8
MOVQ $0x1eabfffeb153ffff, R9
MOVQ $0x6730d2a0f6b0f624, R10
MOVQ $0x64774b84f38512bf, R11
MOVQ $0x4b1ba7b6434bacd7, R12
MOVQ $0x1a0111ea397fe69a, R13
SUBQ (DI), R8
SBBQ 8(DI), R9
SBBQ 16(DI), R10
SBBQ 24(DI), R11
SBBQ 32(DI), R12
SBBQ 40(DI), R13
// |
MOVQ c+0(FP), DI
MOVQ R8, (DI)
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
MOVQ R11, 24(DI)
MOVQ R12, 32(DI)
MOVQ R13, 40(DI)
RET
/* | end */
// multiplication without using MULX/ADX
// c = a * b % p
TEXT ·mulNoADX(SB), NOSPLIT, $24-24
// |
/* inputs */
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
MOVQ $0x00, R9
MOVQ $0x00, R10
MOVQ $0x00, R11
MOVQ $0x00, R12
MOVQ $0x00, R13
MOVQ $0x00, R14
MOVQ $0x00, R15
// |
/* i0 */
// | a0 @ CX
MOVQ (DI), CX
// | a0 * b0
MOVQ (SI), AX
MULQ CX
MOVQ AX, (SP)
MOVQ DX, R8
// | a0 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
// | a0 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, R10
// | a0 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
// | a0 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
// | a0 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
// |
/* i1 */
// | a1 @ CX
MOVQ 8(DI), CX
MOVQ $0x00, BX
// | a1 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
ADCQ $0x00, R10
ADCQ $0x00, BX
MOVQ R8, 8(SP)
MOVQ $0x00, R8
// | a1 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, R10
ADCQ BX, R11
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a1 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
ADCQ BX, R12
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a1 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ BX, R13
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a1 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
// | a1 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
// |
/* i2 */
// | a2 @ CX
MOVQ 16(DI), CX
MOVQ $0x00, BX
// | a2 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, R10
ADCQ $0x00, R11
ADCQ $0x00, BX
MOVQ R9, 16(SP)
MOVQ $0x00, R9
// | a2 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
ADCQ BX, R12
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a2 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ BX, R13
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a2 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a2 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
// | a2 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
// |
/* i3 */
// | a3 @ CX
MOVQ 24(DI), CX
MOVQ $0x00, BX
// | a3 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
ADCQ $0x00, R12
ADCQ $0x00, BX
// | a3 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ BX, R13
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a3 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a3 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a3 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
ADCQ BX, R8
// | a3 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R15
ADCQ DX, R8
// |
/* i4 */
// | a4 @ CX
MOVQ 32(DI), CX
MOVQ $0x00, BX
// | a4 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ $0x00, R13
ADCQ $0x00, BX
// | a4 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a4 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a4 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
ADCQ BX, R8
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a4 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R15
ADCQ DX, R8
ADCQ BX, R9
// | a4 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
// |
/* i5 */
// | a5 @ CX
MOVQ 40(DI), CX
MOVQ $0x00, BX
// | a5 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ $0x00, R14
ADCQ $0x00, BX
// | a5 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a5 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
ADCQ BX, R8
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a5 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R15
ADCQ DX, R8
ADCQ BX, R9
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a5 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
ADCQ $0x00, BX
// | a5 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, BX
// |
/* */
// |
// | W
// | 0 (SP) | 1 8(SP) | 2 16(SP) | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 R9 | 11 BX
MOVQ (SP), CX
MOVQ 8(SP), DI
MOVQ 16(SP), SI
MOVQ BX, (SP)
MOVQ R9, 8(SP)
// |
/* montgomery reduction */
// |
/* i0 */
// |
// | W
// | 0 CX | 1 DI | 2 SI | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u0 = w0 * inp
MOVQ CX, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w0 @ CX
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, CX
ADCQ DX, BX
// | j1
// | w1 @ DI
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, DI
ADCQ $0x00, DX
ADDQ BX, DI
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w2 @ SI
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, SI
ADCQ $0x00, DX
ADDQ BX, SI
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w3 @ R10
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ $0x00, DX
ADDQ BX, R10
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w4 @ R11
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w5 @ R12
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
// | w6 @ R13
ADCQ DX, R13
ADCQ $0x00, CX
// |
/* i1 */
// |
// | W
// | 0 - | 1 DI | 2 SI | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u1 = w1 * inp
MOVQ DI, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w1 @ DI
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, DI
ADCQ DX, BX
// | j1
// | w2 @ SI
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, SI
ADCQ $0x00, DX
ADDQ BX, SI
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w3 @ R10
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ $0x00, DX
ADDQ BX, R10
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w4 @ R11
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w5 @ R12
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w6 @ R13
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ DX, CX
ADDQ BX, R13
// | w7 @ R14
ADCQ CX, R14
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i2 */
// |
// | W
// | 0 - | 1 - | 2 SI | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u2 = w2 * inp
MOVQ SI, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w2 @ SI
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, SI
ADCQ DX, BX
// | j1
// | w3 @ R10
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ $0x00, DX
ADDQ BX, R10
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w4 @ R11
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w5 @ R12
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w6 @ R13
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w7 @ R14
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ DX, CX
ADDQ BX, R14
// | w8 @ R15
ADCQ CX, R15
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i3 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u3 = w3 * inp
MOVQ R10, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w3 @ R10
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ DX, BX
// | j1
// | w4 @ R11
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w5 @ R12
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w6 @ R13
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w7 @ R14
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ $0x00, DX
ADDQ BX, R14
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w8 @ R15
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R15
ADCQ DX, CX
ADDQ BX, R15
// | w9 @ R8
ADCQ CX, R8
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i4 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u4 = w4 * inp
MOVQ R11, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w4 @ R11
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ DX, BX
// | j1
// | w5 @ R12
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w6 @ R13
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w7 @ R14
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ $0x00, DX
ADDQ BX, R14
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w8 @ R15
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R15
ADCQ $0x00, DX
ADDQ BX, R15
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w9 @ R8
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R8
ADCQ DX, CX
ADDQ BX, R8
// | move to idle register
MOVQ 8(SP), DI
// | w10 @ DI
ADCQ CX, DI
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i5 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 DI | 11 (SP)
// | | u5 = w5 * inp
MOVQ R12, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w5 @ R12
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ DX, BX
// | j1
// | w6 @ R13
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w7 @ R14
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ $0x00, DX
ADDQ BX, R14
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w8 @ R15
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R15
ADCQ $0x00, DX
ADDQ BX, R15
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w9 @ R8
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R8
ADCQ $0x00, DX
ADDQ BX, R8
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w10 @ DI
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, DI
ADCQ DX, CX
ADDQ BX, DI
// | w11 @ CX
ADCQ (SP), CX
// |
// | W montgomerry reduction ends
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 -
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 DI | 11 CX
// |
/* modular reduction */
MOVQ R13, R10
SUBQ ·modulus+0(SB), R10
MOVQ R14, R11
SBBQ ·modulus+8(SB), R11
MOVQ R15, R12
SBBQ ·modulus+16(SB), R12
MOVQ R8, AX
SBBQ ·modulus+24(SB), AX
MOVQ DI, BX
SBBQ ·modulus+32(SB), BX
MOVQ CX, R9
SBBQ ·modulus+40(SB), R9
// |
/* out */
MOVQ c+0(FP), SI
CMOVQCC R10, R13
MOVQ R13, (SI)
CMOVQCC R11, R14
MOVQ R14, 8(SI)
CMOVQCC R12, R15
MOVQ R15, 16(SI)
CMOVQCC AX, R8
MOVQ R8, 24(SI)
CMOVQCC BX, DI
MOVQ DI, 32(SI)
CMOVQCC R9, CX
MOVQ CX, 40(SI)
RET
// |
/* end */
// multiplication
// c = a * b % p
TEXT ·mulADX(SB), NOSPLIT, $16-24
// |
/* inputs */
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
XORQ AX, AX
// |
/* i0 */
// | a0 @ DX
MOVQ (DI), DX
// | a0 * b0
MULXQ (SI), AX, CX
MOVQ AX, (SP)
// | a0 * b1
MULXQ 8(SI), AX, R8
ADCXQ AX, CX
// | a0 * b2
MULXQ 16(SI), AX, R9
ADCXQ AX, R8
// | a0 * b3
MULXQ 24(SI), AX, R10
ADCXQ AX, R9
// | a0 * b4
MULXQ 32(SI), AX, R11
ADCXQ AX, R10
// | a0 * b5
MULXQ 40(SI), AX, R12
ADCXQ AX, R11
ADCQ $0x00, R12
// |
/* i1 */
// | a1 @ DX
MOVQ 8(DI), DX
XORQ R13, R13
// | a1 * b0
MULXQ (SI), AX, BX
ADOXQ AX, CX
ADCXQ BX, R8
MOVQ CX, 8(SP)
// | a1 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R8
ADCXQ BX, R9
// | a1 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R9
ADCXQ BX, R10
// | a1 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a1 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a1 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R12
ADOXQ R13, R13
ADCXQ BX, R13
// |
/* i2 */
// | a2 @ DX
MOVQ 16(DI), DX
XORQ R14, R14
// | a2 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R8
ADCXQ BX, R9
// | a2 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R9
ADCXQ BX, R10
// | a2 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a2 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a2 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a2 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R13
ADOXQ R14, R14
ADCXQ BX, R14
// |
/* i3 */
// | a3 @ DX
MOVQ 24(DI), DX
XORQ R15, R15
// | a3 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R9
ADCXQ BX, R10
// | a3 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a3 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a3 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a3 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R13
ADCXQ BX, R14
// | a3 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R14
ADOXQ R15, R15
ADCXQ BX, R15
// |
/* i4 */
// | a4 @ DX
MOVQ 32(DI), DX
XORQ CX, CX
// | a4 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a4 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a4 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a4 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R13
ADCXQ BX, R14
// | a4 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R14
ADCXQ BX, R15
// | a4 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R15
ADOXQ CX, CX
ADCXQ BX, CX
// |
/* i5 */
// | a5 @ DX
MOVQ 40(DI), DX
XORQ DI, DI
// | a5 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a5 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a5 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R13
ADCXQ BX, R14
// | a5 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R14
ADCXQ BX, R15
// | a5 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R15
ADCXQ BX, CX
// | a5 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, CX
ADOXQ BX, DI
ADCQ $0x00, DI
// |
/* */
// |
// | W
// | 0 (SP) | 1 8(SP) | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 DI
MOVQ (SP), BX
MOVQ 8(SP), SI
MOVQ DI, (SP)
// |
// | W ready to mont
// | 0 BX | 1 SI | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// |
/* montgomery reduction */
// | clear flags
XORQ AX, AX
// |
/* i0 */
// |
// | W
// | 0 BX | 1 SI | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u0 = w0 * inp
MOVQ BX, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w0 @ BX
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, BX
ADCXQ DI, SI
// | j1
// | w1 @ SI
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, SI
ADCXQ DI, R8
// | j2
// | w2 @ R8
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R8
ADCXQ DI, R9
// | j3
// | w3 @ R9
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j4
// | w4 @ R10
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j5
// | w5 @ R11
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
ADOXQ BX, R12
ADCXQ BX, BX
MOVQ $0x00, AX
ADOXQ AX, BX
// | clear flags
XORQ AX, AX
// |
/* i1 */
// |
// | W
// | 0 - | 1 SI | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u1 = w1 * inp
MOVQ SI, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w1 @ SI
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, SI
ADCXQ DI, R8
// | j1
// | w2 @ R8
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R8
ADCXQ DI, R9
// | j2
// | w3 @ R9
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j3
// | w4 @ R10
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j4
// | w5 @ R11
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j5
// | w6 @ R12
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
ADOXQ BX, R13
ADCXQ SI, SI
MOVQ $0x00, AX
ADOXQ AX, SI
// | clear flags
XORQ AX, AX
// |
/* i2 */
// |
// | W
// | 0 - | 1 - | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u2 = w2 * inp
MOVQ R8, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w2 @ R8
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R8
ADCXQ DI, R9
// | j1
// | w3 @ R9
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j2
// | w4 @ R10
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j3
// | w5 @ R11
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j4
// | w6 @ R12
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j5
// | w7 @ R13
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
ADOXQ SI, R14
ADCXQ R8, R8
MOVQ $0x00, AX
ADOXQ AX, R8
// | clear flags
XORQ AX, AX
// |
/* i3 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u3 = w3 * inp
MOVQ R9, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w3 @ R9
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j1
// | w4 @ R10
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j2
// | w5 @ R11
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j3
// | w6 @ R12
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j4
// | w7 @ R13
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
// | j5
// | w8 @ R14
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R14
ADCXQ DI, R15
ADOXQ R8, R15
ADCXQ R9, R9
MOVQ $0x00, AX
ADOXQ AX, R9
// | clear flags
XORQ AX, AX
// |
/* i4 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u4 = w4 * inp
MOVQ R10, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w4 @ R10
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j1
// | w5 @ R11
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j2
// | w6 @ R12
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j3
// | w7 @ R13
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
// | j4
// | w8 @ R14
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R14
ADCXQ DI, R15
// | j5
// | w9 @ R15
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R15
ADCXQ DI, CX
ADOXQ R9, CX
ADCXQ R10, R10
MOVQ $0x00, AX
ADOXQ AX, R10
// | clear flags
XORQ AX, AX
// |
/* i5 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u5 = w5 * inp
MOVQ R11, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w5 @ R11
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j1
// | w6 @ R12
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j2
// | w7 @ R13
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
// | j3
// | w8 @ R14
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R14
ADCXQ DI, R15
// | j4
// | w9 @ R15
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R15
ADCXQ DI, CX
// | j5
// | w10 @ CX
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, CX
// | w11 @ (SP)
// | move to an idle register
MOVQ (SP), BX
ADCXQ DI, BX
ADOXQ R10, BX
// |
// | W montgomery reduction ends
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 -
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 BX
// |
/* modular reduction */
MOVQ R12, AX
SUBQ ·modulus+0(SB), AX
MOVQ R13, DI
SBBQ ·modulus+8(SB), DI
MOVQ R14, SI
SBBQ ·modulus+16(SB), SI
MOVQ R15, R8
SBBQ ·modulus+24(SB), R8
MOVQ CX, R9
SBBQ ·modulus+32(SB), R9
MOVQ BX, R10
SBBQ ·modulus+40(SB), R10
// |
/* out */
MOVQ c+0(FP), R11
CMOVQCC AX, R12
MOVQ R12, (R11)
CMOVQCC DI, R13
MOVQ R13, 8(R11)
CMOVQCC SI, R14
MOVQ R14, 16(R11)
CMOVQCC R8, R15
MOVQ R15, 24(R11)
CMOVQCC R9, CX
MOVQ CX, 32(R11)
CMOVQCC R10, BX
MOVQ BX, 40(R11)
RET
// |
/* end */
|
patex-ecosystem/patex-chain | 28,453 | crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s | @ vim: set tabstop=8 softtabstop=8 shiftwidth=8 noexpandtab syntax=armasm:
/**********************************************************************
* Copyright (c) 2014 Wladimir J. van der Laan *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
/*
ARM implementation of field_10x26 inner loops.
Note:
- To avoid unnecessary loads and make use of available registers, two
'passes' have every time been interleaved, with the odd passes accumulating c' and d'
which will be added to c and d respectively in the even passes
*/
.syntax unified
.arch armv7-a
@ eabi attributes - see readelf -A
.eabi_attribute 8, 1 @ Tag_ARM_ISA_use = yes
.eabi_attribute 9, 0 @ Tag_Thumb_ISA_use = no
.eabi_attribute 10, 0 @ Tag_FP_arch = none
.eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP
.eabi_attribute 30, 2 @ Tag_ABI_optimization_goals = Aggressive Speed
.eabi_attribute 34, 1 @ Tag_CPU_unaligned_access = v6
.text
@ Field constants
.set field_R0, 0x3d10
.set field_R1, 0x400
.set field_not_M, 0xfc000000 @ ~M = ~0x3ffffff
.align 2
.global secp256k1_fe_mul_inner
.type secp256k1_fe_mul_inner, %function
@ Arguments:
@ r0 r Restrict: can overlap with a, not with b
@ r1 a
@ r2 b
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
secp256k1_fe_mul_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
/******************************************
* Main computation code.
******************************************
Allocation:
r0,r14,r7,r8 scratch
r1 a (pointer)
r2 b (pointer)
r3:r4 c
r5:r6 d
r11:r12 c'
r9:r10 d'
Note: do not write to r[] here, it may overlap with a[]
*/
/* A - interleaved with B */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #9*4] @ b[9]
ldr r0, [r1, #1*4] @ a[1]
umull r5, r6, r7, r8 @ d = a[0] * b[9]
ldr r14, [r2, #8*4] @ b[8]
umull r9, r10, r0, r8 @ d' = a[1] * b[9]
ldr r7, [r1, #2*4] @ a[2]
umlal r5, r6, r0, r14 @ d += a[1] * b[8]
ldr r8, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r14 @ d' += a[2] * b[8]
ldr r0, [r1, #3*4] @ a[3]
umlal r5, r6, r7, r8 @ d += a[2] * b[7]
ldr r14, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r8 @ d' += a[3] * b[7]
ldr r7, [r1, #4*4] @ a[4]
umlal r5, r6, r0, r14 @ d += a[3] * b[6]
ldr r8, [r2, #5*4] @ b[5]
umlal r9, r10, r7, r14 @ d' += a[4] * b[6]
ldr r0, [r1, #5*4] @ a[5]
umlal r5, r6, r7, r8 @ d += a[4] * b[5]
ldr r14, [r2, #4*4] @ b[4]
umlal r9, r10, r0, r8 @ d' += a[5] * b[5]
ldr r7, [r1, #6*4] @ a[6]
umlal r5, r6, r0, r14 @ d += a[5] * b[4]
ldr r8, [r2, #3*4] @ b[3]
umlal r9, r10, r7, r14 @ d' += a[6] * b[4]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r8 @ d += a[6] * b[3]
ldr r14, [r2, #2*4] @ b[2]
umlal r9, r10, r0, r8 @ d' += a[7] * b[3]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r14 @ d += a[7] * b[2]
ldr r8, [r2, #1*4] @ b[1]
umlal r9, r10, r7, r14 @ d' += a[8] * b[2]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r8 @ d += a[8] * b[1]
ldr r14, [r2, #0*4] @ b[0]
umlal r9, r10, r0, r8 @ d' += a[9] * b[1]
ldr r7, [r1, #0*4] @ a[0]
umlal r5, r6, r0, r14 @ d += a[9] * b[0]
@ r7,r14 used in B
bic r0, r5, field_not_M @ t9 = d & M
str r0, [sp, #4 + 4*9]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
/* B */
umull r3, r4, r7, r14 @ c = a[0] * b[0]
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u0 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u0 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t0 = c & M
str r14, [sp, #4 + 0*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u0 * R1
umlal r3, r4, r0, r14
/* C - interleaved with D */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #2*4] @ b[2]
ldr r14, [r2, #1*4] @ b[1]
umull r11, r12, r7, r8 @ c' = a[0] * b[2]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[1] * b[1]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[2] * b[0]
ldr r0, [r1, #3*4] @ a[3]
umlal r5, r6, r7, r14 @ d += a[2] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[3] * b[9]
ldr r7, [r1, #4*4] @ a[4]
umlal r5, r6, r0, r8 @ d += a[3] * b[8]
ldr r14, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r8 @ d' += a[4] * b[8]
ldr r0, [r1, #5*4] @ a[5]
umlal r5, r6, r7, r14 @ d += a[4] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r14 @ d' += a[5] * b[7]
ldr r7, [r1, #6*4] @ a[6]
umlal r5, r6, r0, r8 @ d += a[5] * b[6]
ldr r14, [r2, #5*4] @ b[5]
umlal r9, r10, r7, r8 @ d' += a[6] * b[6]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r14 @ d += a[6] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r9, r10, r0, r14 @ d' += a[7] * b[5]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r8 @ d += a[7] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r9, r10, r7, r8 @ d' += a[8] * b[4]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r9, r10, r0, r14 @ d' += a[9] * b[3]
umlal r5, r6, r0, r8 @ d += a[9] * b[2]
bic r0, r5, field_not_M @ u1 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u1 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t1 = c & M
str r14, [sp, #4 + 1*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u1 * R1
umlal r3, r4, r0, r14
/* D */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u2 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u2 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t2 = c & M
str r14, [sp, #4 + 2*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u2 * R1
umlal r3, r4, r0, r14
/* E - interleaved with F */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #4*4] @ b[4]
umull r11, r12, r7, r8 @ c' = a[0] * b[4]
ldr r8, [r2, #3*4] @ b[3]
umlal r3, r4, r7, r8 @ c += a[0] * b[3]
ldr r7, [r1, #1*4] @ a[1]
umlal r11, r12, r7, r8 @ c' += a[1] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r3, r4, r7, r8 @ c += a[1] * b[2]
ldr r7, [r1, #2*4] @ a[2]
umlal r11, r12, r7, r8 @ c' += a[2] * b[2]
ldr r8, [r2, #1*4] @ b[1]
umlal r3, r4, r7, r8 @ c += a[2] * b[1]
ldr r7, [r1, #3*4] @ a[3]
umlal r11, r12, r7, r8 @ c' += a[3] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r3, r4, r7, r8 @ c += a[3] * b[0]
ldr r7, [r1, #4*4] @ a[4]
umlal r11, r12, r7, r8 @ c' += a[4] * b[0]
ldr r8, [r2, #9*4] @ b[9]
umlal r5, r6, r7, r8 @ d += a[4] * b[9]
ldr r7, [r1, #5*4] @ a[5]
umull r9, r10, r7, r8 @ d' = a[5] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umlal r5, r6, r7, r8 @ d += a[5] * b[8]
ldr r7, [r1, #6*4] @ a[6]
umlal r9, r10, r7, r8 @ d' += a[6] * b[8]
ldr r8, [r2, #7*4] @ b[7]
umlal r5, r6, r7, r8 @ d += a[6] * b[7]
ldr r7, [r1, #7*4] @ a[7]
umlal r9, r10, r7, r8 @ d' += a[7] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r5, r6, r7, r8 @ d += a[7] * b[6]
ldr r7, [r1, #8*4] @ a[8]
umlal r9, r10, r7, r8 @ d' += a[8] * b[6]
ldr r8, [r2, #5*4] @ b[5]
umlal r5, r6, r7, r8 @ d += a[8] * b[5]
ldr r7, [r1, #9*4] @ a[9]
umlal r9, r10, r7, r8 @ d' += a[9] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r5, r6, r7, r8 @ d += a[9] * b[4]
bic r0, r5, field_not_M @ u3 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u3 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t3 = c & M
str r14, [sp, #4 + 3*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u3 * R1
umlal r3, r4, r0, r14
/* F */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u4 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u4 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t4 = c & M
str r14, [sp, #4 + 4*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u4 * R1
umlal r3, r4, r0, r14
/* G - interleaved with H */
ldr r7, [r1, #0*4] @ a[0]
ldr r8, [r2, #6*4] @ b[6]
ldr r14, [r2, #5*4] @ b[5]
umull r11, r12, r7, r8 @ c' = a[0] * b[6]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r11, r12, r0, r14 @ c' += a[1] * b[5]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r11, r12, r7, r8 @ c' += a[2] * b[4]
ldr r0, [r1, #3*4] @ a[3]
umlal r3, r4, r7, r14 @ c += a[2] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r11, r12, r0, r14 @ c' += a[3] * b[3]
ldr r7, [r1, #4*4] @ a[4]
umlal r3, r4, r0, r8 @ c += a[3] * b[2]
ldr r14, [r2, #1*4] @ b[1]
umlal r11, r12, r7, r8 @ c' += a[4] * b[2]
ldr r0, [r1, #5*4] @ a[5]
umlal r3, r4, r7, r14 @ c += a[4] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[5] * b[1]
ldr r7, [r1, #6*4] @ a[6]
umlal r3, r4, r0, r8 @ c += a[5] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[6] * b[0]
ldr r0, [r1, #7*4] @ a[7]
umlal r5, r6, r7, r14 @ d += a[6] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[7] * b[9]
ldr r7, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r8 @ d += a[7] * b[8]
ldr r14, [r2, #7*4] @ b[7]
umlal r9, r10, r7, r8 @ d' += a[8] * b[8]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r9, r10, r0, r14 @ d' += a[9] * b[7]
umlal r5, r6, r0, r8 @ d += a[9] * b[6]
bic r0, r5, field_not_M @ u5 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u5 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t5 = c & M
str r14, [sp, #4 + 5*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u5 * R1
umlal r3, r4, r0, r14
/* H */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u6 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u6 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t6 = c & M
str r14, [sp, #4 + 6*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u6 * R1
umlal r3, r4, r0, r14
/* I - interleaved with J */
ldr r8, [r2, #8*4] @ b[8]
ldr r7, [r1, #0*4] @ a[0]
ldr r14, [r2, #7*4] @ b[7]
umull r11, r12, r7, r8 @ c' = a[0] * b[8]
ldr r0, [r1, #1*4] @ a[1]
umlal r3, r4, r7, r14 @ c += a[0] * b[7]
ldr r8, [r2, #6*4] @ b[6]
umlal r11, r12, r0, r14 @ c' += a[1] * b[7]
ldr r7, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r8 @ c += a[1] * b[6]
ldr r14, [r2, #5*4] @ b[5]
umlal r11, r12, r7, r8 @ c' += a[2] * b[6]
ldr r0, [r1, #3*4] @ a[3]
umlal r3, r4, r7, r14 @ c += a[2] * b[5]
ldr r8, [r2, #4*4] @ b[4]
umlal r11, r12, r0, r14 @ c' += a[3] * b[5]
ldr r7, [r1, #4*4] @ a[4]
umlal r3, r4, r0, r8 @ c += a[3] * b[4]
ldr r14, [r2, #3*4] @ b[3]
umlal r11, r12, r7, r8 @ c' += a[4] * b[4]
ldr r0, [r1, #5*4] @ a[5]
umlal r3, r4, r7, r14 @ c += a[4] * b[3]
ldr r8, [r2, #2*4] @ b[2]
umlal r11, r12, r0, r14 @ c' += a[5] * b[3]
ldr r7, [r1, #6*4] @ a[6]
umlal r3, r4, r0, r8 @ c += a[5] * b[2]
ldr r14, [r2, #1*4] @ b[1]
umlal r11, r12, r7, r8 @ c' += a[6] * b[2]
ldr r0, [r1, #7*4] @ a[7]
umlal r3, r4, r7, r14 @ c += a[6] * b[1]
ldr r8, [r2, #0*4] @ b[0]
umlal r11, r12, r0, r14 @ c' += a[7] * b[1]
ldr r7, [r1, #8*4] @ a[8]
umlal r3, r4, r0, r8 @ c += a[7] * b[0]
ldr r14, [r2, #9*4] @ b[9]
umlal r11, r12, r7, r8 @ c' += a[8] * b[0]
ldr r0, [r1, #9*4] @ a[9]
umlal r5, r6, r7, r14 @ d += a[8] * b[9]
ldr r8, [r2, #8*4] @ b[8]
umull r9, r10, r0, r14 @ d' = a[9] * b[9]
umlal r5, r6, r0, r8 @ d += a[9] * b[8]
bic r0, r5, field_not_M @ u7 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u7 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t7 = c & M
str r14, [sp, #4 + 7*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u7 * R1
umlal r3, r4, r0, r14
/* J */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u8 = d & M
str r0, [sp, #4 + 8*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u8 * R0
umlal r3, r4, r0, r14
/******************************************
* compute and write back result
******************************************
Allocation:
r0 r
r3:r4 c
r5:r6 d
r7 t0
r8 t1
r9 t2
r11 u8
r12 t9
r1,r2,r10,r14 scratch
Note: do not read from a[] after here, it may overlap with r[]
*/
ldr r0, [sp, #0]
add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9
ldmia r1, {r2,r7,r8,r9,r10,r11,r12}
add r1, r0, #3*4
stmia r1, {r2,r7,r8,r9,r10}
bic r2, r3, field_not_M @ r[8] = c & M
str r2, [r0, #8*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u8 * R1
umlal r3, r4, r11, r14
movw r14, field_R0 @ c += d * R0
umlal r3, r4, r5, r14
adds r3, r3, r12 @ c += t9
adc r4, r4, #0
add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2
ldmia r1, {r7,r8,r9}
ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4)
str r2, [r0, #9*4]
mov r3, r3, lsr #22 @ c >>= 22
orr r3, r3, r4, asl #10
mov r4, r4, lsr #22
movw r14, field_R1 << 4 @ c += d * (R1 << 4)
umlal r3, r4, r5, r14
movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add)
umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4)
adds r5, r5, r7 @ d.lo += t0
mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4)
adc r6, r6, 0 @ d.hi += carry
bic r2, r5, field_not_M @ r[0] = d & M
str r2, [r0, #0*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add)
umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4)
adds r5, r5, r8 @ d.lo += t1
adc r6, r6, #0 @ d.hi += carry
adds r5, r5, r1 @ d.lo += tmp.lo
mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4)
adc r6, r6, r2 @ d.hi += carry + tmp.hi
bic r2, r5, field_not_M @ r[1] = d & M
str r2, [r0, #1*4]
mov r5, r5, lsr #26 @ d >>= 26 (ignore hi)
orr r5, r5, r6, asl #6
add r5, r5, r9 @ d += t2
str r5, [r0, #2*4] @ r[2] = d
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_mul_inner, .-secp256k1_fe_mul_inner
.align 2
.global secp256k1_fe_sqr_inner
.type secp256k1_fe_sqr_inner, %function
@ Arguments:
@ r0 r Can overlap with a
@ r1 a
@ Stack (total 4+10*4 = 44)
@ sp + #0 saved 'r' pointer
@ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
secp256k1_fe_sqr_inner:
stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
sub sp, sp, #48 @ frame=44 + alignment
str r0, [sp, #0] @ save result address, we need it only at the end
/******************************************
* Main computation code.
******************************************
Allocation:
r0,r14,r2,r7,r8 scratch
r1 a (pointer)
r3:r4 c
r5:r6 d
r11:r12 c'
r9:r10 d'
Note: do not write to r[] here, it may overlap with a[]
*/
/* A interleaved with B */
ldr r0, [r1, #1*4] @ a[1]*2
ldr r7, [r1, #0*4] @ a[0]
mov r0, r0, asl #1
ldr r14, [r1, #9*4] @ a[9]
umull r3, r4, r7, r7 @ c = a[0] * a[0]
ldr r8, [r1, #8*4] @ a[8]
mov r7, r7, asl #1
umull r5, r6, r7, r14 @ d = a[0]*2 * a[9]
ldr r7, [r1, #2*4] @ a[2]*2
umull r9, r10, r0, r14 @ d' = a[1]*2 * a[9]
ldr r14, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r8 @ d += a[1]*2 * a[8]
mov r7, r7, asl #1
ldr r0, [r1, #3*4] @ a[3]*2
umlal r9, r10, r7, r8 @ d' += a[2]*2 * a[8]
ldr r8, [r1, #6*4] @ a[6]
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[7]
mov r0, r0, asl #1
ldr r7, [r1, #4*4] @ a[4]*2
umlal r9, r10, r0, r14 @ d' += a[3]*2 * a[7]
ldr r14, [r1, #5*4] @ a[5]
mov r7, r7, asl #1
umlal r5, r6, r0, r8 @ d += a[3]*2 * a[6]
umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[6]
umlal r5, r6, r7, r14 @ d += a[4]*2 * a[5]
umlal r9, r10, r14, r14 @ d' += a[5] * a[5]
bic r0, r5, field_not_M @ t9 = d & M
str r0, [sp, #4 + 9*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
/* B */
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u0 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u0 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t0 = c & M
str r14, [sp, #4 + 0*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u0 * R1
umlal r3, r4, r0, r14
/* C interleaved with D */
ldr r0, [r1, #0*4] @ a[0]*2
ldr r14, [r1, #1*4] @ a[1]
mov r0, r0, asl #1
ldr r8, [r1, #2*4] @ a[2]
umlal r3, r4, r0, r14 @ c += a[0]*2 * a[1]
mov r7, r8, asl #1 @ a[2]*2
umull r11, r12, r14, r14 @ c' = a[1] * a[1]
ldr r14, [r1, #9*4] @ a[9]
umlal r11, r12, r0, r8 @ c' += a[0]*2 * a[2]
ldr r0, [r1, #3*4] @ a[3]*2
ldr r8, [r1, #8*4] @ a[8]
umlal r5, r6, r7, r14 @ d += a[2]*2 * a[9]
mov r0, r0, asl #1
ldr r7, [r1, #4*4] @ a[4]*2
umull r9, r10, r0, r14 @ d' = a[3]*2 * a[9]
ldr r14, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r8 @ d += a[3]*2 * a[8]
mov r7, r7, asl #1
ldr r0, [r1, #5*4] @ a[5]*2
umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[8]
ldr r8, [r1, #6*4] @ a[6]
mov r0, r0, asl #1
umlal r5, r6, r7, r14 @ d += a[4]*2 * a[7]
umlal r9, r10, r0, r14 @ d' += a[5]*2 * a[7]
umlal r5, r6, r0, r8 @ d += a[5]*2 * a[6]
umlal r9, r10, r8, r8 @ d' += a[6] * a[6]
bic r0, r5, field_not_M @ u1 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u1 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t1 = c & M
str r14, [sp, #4 + 1*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u1 * R1
umlal r3, r4, r0, r14
/* D */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u2 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u2 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t2 = c & M
str r14, [sp, #4 + 2*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u2 * R1
umlal r3, r4, r0, r14
/* E interleaved with F */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
ldr r14, [r1, #2*4] @ a[2]
mov r7, r7, asl #1
ldr r8, [r1, #3*4] @ a[3]
ldr r2, [r1, #4*4]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[3]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[4]
mov r2, r2, asl #1 @ a[4]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[3]
ldr r8, [r1, #9*4] @ a[9]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[2]
ldr r0, [r1, #5*4] @ a[5]*2
umlal r11, r12, r14, r14 @ c' += a[2] * a[2]
ldr r14, [r1, #8*4] @ a[8]
mov r0, r0, asl #1
umlal r5, r6, r2, r8 @ d += a[4]*2 * a[9]
ldr r7, [r1, #6*4] @ a[6]*2
umull r9, r10, r0, r8 @ d' = a[5]*2 * a[9]
mov r7, r7, asl #1
ldr r8, [r1, #7*4] @ a[7]
umlal r5, r6, r0, r14 @ d += a[5]*2 * a[8]
umlal r9, r10, r7, r14 @ d' += a[6]*2 * a[8]
umlal r5, r6, r7, r8 @ d += a[6]*2 * a[7]
umlal r9, r10, r8, r8 @ d' += a[7] * a[7]
bic r0, r5, field_not_M @ u3 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u3 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t3 = c & M
str r14, [sp, #4 + 3*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u3 * R1
umlal r3, r4, r0, r14
/* F */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u4 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u4 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t4 = c & M
str r14, [sp, #4 + 4*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u4 * R1
umlal r3, r4, r0, r14
/* G interleaved with H */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
mov r7, r7, asl #1
ldr r8, [r1, #5*4] @ a[5]
ldr r2, [r1, #6*4] @ a[6]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[5]
ldr r14, [r1, #4*4] @ a[4]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[6]
ldr r7, [r1, #2*4] @ a[2]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[5]
mov r7, r7, asl #1
ldr r8, [r1, #3*4] @ a[3]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[4]
mov r0, r2, asl #1 @ a[6]*2
umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[4]
ldr r14, [r1, #9*4] @ a[9]
umlal r3, r4, r7, r8 @ c += a[2]*2 * a[3]
ldr r7, [r1, #7*4] @ a[7]*2
umlal r11, r12, r8, r8 @ c' += a[3] * a[3]
mov r7, r7, asl #1
ldr r8, [r1, #8*4] @ a[8]
umlal r5, r6, r0, r14 @ d += a[6]*2 * a[9]
umull r9, r10, r7, r14 @ d' = a[7]*2 * a[9]
umlal r5, r6, r7, r8 @ d += a[7]*2 * a[8]
umlal r9, r10, r8, r8 @ d' += a[8] * a[8]
bic r0, r5, field_not_M @ u5 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u5 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t5 = c & M
str r14, [sp, #4 + 5*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u5 * R1
umlal r3, r4, r0, r14
/* H */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
adds r5, r5, r9 @ d += d'
adc r6, r6, r10
bic r0, r5, field_not_M @ u6 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u6 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t6 = c & M
str r14, [sp, #4 + 6*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u6 * R1
umlal r3, r4, r0, r14
/* I interleaved with J */
ldr r7, [r1, #0*4] @ a[0]*2
ldr r0, [r1, #1*4] @ a[1]*2
mov r7, r7, asl #1
ldr r8, [r1, #7*4] @ a[7]
ldr r2, [r1, #8*4] @ a[8]
umlal r3, r4, r7, r8 @ c += a[0]*2 * a[7]
ldr r14, [r1, #6*4] @ a[6]
mov r0, r0, asl #1
umull r11, r12, r7, r2 @ c' = a[0]*2 * a[8]
ldr r7, [r1, #2*4] @ a[2]*2
umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[7]
ldr r8, [r1, #5*4] @ a[5]
umlal r3, r4, r0, r14 @ c += a[1]*2 * a[6]
ldr r0, [r1, #3*4] @ a[3]*2
mov r7, r7, asl #1
umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[6]
ldr r14, [r1, #4*4] @ a[4]
mov r0, r0, asl #1
umlal r3, r4, r7, r8 @ c += a[2]*2 * a[5]
mov r2, r2, asl #1 @ a[8]*2
umlal r11, r12, r0, r8 @ c' += a[3]*2 * a[5]
umlal r3, r4, r0, r14 @ c += a[3]*2 * a[4]
umlal r11, r12, r14, r14 @ c' += a[4] * a[4]
ldr r8, [r1, #9*4] @ a[9]
umlal r5, r6, r2, r8 @ d += a[8]*2 * a[9]
@ r8 will be used in J
bic r0, r5, field_not_M @ u7 = d & M
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u7 * R0
umlal r3, r4, r0, r14
bic r14, r3, field_not_M @ t7 = c & M
str r14, [sp, #4 + 7*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u7 * R1
umlal r3, r4, r0, r14
/* J */
adds r3, r3, r11 @ c += c'
adc r4, r4, r12
umlal r5, r6, r8, r8 @ d += a[9] * a[9]
bic r0, r5, field_not_M @ u8 = d & M
str r0, [sp, #4 + 8*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R0 @ c += u8 * R0
umlal r3, r4, r0, r14
/******************************************
* compute and write back result
******************************************
Allocation:
r0 r
r3:r4 c
r5:r6 d
r7 t0
r8 t1
r9 t2
r11 u8
r12 t9
r1,r2,r10,r14 scratch
Note: do not read from a[] after here, it may overlap with r[]
*/
ldr r0, [sp, #0]
add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9
ldmia r1, {r2,r7,r8,r9,r10,r11,r12}
add r1, r0, #3*4
stmia r1, {r2,r7,r8,r9,r10}
bic r2, r3, field_not_M @ r[8] = c & M
str r2, [r0, #8*4]
mov r3, r3, lsr #26 @ c >>= 26
orr r3, r3, r4, asl #6
mov r4, r4, lsr #26
mov r14, field_R1 @ c += u8 * R1
umlal r3, r4, r11, r14
movw r14, field_R0 @ c += d * R0
umlal r3, r4, r5, r14
adds r3, r3, r12 @ c += t9
adc r4, r4, #0
add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2
ldmia r1, {r7,r8,r9}
ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4)
str r2, [r0, #9*4]
mov r3, r3, lsr #22 @ c >>= 22
orr r3, r3, r4, asl #10
mov r4, r4, lsr #22
movw r14, field_R1 << 4 @ c += d * (R1 << 4)
umlal r3, r4, r5, r14
movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add)
umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4)
adds r5, r5, r7 @ d.lo += t0
mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4)
adc r6, r6, 0 @ d.hi += carry
bic r2, r5, field_not_M @ r[0] = d & M
str r2, [r0, #0*4]
mov r5, r5, lsr #26 @ d >>= 26
orr r5, r5, r6, asl #6
mov r6, r6, lsr #26
movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add)
umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4)
adds r5, r5, r8 @ d.lo += t1
adc r6, r6, #0 @ d.hi += carry
adds r5, r5, r1 @ d.lo += tmp.lo
mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4)
adc r6, r6, r2 @ d.hi += carry + tmp.hi
bic r2, r5, field_not_M @ r[1] = d & M
str r2, [r0, #1*4]
mov r5, r5, lsr #26 @ d >>= 26 (ignore hi)
orr r5, r5, r6, asl #6
add r5, r5, r9 @ d += t2
str r5, [r0, #2*4] @ r[2] = d
add sp, sp, #48
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner
|
patex-ecosystem/patex-chain | 2,193 | crypto/bn256/cloudflare/gfp_amd64.s | // +build amd64,!generic
#define storeBlock(a0,a1,a2,a3, r) \
MOVQ a0, 0+r \
MOVQ a1, 8+r \
MOVQ a2, 16+r \
MOVQ a3, 24+r
#define loadBlock(r, a0,a1,a2,a3) \
MOVQ 0+r, a0 \
MOVQ 8+r, a1 \
MOVQ 16+r, a2 \
MOVQ 24+r, a3
#define gfpCarry(a0,a1,a2,a3,a4, b0,b1,b2,b3,b4) \
\ // b = a-p
MOVQ a0, b0 \
MOVQ a1, b1 \
MOVQ a2, b2 \
MOVQ a3, b3 \
MOVQ a4, b4 \
\
SUBQ ·p2+0(SB), b0 \
SBBQ ·p2+8(SB), b1 \
SBBQ ·p2+16(SB), b2 \
SBBQ ·p2+24(SB), b3 \
SBBQ $0, b4 \
\
\ // if b is negative then return a
\ // else return b
CMOVQCC b0, a0 \
CMOVQCC b1, a1 \
CMOVQCC b2, a2 \
CMOVQCC b3, a3
#include "mul_amd64.h"
#include "mul_bmi2_amd64.h"
TEXT ·gfpNeg(SB),0,$0-16
MOVQ ·p2+0(SB), R8
MOVQ ·p2+8(SB), R9
MOVQ ·p2+16(SB), R10
MOVQ ·p2+24(SB), R11
MOVQ a+8(FP), DI
SUBQ 0(DI), R8
SBBQ 8(DI), R9
SBBQ 16(DI), R10
SBBQ 24(DI), R11
MOVQ $0, AX
gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,CX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET
TEXT ·gfpAdd(SB),0,$0-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
loadBlock(0(DI), R8,R9,R10,R11)
MOVQ $0, R12
ADDQ 0(SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ $0, R12
gfpCarry(R8,R9,R10,R11,R12, R13,R14,CX,AX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET
TEXT ·gfpSub(SB),0,$0-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
loadBlock(0(DI), R8,R9,R10,R11)
MOVQ ·p2+0(SB), R12
MOVQ ·p2+8(SB), R13
MOVQ ·p2+16(SB), R14
MOVQ ·p2+24(SB), CX
MOVQ $0, AX
SUBQ 0(SI), R8
SBBQ 8(SI), R9
SBBQ 16(SI), R10
SBBQ 24(SI), R11
CMOVQCC AX, R12
CMOVQCC AX, R13
CMOVQCC AX, R14
CMOVQCC AX, CX
ADDQ R12, R8
ADCQ R13, R9
ADCQ R14, R10
ADCQ CX, R11
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET
TEXT ·gfpMul(SB),0,$160-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
// Jump to a slightly different implementation if MULX isn't supported.
CMPB ·hasBMI2(SB), $0
JE nobmi2Mul
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
storeBlock( R8, R9,R10,R11, 0(SP))
storeBlock(R12,R13,R14,CX, 32(SP))
gfpReduceBMI2()
JMP end
nobmi2Mul:
mul(0(DI),8(DI),16(DI),24(DI), 0(SI), 0(SP))
gfpReduce(0(SP))
end:
MOVQ c+0(FP), DI
storeBlock(R12,R13,R14,CX, 0(DI))
RET
|
patex-ecosystem/patex-chain | 1,870 | crypto/bn256/cloudflare/gfp_arm64.s | // +build arm64,!generic
#define storeBlock(a0,a1,a2,a3, r) \
MOVD a0, 0+r \
MOVD a1, 8+r \
MOVD a2, 16+r \
MOVD a3, 24+r
#define loadBlock(r, a0,a1,a2,a3) \
MOVD 0+r, a0 \
MOVD 8+r, a1 \
MOVD 16+r, a2 \
MOVD 24+r, a3
#define loadModulus(p0,p1,p2,p3) \
MOVD ·p2+0(SB), p0 \
MOVD ·p2+8(SB), p1 \
MOVD ·p2+16(SB), p2 \
MOVD ·p2+24(SB), p3
#include "mul_arm64.h"
TEXT ·gfpNeg(SB),0,$0-16
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
loadModulus(R5,R6,R7,R8)
SUBS R1, R5, R1
SBCS R2, R6, R2
SBCS R3, R7, R3
SBCS R4, R8, R4
SUBS R5, R1, R5
SBCS R6, R2, R6
SBCS R7, R3, R7
SBCS R8, R4, R8
CSEL CS, R5, R1, R1
CSEL CS, R6, R2, R2
CSEL CS, R7, R3, R3
CSEL CS, R8, R4, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
TEXT ·gfpAdd(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
loadModulus(R9,R10,R11,R12)
MOVD ZR, R0
ADDS R5, R1
ADCS R6, R2
ADCS R7, R3
ADCS R8, R4
ADCS ZR, R0
SUBS R9, R1, R5
SBCS R10, R2, R6
SBCS R11, R3, R7
SBCS R12, R4, R8
SBCS ZR, R0, R0
CSEL CS, R5, R1, R1
CSEL CS, R6, R2, R2
CSEL CS, R7, R3, R3
CSEL CS, R8, R4, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
TEXT ·gfpSub(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
loadModulus(R9,R10,R11,R12)
SUBS R5, R1
SBCS R6, R2
SBCS R7, R3
SBCS R8, R4
CSEL CS, ZR, R9, R9
CSEL CS, ZR, R10, R10
CSEL CS, ZR, R11, R11
CSEL CS, ZR, R12, R12
ADDS R9, R1
ADCS R10, R2
ADCS R11, R3
ADCS R12, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
TEXT ·gfpMul(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
mul(R9,R10,R11,R12,R13,R14,R15,R16)
gfpReduce()
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
|
pcsx-redux/nugget | 5,758 | ucl-demo/n2e-d.S | // Copyright (C) 2023 PCSX-Redux authors
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the
// Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
// This is from my old ps2-packer, which I wrote from scratch eons ago.
.set noreorder
.global n2e_decompress
.ent n2e_decompress
#ifdef ARGS_IN_S
#define source $s0
#define dest $s1
#else
#define source $a0
#define dest $a1
#endif
#define bb $t0
#define last_m_off $t2
#define m_off $t3
#define m_len $t4
#define m_pos $t5
#define ff0000 $t6
#define saved_ra $t9
n2e_decompress:
// a0/s0 = source
// a1/s1 = destination
// t0 = bitbucket (bb)
// t1 = temp... (memmove)
// t2 = last_m_off
// t3 = m_off
// t4 = m_len
// t5 = m_pos
// t6 = 0xff0000
// t9 = saved_ra
move saved_ra, $ra
move bb, $0
li last_m_off, 1
lui ff0000, 0xff
main_loop: // for (;;)
bal getbit // while (getbit(bb))
nop
beqz $v0, m_off_loop
li m_off, 1 // m_off = 1 (doing it there, in the unused delay slot) */
lbu $t1, 0(source) // dst[olen++] = src[ilen++]
addiu source, 1
sb $t1, 0(dest)
b main_loop
addiu dest, 1
m_off_loop:
bal getbit // + getbi...
sll m_off, 1 // m_off = m_off * 2...
bal getbit // if (getbit(bb)...
addu m_off, $v0 // ..t(bb)
bnez $v0, exit_m_off_loop // ... break (and m_off-- as a side effect)
addiu m_off, -1 // m_off = (m_off-1)..
bal getbit // + getbi...
sll m_off, 1 // ..*2
b m_off_loop // for(;;)
addu m_off, $v0 // ..t(bb)
exit_m_off_loop:
addiu m_off, -1 // if (!(--m_off)) ...
bnez m_off, m_off_diff_2
nop
bal getbit // m_len = getbit(bb)
move m_off, last_m_off // m_off = last_m_off
b exit_if_moff_eq_2
move m_len, $v0
m_off_diff_2: // else
addiu m_off, -1 // (m_off-1) (m_off was already sub by 2)
lbu $t1, 0(source) // src[ilen++]
sll m_off, 8 // *256
addiu source, 1
addu m_off, $t1 // m_off = ...
addiu $t1, m_off, 1 // if (m_off == -1) (that is, t1 == 0)
bnez $t1, 1f
nop
jr saved_ra // break; (that is, return)
1: andi m_len, $t1, 1 // mlen = (m_off ^ 1) & 1 (that is, (m_off + 1) & 1)
srl m_off, 1 // m_off >>= 1
addiu m_off, 1 // ++m_off
move last_m_off, m_off // last_m_off = m_off
// endif
exit_if_moff_eq_2:
bal getbit // prefetch next bit, can be used twice
nop
beqz m_len, else_1 // if (m_len)
nop
b exit_if
addiu m_len, $v0, 3 // m_len = 3 + getbit(bb) (I add 2 everywhere, for later)
else_1:
bnez $v0, else_2_reversed // else if (getbit(bb)) (second time the prefetched bit can be used)
nop // else...
addiu m_len, 1 // m_len++
while_m_len:
bal getbit // + getbit(bb)
sll m_len, 1 // m_len * 2
bal getbit // preparing the condition
addu m_len, $v0 // m_len = ...
beqz $v0, while_m_len // while(!getbit(bb))
nop
b exit_if
addiu m_len, 5 // m_len += 5 (+2 from the original code)
else_2_reversed:
bal getbit // m_len = 5 + getbit(bb) (still + 2 from the original code)
nop
addiu m_len, $v0, 5
// endif
exit_if:
sltiu $t1, m_off, 0x501 // original code does m_len += (m_off > 0x500)
subu m_len, $t1 // we do m_len -= (m_off < 0x501) (to use sltiu)
// which get one of the +1 from before
subu m_pos, dest, m_off // m_pos = dest + olen - m_off
// Here, the original code does dst[olen++] = *m_pos++
// we roll it back in the loop, by adding +1 before to m_len.
copy_loop:
lbu $t1, 0(m_pos) // dst[olen++] = *m_pos++
addiu m_pos, 1
sb $t1, 0(dest)
nop // stupid 5900 loop bug...
addiu m_len, -1
bnez m_len, copy_loop
addiu dest, 1
b main_loop
nop
.end n2e_decompress
// a0 = source
// t0 = bitbucket
// v0 = returned bit
// t1 = safely erased
.ent getbit
getbit:
and $t1, bb, ff0000
bnez $t1, bb_okay
nop
lbu $t1, 0(source)
addiu source, 1
or bb, $t1, ff0000
bb_okay:
srl $v0, bb, 7
sll bb, 1
jr $ra
andi $v0, 1
.end getbit
|
pcsx-redux/nugget | 5,901 | psyqo/src/vector.s | /*
MIT License
Copyright (c) 2024 PCSX-Redux authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
.section .text.exceptionHandler, "ax", @progbits
.set push
.set noreorder
.set noat
.align 2
.global psyqoAssemblyExceptionHandler
.global psyqoExceptionHandler
.global psyqoBreakHandler
.global psyqoExceptionHandlerAdjustFrameCount
.global psyqoExceptionHandlerStop
.type psyqoAssemblyExceptionHandler, @function
/*
The way this handler works is a bit complicated. The idea is that VBlank
is a common exception which has the singular purpose of incrementing a
frame counter. If we get only VBlank, we increment the frame counter using
self modifying code to poke at the GPU singleton object directly, and no
C++ handler is called. If we get anything else, we call the C++ handler,
which will be responsible for handling the stacked IRQs, including potentially
calling the VBlank handler, which will increment the frame counter. In short,
VBlank has a fast path case if it is the only exception, and a slow path case
in C++ if there are other exceptions alongside it.
*/
psyqoAssemblyExceptionHandler:
sw $at, 0x100($0)
sw $v1, 0x108($0)
sw $a0, 0x10c($0)
/* $k0 = hardware registers base, set globally */
mfc0 $k1, $14 /* $k1 = EPC, will stay there until the end */
mfc0 $a0, $13 /* $a0 = Cause */
li $at, 0x24 /* Prepare for break test in (a) */
lw $v1, 0($k1) /* $v1 = instruction that caused the exception */
andi $a0, 0x3c /* Test for what kind of exception */
beq $a0, $at, .Lbreak /* (a) */
li $at, 0x4a /* Prepare for cop2 test in (b) */
/* Beyond break, psyqo will only support IRQs, aka 0 */
bnez $a0, .Lstop /* Anything else and we just stop - $a0 available again */
srl $v1, 24 /* | (b) */
andi $v1, 0xfe /* |_ Test if we were in a cop2 operation */
lhu $a0, 0x1070($k0) /* $a0 = IREG, which we will pass to our C++ handler */
bne $v1, $at, .LnoCOP2adjustmentNeeded
andi $v1, $a0, 0x7fe /* Prepare for the IRQ test in (c) */
addiu $k1, 4 /* If we were in cop2, we need to adjust our EPC */
.LnoCOP2adjustmentNeeded:
xori $at, $a0, 0x7ff /* $at = IRQ ACK bitfield */
bnez $v1, .LgotIRQs /* (c) Did we get anything beyond VBlank ? */
sw $at, 0x1070($k0) /* ACK the IRQs we are signalling */
psyqoExceptionHandlerAdjustFrameCount:
/* Basically self modifying code here... */
lui $v1, 0
/* ... here... */
lw $a0, 0($v1) /* $a0 = m_frameCount */
lw $at, 0x100($0) /* Load the old at in the load delay slot of $a0 above */
addiu $a0, 1 /* Increment m_frameCount */
/* ... and here. */
sw $a0, 0($v1) /* Store m_frameCount */
lw $v1, 0x108($0) /* Load the old v1 */
lw $a0, 0x10c($0) /* Load the old a0 */
jr $k1 /* Exit the exception handler */
rfe
.LgotIRQs:
la $v1, psyqoExceptionHandler
.LcallCPlusPlus:
/* We want to call into C++ now, so we need to save the rest of the registers */
sw $v0, 0x104($0)
sw $a1, 0x110($0)
sw $a2, 0x114($0)
sw $a3, 0x118($0)
sw $t0, 0x11c($0)
sw $t1, 0x120($0)
sw $t2, 0x124($0)
sw $t3, 0x128($0)
sw $t4, 0x12c($0)
sw $t5, 0x130($0)
sw $t6, 0x134($0)
sw $t7, 0x138($0)
sw $t8, 0x140($0)
sw $t9, 0x144($0)
sw $sp, 0x148($0)
sw $ra, 0x14c($0)
/* Call the C++ exception or break handler while adjusting the stack */
li $a1, 0
jalr $v1
li $sp, 0x1000 - 16
/* Restore the registers and exit */
lw $at, 0x100($0)
lw $v0, 0x104($0)
lw $v1, 0x108($0)
lw $a0, 0x10c($0)
lw $a1, 0x110($0)
lw $a2, 0x114($0)
lw $a3, 0x118($0)
lw $t0, 0x11c($0)
lw $t1, 0x120($0)
lw $t2, 0x124($0)
lw $t3, 0x128($0)
lw $t4, 0x12c($0)
lw $t5, 0x130($0)
lw $t6, 0x134($0)
lw $t7, 0x138($0)
lw $t8, 0x140($0)
lw $t9, 0x144($0)
lw $sp, 0x148($0)
lw $ra, 0x14c($0)
jr $k1
rfe
.Lbreak:
srl $a0, $v1, 6
la $v1, psyqoBreakHandler
b .LsaveMoreRegisters
addiu $k1, 4
psyqoExceptionHandlerStop:
.Lstop:
b .Lstop /* Infinite loop to stop execution */
nop /* Replaced with self-modifying code when adding crash screen */
.LsaveMoreRegisters:
sw $gp, 0x150($0)
sw $s0, 0x154($0)
sw $s1, 0x158($0)
sw $s2, 0x15c($0)
sw $s3, 0x160($0)
sw $s4, 0x164($0)
sw $s5, 0x168($0)
sw $s6, 0x16c($0)
sw $s7, 0x170($0)
sw $fp, 0x174($0)
b .LcallCPlusPlus
srl $a0, $a0, 2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.