/* sysALib.s - system-dependent routines */

/*
 * This program is OPEN SOURCE software: you can redistribute it and/or
 * modify it. This program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#define _ASMLANGUAGE

#include <vxWorks.h>
#include <vsbConfig.h>
#include <asm.h>
#include <regs.h>
#include <arch/arm/arm.h>
#include <arch/arm/mmuArmLib.h>
#include <arch/arm/cacheArmArch7.h>
#include <sysLib.h>
#include "config.h"
#ifdef _WRS_CONFIG_SMP
#   include <private/windLibP.h>
#   include <private/vxSmpP.h>
#endif /* _WRS_CONFIG_SMP */

#undef ARM_AARCH64
#undef SLAVE_ARM_AARCH64

/* macros */

/* Uncomment to enable the static MMU; it is available only when LPAE is enabled. */
/* D-cache enable, Control Register bit */

#define SYS_CTL_DCACHE_ENABLE           (0x1 << 2)

/* I-cache enable, Control Register bit */

#define SYS_CTL_ICACHE_ENABLE           (0x1 << 12)

/* SMP mode enable, Aux Ctrl register bit */

#define AUX_CTL_SMP_MODE_EN             (0x1 << 6)

/* invalidate branch target buffer with I-cache, Aux Ctrl register bit */

#define AUX_CTL_BTB_INVAL_EN            (0x1)

/* force in-order D-cache requests to same set/way, Aux Ctrl register bit */

#define AUX_CTL_INORDER_DCACHE_REQ_EN   (0x1 << 23)

/* internals */
FUNC_EXPORT(sysInit)                /* start of system code */

#ifndef _ARCH_SUPPORTS_PROTECT_INTERRUPT_STACK
FUNC_EXPORT(sysIntStackSplit)       /* routine to split interrupt stack */
#endif /* !_ARCH_SUPPORTS_PROTECT_INTERRUPT_STACK */

FUNC_EXPORT(archPwrDown)            /* power down callback */

#ifdef _WRS_CONFIG_SMP
FUNC_EXPORT(sysCpuInit)             /* secondary CPU initialization */
FUNC_EXPORT(sysMPCoreApResetLoop)   /* secondary CPU reset loop */
DATA_EXPORT(sysMPCoreStartup)       /* startup data for secondary CPUs */
#endif /* _WRS_CONFIG_SMP */

    .globl  FUNC(arm_mmu_ttbr)
    .globl  FUNC(armv7a_secondary_wake)
    .globl  FUNC(arm_int_enable)
    .globl  FUNC(arm_cpu_phy_index)
/* externals */

FUNC_IMPORT(usrInit)                /* system initialization routine */
FUNC_IMPORT(excVBARSet)             /* set exception vector base address */

FUNC_EXPORT(__inline__GetVirtTimerCnt)
FUNC_EXPORT(__inline__GetPhyTimerCnt)
FUNC_EXPORT(__inline__ArmGicIpiGen)

#ifndef _ARCH_SUPPORTS_PROTECT_INTERRUPT_STACK
DATA_IMPORT(vxSvcIntStackBase)      /* base of SVC-mode interrupt stack */
DATA_IMPORT(vxSvcIntStackEnd)       /* end of SVC-mode interrupt stack */
DATA_IMPORT(vxIrqIntStackBase)      /* base of IRQ-mode interrupt stack */
DATA_IMPORT(vxIrqIntStackEnd)       /* end of IRQ-mode interrupt stack */
#endif /* !_ARCH_SUPPORTS_PROTECT_INTERRUPT_STACK */

#ifdef _WRS_CONFIG_SMP
DATA_IMPORT(vxKernelVars)
#endif /* _WRS_CONFIG_SMP */
    .globl  FUNC(sys_icc_igrpen1_set)
    .globl  FUNC(sys_icc_igrpen1_get)
    .globl  FUNC(sys_icc_ctlr_set)
    .globl  FUNC(sys_icc_bpr1_set)
    .globl  FUNC(sys_icc_hppir1_get)
    .globl  FUNC(sys_icc_eoir1_set)
    .globl  FUNC(sys_icc_pmr_set)
    .globl  FUNC(sys_icc_pmr_get)
    .globl  FUNC(sys_icc_rpr_get)
    .globl  FUNC(sys_icc_iar1_get)
    .globl  FUNC(vxMpidrGet)
    .globl  FUNC(armSmcCall)
    .globl  FUNC(sys_cntkctl_get)
    .globl  FUNC(sys_dfar_get)
    .globl  FUNC(sys_dfsr_get)
    .globl  FUNC(sys_ifar_get)
    .globl  FUNC(sys_ifsr_get)

    .text
    .balign 4
/*******************************************************************************
*
* sysInit - start after boot
*
* This routine is the system start-up entry point for VxWorks in RAM, the
* first code executed after booting. It disables interrupts, sets up the
* stack, and jumps to the C routine usrInit() in usrConfig.c.
*
* The initial stack is set to grow down from the address of sysInit(). This
* stack is used only by usrInit() and is never used again. Memory for the
* stack must be accounted for when determining the system load address.
*
* NOTE: This routine should not be called by the user.
*
* RETURNS: N/A
*
* void sysInit (UINT32 startType)    /@ THIS IS NOT A CALLABLE ROUTINE @/
*
*/
FUNC_BEGIN(sysInit)
#ifdef ARM_AARCH64

    /*
     * If the core enters in AArch64 state, determine the current exception
     * level and drop to EL1 (el1_mode) via an exception return, enabling
     * the GIC system-register interface (ICC_SRE_ELx) on the way down.
     * The instructions are emitted as .long literals because this file is
     * assembled as AArch32 code.
     */

    .long   0xd5384240          /* mrs  x0, currentel */
    .long   0xd342fc00          /* lsr  x0, x0, #2 */
    .long   0x92400400          /* and  x0, x0, #0x3 */
    .long   0xf1000c1f          /* cmp  x0, #0x3 */
    .long   0x540003a1          /* b.ne el2_mode */

el3_mode:
    .long   0xd53ecca0          /* mrs  x0, s3_6_c12_c12_5 - ICC_SRE_EL3 */
    .long   0xb2400c00          /* orr  x0, x0, #0xf */
    .long   0xd51ecca0          /* msr  s3_6_c12_c12_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd53cc9a0          /* mrs  x0, s3_4_c12_c9_5 - ICC_SRE_EL2 */
    .long   0xb2400c00          /* orr  x0, x0, #0xf */
    .long   0xd51cc9a0          /* msr  s3_4_c12_c9_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd538cca0          /* mrs  x0, s3_0_c12_c12_5 - ICC_SRE_EL1 */
    .long   0xb2400000          /* orr  x0, x0, #0x1 */
    .long   0xd518cca0          /* msr  s3_0_c12_c12_5, x0 */
    .long   0xd5033fdf          /* isb */

    .long   0xd2803620          /* mov  x0, #0x1b1 */
    .long   0xd51e1100          /* msr  scr_el3, x0 */
    .long   0xd2867fe0          /* mov  x0, #0x33ff */
    .long   0xd51c1140          /* msr  cptr_el2, x0 */
    .long   0xd2810000          /* mov  x0, #0x800 */
    .long   0xf2a61a00          /* movk x0, #0x30d0, lsl #16 */
    .long   0xd5181000          /* msr  sctlr_el1, x0 */
    .long   0x910003e0          /* mov  x0, sp */
    .long   0xd51c4100          /* msr  sp_el1, x0 */
    .long   0xd53ec000          /* mrs  x0, vbar_el3 */
    .long   0xd518c000          /* msr  vbar_el1, x0 */
    .long   0xd2803a60          /* mov  x0, #0x1d3 */
    .long   0xd51e4000          /* msr  spsr_el3, x0 */
    .long   0x10000500          /* adr  x0, el1_mode */
    .long   0xd51e4020          /* msr  elr_el3, x0 */
    .long   0xd69f03e0          /* eret */

el2_mode:
    .long   0xd53cc9a0          /* mrs  x0, s3_4_c12_c9_5 - ICC_SRE_EL2 */
    .long   0xb2400c00          /* orr  x0, x0, #0xf */
    .long   0xd51cc9a0          /* msr  s3_4_c12_c9_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd538cca0          /* mrs  x0, s3_0_c12_c12_5 - ICC_SRE_EL1 */
    .long   0xb2400000          /* orr  x0, x0, #0x1 */
    .long   0xd518cca0          /* msr  s3_0_c12_c12_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd53ce100          /* mrs  x0, cnthctl_el2 */
    .long   0xb2400400          /* orr  x0, x0, #0x3 */
    .long   0xd51ce100          /* msr  cnthctl_el2, x0 */
    .long   0xd51ce07f          /* msr  cntvoff_el2, xzr */
    .long   0xd5380000          /* mrs  x0, midr_el1 */
    .long   0xd53800a1          /* mrs  x1, mpidr_el1 */
    .long   0xd51c0000          /* msr  vpidr_el2, x0 */
    .long   0xd51c00a1          /* msr  vmpidr_el2, x1 */
    .long   0xd2867fe0          /* mov  x0, #0x33ff */
    .long   0xd51c1140          /* msr  cptr_el2, x0 */
    .long   0xd51c117f          /* msr  hstr_el2, xzr */
    .long   0xd2a00600          /* mov  x0, #0x300000 */
    .long   0xd5181040          /* msr  cpacr_el1, x0 */
    .long   0xd2800000          /* mov  x0, #0x0 */
    .long   0xb2630000          /* orr  x0, x0, #0x20000000 */
    .long   0xd51c1100          /* msr  hcr_el2, x0 */
    .long   0xd53c1100          /* mrs  x0, hcr_el2 */
    .long   0xd2810000          /* mov  x0, #0x800 */
    .long   0xf2a61a00          /* movk x0, #0x30d0, lsl #16 */
    .long   0xd5181000          /* msr  sctlr_el1, x0 */
    .long   0x910003e0          /* mov  x0, sp */
    .long   0xd51c4100          /* msr  sp_el1, x0 */
    .long   0xd53cc000          /* mrs  x0, vbar_el2 */
    .long   0xd518c000          /* msr  vbar_el1, x0 */
    .long   0xd2803a60          /* mov  x0, #0x1d3 */
    .long   0xd51c4000          /* msr  spsr_el2, x0 */
    .long   0x10000060          /* adr  x0, el1_mode */
    .long   0xd51c4020          /* msr  elr_el2, x0 */
    .long   0xd69f03e0          /* eret */

el1_mode:
#endif /* ARM_AARCH64 */
    MOV     r12, r0                 /* save startType */

    /* Set initial stack pointer so stack grows down from start of code */

    ADR     sp, FUNC(sysInit)       /* initialize stack pointer */

    /*
     * Set the processor to a known state: the reset state with
     * MMU and caches disabled and program flow/branch prediction enabled.
     * See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition"
     * (ARM DDI 0406) and "Cortex-A15 Processor Technical Reference Manual"
     * (ARM DDI 0438) for details.
     */

    MRC     p15, 0, r2, c1, c0, 0   /* read Control Register into r2 */

    LDR     r1, =MMU_INIT_VALUE     /* defined in mmuCortexA8Lib.h */
    MCR     p15, 0, r1, c1, c0, 0   /* write to Control Register */
    ISB                             /* ensure processor state is set */

    /* invalidate the data caches, flushing them if necessary */

    LDR     r1, =SYS_CTL_DCACHE_ENABLE
    AND     r2, r2, r1
    TEQ     r2, r1                  /* check if data caches were enabled */
    BNE     dCacheFlushBypass

    /*
     * Note the following about _CORTEX_AR_ENTIRE_DATA_CACHE_OP:
     * registers r0-r3 are modified, r4-r8 are preserved via the stack and
     * a DSB is performed before returning.
     */

    _CORTEX_AR_ENTIRE_DATA_CACHE_OP (c14)   /* flush & invalidate data caches */
    B       dCacheInvalDone

dCacheFlushBypass:
    _CORTEX_AR_ENTIRE_DATA_CACHE_OP (c6)    /* invalidate data caches */

dCacheInvalDone:
    ISB                             /* Instruction Synchronization Barrier */

    /* set Context ID Register to zero, including Address Space ID */

    MOV     r1, #0                  /* the cache op above modifies r0-r3 */
    MCR     p15, 0, r1, c13, c0, 1

    /* disable interrupts in CPU and switch to SVC32 mode */

    MRS     r1, cpsr
    BIC     r1, r1, #MASK_MODE
    ORR     r1, r1, #MODE_SVC32 | I_BIT | F_BIT
    MSR     cpsr, r1
    ADR     sp, FUNC(sysInit)       /* initialize stack pointer */
    MOV     fp, #0                  /* initialize frame pointer */

    /* Make sure the boot type is set correctly. */

    MOV     r0, r12                 /* restore startType */

    MOV     r1, #BOOT_NORMAL
    CMP     r1, r0
    BEQ     L$_Good_Boot

    MOV     r1, #BOOT_NO_AUTOBOOT
    CMP     r1, r0
    BEQ     L$_Good_Boot

    MOV     r1, #BOOT_CLEAR
    CMP     r1, r0
    BEQ     L$_Good_Boot

    MOV     r1, #BOOT_QUICK_AUTOBOOT
    CMP     r1, r0
    BEQ     L$_Good_Boot

    MOV     r0, #BOOT_NORMAL        /* unknown startType: use default */

L$_Good_Boot:

    /* now call usrInit (startType) */

    B       FUNC(usrInit)

FUNC_END(sysInit)
#ifndef _ARCH_SUPPORTS_PROTECT_INTERRUPT_STACK
/*******************************************************************************
*
* sysIntStackSplit - split interrupt stack and set interrupt stack pointers
*
* This routine is called, via a function pointer, during kernel
* initialisation. It splits the allocated interrupt stack into IRQ and
* SVC-mode stacks and sets the processor's IRQ stack pointer. Note that
* the pointer passed in points to the bottom of the allocated stack, i.e.
* the highest address + 1.
*
* The IRQ stack needs 6 words per nested interrupt;
* SVC mode will need a good deal more for the C interrupt handlers.
* For now, use the ratio 1:7 with any excess allocated to the SVC-mode
* stack at the lowest address.
*
* Note that FIQ is not handled by VxWorks, so no stack is allocated for it.
*
* The stacks and the variables that describe them look like this.
* \cs
*
*                  - HIGH MEMORY -
*          ------------------------------ <--- vxIrqIntStackBase (r0 on entry)
*          |                            |
*          |          IRQ-mode          |
*          |       interrupt stack      |
*          |                            |
*          ------------------------------ <--{ vxIrqIntStackEnd
*          |                            |    { vxSvcIntStackBase
*          |          SVC-mode          |
*          |       interrupt stack      |
*          |                            |
*          ------------------------------ <--- vxSvcIntStackEnd
*                  - LOW MEMORY -
* \ce
*
* NOTE: This routine should not be called by the user.
*
* void sysIntStackSplit
*     (
*     char *pBotStack    /@ pointer to bottom of interrupt stack @/
*     long size          /@ size of stack @/
*     )
*
*/
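/*
* For example (illustrative numbers only, not from the BSP): with an 8 KB
* allocation, r0 = pBotStack and r1 = 0x2000 on entry, the code below
* reserves r1 >> 3 = 1 KB for the IRQ-mode stack at the top and leaves the
* remaining 7 KB for the SVC-mode stack below it:
*
*     vxIrqIntStackBase = pBotStack
*     vxIrqIntStackEnd  = vxSvcIntStackBase = pBotStack - 0x400
*     vxSvcIntStackEnd  = pBotStack - 0x2000
*/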
FUNC_BEGIN(sysIntStackSplit)

    /*
     * r0 = base of space allocated for stacks (i.e. highest address)
     * r1 = size of space
     */

    SUB     r2, r0, r1              /* r2 -> lowest usable address */
    LDR     r3, =vxSvcIntStackEnd
    STR     r2, [r3]                /* == end of SVC-mode stack */
    SUB     r2, r0, r1, ASR #3      /* leave 1/8 for IRQ */
    LDR     r3, =vxSvcIntStackBase
    STR     r2, [r3]

    /* now allocate IRQ stack, setting irq_sp */

    LDR     r3, =vxIrqIntStackEnd
    STR     r2, [r3]
    LDR     r3, =vxIrqIntStackBase
    STR     r0, [r3]

    MRS     r2, cpsr
    BIC     r3, r2, #MASK_MODE
    ORR     r3, r3, #MODE_IRQ32 | I_BIT /* switch to IRQ mode to set irq_sp */
    MSR     cpsr, r3
    MOV     sp, r0

    /* switch back to original mode and return */

    MSR     cpsr, r2

    MOV     pc, lr

FUNC_END(sysIntStackSplit)

#endif /* !_ARCH_SUPPORTS_PROTECT_INTERRUPT_STACK */
/* arm_int_enable - enable IRQ and FIQ interrupts in the CPSR */

_ARM_FUNCTION_CALLED_FROM_C(arm_int_enable)
    MRS     r1, cpsr
    BIC     r1, r1, #(I_BIT | F_BIT)
    MSR     cpsr, r1

    MOV     pc, lr

/* armv7a_secondary_wake - wake secondary cores waiting in WFE */

_ARM_FUNCTION_CALLED_FROM_C(armv7a_secondary_wake)
    DSB
    SEV
    ISB
#if (ARM_THUMB)
    BX      lr
#else
    MOV     pc, lr
#endif

/* arm_cpu_phy_index - return the physical CPU index (MPIDR affinity 0) */

_ARM_FUNCTION_CALLED_FROM_C(arm_cpu_phy_index)
    MRC     p15, 0, r0, c0, c0, 5
    AND     r0, r0, #0x0f

    MOV     pc, lr
/*******************************************************************************
*
* archPwrDown - put the processor into reduced-power mode
*
* This routine activates the reduced-power mode.
* It is called by the scheduler when the kernel enters the idle loop.
* This function is called by default; override it with
* vxArchPowerDownRtnSet().
*
* RETURNS: void.
*
* SEE ALSO: vxArchPowerDownRtnSet().
*
* void archPwrDown (void)
*
*/

FUNC_BEGIN(archPwrDown)

    /*
     * NB: the debugger doesn't like powering down.
     * Use a forever loop for debug:
     *foreverLoop:
     *  B foreverLoop
     */

    /*
     * Write to coprocessor 15 register 7 (the core control)
     * register to set idle
     */

    MOV     r0, #PWRMODE_IDLE
    MCR     CP_CORECTL, 0, r0, c7, c0, 4    /* idle processor */

    /* Return after waking up */

    MOV     pc, lr

FUNC_END(archPwrDown)
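/*
* Illustrative only (hypothetical C; the handler name and FUNCPTR cast are
* assumptions, not from this BSP): a BSP can install its own power-down
* routine in place of the default above:
*
*     void myPwrDown (void)                /@ hypothetical replacement @/
*         {
*         /@ board-specific low-power sequence @/
*         }
*
*     vxArchPowerDownRtnSet ((FUNCPTR) myPwrDown);
*/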
#ifdef _WRS_CONFIG_SMP
/*******************************************************************************
*
* sysCpuInit - entry point for non-boot CPUs
*
* This routine performs initial CPU init, copies startup parameters
* from the sysMPCoreStartup structure, and enters sysCpuStart to
* complete the per-CPU startup.
*
* There are no arguments to this routine.
*
* RETURNS: Does not return.
*
*/
FUNC_BEGIN(sysCpuInit)
#ifdef SLAVE_ARM_AARCH64

    /*
     * If the secondary core enters in AArch64 state, determine the current
     * exception level and drop to EL1 (slave_el1_mode) via an exception
     * return, enabling the GIC system-register interface (ICC_SRE_ELx) on
     * the way down. The instructions are emitted as .long literals because
     * this file is assembled as AArch32 code.
     */

    .long   0xd5384240          /* mrs  x0, currentel */
    .long   0xd342fc00          /* lsr  x0, x0, #2 */
    .long   0x92400400          /* and  x0, x0, #0x3 */
    .long   0xf1000c1f          /* cmp  x0, #0x3 */
    .long   0x540003a1          /* b.ne slave_el2_mode */

slave_el3_mode:
    .long   0xd53ecca0          /* mrs  x0, s3_6_c12_c12_5 - ICC_SRE_EL3 */
    .long   0xb2400c00          /* orr  x0, x0, #0xf */
    .long   0xd51ecca0          /* msr  s3_6_c12_c12_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd53cc9a0          /* mrs  x0, s3_4_c12_c9_5 - ICC_SRE_EL2 */
    .long   0xb2400c00          /* orr  x0, x0, #0xf */
    .long   0xd51cc9a0          /* msr  s3_4_c12_c9_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd538cca0          /* mrs  x0, s3_0_c12_c12_5 - ICC_SRE_EL1 */
    .long   0xb2400000          /* orr  x0, x0, #0x1 */
    .long   0xd518cca0          /* msr  s3_0_c12_c12_5, x0 */
    .long   0xd5033fdf          /* isb */

    .long   0xd2803620          /* mov  x0, #0x1b1 */
    .long   0xd51e1100          /* msr  scr_el3, x0 */
    .long   0xd2867fe0          /* mov  x0, #0x33ff */
    .long   0xd51c1140          /* msr  cptr_el2, x0 */
    .long   0xd2810000          /* mov  x0, #0x800 */
    .long   0xf2a61a00          /* movk x0, #0x30d0, lsl #16 */
    .long   0xd5181000          /* msr  sctlr_el1, x0 */
    .long   0x910003e0          /* mov  x0, sp */
    .long   0xd51c4100          /* msr  sp_el1, x0 */
    .long   0xd53ec000          /* mrs  x0, vbar_el3 */
    .long   0xd518c000          /* msr  vbar_el1, x0 */
    .long   0xd2803a60          /* mov  x0, #0x1d3 */
    .long   0xd51e4000          /* msr  spsr_el3, x0 */
    .long   0x10000500          /* adr  x0, slave_el1_mode */
    .long   0xd51e4020          /* msr  elr_el3, x0 */
    .long   0xd69f03e0          /* eret */

slave_el2_mode:
    .long   0xd53cc9a0          /* mrs  x0, s3_4_c12_c9_5 - ICC_SRE_EL2 */
    .long   0xb2400c00          /* orr  x0, x0, #0xf */
    .long   0xd51cc9a0          /* msr  s3_4_c12_c9_5, x0 */
    .long   0xd5033fdf          /* isb */
    .long   0xd538cca0          /* mrs  x0, s3_0_c12_c12_5 - ICC_SRE_EL1 */
    .long   0xb2400000          /* orr  x0, x0, #0x1 */
    .long   0xd518cca0          /* msr  s3_0_c12_c12_5, x0 */
    .long   0xd5033fdf          /* isb */

    .long   0xd53ce100          /* mrs  x0, cnthctl_el2 */
    .long   0xb2400400          /* orr  x0, x0, #0x3 */
    .long   0xd51ce100          /* msr  cnthctl_el2, x0 */
    .long   0xd51ce07f          /* msr  cntvoff_el2, xzr */
    .long   0xd5380000          /* mrs  x0, midr_el1 */
    .long   0xd53800a1          /* mrs  x1, mpidr_el1 */
    .long   0xd51c0000          /* msr  vpidr_el2, x0 */
    .long   0xd51c00a1          /* msr  vmpidr_el2, x1 */
    .long   0xd2867fe0          /* mov  x0, #0x33ff */
    .long   0xd51c1140          /* msr  cptr_el2, x0 */
    .long   0xd51c117f          /* msr  hstr_el2, xzr */
    .long   0xd2a00600          /* mov  x0, #0x300000 */
    .long   0xd5181040          /* msr  cpacr_el1, x0 */
    .long   0xd2800000          /* mov  x0, #0x0 */
    .long   0xb2630000          /* orr  x0, x0, #0x20000000 */
    .long   0xd51c1100          /* msr  hcr_el2, x0 */
    .long   0xd53c1100          /* mrs  x0, hcr_el2 */
    .long   0xd2810000          /* mov  x0, #0x800 */
    .long   0xf2a61a00          /* movk x0, #0x30d0, lsl #16 */
    .long   0xd5181000          /* msr  sctlr_el1, x0 */
    .long   0x910003e0          /* mov  x0, sp */
    .long   0xd51c4100          /* msr  sp_el1, x0 */
    .long   0xd53cc000          /* mrs  x0, vbar_el2 */
    .long   0xd518c000          /* msr  vbar_el1, x0 */
    .long   0xd2803a60          /* mov  x0, #0x1d3 */
    .long   0xd51c4000          /* msr  spsr_el2, x0 */
    .long   0x10000060          /* adr  x0, slave_el1_mode */
    .long   0xd51c4020          /* msr  elr_el2, x0 */
    .long   0xd69f03e0          /* eret */

slave_el1_mode:
#endif /* SLAVE_ARM_AARCH64 */
    /* disable interrupts in CPU and switch to SVC32 mode */

#if 0   /* debug aid (disabled) */
    ldr     r2, =0x28001000
    mov     r1, #0x41
    strb    r1, [r2]
    ISB
    DSB
#endif

    MRS     r1, cpsr
    BIC     r1, r1, #MASK_MODE
    ORR     r1, r1, #MODE_SVC32 | I_BIT | F_BIT
    MSR     cpsr, r1

    /*
     * The SPSR does not have a pre-defined reset value,
     * so correct the endianness (BE bit) in the SPSR here.
     */

    MRS     r0, spsr
    BIC     r0, r0, #(0x1 << 9)     /* little-endian */
    MSR     spsr_x, r0
    /*
     * Set the processor to a known state: the reset state with
     * MMU and caches disabled and program flow/branch prediction enabled.
     * See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition"
     * (ARM DDI 0406) and "Cortex-A15 Processor Technical Reference Manual"
     * (ARM DDI 0438) for details.
     */

    LDR     r1, =MMU_INIT_VALUE     /* defined in mmuArmLib.h */
    MCR     p15, 0, r1, c1, c0, 0   /* write to control register */
    ISB                             /* ensure processor state is set */

    /*
     * The _CORTEX_AR_ENTIRE_DATA_CACHE_OP macro uses the stack to save and
     * restore registers, so set the stack pointer before the macro is called.
     */

    LDR     sp, =startupStack

    /*
     * Flush & invalidate the data caches.
     *
     * Note the following about _CORTEX_AR_ENTIRE_DATA_CACHE_OP:
     * registers r0-r3 are modified, other registers used are preserved via
     * the stack and a DSB is performed before returning.
     */

    _CORTEX_AR_ENTIRE_DATA_CACHE_OP (c14)   /* flush & invalidate data caches */

    /* Invalidate TLBs */

    MOV     r1, #0                  /* data should be zero (SBZ) */
    MCR     p15, 0, r1, c8, c7, 0

    /* Set Context ID Register to zero, including Address Space ID */

    MCR     p15, 0, r1, c13, c0, 1

    /* set exception base address */

    MOV     r0, #LOCAL_MEM_LOCAL_ADRS
    BL      FUNC(excVBARSet)
    /* Get cpuIndex from the MPIDR affinity fields */

#if 0
    MRC     p15, 0, r5, c0, c0, 5
    MOV     r0, r5
    MOV     r0, r0, LSR #6
    AND     r5, r5, #0xF
    ADD     r5, r5, r0
    AND     r5, r5, #0xF
#else
    /* cpuIndex = Aff0 + (Aff1 << 1), i.e. two CPUs per cluster */

    MRC     p15, 0, r5, c0, c0, 5
    AND     r2, r5, #0xff           /* r2 = Aff0 (CPU within cluster) */
    MOVS    r5, r5, LSR #8
    AND     r3, r5, #0xff           /* r3 = Aff1 (cluster ID) */
    MOVS    r3, r3, LSL #1
    ADDS    r5, r2, r3
#endif
    /* Get the address of the startup data, sysMPCoreStartup[cpuIndex] */

#ifndef _WRS_CONFIG_ARM_LPAE
    MOV     r4, r5, LSL #4          /* 16 bytes per entry */
#else /* _WRS_CONFIG_ARM_LPAE */
    MOV     r4, #24                 /* 24 bytes per entry */
    MUL     r4, r4, r5
#endif /* !_WRS_CONFIG_ARM_LPAE */

    LDR     r0, =sysMPCoreStartup
    ADD     r4, r4, r0
    /*
     * Set the Translation Table Base Register
     *
     * r4 = pointer to the startup data for this CPU
     */

#ifndef _WRS_CONFIG_ARM_LPAE
    LDR     r0, [r4, #0xC]          /* get Trans. Tbl Base address */
    MOV     r1, #VXWORKS_KERNEL_ASID
    BL      mmuCortexA8TtbrSetAll
#else /* _WRS_CONFIG_ARM_LPAE */
    LDR     r0, [r4, #0xC]          /* get Trans. Tbl Base Ctrl address */
    BL      mmuCortexA8TtbcrSet
    LDR     r0, [r4, #0x10]         /* get Trans. Tbl Base Low address */
    LDR     r1, [r4, #0x14]         /* get Trans. Tbl Base High address */
    BL      mmuCortexA8TtbrSet64
#endif /* !_WRS_CONFIG_ARM_LPAE */

#ifndef _WRS_CONFIG_ARM_LPAE
    MOV     r0, #MMU_DACR_VAL_NORMAL
    BL      mmuCortexA8DacrSet
#else /* _WRS_CONFIG_ARM_LPAE */
    LDR     r0, =MMU_MAIR0_VALUE
    LDR     r1, =MMU_MAIR1_VALUE
    BL      mmuCortexA8MairSet
#endif /* !_WRS_CONFIG_ARM_LPAE */

    /* Enable MMU and caches */

    LDR     r0, =(SYS_CTL_ICACHE_ENABLE | SYS_CTL_DCACHE_ENABLE)
    BL      mmuCortexA8AEnable

    /* Save the cache state */

    MOV     r2, r0
    _ARM_PER_CPU_ADRS_GET(r0, r1, cacheArchState)
    STR     r2, [r0]
    /*
     * Clear the kernel interrupt counter and
     * architecture interrupt nesting counter.
     * This is needed because the secondary CPU startup process
     * will bypass the normal interrupt exit path (intExit).
     */

    _ARM_PER_CPU_ADRS_GET(r0, r1, intCnt)

    MOV     r1, #0
    STR     r1, [r0]

    _ARM_PER_CPU_ADRS_GET(r0, r1, intNestingLevel)

    MOV     r1, #0
    STR     r1, [r0]
    /*
     * r4 = pointer to sysMPCoreStartup arguments array
     * r5 = CPU number
     *
     * Set up the call to start VxWorks such that:
     *
     * r0 = vxWorks kernel entry point
     * r1 = CPU number
     * r2 = CPU startup entry point, sysCpuStart
     */

    LDR     sp, [r4, #4]            /* set the kernel stack pointer */

    MOV     r1, r5

sysMPCoreApStartLoop:
    LDR     r2, [r4]
    CMP     r2, #0
    LDRNE   r0, [r4, #8]            /* load vxWorks kernel entry point */
    BLXNE   r2                      /* enter vxWorks */

FUNC_END(sysCpuInit)

#endif /* _WRS_CONFIG_SMP */
/******************************************************************************/

/*
 * PC-relative-addressable pointers
 * note "_" after "$" to stop the preprocessor performing substitution
 */

    .balign 4

#ifdef _WRS_CONFIG_SMP

L$_vxKernelVars:
    .long   FUNC(vxKernelVars)
L$_arm_mmu_ttbr:
    .long   FUNC(arm_mmu_ttbr)

arm_mmu_ttbr:
    .long   0
#endif /* _WRS_CONFIG_SMP */
/*******************************************************************************
*
* Array used for passing startup parameters from the boot CPU to secondary
* CPUs, aligned on a cache line.
*
* struct sysMPCoreStartup
*     {
*     UINT32 newPC;      /@ address of 'C' based startup code @/
*     UINT32 newSP;      /@ stack pointer for startup @/
*     UINT32 newArg;     /@ vxWorks kernel entry point @/
* #ifndef _WRS_CONFIG_ARM_LPAE
*     UINT32 newSync;    /@ Translation Table Base and sync @/
* #else /@ _WRS_CONFIG_ARM_LPAE @/
*     UINT32 ttbcr;      /@ Translation Table Base Control Register @/
*     UINT64 newSync;    /@ Translation Table Base and sync @/
* #endif /@ !_WRS_CONFIG_ARM_LPAE @/
*     };
*/

    .data
    .balign 64
VAR_LABEL(sysMPCoreStartup)
#ifndef _WRS_CONFIG_ARM_LPAE
    .fill   32,4                    /* array for 4 cores */
#else /* _WRS_CONFIG_ARM_LPAE */
    .fill   48,4                    /* array for 4 cores */
#endif /* !_WRS_CONFIG_ARM_LPAE */
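/*
* Illustrative only (hypothetical C, non-LPAE layout; names other than
* sysMPCoreStartup are assumptions): the boot CPU publishes a secondary
* core's parameters before waking it, writing newPC last because
* sysCpuInit checks that field for non-zero before branching:
*
*     sysMPCoreStartup[n].newSP   = stackTop;      /@ read into sp @/
*     sysMPCoreStartup[n].newArg  = kernelEntry;   /@ loaded into r0 @/
*     sysMPCoreStartup[n].newSync = ttBase;        /@ translation table @/
*     sysMPCoreStartup[n].newPC   = (UINT32) sysCpuStart;
*/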

    .text
    .balign 4
/* void sys_icc_igrpen1_set(unsigned int) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_igrpen1_set)
    mcr     p15, 0, r0, c12, c12, 7
    isb
    mov     pc, lr

/* unsigned int sys_icc_igrpen1_get(void) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_igrpen1_get)
    mrc     p15, 0, r0, c12, c12, 7
    isb
    mov     pc, lr

/* void sys_icc_ctlr_set(unsigned int) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_ctlr_set)
    mcr     p15, 0, r0, c12, c12, 4
    mov     pc, lr

/* unsigned int sys_icc_hppir1_get(void) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_hppir1_get)
    mrc     p15, 0, r0, c12, c12, 2
    isb
    mov     pc, lr

/* void sys_icc_bpr1_set(unsigned int) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_bpr1_set)
    mcr     p15, 0, r0, c12, c12, 3
    isb
    mov     pc, lr

/* void sys_icc_eoir1_set(unsigned int) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_eoir1_set)
    mcr     p15, 0, r0, c12, c12, 1
    isb
    mov     pc, lr

/* void sys_icc_pmr_set(unsigned int) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_pmr_set)
    mcr     p15, 0, r0, c4, c6, 0
    isb
    mov     pc, lr

/* unsigned int sys_icc_pmr_get(void) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_pmr_get)
    mrc     p15, 0, r0, c4, c6, 0
    isb
    mov     pc, lr

/* unsigned int sys_icc_rpr_get(void) - read only */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_rpr_get)
    mrc     p15, 0, r0, c12, c11, 3
    isb
    mov     pc, lr

/* unsigned int sys_icc_iar1_get(void) */

_ARM_FUNCTION_CALLED_FROM_C(sys_icc_iar1_get)
    mrc     p15, 0, r0, c12, c12, 0
    isb
    mov     pc, lr

/* unsigned int sys_cntkctl_get(void) */

_ARM_FUNCTION_CALLED_FROM_C(sys_cntkctl_get)
    mrc     p15, 0, r0, c14, c1, 0
    isb
    mov     pc, lr

/* sys_dfar_get - read the Data Fault Address Register */

_ARM_FUNCTION_CALLED_FROM_C(sys_dfar_get)
    mrc     p15, 0, r0, c6, c0, 0
    isb
    mov     pc, lr

/* sys_dfsr_get - read the Data Fault Status Register */

_ARM_FUNCTION_CALLED_FROM_C(sys_dfsr_get)
    mrc     p15, 0, r0, c5, c0, 0
    isb
    mov     pc, lr

/* sys_ifar_get - read the Instruction Fault Address Register */

_ARM_FUNCTION_CALLED_FROM_C(sys_ifar_get)
    mrc     p15, 0, r0, c6, c0, 2
    isb
    mov     pc, lr

/* sys_ifsr_get - read the Instruction Fault Status Register */

_ARM_FUNCTION_CALLED_FROM_C(sys_ifsr_get)
    mrc     p15, 0, r0, c5, c0, 1
    isb
    mov     pc, lr
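/*
* Illustrative only (hypothetical C dispatcher; isrDispatch is an assumed
* name): a typical GICv3 acknowledge/EOI sequence built on the accessors
* above:
*
*     unsigned int intId = sys_icc_iar1_get ();    /@ acknowledge @/
*
*     if (intId < 1020)                            /@ 1020-1023 are special @/
*         {
*         isrDispatch (intId);                     /@ hypothetical handler @/
*         sys_icc_eoir1_set (intId);               /@ signal end of interrupt @/
*         }
*/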
#if 0
_ARM_FUNCTION_CALLED_FROM_C(vxCpsrGet)
    MRS     r0, cpsr
    isb
    mov     pc, lr
#endif

/* vxMpidrGet - read the Multiprocessor Affinity Register (MPIDR) */

_ARM_FUNCTION_CALLED_FROM_C(vxMpidrGet)
    MRC     p15, 0, r0, c0, c0, 5
    MOV     pc, lr

    .code   32
    .balign 4
/*******************************************************************************
*
* armSmcCall - initiate an SMC call
*
* This routine initiates an SMC call, which traps the processor into Monitor
* mode. The ARM SMC Calling Convention defines that up to eight registers can
* be exchanged during an SMC call. The input parameter contains eight INT32
* values which are passed in the SMC call; similarly, the output parameter
* contains eight INT32 values which are returned from the SMC call.
*
* \NOMANUAL
*
* RETURNS: N/A
*
* void armSmcCall
*     (
*     ARM_SMC_REGS * input,    /@ r0 - input register values @/
*     ARM_SMC_REGS * output    /@ r1 - output register values @/
*     )
*
*/

FUNC_BEGIN(armSmcCall)
    stmdb   sp!, {r0-r7}        /* save clobbered registers to stack */
    ldr     r12, [sp, #(4 * 0)] /* get 1st argument (ptr to input struct) */
    ldmia   r12, {r0-r7}        /* load input arguments into r0-r7 */
    smc     #0
    ldr     r12, [sp, #(4 * 1)] /* get 2nd argument (ptr to output struct) */
    stmia   r12, {r0-r7}        /* store output results from r0-r7 */
    ldmfd   sp!, {r0-r7}        /* restore clobbered registers from stack */
    bx      lr
FUNC_END(armSmcCall)
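/*
* Illustrative only (hypothetical C; the ARM_SMC_REGS field names r0-r7 are
* assumptions): the SMC function ID goes in the first register and any
* arguments in the rest, with results read back from the output structure:
*
*     ARM_SMC_REGS in  = { 0 };
*     ARM_SMC_REGS out = { 0 };
*
*     in.r0 = 0x84000000;              /@ e.g. PSCI_VERSION (SMC32) @/
*     armSmcCall (&in, &out);
*     version = out.r0;                /@ result returned in r0 @/
*/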
/* __inline__GetVirtTimerCnt - read the 64-bit virtual timer count (r0/r1) */

FUNC_BEGIN(__inline__GetVirtTimerCnt)
    DSB
    .long   0xec510f1e          /* mrrc p15, 1, r0, r1, c14 - read CNTVCT */
    ISB
    MOV     pc, lr
FUNC_END(__inline__GetVirtTimerCnt)

/* __inline__GetPhyTimerCnt - read the 64-bit physical timer count (r0/r1) */

FUNC_BEGIN(__inline__GetPhyTimerCnt)
    DSB
    .long   0xec510f0e          /* mrrc p15, 0, r0, r1, c14 - read CNTPCT */
    ISB
    MOV     pc, lr
FUNC_END(__inline__GetPhyTimerCnt)

#ifdef __DCC__
FUNC_BEGIN(__inline__ArmGicIpiGen)
    DSB
    .word   0xec410f0c          /* mcrr p15, 0, r0, r1, c12 */
    ISB
    MOV     pc, lr
FUNC_END(__inline__ArmGicIpiGen)
#endif
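/*
* Illustrative only (hypothetical C; cntFrq is an assumed variable holding
* the counter frequency): the 64-bit count is returned in r0/r1, so a
* caller can measure elapsed time as:
*
*     UINT64 t0 = __inline__GetPhyTimerCnt ();
*     ...
*     UINT64 us = ((__inline__GetPhyTimerCnt () - t0) * 1000000ULL) / cntFrq;
*/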
/*******************************************************************************
*
* Temporary stack for the secondary core.
* Align on a separate cache line to avoid conflict with the boot core's cache
* contents during the secondary core's initial cache cleaning.
*/

    .balign 64
    .fill   6,4                     /* depth of 6 registers */
VAR_LABEL(startupStack)