/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <cpu_data.h>
#include <ea_handle.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <runtime_svc.h>
#include <smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Handle External Abort by delegating to the platform's EA handler.
	 * Once the platform handler returns, the macro exits EL3 and returns
	 * to where the abort was taken from.
	 *
	 * This macro assumes that x30 is available for use.
	 *
	 * 'abort_type' is a constant passed to the platform handler,
	 * indicating the cause of the External Abort.
	 */
	.macro	handle_ea abort_type
	/* Save GP registers */
	bl	save_gp_registers

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, \abort_type
	mrs	x1, esr_el3
	adr	x30, el3_exit
	b	delegate_ea
	.endm
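
	/*
	 * Note on the tail call above: 'adr x30, el3_exit' primes the link
	 * register before branching, so the 'ret' at the end of delegate_ea
	 * transfers control straight into el3_exit. Control therefore never
	 * returns to the expansion site of this macro.
	 */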

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions and External Aborts are handled; any other
	 * synchronous exception is reported as unhandled.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF (Performance Measurement Framework) timestamp
	 * region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	/* Extract the exception class from the syndrome register */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Check for instruction and data aborts from a lower EL */
	cmp	x30, #EC_IABORT_LOWER_EL
	b.eq	1f

	cmp	x30, #EC_DABORT_LOWER_EL
	b.ne	2f

1:
	/* Test for the EA bit in the instruction syndrome */
	mrs	x30, esr_el3
	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
	handle_ea #ERROR_EA_SYNC

2:
	/* Other kinds of synchronous exceptions are not handled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	report_unhandled_exception
	.endm
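
	/*
	 * For reference (architectural values from the Arm ARM, assuming the
	 * standard arch.h definitions): the exception class lives in
	 * ESR_EL3[31:26] (ESR_EC_SHIFT = 26, ESR_EC_LENGTH = 6), with
	 * EC_AARCH32_SMC = 0x13, EC_AARCH64_SMC = 0x17,
	 * EC_IABORT_LOWER_EL = 0x20 and EC_DABORT_LOWER_EL = 0x24.
	 */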

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	bl	save_gp_registers

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm
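
	/*
	 * For reference, the register setup above matches the C prototype of
	 * registered handlers in interrupt_mgmt.h (a sketch, assuming the
	 * standard TF definitions):
	 *   uint64_t handler(uint32_t id, uint32_t flags, void *handle,
	 *                    void *cookie);
	 * x0 = id (INTR_ID_UNAVAILABLE here), x1 = flags with the security
	 * state (SCR_EL3.NS) in bit[0], x2 = handle (the saved SP_EL3
	 * context) and x3 = cookie (unused).
	 */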

vector_base runtime_exceptions
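
	/*
	 * Layout note: an AArch64 vector table has 16 entries of 128 bytes
	 * each, i.e. four exception types (Synchronous, IRQ, FIQ, SError)
	 * for each of the four origins below. check_vector_size asserts at
	 * build time that an entry's code fits within its 128-byte slot.
	 */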

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
	check_vector_size irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_el0

vector_entry serror_sp_el0
	b	report_unhandled_exception
	check_vector_size serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
	check_vector_size sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
	check_vector_size fiq_sp_elx

vector_entry serror_sp_elx
	b	report_unhandled_exception
	check_vector_size serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for
	 * SMCs and for traps that are unhandled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch64

vector_entry irq_aarch64
	handle_interrupt_exception irq_aarch64
	check_vector_size irq_aarch64

vector_entry fiq_aarch64
	handle_interrupt_exception fiq_aarch64
	check_vector_size fiq_aarch64

vector_entry serror_aarch64
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for
	 * SMCs and for traps that are unhandled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	handle_sync_exception
	check_vector_size sync_exception_aarch32

vector_entry irq_aarch32
	handle_interrupt_exception irq_aarch32
	check_vector_size irq_aarch32

vector_entry fiq_aarch32
	handle_interrupt_exception fiq_aarch32
	check_vector_size fiq_aarch32

vector_entry serror_aarch32
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	handle_ea #ERROR_EA_ASYNC
	check_vector_size serror_aarch32

	/* ---------------------------------------------------------------------
	 * This macro takes an argument in x16 that is the index in the
	 * 'rt_svc_descs_indices' array, checks that the value in the array is
	 * valid, and loads in x15 the pointer to the handler of that service.
	 * ---------------------------------------------------------------------
	 */
	.macro	load_rt_svc_desc_pointer
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

#if SMCCC_MAJOR_VERSION == 1
	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
#elif SMCCC_MAJOR_VERSION == 2
	/* Verify that the top 3 bits of the loaded index are 0 (w15 <= 31) */
	cmp	w15, #31
	b.hi	smc_unknown
#endif /* SMCCC_MAJOR_VERSION */

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
	.endm
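
	/*
	 * Worked example (illustrative, assuming the usual TF definitions
	 * with FUNCID_OEN_WIDTH == 6 under SMCCC v1): a fast call to the
	 * Standard Service range packs OEN = 4 and call type = 1 into
	 * x16 = 4 | (1 << 6) = 68, so the packed index is always 0..127.
	 * An unregistered slot (the indices array is initialised to -1,
	 * i.e. 0xFF) is then caught by the bit-7 test above, and the handler
	 * pointer is fetched from the descriptor at
	 * (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE) +
	 * (w15 << RT_SVC_SIZE_LOG2).
	 */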

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from where the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC & switching to runtime stacks (SP_EL0)
	 * before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether an AArch32 caller issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 *
	 * Save x4-x29 and sp_el0.
	 */
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	mrs	x18, sp_el0
	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]

	mov	x5, xzr
	mov	x6, sp
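
	/*
	 * For reference, these registers feed the C prototype of runtime
	 * service handlers in runtime_svc.h (a sketch, assuming the standard
	 * TF definitions):
	 *   uintptr_t handler(uint32_t smc_fid, u_register_t x1,
	 *                     u_register_t x2, u_register_t x3,
	 *                     u_register_t x4, void *cookie, void *handle,
	 *                     u_register_t flags);
	 * x5 = cookie (NULL), x6 = handle (the SP_EL3 context) and
	 * x7 = flags, whose NS bit is filled in below.
	 */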

#if SMCCC_MAJOR_VERSION == 1

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	load_rt_svc_desc_pointer

#elif SMCCC_MAJOR_VERSION == 2

	/* Bit 31 must be set */
	tbz	x0, #FUNCID_TYPE_SHIFT, smc_unknown

	/*
	 * Check MSB of namespace to decide between compatibility/vendor and
	 * SPCI/SPRT
	 */
	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor

	/* Namespaces SPRT and SPCI currently unimplemented */
	b	smc_unknown

compat_or_vendor:

	/* Namespace is b'00 (compatibility) or b'01 (vendor) */

	/*
	 * Add the LSB of the namespace (bit [28]) to the OEN [27:24] to create
	 * a 5-bit index into the rt_svc_descs_indices array.
	 *
	 * The low 16 entries of the rt_svc_descs_indices array correspond to
	 * OENs of the compatibility namespace and the top 16 entries of the
	 * array are assigned to the vendor namespace descriptor.
	 */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #(FUNCID_OEN_WIDTH + 1)

	load_rt_svc_desc_pointer

#endif /* SMCCC_MAJOR_VERSION */
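
	/*
	 * In short: under SMCCC v2, function IDs whose namespace field
	 * (bits [29:28]) is b'1x (SPRT/SPCI) are rejected as unknown, while
	 * b'0x IDs index rt_svc_descs_indices with the 5-bit value in
	 * bits [28:24]; load_rt_svc_desc_pointer then checks that the loaded
	 * descriptor index is itself below 32 before using it.
	 */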

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #0

	/*
	 * Save the SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK, restore
	 * GP registers, and return to caller.
	 */
	mov	x0, #SMC_UNK
	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	restore_gp_registers_eret

smc_prohibited:
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #1
	no_ret	report_unhandled_exception
endfunc smc_handler

/*
 * Delegate External Abort handling to the platform's EA handler. This
 * function assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_ea
	/* Save EL3 state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Save ESR as handling might involve lower ELs, and returning back to
	 * EL3 from there would trample the original ESR.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Set up the rest of the arguments, and call the platform External
	 * Abort handler.
	 *
	 * x0: EA reason (already in place)
	 * x1: Exception syndrome (already in place)
	 * x2: Cookie (unused for now)
	 * x3: Context pointer
	 * x4: Flags (security state from SCR for now)
	 */
	mov	x2, xzr
	mov	x3, sp
	ubfx	x4, x4, #0, #1

	/* Switch to runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #0
	mov	sp, x5
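
	/*
	 * The return address in x30 (which points at el3_exit when this
	 * function is entered via handle_ea) must survive the call below.
	 * x29 is callee-saved under the AAPCS64 procedure call standard, so
	 * it is used to carry x30 across plat_ea_handler.
	 */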
	mov	x29, x30
	bl	plat_ea_handler
	mov	x30, x29

	/* Make SP point to context */
	msr	spsel, #1

	/* Restore EL3 state */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Restore ESR_EL3 and SCR_EL3 */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

	ret
endfunc delegate_ea