Browse Source

Merge pull request #927 from jeenu-arm/state-switch

Execution state switch
pull/933/head
davidcunado-arm 8 years ago
committed by GitHub
parent
commit
d6104f5ab4
  1. 10
      bl1/aarch64/bl1_context_mgmt.c
  2. 3
      bl31/bl31_main.c
  3. 91
      docs/arm-sip-service.md
  4. 7
      include/lib/aarch64/arch.h
  5. 8
      include/lib/aarch64/arch_helpers.h
  6. 2
      include/lib/el3_runtime/context_mgmt.h
  7. 1
      include/lib/psci/psci_lib.h
  8. 7
      include/plat/arm/common/arm_sip_svc.h
  9. 11
      include/plat/arm/common/plat_arm.h
  10. 3
      lib/el3_runtime/aarch64/context_mgmt.c
  11. 21
      lib/psci/psci_common.c
  12. 6
      plat/arm/common/arm_common.c
  13. 1
      plat/arm/common/arm_common.mk
  14. 35
      plat/arm/common/arm_sip_svc.c
  15. 197
      plat/arm/common/execution_state_switch.c
  16. 9
      plat/mediatek/mt6795/bl31_plat_setup.c
  17. 6
      plat/qemu/qemu_bl2_setup.c

10
bl1/aarch64/bl1_context_mgmt.c

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -48,8 +48,7 @@ void bl1_prepare_next_image(unsigned int image_id)
* Ensure that the build flag to save AArch32 system registers in CPU
* context is not set for AArch64-only platforms.
*/
if (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL1_SHIFT)
& ID_AA64PFR0_ELX_MASK) == 0x1) {
if (EL_IMPLEMENTED(1) == EL_IMPL_A64ONLY) {
ERROR("EL1 supports AArch64-only. Please set build flag "
"CTX_INCLUDE_AARCH32_REGS = 0");
panic();
@ -75,9 +74,8 @@ void bl1_prepare_next_image(unsigned int image_id)
next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
} else {
/* Use EL2 if supported else use EL1. */
if (read_id_aa64pfr0_el1() &
(ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
/* Use EL2 if supported; else use EL1. */
if (EL_IMPLEMENTED(2)) {
next_bl_ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
} else {

3
bl31/bl31_main.c

@ -148,8 +148,7 @@ void bl31_prepare_next_image_entry(void)
* Ensure that the build flag to save AArch32 system registers in CPU
* context is not set for AArch64-only platforms.
*/
if (((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL1_SHIFT)
& ID_AA64PFR0_ELX_MASK) == 0x1) {
if (EL_IMPLEMENTED(1) == EL_IMPL_A64ONLY) {
ERROR("EL1 supports AArch64-only. Please set build flag "
"CTX_INCLUDE_AARCH32_REGS = 0");
panic();

91
docs/arm-sip-service.md

@ -0,0 +1,91 @@
ARM SiP Service
===============
This document enumerates and describes the ARM SiP (Silicon Provider) services.
SiP services are non-standard, platform-specific services offered by the silicon
implementer or platform provider. They are accessed via the `SMC` instruction
("SMC calls") executed from Exception Levels below EL3. SMC calls for SiP
services:
* Follow [SMC Calling Convention][SMCCC];
* Use SMC function IDs that fall in the SiP range, which are `0xc2000000` -
`0xc200ffff` for 64-bit calls, and `0x82000000` - `0x8200ffff` for 32-bit
calls.
The ARM SiP implementation offers the following services:
* Performance Measurement Framework (PMF)
* Execution State Switching service
Source definitions for ARM SiP service are located in the `arm_sip_svc.h` header
file.
Performance Measurement Framework (PMF)
---------------------------------------
The [Performance Measurement Framework](./firmware-design.md#13--performance-measurement-framework)
allows callers to retrieve timestamps captured at various paths in ARM Trusted
Firmware execution. It's described in detail in [Firmware Design document][Firmware Design].
Execution State Switching service
---------------------------------
Execution State Switching service provides a mechanism for a non-secure lower
Exception Level (either EL2, or NS EL1 if EL2 isn't implemented) to request to
switch its execution state (a.k.a. Register Width), either from AArch64 to
AArch32, or from AArch32 to AArch64, for the calling CPU. This service is only
available when ARM Trusted Firmware is built for AArch64 (i.e. when build option
`ARCH` is set to `aarch64`).
### `ARM_SIP_SVC_EXE_STATE_SWITCH`
Arguments:
uint32_t Function ID
uint32_t PC hi
uint32_t PC lo
uint32_t Cookie hi
uint32_t Cookie lo
Return:
uint32_t
The function ID parameter must be `0x82000020`. It uniquely identifies the
Execution State Switching service being requested.
The parameters _PC hi_ and _PC lo_ define the upper and lower words, respectively,
of the entry point (physical address) at which execution should start, after
Execution State has been switched. When calling from AArch64, _PC hi_ must be 0.
When execution starts at the supplied entry point after Execution State has been
switched, the parameters _Cookie hi_ and _Cookie lo_ are passed in CPU registers
0 and 1, respectively. When calling from AArch64, _Cookie hi_ must be 0.
This call can only be made on the primary CPU, before any secondary CPUs have
been brought up with the `CPU_ON` PSCI call. Otherwise, the call will always fail.
The effect of switching execution state is as if the Exception Level were
entered for the first time, following power on. This means CPU registers that
have a defined reset value by the Architecture will assume that value. Other
registers should not be expected to hold their values before the call was made.
CPU endianness, however, is preserved from the previous execution state. Note
that this switches the execution state of the calling CPU only. This is not a
substitute for PSCI `SYSTEM_RESET`.
The service may return the following error codes:
- `STATE_SW_E_PARAM`: If any of the parameters were deemed invalid for
a specific request.
- `STATE_SW_E_DENIED`: If the call is not successful, or when ARM Trusted
Firmware is built for AArch32.
If the call is successful, the caller wouldn't observe the SMC returning.
Instead, execution starts at the supplied entry point, with the CPU registers 0
and 1 populated with the supplied _Cookie hi_ and _Cookie lo_ values,
respectively.
- - - - - - - - - - - - - - - - - - - - - - - - - -
[Firmware Design]: ./firmware-design.md
[SMCCC]: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"

7
include/lib/aarch64/arch.h

@ -110,6 +110,10 @@
#define ID_AA64PFR0_EL3_SHIFT 12
#define ID_AA64PFR0_ELX_MASK 0xf
#define EL_IMPL_NONE 0
#define EL_IMPL_A64ONLY 1
#define EL_IMPL_A64_A32 2
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_GIC_WIDTH 4
#define ID_AA64PFR0_GIC_MASK ((1 << ID_AA64PFR0_GIC_WIDTH) - 1)
@ -183,7 +187,8 @@
#define MDCR_DEF_VAL (MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE))
/* HCR definitions */
#define HCR_RW_BIT (1ull << 31)
#define HCR_RW_SHIFT 31
#define HCR_RW_BIT (1ull << HCR_RW_SHIFT)
#define HCR_AMO_BIT (1 << 5)
#define HCR_IMO_BIT (1 << 4)
#define HCR_FMO_BIT (1 << 3)

8
include/lib/aarch64/arch_helpers.h

@ -328,6 +328,14 @@ DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
#define IS_IN_EL1() IS_IN_EL(1)
#define IS_IN_EL3() IS_IN_EL(3)
/*
* Check if an EL is implemented from AA64PFR0 register fields. 'el' argument
* must be one of 1, 2 or 3.
*/
#define EL_IMPLEMENTED(el) \
((read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL##el##_SHIFT) \
& ID_AA64PFR0_ELX_MASK)
/* Previously defined accesor functions with incomplete register names */
#define read_current_el() read_CurrentEl()

2
include/lib/el3_runtime/context_mgmt.h

@ -9,6 +9,8 @@
#ifndef AARCH32
#include <arch.h>
#include <assert.h>
#include <stdint.h>
#endif
/*******************************************************************************

1
include/lib/psci/psci_lib.h

@ -82,6 +82,7 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
void *handle,
u_register_t flags);
int psci_setup(const psci_lib_args_t *lib_args);
int psci_secondaries_brought_up(void);
void psci_warmboot_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
void psci_prepare_next_non_secure_ctx(

7
include/plat/arm/common/arm_sip_svc.h

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -14,8 +14,11 @@
/* 0x8200ff02 is reserved */
#define ARM_SIP_SVC_VERSION 0x8200ff03
/* Function ID for requesting state switch of lower EL */
#define ARM_SIP_SVC_EXE_STATE_SWITCH 0x82000020
/* ARM SiP Service Calls version numbers */
#define ARM_SIP_SVC_VERSION_MAJOR 0x0
#define ARM_SIP_SVC_VERSION_MINOR 0x1
#define ARM_SIP_SVC_VERSION_MINOR 0x2
#endif /* __ARM_SIP_SVC_H__ */

11
include/plat/arm/common/plat_arm.h

@ -100,6 +100,9 @@ void arm_setup_page_tables(uintptr_t total_base,
#endif /* __ARM_RECOM_STATE_ID_ENC__ */
/* ARM State switch error codes */
#define STATE_SW_E_PARAM (-2)
#define STATE_SW_E_DENIED (-3)
/* IO storage utility functions */
void arm_io_setup(void);
@ -206,4 +209,12 @@ const mmap_region_t *plat_arm_get_mmap(void);
/* Allow platform to override psci_pm_ops during runtime */
const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops);
/* Execution state switch in ARM platforms */
int arm_execution_state_switch(unsigned int smc_fid,
uint32_t pc_hi,
uint32_t pc_lo,
uint32_t cookie_hi,
uint32_t cookie_lo,
void *handle);
#endif /* __PLAT_ARM_H__ */

3
lib/el3_runtime/aarch64/context_mgmt.c

@ -205,8 +205,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
sctlr_elx &= ~SCTLR_EE_BIT;
sctlr_elx |= SCTLR_EL2_RES1;
write_sctlr_el2(sctlr_elx);
} else if (read_id_aa64pfr0_el1() &
(ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
} else if (EL_IMPLEMENTED(2)) {
/* EL2 present but unused, need to disable safely */
/* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */

21
lib/psci/psci_common.c

@ -892,6 +892,27 @@ void psci_print_power_domain_map(void)
#endif
}
/******************************************************************************
 * Return whether any secondary CPU was ever powered up with a CPU_ON call.
 * A CPU that has been powered up sets its MPIDR in psci_cpu_pd_nodes to a
 * value other than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back
 * to PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value
 * is meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
	unsigned int i;
	int booted_cpus = 0;

	/* Count CPU power-domain nodes that ever received a valid MPIDR */
	for (i = 0U; i < ARRAY_SIZE(psci_cpu_pd_nodes); i++) {
		if (psci_cpu_pd_nodes[i].mpidr != PSCI_INVALID_MPIDR)
			booted_cpus++;
	}

	/* At least the calling (primary) CPU must have been powered up */
	assert(booted_cpus > 0);

	return booted_cpus > 1;
}
#if ENABLE_PLAT_COMPAT
/*******************************************************************************
* PSCI Compatibility helper function to return the 'power_state' parameter of

6
plat/arm/common/arm_common.c

@ -113,15 +113,11 @@ uint32_t arm_get_spsr_for_bl32_entry(void)
#ifndef AARCH32
uint32_t arm_get_spsr_for_bl33_entry(void)
{
unsigned long el_status;
unsigned int mode;
uint32_t spsr;
/* Figure out what mode we enter the non-secure world in */
el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
el_status &= ID_AA64PFR0_ELX_MASK;
mode = (el_status) ? MODE_EL2 : MODE_EL1;
mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
/*
* TODO: Consider the possibility of specifying the SPSR in

1
plat/arm/common/arm_common.mk

@ -140,6 +140,7 @@ BL2U_SOURCES += plat/arm/common/arm_bl2u_setup.c
BL31_SOURCES += plat/arm/common/arm_bl31_setup.c \
plat/arm/common/arm_pm.c \
plat/arm/common/arm_topology.c \
plat/arm/common/execution_state_switch.c \
plat/common/plat_psci_common.c
ifeq (${ENABLE_PMF}, 1)

35
plat/arm/common/arm_sip_svc.c

@ -1,11 +1,12 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arm_sip_svc.h>
#include <debug.h>
#include <plat_arm.h>
#include <pmf.h>
#include <runtime_svc.h>
#include <stdint.h>
@ -36,6 +37,8 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
void *handle,
u_register_t flags)
{
int call_count = 0;
/*
* Dispatch PMF calls to PMF SMC handler and return its return
* value
@ -46,12 +49,34 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
}
switch (smc_fid) {
case ARM_SIP_SVC_CALL_COUNT:
case ARM_SIP_SVC_EXE_STATE_SWITCH: {
u_register_t pc;
/* Allow calls from non-secure only */
if (!is_caller_non_secure(flags))
SMC_RET1(handle, STATE_SW_E_DENIED);
/* Validate supplied entry point */
pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
if (arm_validate_ns_entrypoint(pc))
SMC_RET1(handle, STATE_SW_E_PARAM);
/*
* Return the number of SiP Service Calls. PMF is the only
* SiP service implemented; so return number of PMF calls
* Pointers used in execution state switch are all 32 bits wide
*/
SMC_RET1(handle, PMF_NUM_SMC_CALLS);
return arm_execution_state_switch(smc_fid, (uint32_t) x1,
(uint32_t) x2, (uint32_t) x3, (uint32_t) x4,
handle);
}
case ARM_SIP_SVC_CALL_COUNT:
/* PMF calls */
call_count += PMF_NUM_SMC_CALLS;
/* State switch call */
call_count += 1;
SMC_RET1(handle, call_count);
case ARM_SIP_SVC_UID:
/* Return UID to the caller */

197
plat/arm/common/execution_state_switch.c

@ -0,0 +1,197 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <arm_sip_svc.h>
#include <context.h>
#include <context_mgmt.h>
#include <plat_arm.h>
#include <psci.h>
#include <smcc_helpers.h>
#include <string.h>
#include <utils.h>
/*
 * Handle SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID: ARM_SIP_SVC_EXE_STATE_SWITCH.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; width dependent on the
 *	calling exception level.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pairs received from the caller to pass it back, upon
 *	re-entry.
 * handle:
 *	Handle to saved context.
 *
 * Returns STATE_SW_E_PARAM / STATE_SW_E_DENIED on failure via SMC_RET1; on
 * success the SMC does not return to the call site — execution resumes at
 * the supplied entry point in the flipped execution state.
 */
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	/* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64
	int caller_64, from_el2, el, endianness, thumb = 0;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* That the SMC originated from NS is already validated by the caller */

	/*
	 * Disallow state switch if any of the secondaries have been brought up.
	 */
	if (psci_secondaries_brought_up())
		goto exec_denied;

	/* Determine the caller's current register width from its saved SPSR */
	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers when
		 * switching to AArch32.
		 */
		if ((pc_hi != 0) || (cookie_hi != 0))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32: bit 0 selects Thumb */
		thumb = pc & 1;
	} else {
		/* Construct AArch64 PC from the two supplied 32-bit halves */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}

	/* Make sure PC is 4-byte aligned, except for Thumb */
	if ((pc & 0x3) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls register width of the immediate lower EL only. Expect
	 * this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented, but was disabled. This can be inferred from
	 *   SCR_EL3.HCE.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
		(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
	if (!from_el2) {
		/* The call is from NS privilege level other than HYP */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if (scr & SCR_HCE_BIT)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract
	 * endianness bit from the respective system control register
	 * directly.
	 */
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = !!(sctlr & SCTLR_EE_BIT);

	/* Construct SPSR for the exception state we're about to switch to */
	if (caller_64) {
		int impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU implements
		 * the target EL in AArch32.
		 */
		impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since it's not possible to
		 * implement an EL as AArch32-only (from which this call was
		 * raised), it's safe to assume AArch64 is also implemented.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes
	 * entry_point_info_t pointer as the argument, construct a dummy one
	 * with PC, state width, endianness, security etc. appropriately set.
	 * Other entries in the entry point structure are irrelevant for
	 * purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = spsr;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
			 EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for the
	 * first time. State switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
	cm_prepare_el3_exit(NON_SECURE);

	/*
	 * State switch success. The caller of SMC wouldn't see the SMC
	 * returning. Instead, execution starts at the supplied entry point,
	 * with context pointers populated in registers 0 and 1.
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
#endif
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}

9
plat/mediatek/mt6795/bl31_plat_setup.c

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -336,20 +336,15 @@ void enable_ns_access_to_cpuectlr(void)
static entry_point_info_t *bl31_plat_get_next_kernel64_ep_info(void)
{
entry_point_info_t *next_image_info;
unsigned long el_status;
unsigned int mode;
el_status = 0;
mode = 0;
/* Kernel image is always non-secured */
next_image_info = &bl33_image_ep_info;
/* Figure out what mode we enter the non-secure world in */
el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
el_status &= ID_AA64PFR0_ELX_MASK;
if (el_status) {
if (EL_IMPLEMENTED(2)) {
INFO("Kernel_EL2\n");
mode = MODE_EL2;
} else{

6
plat/qemu/qemu_bl2_setup.c

@ -202,15 +202,11 @@ static uint32_t qemu_get_spsr_for_bl32_entry(void)
******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
unsigned long el_status;
unsigned int mode;
uint32_t spsr;
/* Figure out what mode we enter the non-secure world in */
el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
el_status &= ID_AA64PFR0_ELX_MASK;
mode = (el_status) ? MODE_EL2 : MODE_EL1;
mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;
/*
* TODO: Consider the possibility of specifying the SPSR in

Loading…
Cancel
Save