Browse Source

Merge changes from topic "jc/refact_el1_ctx" into integration

* changes:
  refactor(cm): convert el1-ctx assembly offset entries to c structure
  feat(cm): add explicit context entries for ERRATA_SPECULATIVE_AT
pull/1996/merge
Manish V Badarkhe 3 months ago
committed by TrustedFirmware Code Review
parent
commit
4bcf5b847c
  1. 2
      include/arch/aarch64/el2_common_macros.S
  2. 2
      include/arch/aarch64/el3_common_macros.S
  3. 226
      include/lib/el3_runtime/aarch64/context.h
  4. 272
      include/lib/el3_runtime/context_el1.h
  5. 19
      lib/el3_runtime/aarch64/context.S
  6. 297
      lib/el3_runtime/aarch64/context_mgmt.c
  7. 65
      plat/arm/board/neoverse_rd/common/ras/nrd_ras_cpu.c
  8. 4
      plat/nvidia/tegra/common/tegra_fiq_glue.c
  9. 4
      plat/nvidia/tegra/soc/t194/plat_psci_handlers.c
  10. 6
      plat/qti/qtiseclib/src/qtiseclib_cb_interface.c
  11. 8
      services/spd/trusty/trusty.c
  12. 43
      services/std_svc/spm/el3_spmc/spmc_setup.c
  13. 43
      services/std_svc/spm/spm_mm/spm_mm_setup.c

2
include/arch/aarch64/el2_common_macros.S

@ -408,7 +408,7 @@
* -----------------------------------------------------------
*/
isb
ldp x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
ldp x28, x29, [sp, #CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1]
msr sctlr_el1, x28
isb
msr tcr_el1, x29

2
include/arch/aarch64/el3_common_macros.S

@ -437,7 +437,7 @@
* -----------------------------------------------------------
*/
isb
ldp x28, x29, [sp, #CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1]
ldp x28, x29, [sp, #CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1]
msr sctlr_el1, x28
isb
msr tcr_el1, x29

226
include/lib/el3_runtime/aarch64/context.h

@ -7,6 +7,7 @@
#ifndef CONTEXT_H
#define CONTEXT_H
#include <lib/el3_runtime/context_el1.h>
#include <lib/el3_runtime/context_el2.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/utils_def.h>
@ -81,152 +82,11 @@
#define CTX_EL3STATE_END U(0x50) /* Align to the next 16 byte boundary */
#endif /* FFH_SUPPORT */
/*******************************************************************************
* Constants that allow assembler code to access members of and the
* 'el1_sys_regs' structure at their correct offsets. Note that some of the
* registers are only 32-bits wide but are stored as 64-bit values for
* convenience
******************************************************************************/
#define CTX_EL1_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#define CTX_SPSR_EL1 U(0x0)
#define CTX_ELR_EL1 U(0x8)
#define CTX_SCTLR_EL1 U(0x10)
#define CTX_TCR_EL1 U(0x18)
#define CTX_CPACR_EL1 U(0x20)
#define CTX_CSSELR_EL1 U(0x28)
#define CTX_SP_EL1 U(0x30)
#define CTX_ESR_EL1 U(0x38)
#define CTX_TTBR0_EL1 U(0x40)
#define CTX_TTBR1_EL1 U(0x48)
#define CTX_MAIR_EL1 U(0x50)
#define CTX_AMAIR_EL1 U(0x58)
#define CTX_ACTLR_EL1 U(0x60)
#define CTX_TPIDR_EL1 U(0x68)
#define CTX_TPIDR_EL0 U(0x70)
#define CTX_TPIDRRO_EL0 U(0x78)
#define CTX_PAR_EL1 U(0x80)
#define CTX_FAR_EL1 U(0x88)
#define CTX_AFSR0_EL1 U(0x90)
#define CTX_AFSR1_EL1 U(0x98)
#define CTX_CONTEXTIDR_EL1 U(0xa0)
#define CTX_VBAR_EL1 U(0xa8)
#define CTX_MDCCINT_EL1 U(0xb0)
#define CTX_MDSCR_EL1 U(0xb8)
#define CTX_AARCH64_END U(0xc0) /* Align to the next 16 byte boundary */
/*
* If the platform is AArch64-only, there is no need to save and restore these
* AArch32 registers.
*/
#if CTX_INCLUDE_AARCH32_REGS
#define CTX_SPSR_ABT (CTX_AARCH64_END + U(0x0))
#define CTX_SPSR_UND (CTX_AARCH64_END + U(0x8))
#define CTX_SPSR_IRQ (CTX_AARCH64_END + U(0x10))
#define CTX_SPSR_FIQ (CTX_AARCH64_END + U(0x18))
#define CTX_DACR32_EL2 (CTX_AARCH64_END + U(0x20))
#define CTX_IFSR32_EL2 (CTX_AARCH64_END + U(0x28))
#define CTX_AARCH32_END (CTX_AARCH64_END + U(0x30)) /* Align to the next 16 byte boundary */
#else
#define CTX_AARCH32_END CTX_AARCH64_END
#endif /* CTX_INCLUDE_AARCH32_REGS */
/*
* If the timer registers aren't saved and restored, we don't have to reserve
* space for them in the context
*/
#if NS_TIMER_SWITCH
#define CTX_CNTP_CTL_EL0 (CTX_AARCH32_END + U(0x0))
#define CTX_CNTP_CVAL_EL0 (CTX_AARCH32_END + U(0x8))
#define CTX_CNTV_CTL_EL0 (CTX_AARCH32_END + U(0x10))
#define CTX_CNTV_CVAL_EL0 (CTX_AARCH32_END + U(0x18))
#define CTX_CNTKCTL_EL1 (CTX_AARCH32_END + U(0x20))
#define CTX_TIMER_SYSREGS_END (CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
#else
#define CTX_TIMER_SYSREGS_END CTX_AARCH32_END
#endif /* NS_TIMER_SWITCH */
#if ENABLE_FEAT_MTE2
#define CTX_TFSRE0_EL1 (CTX_TIMER_SYSREGS_END + U(0x0))
#define CTX_TFSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x8))
#define CTX_RGSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x10))
#define CTX_GCR_EL1 (CTX_TIMER_SYSREGS_END + U(0x18))
#define CTX_MTE_REGS_END (CTX_TIMER_SYSREGS_END + U(0x20)) /* Align to the next 16 byte boundary */
#else
#define CTX_MTE_REGS_END CTX_TIMER_SYSREGS_END
#endif /* ENABLE_FEAT_MTE2 */
#if ENABLE_FEAT_RAS
#define CTX_DISR_EL1 (CTX_MTE_REGS_END + U(0x0))
#define CTX_RAS_REGS_END (CTX_MTE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_RAS_REGS_END CTX_MTE_REGS_END
#endif /* ENABLE_FEAT_RAS */
#if ENABLE_FEAT_S1PIE
#define CTX_PIRE0_EL1 (CTX_RAS_REGS_END + U(0x0))
#define CTX_PIR_EL1 (CTX_RAS_REGS_END + U(0x8))
#define CTX_S1PIE_REGS_END (CTX_RAS_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_S1PIE_REGS_END CTX_RAS_REGS_END
#endif /* ENABLE_FEAT_S1PIE */
#if ENABLE_FEAT_S1POE
#define CTX_POR_EL1 (CTX_S1PIE_REGS_END + U(0x0))
#define CTX_S1POE_REGS_END (CTX_S1PIE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_S1POE_REGS_END CTX_S1PIE_REGS_END
#endif /* ENABLE_FEAT_S1POE */
#if ENABLE_FEAT_S2POE
#define CTX_S2POR_EL1 (CTX_S1POE_REGS_END + U(0x0))
#define CTX_S2POE_REGS_END (CTX_S1POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_S2POE_REGS_END CTX_S1POE_REGS_END
#endif /* ENABLE_FEAT_S2POE */
#if ENABLE_FEAT_TCR2
#define CTX_TCR2_EL1 (CTX_S2POE_REGS_END + U(0x0))
#define CTX_TCR2_REGS_END (CTX_S2POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_TCR2_REGS_END CTX_S2POE_REGS_END
#endif /* ENABLE_FEAT_TCR2 */
#if ENABLE_TRF_FOR_NS
#define CTX_TRFCR_EL1 (CTX_TCR2_REGS_END + U(0x0))
#define CTX_TRF_REGS_END (CTX_TCR2_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_TRF_REGS_END CTX_TCR2_REGS_END
#endif /* ENABLE_TRF_FOR_NS */
#if ENABLE_FEAT_CSV2_2
#define CTX_SCXTNUM_EL0 (CTX_TRF_REGS_END + U(0x0))
#define CTX_SCXTNUM_EL1 (CTX_TRF_REGS_END + U(0x8))
#define CTX_CSV2_2_REGS_END (CTX_TRF_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
#else
#define CTX_CSV2_2_REGS_END CTX_TRF_REGS_END
#endif /* ENABLE_FEAT_CSV2_2 */
#if ENABLE_FEAT_GCS
#define CTX_GCSCR_EL1 (CTX_CSV2_2_REGS_END + U(0x0))
#define CTX_GCSCRE0_EL1 (CTX_CSV2_2_REGS_END + U(0x8))
#define CTX_GCSPR_EL1 (CTX_CSV2_2_REGS_END + U(0x10))
#define CTX_GCSPR_EL0 (CTX_CSV2_2_REGS_END + U(0x18))
#define CTX_GCS_REGS_END (CTX_CSV2_2_REGS_END + U(0x20)) /* Align to the next 16 byte boundary */
#else
#define CTX_GCS_REGS_END CTX_CSV2_2_REGS_END
#endif /* ENABLE_FEAT_GCS */
/*
* End of EL1 system registers.
*/
#define CTX_EL1_SYSREGS_END CTX_GCS_REGS_END
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'fp_regs'
* structure at their correct offsets.
******************************************************************************/
# define CTX_FPREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
# define CTX_FPREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#if CTX_INCLUDE_FPREGS
#define CTX_FP_Q0 U(0x0)
#define CTX_FP_Q1 U(0x10)
@ -279,10 +139,54 @@
#define CTX_CVE_2018_3639_DISABLE U(0)
#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
/*******************************************************************************
* Registers related to ERRATA_SPECULATIVE_AT
*
* This is essential because, with the EL1 and EL2 context registers being
* decoupled, both will not necessarily be present for a given build configuration.
* As ERRATA_SPECULATIVE_AT errata requires SCTLR_EL1 and TCR_EL1 registers
* independent of the above logic, we need explicit context entries to be
* reserved for these registers.
*
* NOTE: Based on this, we end up with the following configurations, depending
* on the presence of the errata workaround and the inclusion of the EL1 or EL2 context.
*
* ============================================================================
* | ERRATA_SPECULATIVE_AT | EL1 context| Memory allocation(Sctlr_el1,Tcr_el1)|
* ============================================================================
* | 0 | 0 | None |
* | 0 | 1 | EL1 C-Context structure |
* | 1 | 0 | Errata Context Offset Entries |
* | 1 | 1 | Errata Context Offset Entries |
* ============================================================================
*
* In the above table, when ERRATA_SPECULATIVE_AT=1, EL1_Context=0, it implies
* there is only EL2 context and memory for SCTLR_EL1 and TCR_EL1 registers is
* reserved explicitly under ERRATA_SPECULATIVE_AT build flag here.
*
* When EL1_Context=1 and ERRATA_SPECULATIVE_AT=1, SCTLR_EL1 and TCR_EL1 are
* modified by the errata workaround early in the code flow, before the EL1
* context save and restore operations take place. Their context memory is
* therefore still reserved explicitly under the errata logic here, and these
* registers are not part of the EL1 context save & restore routines.
*
* Only when ERRATA_SPECULATIVE_AT=0, EL1_Context=1, for this combination,
* SCTLR_EL1 and TCR_EL1 will be part of EL1 context structure (context_el1.h)
* -----------------------------------------------------------------------------
******************************************************************************/
#define CTX_ERRATA_SPEC_AT_OFFSET (CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_END)
#if ERRATA_SPECULATIVE_AT
#define CTX_ERRATA_SPEC_AT_SCTLR_EL1 U(0x0)
#define CTX_ERRATA_SPEC_AT_TCR_EL1 U(0x8)
#define CTX_ERRATA_SPEC_AT_END U(0x10) /* Align to the next 16 byte boundary */
#else
#define CTX_ERRATA_SPEC_AT_END U(0x0)
#endif /* ERRATA_SPECULATIVE_AT */
/*******************************************************************************
* Registers related to ARMv8.3-PAuth.
******************************************************************************/
#define CTX_PAUTH_REGS_OFFSET (CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_END)
#define CTX_PAUTH_REGS_OFFSET (CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_END)
#if CTX_INCLUDE_PAUTH_REGS
#define CTX_PACIAKEY_LO U(0x0)
#define CTX_PACIAKEY_HI U(0x8)
@ -325,13 +229,16 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#define CTX_EL1_SYSREGS_ALL (CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#endif
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
#if ERRATA_SPECULATIVE_AT
#define CTX_ERRATA_SPEC_AT_ALL (CTX_ERRATA_SPEC_AT_END >> DWORD_SHIFT)
#endif
#if CTX_INCLUDE_PAUTH_REGS
# define CTX_PAUTH_REGS_ALL (CTX_PAUTH_REGS_END >> DWORD_SHIFT)
#endif
@ -345,12 +252,6 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during world switches.
*/
DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
/*
* AArch64 floating point register context structure for preserving
* the floating point state during switches from one security state to
@ -369,6 +270,11 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
/* Function pointer used by CVE-2018-3639 dynamic mitigation */
DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
/* Registers associated to Errata_Speculative */
#if ERRATA_SPECULATIVE_AT
DEFINE_REG_STRUCT(errata_speculative_at, CTX_ERRATA_SPEC_AT_ALL);
#endif
/* Registers associated to ARMv8.3-PAuth */
#if CTX_INCLUDE_PAUTH_REGS
DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
@ -393,17 +299,22 @@ DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
typedef struct cpu_context {
gp_regs_t gpregs_ctx;
el3_state_t el3state_ctx;
el1_sysregs_t el1_sysregs_ctx;
#if CTX_INCLUDE_FPREGS
fp_regs_t fpregs_ctx;
#endif
cve_2018_3639_t cve_2018_3639_ctx;
#if ERRATA_SPECULATIVE_AT
errata_speculative_at_t errata_speculative_at_ctx;
#endif
#if CTX_INCLUDE_PAUTH_REGS
pauth_t pauth_ctx;
#endif
el1_sysregs_t el1_sysregs_ctx;
#if CTX_INCLUDE_EL2_REGS
el2_sysregs_t el2_sysregs_ctx;
#endif
@ -433,6 +344,11 @@ extern per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
#endif
#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx)
#define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx)
#if ERRATA_SPECULATIVE_AT
#define get_errata_speculative_at_ctx(h) (&((cpu_context_t *) h)->errata_speculative_at_ctx)
#endif
#if CTX_INCLUDE_PAUTH_REGS
# define get_pauth_ctx(h) (&((cpu_context_t *) h)->pauth_ctx)
#endif
@ -448,9 +364,6 @@ CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx),
CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
assert_core_context_el3state_offset_mismatch);
CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx),
assert_core_context_el1_sys_offset_mismatch);
#if CTX_INCLUDE_FPREGS
CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
assert_core_context_fp_offset_mismatch);
@ -459,6 +372,11 @@ CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx),
assert_core_context_cve_2018_3639_offset_mismatch);
#if ERRATA_SPECULATIVE_AT
CASSERT(CTX_ERRATA_SPEC_AT_OFFSET == __builtin_offsetof(cpu_context_t, errata_speculative_at_ctx),
assert_core_context_errata_speculative_at_offset_mismatch);
#endif
#if CTX_INCLUDE_PAUTH_REGS
CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx),
assert_core_context_pauth_offset_mismatch);

272
include/lib/el3_runtime/context_el1.h

@ -0,0 +1,272 @@
/*
* Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef CONTEXT_EL1_H
#define CONTEXT_EL1_H
#ifndef __ASSEMBLER__
/*******************************************************************************
* EL1 Registers:
* AArch64 EL1 system register context structure for preserving the
* architectural state during world switches.
******************************************************************************/
/*
 * EL1 system registers that are saved and restored on every world switch,
 * independent of any optional architecture extension.
 *
 * NOTE(review): this header uses uint64_t but does not itself include
 * <stdint.h>; it presumably relies on the consumer including it first —
 * confirm against the files that include this header.
 */
typedef struct el1_common_regs {
uint64_t spsr_el1;
uint64_t elr_el1;
#if (!ERRATA_SPECULATIVE_AT)
/*
 * With ERRATA_SPECULATIVE_AT enabled, SCTLR_EL1 and TCR_EL1 are kept in a
 * dedicated errata context area (CTX_ERRATA_SPEC_AT_* in context.h)
 * instead of this structure.
 */
uint64_t sctlr_el1;
uint64_t tcr_el1;
#endif /* ERRATA_SPECULATIVE_AT=0 */
uint64_t cpacr_el1;
uint64_t csselr_el1;
uint64_t sp_el1;
uint64_t esr_el1;
uint64_t ttbr0_el1;
uint64_t ttbr1_el1;
uint64_t mair_el1;
uint64_t amair_el1;
uint64_t actlr_el1;
uint64_t tpidr_el1;
uint64_t tpidr_el0;
uint64_t tpidrro_el0;
uint64_t par_el1;
uint64_t far_el1;
uint64_t afsr0_el1;
uint64_t afsr1_el1;
uint64_t contextidr_el1;
uint64_t vbar_el1;
/* Debug registers (32-bit architecturally, stored as 64-bit here). */
uint64_t mdccint_el1;
uint64_t mdscr_el1;
} el1_common_regs_t;
/*
 * AArch32 EL1 state; only included in the context when
 * CTX_INCLUDE_AARCH32_REGS=1 (see el1_sysregs below).
 */
typedef struct el1_aarch32_regs {
uint64_t spsr_abt;
uint64_t spsr_und;
uint64_t spsr_irq;
uint64_t spsr_fiq;
uint64_t dacr32_el2;
uint64_t ifsr32_el2;
} el1_aarch32_regs_t;
/*
 * EL1/EL0 architectural timer state; only included in the context when
 * NS_TIMER_SWITCH=1.
 */
typedef struct el1_arch_timer_regs {
uint64_t cntp_ctl_el0;
uint64_t cntp_cval_el0;
uint64_t cntv_ctl_el0;
uint64_t cntv_cval_el0;
uint64_t cntkctl_el1;
} el1_arch_timer_regs_t;
/* FEAT_MTE2 tag-control registers; present when ENABLE_FEAT_MTE2=1. */
typedef struct el1_mte2_regs {
uint64_t tfsre0_el1;
uint64_t tfsr_el1;
uint64_t rgsr_el1;
uint64_t gcr_el1;
} el1_mte2_regs_t;
/* RAS deferred-interrupt status register; present when ENABLE_FEAT_RAS=1. */
typedef struct el1_ras_regs {
uint64_t disr_el1;
} el1_ras_regs_t;
/* FEAT_S1PIE permission-indirection registers; present when ENABLE_FEAT_S1PIE=1. */
typedef struct el1_s1pie_regs {
uint64_t pire0_el1;
uint64_t pir_el1;
} el1_s1pie_regs_t;
/* FEAT_S1POE permission-overlay register; present when ENABLE_FEAT_S1POE=1. */
typedef struct el1_s1poe_regs {
uint64_t por_el1;
} el1_s1poe_regs_t;
/* FEAT_S2POE stage-2 permission-overlay register; present when ENABLE_FEAT_S2POE=1. */
typedef struct el1_s2poe_regs {
uint64_t s2por_el1;
} el1_s2poe_regs_t;
/* FEAT_TCR2 extended translation-control register; present when ENABLE_FEAT_TCR2=1. */
typedef struct el1_tcr2_regs {
uint64_t tcr2_el1;
} el1_tcr2_regs_t;
/* Trace-filter control register; present when ENABLE_TRF_FOR_NS=1. */
typedef struct el1_trf_regs {
uint64_t trfcr_el1;
} el1_trf_regs_t;
/* FEAT_CSV2_2 software-context-number registers; present when ENABLE_FEAT_CSV2_2=1. */
typedef struct el1_csv2_2_regs {
uint64_t scxtnum_el0;
uint64_t scxtnum_el1;
} el1_csv2_2_regs_t;
/* FEAT_GCS guarded-control-stack registers; present when ENABLE_FEAT_GCS=1. */
typedef struct el1_gcs_regs {
uint64_t gcscr_el1;
uint64_t gcscre0_el1;
uint64_t gcspr_el1;
uint64_t gcspr_el0;
} el1_gcs_regs_t;
/*
 * Top-level EL1 system register context. The common registers are always
 * present; each feature-specific sub-structure is only compiled in when
 * the corresponding build flag is enabled, so the context occupies no
 * memory for features disabled in a given build configuration.
 */
typedef struct el1_sysregs {
el1_common_regs_t common;
#if CTX_INCLUDE_AARCH32_REGS
el1_aarch32_regs_t el1_aarch32;
#endif
#if NS_TIMER_SWITCH
el1_arch_timer_regs_t arch_timer;
#endif
#if ENABLE_FEAT_MTE2
el1_mte2_regs_t mte2;
#endif
#if ENABLE_FEAT_RAS
el1_ras_regs_t ras;
#endif
#if ENABLE_FEAT_S1PIE
el1_s1pie_regs_t s1pie;
#endif
#if ENABLE_FEAT_S1POE
el1_s1poe_regs_t s1poe;
#endif
#if ENABLE_FEAT_S2POE
el1_s2poe_regs_t s2poe;
#endif
#if ENABLE_FEAT_TCR2
el1_tcr2_regs_t tcr2;
#endif
#if ENABLE_TRF_FOR_NS
el1_trf_regs_t trf;
#endif
#if ENABLE_FEAT_CSV2_2
el1_csv2_2_regs_t csv2_2;
#endif
#if ENABLE_FEAT_GCS
el1_gcs_regs_t gcs;
#endif
} el1_sysregs_t;
/*
* Macros to access members related to individual features of the el1_sysregs_t
* structures.
*/
#define read_el1_ctx_common(ctx, reg) (((ctx)->common).reg)
#define write_el1_ctx_common(ctx, reg, val) ((((ctx)->common).reg) \
= (uint64_t) (val))
#if NS_TIMER_SWITCH
#define read_el1_ctx_arch_timer(ctx, reg) (((ctx)->arch_timer).reg)
#define write_el1_ctx_arch_timer(ctx, reg, val) ((((ctx)->arch_timer).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_arch_timer(ctx, reg) ULL(0)
#define write_el1_ctx_arch_timer(ctx, reg, val)
#endif /* NS_TIMER_SWITCH */
#if CTX_INCLUDE_AARCH32_REGS
#define read_el1_ctx_aarch32(ctx, reg) (((ctx)->el1_aarch32).reg)
#define write_el1_ctx_aarch32(ctx, reg, val) ((((ctx)->el1_aarch32).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_aarch32(ctx, reg) ULL(0)
#define write_el1_ctx_aarch32(ctx, reg, val)
#endif /* CTX_INCLUDE_AARCH32_REGS */
#if ENABLE_FEAT_MTE2
#define read_el1_ctx_mte2(ctx, reg) (((ctx)->mte2).reg)
#define write_el1_ctx_mte2(ctx, reg, val) ((((ctx)->mte2).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_mte2(ctx, reg) ULL(0)
#define write_el1_ctx_mte2(ctx, reg, val)
#endif /* ENABLE_FEAT_MTE2 */
#if ENABLE_FEAT_RAS
#define read_el1_ctx_ras(ctx, reg) (((ctx)->ras).reg)
#define write_el1_ctx_ras(ctx, reg, val) ((((ctx)->ras).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_ras(ctx, reg) ULL(0)
#define write_el1_ctx_ras(ctx, reg, val)
#endif /* ENABLE_FEAT_RAS */
#if ENABLE_FEAT_S1PIE
#define read_el1_ctx_s1pie(ctx, reg) (((ctx)->s1pie).reg)
#define write_el1_ctx_s1pie(ctx, reg, val) ((((ctx)->s1pie).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_s1pie(ctx, reg) ULL(0)
#define write_el1_ctx_s1pie(ctx, reg, val)
#endif /* ENABLE_FEAT_S1PIE */
#if ENABLE_FEAT_S1POE
#define read_el1_ctx_s1poe(ctx, reg) (((ctx)->s1poe).reg)
#define write_el1_ctx_s1poe(ctx, reg, val) ((((ctx)->s1poe).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_s1poe(ctx, reg) ULL(0)
#define write_el1_ctx_s1poe(ctx, reg, val)
#endif /* ENABLE_FEAT_S1POE */
#if ENABLE_FEAT_S2POE
#define read_el1_ctx_s2poe(ctx, reg) (((ctx)->s2poe).reg)
#define write_el1_ctx_s2poe(ctx, reg, val) ((((ctx)->s2poe).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_s2poe(ctx, reg) ULL(0)
#define write_el1_ctx_s2poe(ctx, reg, val)
#endif /* ENABLE_FEAT_S2POE */
#if ENABLE_FEAT_TCR2
#define read_el1_ctx_tcr2(ctx, reg) (((ctx)->tcr2).reg)
#define write_el1_ctx_tcr2(ctx, reg, val) ((((ctx)->tcr2).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_tcr2(ctx, reg) ULL(0)
#define write_el1_ctx_tcr2(ctx, reg, val)
#endif /* ENABLE_FEAT_TCR2 */
#if ENABLE_TRF_FOR_NS
#define read_el1_ctx_trf(ctx, reg) (((ctx)->trf).reg)
#define write_el1_ctx_trf(ctx, reg, val) ((((ctx)->trf).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_trf(ctx, reg) ULL(0)
#define write_el1_ctx_trf(ctx, reg, val)
#endif /* ENABLE_TRF_FOR_NS */
#if ENABLE_FEAT_CSV2_2
#define read_el1_ctx_csv2_2(ctx, reg) (((ctx)->csv2_2).reg)
#define write_el1_ctx_csv2_2(ctx, reg, val) ((((ctx)->csv2_2).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_csv2_2(ctx, reg) ULL(0)
#define write_el1_ctx_csv2_2(ctx, reg, val)
#endif /* ENABLE_FEAT_CSV2_2 */
#if ENABLE_FEAT_GCS
#define read_el1_ctx_gcs(ctx, reg) (((ctx)->gcs).reg)
#define write_el1_ctx_gcs(ctx, reg, val) ((((ctx)->gcs).reg) \
= (uint64_t) (val))
#else
#define read_el1_ctx_gcs(ctx, reg) ULL(0)
#define write_el1_ctx_gcs(ctx, reg, val)
#endif /* ENABLE_FEAT_GCS */
/******************************************************************************/
#endif /* __ASSEMBLER__ */
#endif /* CONTEXT_EL1_H */

19
lib/el3_runtime/aarch64/context.S

@ -14,9 +14,13 @@
.global fpregs_context_save
.global fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
#if ERRATA_SPECULATIVE_AT
.global save_and_update_ptw_el1_sys_regs
#endif /* ERRATA_SPECULATIVE_AT */
.global prepare_el3_entry
.global restore_gp_pmcr_pauth_regs
.global save_and_update_ptw_el1_sys_regs
.global el3_exit
/* ------------------------------------------------------------------
@ -329,10 +333,12 @@ func restore_gp_pmcr_pauth_regs
ret
endfunc restore_gp_pmcr_pauth_regs
/*
#if ERRATA_SPECULATIVE_AT
/* --------------------------------------------------------------------
* In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
* registers and update EL1 registers to disable stage1 and stage2
* page table walk
* page table walk.
* --------------------------------------------------------------------
*/
func save_and_update_ptw_el1_sys_regs
/* ----------------------------------------------------------
@ -340,9 +346,9 @@ func save_and_update_ptw_el1_sys_regs
* ----------------------------------------------------------
*/
mrs x29, sctlr_el1
str x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
str x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1)]
mrs x29, tcr_el1
str x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]
str x29, [sp, #(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_TCR_EL1)]
/* ------------------------------------------------------------
* Must follow below order in order to disable page table
@ -367,10 +373,11 @@ func save_and_update_ptw_el1_sys_regs
orr x29, x29, #SCTLR_M_BIT
msr sctlr_el1, x29
isb
ret
endfunc save_and_update_ptw_el1_sys_regs
#endif /* ERRATA_SPECULATIVE_AT */
/* -----------------------------------------------------------------
* The below macro returns the address of the per_world context for
* the security state, retrieved through "get_security_state" macro.

297
lib/el3_runtime/aarch64/context_mgmt.c

@ -92,8 +92,13 @@ static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info
*/
sctlr_elx |= SCTLR_IESB_BIT;
#endif
/* Store the initialised SCTLR_EL1 value in the cpu_context */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx), CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_elx);
#else
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_elx);
#endif /* ERRATA_SPECULATIVE_AT */
/*
* Base the context ACTLR_EL1 on the current value, as it is
@ -103,7 +108,7 @@ static void setup_el1_context(cpu_context_t *ctx, const struct entry_point_info
* be zero.
*/
actlr_elx = read_actlr_el1();
write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), actlr_el1, actlr_elx);
}
/******************************************************************************
@ -1548,220 +1553,192 @@ void cm_prepare_el3_exit_ns(void)
static void el1_sysregs_context_save(el1_sysregs_t *ctx)
{
write_ctx_reg(ctx, CTX_SPSR_EL1, read_spsr_el1());
write_ctx_reg(ctx, CTX_ELR_EL1, read_elr_el1());
write_el1_ctx_common(ctx, spsr_el1, read_spsr_el1());
write_el1_ctx_common(ctx, elr_el1, read_elr_el1());
#if !ERRATA_SPECULATIVE_AT
write_ctx_reg(ctx, CTX_SCTLR_EL1, read_sctlr_el1());
write_ctx_reg(ctx, CTX_TCR_EL1, read_tcr_el1());
#if (!ERRATA_SPECULATIVE_AT)
write_el1_ctx_common(ctx, sctlr_el1, read_sctlr_el1());
write_el1_ctx_common(ctx, tcr_el1, read_tcr_el1());
#endif /* (!ERRATA_SPECULATIVE_AT) */
write_ctx_reg(ctx, CTX_CPACR_EL1, read_cpacr_el1());
write_ctx_reg(ctx, CTX_CSSELR_EL1, read_csselr_el1());
write_ctx_reg(ctx, CTX_SP_EL1, read_sp_el1());
write_ctx_reg(ctx, CTX_ESR_EL1, read_esr_el1());
write_ctx_reg(ctx, CTX_TTBR0_EL1, read_ttbr0_el1());
write_ctx_reg(ctx, CTX_TTBR1_EL1, read_ttbr1_el1());
write_ctx_reg(ctx, CTX_MAIR_EL1, read_mair_el1());
write_ctx_reg(ctx, CTX_AMAIR_EL1, read_amair_el1());
write_ctx_reg(ctx, CTX_ACTLR_EL1, read_actlr_el1());
write_ctx_reg(ctx, CTX_TPIDR_EL1, read_tpidr_el1());
write_ctx_reg(ctx, CTX_TPIDR_EL0, read_tpidr_el0());
write_ctx_reg(ctx, CTX_TPIDRRO_EL0, read_tpidrro_el0());
write_ctx_reg(ctx, CTX_PAR_EL1, read_par_el1());
write_ctx_reg(ctx, CTX_FAR_EL1, read_far_el1());
write_ctx_reg(ctx, CTX_AFSR0_EL1, read_afsr0_el1());
write_ctx_reg(ctx, CTX_AFSR1_EL1, read_afsr1_el1());
write_ctx_reg(ctx, CTX_CONTEXTIDR_EL1, read_contextidr_el1());
write_ctx_reg(ctx, CTX_VBAR_EL1, read_vbar_el1());
write_ctx_reg(ctx, CTX_MDCCINT_EL1, read_mdccint_el1());
write_ctx_reg(ctx, CTX_MDSCR_EL1, read_mdscr_el1());
#if CTX_INCLUDE_AARCH32_REGS
write_ctx_reg(ctx, CTX_SPSR_ABT, read_spsr_abt());
write_ctx_reg(ctx, CTX_SPSR_UND, read_spsr_und());
write_ctx_reg(ctx, CTX_SPSR_IRQ, read_spsr_irq());
write_ctx_reg(ctx, CTX_SPSR_FIQ, read_spsr_fiq());
write_ctx_reg(ctx, CTX_DACR32_EL2, read_dacr32_el2());
write_ctx_reg(ctx, CTX_IFSR32_EL2, read_ifsr32_el2());
#endif /* CTX_INCLUDE_AARCH32_REGS */
#if NS_TIMER_SWITCH
write_ctx_reg(ctx, CTX_CNTP_CTL_EL0, read_cntp_ctl_el0());
write_ctx_reg(ctx, CTX_CNTP_CVAL_EL0, read_cntp_cval_el0());
write_ctx_reg(ctx, CTX_CNTV_CTL_EL0, read_cntv_ctl_el0());
write_ctx_reg(ctx, CTX_CNTV_CVAL_EL0, read_cntv_cval_el0());
write_ctx_reg(ctx, CTX_CNTKCTL_EL1, read_cntkctl_el1());
#endif /* NS_TIMER_SWITCH */
#if ENABLE_FEAT_MTE2
write_ctx_reg(ctx, CTX_TFSRE0_EL1, read_tfsre0_el1());
write_ctx_reg(ctx, CTX_TFSR_EL1, read_tfsr_el1());
write_ctx_reg(ctx, CTX_RGSR_EL1, read_rgsr_el1());
write_ctx_reg(ctx, CTX_GCR_EL1, read_gcr_el1());
#endif /* ENABLE_FEAT_MTE2 */
#if ENABLE_FEAT_RAS
write_el1_ctx_common(ctx, cpacr_el1, read_cpacr_el1());
write_el1_ctx_common(ctx, csselr_el1, read_csselr_el1());
write_el1_ctx_common(ctx, sp_el1, read_sp_el1());
write_el1_ctx_common(ctx, esr_el1, read_esr_el1());
write_el1_ctx_common(ctx, ttbr0_el1, read_ttbr0_el1());
write_el1_ctx_common(ctx, ttbr1_el1, read_ttbr1_el1());
write_el1_ctx_common(ctx, mair_el1, read_mair_el1());
write_el1_ctx_common(ctx, amair_el1, read_amair_el1());
write_el1_ctx_common(ctx, actlr_el1, read_actlr_el1());
write_el1_ctx_common(ctx, tpidr_el1, read_tpidr_el1());
write_el1_ctx_common(ctx, tpidr_el0, read_tpidr_el0());
write_el1_ctx_common(ctx, tpidrro_el0, read_tpidrro_el0());
write_el1_ctx_common(ctx, par_el1, read_par_el1());
write_el1_ctx_common(ctx, far_el1, read_far_el1());
write_el1_ctx_common(ctx, afsr0_el1, read_afsr0_el1());
write_el1_ctx_common(ctx, afsr1_el1, read_afsr1_el1());
write_el1_ctx_common(ctx, contextidr_el1, read_contextidr_el1());
write_el1_ctx_common(ctx, vbar_el1, read_vbar_el1());
write_el1_ctx_common(ctx, mdccint_el1, read_mdccint_el1());
write_el1_ctx_common(ctx, mdscr_el1, read_mdscr_el1());
if (CTX_INCLUDE_AARCH32_REGS) {
/* Save Aarch32 registers */
write_el1_ctx_aarch32(ctx, spsr_abt, read_spsr_abt());
write_el1_ctx_aarch32(ctx, spsr_und, read_spsr_und());
write_el1_ctx_aarch32(ctx, spsr_irq, read_spsr_irq());
write_el1_ctx_aarch32(ctx, spsr_fiq, read_spsr_fiq());
write_el1_ctx_aarch32(ctx, dacr32_el2, read_dacr32_el2());
write_el1_ctx_aarch32(ctx, ifsr32_el2, read_ifsr32_el2());
}
if (NS_TIMER_SWITCH) {
/* Save NS Timer registers */
write_el1_ctx_arch_timer(ctx, cntp_ctl_el0, read_cntp_ctl_el0());
write_el1_ctx_arch_timer(ctx, cntp_cval_el0, read_cntp_cval_el0());
write_el1_ctx_arch_timer(ctx, cntv_ctl_el0, read_cntv_ctl_el0());
write_el1_ctx_arch_timer(ctx, cntv_cval_el0, read_cntv_cval_el0());
write_el1_ctx_arch_timer(ctx, cntkctl_el1, read_cntkctl_el1());
}
if (is_feat_mte2_supported()) {
write_el1_ctx_mte2(ctx, tfsre0_el1, read_tfsre0_el1());
write_el1_ctx_mte2(ctx, tfsr_el1, read_tfsr_el1());
write_el1_ctx_mte2(ctx, rgsr_el1, read_rgsr_el1());
write_el1_ctx_mte2(ctx, gcr_el1, read_gcr_el1());
}
if (is_feat_ras_supported()) {
write_ctx_reg(ctx, CTX_DISR_EL1, read_disr_el1());
write_el1_ctx_ras(ctx, disr_el1, read_disr_el1());
}
#endif
#if ENABLE_FEAT_S1PIE
if (is_feat_s1pie_supported()) {
write_ctx_reg(ctx, CTX_PIRE0_EL1, read_pire0_el1());
write_ctx_reg(ctx, CTX_PIR_EL1, read_pir_el1());
write_el1_ctx_s1pie(ctx, pire0_el1, read_pire0_el1());
write_el1_ctx_s1pie(ctx, pir_el1, read_pir_el1());
}
#endif
#if ENABLE_FEAT_S1POE
if (is_feat_s1poe_supported()) {
write_ctx_reg(ctx, CTX_POR_EL1, read_por_el1());
write_el1_ctx_s1poe(ctx, por_el1, read_por_el1());
}
#endif
#if ENABLE_FEAT_S2POE
if (is_feat_s2poe_supported()) {
write_ctx_reg(ctx, CTX_S2POR_EL1, read_s2por_el1());
write_el1_ctx_s2poe(ctx, s2por_el1, read_s2por_el1());
}
#endif
#if ENABLE_FEAT_TCR2
if (is_feat_tcr2_supported()) {
write_ctx_reg(ctx, CTX_TCR2_EL1, read_tcr2_el1());
write_el1_ctx_tcr2(ctx, tcr2_el1, read_tcr2_el1());
}
#endif
#if ENABLE_TRF_FOR_NS
if (is_feat_trf_supported()) {
write_ctx_reg(ctx, CTX_TRFCR_EL1, read_trfcr_el1());
write_el1_ctx_trf(ctx, trfcr_el1, read_trfcr_el1());
}
#endif
#if ENABLE_FEAT_CSV2_2
if (is_feat_csv2_2_supported()) {
write_ctx_reg(ctx, CTX_SCXTNUM_EL0, read_scxtnum_el0());
write_ctx_reg(ctx, CTX_SCXTNUM_EL1, read_scxtnum_el1());
write_el1_ctx_csv2_2(ctx, scxtnum_el0, read_scxtnum_el0());
write_el1_ctx_csv2_2(ctx, scxtnum_el1, read_scxtnum_el1());
}
#endif
#if ENABLE_FEAT_GCS
if (is_feat_gcs_supported()) {
write_ctx_reg(ctx, CTX_GCSCR_EL1, read_gcscr_el1());
write_ctx_reg(ctx, CTX_GCSCRE0_EL1, read_gcscre0_el1());
write_ctx_reg(ctx, CTX_GCSPR_EL1, read_gcspr_el1());
write_ctx_reg(ctx, CTX_GCSPR_EL0, read_gcspr_el0());
write_el1_ctx_gcs(ctx, gcscr_el1, read_gcscr_el1());
write_el1_ctx_gcs(ctx, gcscre0_el1, read_gcscre0_el1());
write_el1_ctx_gcs(ctx, gcspr_el1, read_gcspr_el1());
write_el1_ctx_gcs(ctx, gcspr_el0, read_gcspr_el0());
}
#endif
}
/*
 * Restore the EL1 system register context from 'ctx' into the live
 * registers. Mirrors el1_sysregs_context_save(): the common registers are
 * always restored, while AArch32, NS-timer and architectural-feature
 * registers are restored only when the corresponding build option or
 * runtime feature-detection check says they are present in the context.
 */
static void el1_sysregs_context_restore(el1_sysregs_t *ctx)
{
	write_spsr_el1(read_el1_ctx_common(ctx, spsr_el1));
	write_elr_el1(read_el1_ctx_common(ctx, elr_el1));

#if (!ERRATA_SPECULATIVE_AT)
	/*
	 * With ERRATA_SPECULATIVE_AT enabled, sctlr_el1/tcr_el1 live in a
	 * dedicated context area and are restored from assembly using
	 * CTX_ERRATA_SPEC_AT_OFFSET (see el3_common_macros.S), so they are
	 * deliberately skipped here.
	 */
	write_sctlr_el1(read_el1_ctx_common(ctx, sctlr_el1));
	write_tcr_el1(read_el1_ctx_common(ctx, tcr_el1));
#endif /* (!ERRATA_SPECULATIVE_AT) */

	write_cpacr_el1(read_el1_ctx_common(ctx, cpacr_el1));
	write_csselr_el1(read_el1_ctx_common(ctx, csselr_el1));
	write_sp_el1(read_el1_ctx_common(ctx, sp_el1));
	write_esr_el1(read_el1_ctx_common(ctx, esr_el1));
	write_ttbr0_el1(read_el1_ctx_common(ctx, ttbr0_el1));
	write_ttbr1_el1(read_el1_ctx_common(ctx, ttbr1_el1));
	write_mair_el1(read_el1_ctx_common(ctx, mair_el1));
	write_amair_el1(read_el1_ctx_common(ctx, amair_el1));
	write_actlr_el1(read_el1_ctx_common(ctx, actlr_el1));
	write_tpidr_el1(read_el1_ctx_common(ctx, tpidr_el1));
	write_tpidr_el0(read_el1_ctx_common(ctx, tpidr_el0));
	write_tpidrro_el0(read_el1_ctx_common(ctx, tpidrro_el0));
	write_par_el1(read_el1_ctx_common(ctx, par_el1));
	write_far_el1(read_el1_ctx_common(ctx, far_el1));
	write_afsr0_el1(read_el1_ctx_common(ctx, afsr0_el1));
	write_afsr1_el1(read_el1_ctx_common(ctx, afsr1_el1));
	write_contextidr_el1(read_el1_ctx_common(ctx, contextidr_el1));
	write_vbar_el1(read_el1_ctx_common(ctx, vbar_el1));
	write_mdccint_el1(read_el1_ctx_common(ctx, mdccint_el1));
	write_mdscr_el1(read_el1_ctx_common(ctx, mdscr_el1));

	if (CTX_INCLUDE_AARCH32_REGS) {
		/* Restore Aarch32 registers */
		write_spsr_abt(read_el1_ctx_aarch32(ctx, spsr_abt));
		write_spsr_und(read_el1_ctx_aarch32(ctx, spsr_und));
		write_spsr_irq(read_el1_ctx_aarch32(ctx, spsr_irq));
		write_spsr_fiq(read_el1_ctx_aarch32(ctx, spsr_fiq));
		write_dacr32_el2(read_el1_ctx_aarch32(ctx, dacr32_el2));
		write_ifsr32_el2(read_el1_ctx_aarch32(ctx, ifsr32_el2));
	}

	if (NS_TIMER_SWITCH) {
		/* Restore NS Timer registers */
		write_cntp_ctl_el0(read_el1_ctx_arch_timer(ctx, cntp_ctl_el0));
		write_cntp_cval_el0(read_el1_ctx_arch_timer(ctx, cntp_cval_el0));
		write_cntv_ctl_el0(read_el1_ctx_arch_timer(ctx, cntv_ctl_el0));
		write_cntv_cval_el0(read_el1_ctx_arch_timer(ctx, cntv_cval_el0));
		write_cntkctl_el1(read_el1_ctx_arch_timer(ctx, cntkctl_el1));
	}

	if (is_feat_mte2_supported()) {
		/* Restore MTE2 tagging state */
		write_tfsre0_el1(read_el1_ctx_mte2(ctx, tfsre0_el1));
		write_tfsr_el1(read_el1_ctx_mte2(ctx, tfsr_el1));
		write_rgsr_el1(read_el1_ctx_mte2(ctx, rgsr_el1));
		write_gcr_el1(read_el1_ctx_mte2(ctx, gcr_el1));
	}

	if (is_feat_ras_supported()) {
		write_disr_el1(read_el1_ctx_ras(ctx, disr_el1));
	}

#if ENABLE_FEAT_S1PIE
	if (is_feat_s1pie_supported()) {
		write_pire0_el1(read_el1_ctx_s1pie(ctx, pire0_el1));
		write_pir_el1(read_el1_ctx_s1pie(ctx, pir_el1));
	}
#endif

#if ENABLE_FEAT_S1POE
	if (is_feat_s1poe_supported()) {
		write_por_el1(read_el1_ctx_s1poe(ctx, por_el1));
	}
#endif

#if ENABLE_FEAT_S2POE
	if (is_feat_s2poe_supported()) {
		write_s2por_el1(read_el1_ctx_s2poe(ctx, s2por_el1));
	}
#endif

#if ENABLE_FEAT_TCR2
	if (is_feat_tcr2_supported()) {
		write_tcr2_el1(read_el1_ctx_tcr2(ctx, tcr2_el1));
	}
#endif

#if ENABLE_TRF_FOR_NS
	if (is_feat_trf_supported()) {
		write_trfcr_el1(read_el1_ctx_trf(ctx, trfcr_el1));
	}
#endif

#if ENABLE_FEAT_CSV2_2
	if (is_feat_csv2_2_supported()) {
		write_scxtnum_el0(read_el1_ctx_csv2_2(ctx, scxtnum_el0));
		write_scxtnum_el1(read_el1_ctx_csv2_2(ctx, scxtnum_el1));
	}
#endif

#if ENABLE_FEAT_GCS
	if (is_feat_gcs_supported()) {
		/* Restore Guarded Control Stack registers */
		write_gcscr_el1(read_el1_ctx_gcs(ctx, gcscr_el1));
		write_gcscre0_el1(read_el1_ctx_gcs(ctx, gcscre0_el1));
		write_gcspr_el1(read_el1_ctx_gcs(ctx, gcspr_el1));
		write_gcspr_el0(read_el1_ctx_gcs(ctx, gcspr_el0));
	}
#endif
}
/*******************************************************************************

65
plat/arm/board/neoverse_rd/common/ras/nrd_ras_cpu.c

@ -62,37 +62,50 @@ static void populate_cpu_err_data(cpu_err_info *cpu_info,
cpu_info->SecurityState = security_state;
/* populate CPU EL1 context information. */
cpu_info->ErrCtxEl1Reg[0] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_ELR_EL1);
cpu_info->ErrCtxEl1Reg[1] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_ESR_EL1);
cpu_info->ErrCtxEl1Reg[2] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_FAR_EL1);
cpu_info->ErrCtxEl1Reg[0] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
elr_el1);
cpu_info->ErrCtxEl1Reg[1] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
esr_el1);
cpu_info->ErrCtxEl1Reg[2] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
far_el1);
cpu_info->ErrCtxEl1Reg[3] = read_isr_el1();
cpu_info->ErrCtxEl1Reg[4] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_MAIR_EL1);
cpu_info->ErrCtxEl1Reg[4] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
mair_el1);
cpu_info->ErrCtxEl1Reg[5] = read_midr_el1();
cpu_info->ErrCtxEl1Reg[6] = read_mpidr_el1();
cpu_info->ErrCtxEl1Reg[7] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SCTLR_EL1);
#if (ERRATA_SPECULATIVE_AT)
cpu_info->ErrCtxEl1Reg[7] = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1);
#else
cpu_info->ErrCtxEl1Reg[7] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
sctlr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
cpu_info->ErrCtxEl1Reg[8] = read_ctx_reg(get_gpregs_ctx(ctx),
CTX_GPREG_SP_EL0);
cpu_info->ErrCtxEl1Reg[9] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SP_EL1);
cpu_info->ErrCtxEl1Reg[10] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_SPSR_EL1);
cpu_info->ErrCtxEl1Reg[11] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TCR_EL1);
cpu_info->ErrCtxEl1Reg[12] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TPIDR_EL0);
cpu_info->ErrCtxEl1Reg[13] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TPIDR_EL1);
cpu_info->ErrCtxEl1Reg[14] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TPIDRRO_EL0);
cpu_info->ErrCtxEl1Reg[15] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TTBR0_EL1);
cpu_info->ErrCtxEl1Reg[16] = read_ctx_reg(get_el1_sysregs_ctx(ctx),
CTX_TTBR1_EL1);
cpu_info->ErrCtxEl1Reg[9] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
sp_el1);
cpu_info->ErrCtxEl1Reg[10] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
spsr_el1);
#if (ERRATA_SPECULATIVE_AT)
cpu_info->ErrCtxEl1Reg[11] = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_TCR_EL1);
#else
cpu_info->ErrCtxEl1Reg[11] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tcr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
cpu_info->ErrCtxEl1Reg[12] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tpidr_el0);
cpu_info->ErrCtxEl1Reg[13] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tpidr_el1);
cpu_info->ErrCtxEl1Reg[14] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
tpidrro_el0);
cpu_info->ErrCtxEl1Reg[15] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
ttbr0_el1);
cpu_info->ErrCtxEl1Reg[16] = read_el1_ctx_common(get_el1_sysregs_ctx(ctx),
ttbr1_el1);
#if CTX_INCLUDE_EL2_REGS
cpu_info->ErrCtxEl2Reg[0] = read_el2_ctx_common(get_el2_sysregs_ctx(ctx),

4
plat/nvidia/tegra/common/tegra_fiq_glue.c

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@ -142,7 +142,7 @@ int32_t tegra_fiq_get_intr_context(void)
val = read_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_SP_EL0));
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X2), (val));
val = read_ctx_reg((el1state_ctx), (uint32_t)(CTX_SP_EL1));
val = read_el1_ctx_common(el1state_ctx, sp_el1);
write_ctx_reg((gpregs_ctx), (uint32_t)(CTX_GPREG_X3), (val));
return 0;

4
plat/nvidia/tegra/soc/t194/plat_psci_handlers.c

@ -356,10 +356,10 @@ int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
* will re-init this info from non-secure software when the
* core come online.
*/
actlr_elx = read_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1));
actlr_elx = read_el1_ctx_common((get_el1_sysregs_ctx(ctx)), actlr_el1);
actlr_elx &= ~DENVER_CPU_PMSTATE_MASK;
actlr_elx |= DENVER_CPU_PMSTATE_C1;
write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
write_el1_ctx_common((get_el1_sysregs_ctx(ctx)), actlr_el1, actlr_elx);
/*
* Check if we are exiting from deep sleep and restore SE

6
plat/qti/qtiseclib/src/qtiseclib_cb_interface.c

@ -142,10 +142,10 @@ void qtiseclib_cb_get_ns_ctx(qtiseclib_dbg_a64_ctxt_regs_type *qti_ns_ctx)
qti_ns_ctx->elr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_ELR_EL3);
qti_ns_ctx->spsr_el1 =
read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SPSR_EL1);
read_el1_ctx_common(get_el1_sysregs_ctx(ctx), spsr_el1);
qti_ns_ctx->elr_el1 =
read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_ELR_EL1);
qti_ns_ctx->sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SP_EL1);
read_el1_ctx_common(get_el1_sysregs_ctx(ctx), elr_el1);
qti_ns_ctx->sp_el1 = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sp_el1);
qti_ns_ctx->x0 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
qti_ns_ctx->x1 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1);

8
services/spd/trusty/trusty.c

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@ -160,9 +160,9 @@ static uint64_t trusty_fiq_handler(uint32_t id,
(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
ctx->fiq_sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1);
ctx->fiq_sp_el1 = read_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1);
write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
write_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1, ctx->fiq_handler_sp);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);
SMC_RET0(handle);
@ -221,7 +221,7 @@ static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t
*/
(void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
ctx->fiq_handler_active = 0;
write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
write_el1_ctx_common(get_el1_sysregs_ctx(handle), sp_el1, ctx->fiq_sp_el1);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
SMC_RET0(handle);

43
services/std_svc/spm/el3_spmc/spmc_setup.c

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -319,24 +319,35 @@ static void spmc_el0_sp_setup_mmu(struct secure_partition_desc *sp,
xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
EL1_EL0_REGIME);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
mmu_cfg_params[MMU_CFG_MAIR]);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
/* Store the initialised SCTLR_EL1 value in the cpu_context */
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_TCR_EL1, mmu_cfg_params[MMU_CFG_TCR]);
#else
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1,
mmu_cfg_params[MMU_CFG_TCR]);
#endif /* ERRATA_SPECULATIVE_AT */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
mmu_cfg_params[MMU_CFG_TTBR0]);
}
static void spmc_el0_sp_setup_sctlr_el1(cpu_context_t *ctx)
{
u_register_t sctlr_el1;
u_register_t sctlr_el1_val;
/* Setup SCTLR_EL1 */
sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
sctlr_el1 |=
#if (ERRATA_SPECULATIVE_AT)
sctlr_el1_val = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1);
#else
sctlr_el1_val = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
sctlr_el1_val |=
/*SCTLR_EL1_RES1 |*/
/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
SCTLR_UCI_BIT |
@ -357,7 +368,7 @@ static void spmc_el0_sp_setup_sctlr_el1(cpu_context_t *ctx)
/* Enable MMU. */
SCTLR_M_BIT;
sctlr_el1 &= ~(
sctlr_el1_val &= ~(
/* Explicit data accesses at EL0 are little-endian. */
SCTLR_E0E_BIT |
/*
@ -369,7 +380,13 @@ static void spmc_el0_sp_setup_sctlr_el1(cpu_context_t *ctx)
SCTLR_UMA_BIT
);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
/* Store the initialised SCTLR_EL1 value in the cpu_context */
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1_val);
#else
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_el1_val);
#endif /* ERRATA_SPECULATIVE_AT */
}
static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
@ -383,10 +400,10 @@ static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
/* Setup other system registers. */
/* Shim Exception Vector Base Address */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), vbar_el1,
SPM_SHIM_EXCEPTIONS_PTR);
#if NS_TIMER_SWITCH
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), cntkctl_el1,
EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
#endif
@ -397,7 +414,7 @@ static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
* TTA: Enable access to trace registers.
* ZEN (v8.2): Trap SVE instructions and access to SVE registers.
*/
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), cpacr_el1,
CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
}

43
services/std_svc/spm/spm_mm/spm_mm_setup.c

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@ -27,7 +27,7 @@
void spm_sp_setup(sp_context_t *sp_ctx)
{
cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
u_register_t sctlr_el1_val;
/* Pointer to the MP information from the platform port. */
const spm_mm_boot_info_t *sp_boot_info =
plat_get_secure_partition_boot_info(NULL);
@ -122,19 +122,30 @@ void spm_sp_setup(sp_context_t *sp_ctx)
xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
EL1_EL0_REGIME);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
mmu_cfg_params[MMU_CFG_MAIR]);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
/* Store the initialised SCTLR_EL1 value in the cpu_context */
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_TCR_EL1, mmu_cfg_params[MMU_CFG_TCR]);
#else
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), tcr_el1,
mmu_cfg_params[MMU_CFG_TCR]);
#endif /* ERRATA_SPECULATIVE_AT */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
mmu_cfg_params[MMU_CFG_TTBR0]);
/* Setup SCTLR_EL1 */
u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
sctlr_el1 |=
#if (ERRATA_SPECULATIVE_AT)
sctlr_el1_val = read_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1);
#else
sctlr_el1_val = read_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1);
#endif /* ERRATA_SPECULATIVE_AT */
sctlr_el1_val |=
/*SCTLR_EL1_RES1 |*/
/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
SCTLR_UCI_BIT |
@ -156,7 +167,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
SCTLR_M_BIT
;
sctlr_el1 &= ~(
sctlr_el1_val &= ~(
/* Explicit data accesses at EL0 are little-endian. */
SCTLR_E0E_BIT |
/*
@ -168,7 +179,13 @@ void spm_sp_setup(sp_context_t *sp_ctx)
SCTLR_UMA_BIT
);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
/* Store the initialised SCTLR_EL1 value in the cpu_context */
#if (ERRATA_SPECULATIVE_AT)
write_ctx_reg(get_errata_speculative_at_ctx(ctx),
CTX_ERRATA_SPEC_AT_SCTLR_EL1, sctlr_el1_val);
#else
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), sctlr_el1, sctlr_el1_val);
#endif /* ERRATA_SPECULATIVE_AT */
/*
* Setup other system registers
@ -176,10 +193,10 @@ void spm_sp_setup(sp_context_t *sp_ctx)
*/
/* Shim Exception Vector Base Address */
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), vbar_el1,
SPM_SHIM_EXCEPTIONS_PTR);
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
write_el1_ctx_arch_timer(get_el1_sysregs_ctx(ctx), cntkctl_el1,
EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
/*
@ -189,7 +206,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* TTA: Enable access to trace registers.
* ZEN (v8.2): Trap SVE instructions and access to SVE registers.
*/
write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
write_el1_ctx_common(get_el1_sysregs_ctx(ctx), cpacr_el1,
CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
/*

Loading…
Cancel
Save