
Merge changes from topic "feat_state_part3" into integration

* changes:
  refactor(cpufeat): enable FEAT_VHE for FEAT_STATE_CHECKED
  refactor(mpam): enable FEAT_MPAM for FEAT_STATE_CHECKED
  feat(libc): add support for fallthrough statement
  refactor(spe): enable FEAT_SPE for FEAT_STATE_CHECKED
  refactor(cpufeat): rename ENABLE_SPE_FOR_LOWER_ELS to ENABLE_SPE_FOR_NS
  fix(spe): drop SPE EL2 context switch code
pull/1995/head
Manish Pandey authored 2 years ago, committed by TrustedFirmware Code Review
commit 7419b7a72e
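All five patches apply the same FEAT_STATE pattern, so it is worth sketching once before the diffs. This is a minimal reconstruction from the changes below: the FEAT_STATE_* constants and the is_feat_*_supported() shape appear verbatim in the sources, while the XYZ names are placeholders.

/* Tri-state build flags such as ENABLE_SPE_FOR_NS or ENABLE_FEAT_VHE. */
#define FEAT_STATE_DISABLED	0	/* feature compiled out */
#define FEAT_STATE_ALWAYS	1	/* feature assumed present */
#define FEAT_STATE_CHECKED	2	/* probe the ID registers at runtime */

static inline bool is_feat_xyz_supported(void)
{
	if (ENABLE_FEAT_XYZ == FEAT_STATE_DISABLED) {
		return false;
	}

	if (ENABLE_FEAT_XYZ == FEAT_STATE_ALWAYS) {
		return true;
	}

	return read_feat_xyz_id_field() != 0U;	/* FEAT_STATE_CHECKED */
}

Because the flag is a compile-time constant, the first two cases fold away entirely; only FEAT_STATE_CHECKED costs an ID-register read, and callers can drop their #if guards.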
24 changed files (number of changed lines in parentheses):

1. Makefile (4)
2. bl31/bl31.mk (4)
3. common/feat_detect.c (25)
4. docs/getting_started/build-options.rst (7)
5. include/arch/aarch32/arch_features.h (6)
6. include/arch/aarch64/arch.h (8)
7. include/arch/aarch64/arch_features.h (51)
8. include/arch/aarch64/arch_helpers.h (26)
9. include/lib/el3_runtime/aarch64/context.h (13)
10. include/lib/extensions/mpam.h (6)
11. include/lib/extensions/spe.h (10)
12. include/lib/libc/cdefs.h (1)
13. lib/el3_runtime/aarch64/context.S (252)
14. lib/el3_runtime/aarch64/context_mgmt.c (136)
15. lib/extensions/mpam/mpam.c (5)
16. lib/extensions/spe/spe.c (17)
17. make_helpers/defaults.mk (4)
18. plat/allwinner/common/allwinner-common.mk (2)
19. plat/arm/board/arm_fpga/fpga_bl31_setup.c (2)
20. plat/arm/board/fvp/fvp_pm.c (7)
21. plat/arm/board/fvp/platform.mk (3)
22. plat/arm/board/tc/platform.mk (2)
23. plat/qti/msm8916/platform.mk (2)
24. services/std_svc/sdei/sdei_intr_mgmt.c (2)

Makefile (4 changed lines)

@@ -1096,7 +1096,6 @@ $(eval $(call assert_booleans,\
 	ENABLE_RUNTIME_INSTRUMENTATION \
 	ENABLE_SME_FOR_NS \
 	ENABLE_SME_FOR_SWD \
-	ENABLE_SPE_FOR_LOWER_ELS \
 	ENABLE_SVE_FOR_NS \
 	ENABLE_SVE_FOR_SWD \
 	ERROR_DEPRECATED \
@@ -1182,6 +1181,7 @@ $(eval $(call assert_numerics,\
 	ENABLE_FEAT_VHE \
 	ENABLE_MPAM_FOR_LOWER_ELS \
 	ENABLE_RME \
+	ENABLE_SPE_FOR_NS \
 	ENABLE_TRF_FOR_NS \
 	FW_ENC_STATUS \
 	NR_OF_FW_BANKS \
@@ -1237,7 +1237,7 @@ $(eval $(call add_defines,\
 	ENABLE_RUNTIME_INSTRUMENTATION \
 	ENABLE_SME_FOR_NS \
 	ENABLE_SME_FOR_SWD \
-	ENABLE_SPE_FOR_LOWER_ELS \
+	ENABLE_SPE_FOR_NS \
 	ENABLE_SVE_FOR_NS \
 	ENABLE_SVE_FOR_SWD \
 	ENCRYPT_BL31 \

bl31/bl31.mk (4 changed lines)

@@ -87,7 +87,7 @@ BL31_SOURCES += services/std_svc/trng/trng_main.c \
 				services/std_svc/trng/trng_entropy_pool.c
 endif
-ifeq (${ENABLE_SPE_FOR_LOWER_ELS},1)
+ifneq (${ENABLE_SPE_FOR_NS},0)
 BL31_SOURCES += lib/extensions/spe/spe.c
 endif
@@ -108,7 +108,7 @@ BL31_SOURCES += lib/extensions/sve/sve.c
 endif
 endif
-ifeq (${ENABLE_MPAM_FOR_LOWER_ELS},1)
+ifneq (${ENABLE_MPAM_FOR_LOWER_ELS},0)
 BL31_SOURCES += lib/extensions/mpam/mpam.c
 endif

common/feat_detect.c (25 changed lines)

@@ -90,16 +90,6 @@ static void read_feat_pan(void)
 #endif
 }
-/******************************************************
- * Feature : FEAT_VHE (Virtualization Host Extensions)
- *****************************************************/
-static void read_feat_vhe(void)
-{
-#if (ENABLE_FEAT_VHE == FEAT_STATE_ALWAYS)
-	feat_detect_panic(is_armv8_1_vhe_present(), "VHE");
-#endif
-}
 /*******************************************************************************
  * Feature : FEAT_RAS (Reliability, Availability, and Serviceability Extension)
  ******************************************************************************/
@@ -130,16 +120,6 @@ static void read_feat_dit(void)
 #endif
 }
-/****************************************************************************
- * Feature : FEAT_MPAM (Memory Partitioning and Monitoring (MPAM) Extension)
- ***************************************************************************/
-static void read_feat_mpam(void)
-{
-#if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_ALWAYS)
-	feat_detect_panic(get_mpam_version() != 0U, "MPAM");
-#endif
-}
 /**************************************************************
  * Feature : FEAT_NV2 (Enhanced Nested Virtualization Support)
  *************************************************************/
@@ -281,7 +261,7 @@ void detect_arch_features(void)
 	/* v8.1 features */
 	read_feat_pan();
-	read_feat_vhe();
+	check_feature(ENABLE_FEAT_VHE, read_feat_vhe_id_field(), "VHE", 1, 1);
 	/* v8.2 features */
 	read_feat_ras();
@@ -293,7 +273,8 @@ void detect_arch_features(void)
 	read_feat_dit();
 	check_feature(ENABLE_FEAT_AMUv1, read_feat_amu_id_field(),
 		      "AMUv1", 1, 2);
-	read_feat_mpam();
+	check_feature(ENABLE_MPAM_FOR_LOWER_ELS, read_feat_mpam_version(),
+		      "MPAM", 1, 1);
 	read_feat_nv2();
 	read_feat_sel2();
 	check_feature(ENABLE_TRF_FOR_NS, read_feat_trf_id_field(),
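check_feature() is defined elsewhere in feat_detect.c and is not part of this diff. Judging from the call sites above, its contract is roughly the following; this is a sketch, not the verbatim implementation, and the tainted flag and ERROR() usage are assumed from the file's existing conventions.

static bool tainted;

/* state: the build flag (a FEAT_STATE_* value); field: the ID-register
 * field just read; min/max: the feature version range this build knows.
 */
static void check_feature(int state, unsigned long field,
			  const char *feat_name, unsigned int min,
			  unsigned int max)
{
	if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
		ERROR("FEAT_%s not supported by the PE\n", feat_name);
		tainted = true;
	}

	if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
		ERROR("FEAT_%s is version %lu, expected at most %u\n",
		      feat_name, field, max);
		tainted = true;
	}
}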

docs/getting_started/build-options.rst (7 changed lines)

@@ -428,10 +428,11 @@ Common build options
    handle context switching for SME, SVE, and FPU/SIMD registers to ensure that
    no data is leaked to non-secure world. This is experimental. Default is 0.
--  ``ENABLE_SPE_FOR_LOWER_ELS`` : Boolean option to enable Statistical Profiling
+-  ``ENABLE_SPE_FOR_NS`` : Numeric value to enable Statistical Profiling
    extensions. This is an optional architectural feature for AArch64.
-   The default is 1 but is automatically disabled when the target architecture
-   is AArch32.
+   This flag can take the values 0 to 2, to align with the ``FEATURE_DETECTION``
+   mechanism. The default is 2 but is automatically disabled when the target
+   architecture is AArch32.
 -  ``ENABLE_SVE_FOR_NS``: Boolean option to enable Scalable Vector Extension
    (SVE) for the Non-secure world only. SVE is an optional architectural feature

include/arch/aarch32/arch_features.h (6 changed lines)

@@ -43,4 +43,10 @@ static inline bool is_feat_trf_supported(void)
 	return read_feat_trf_id_field() != 0U;
 }
+static inline bool is_feat_spe_supported(void)
+{
+	/* FEAT_SPE is AArch64 only */
+	return false;
+}
 #endif /* ARCH_FEATURES_H */

include/arch/aarch64/arch.h (8 changed lines)

@@ -121,6 +121,8 @@
 #define TRFCR_EL2		S3_4_C1_C2_1
 #define PMSCR_EL2		S3_4_C9_C9_0
 #define TFSR_EL2		S3_4_C5_C6_0
+#define CONTEXTIDR_EL2		S3_4_C13_C0_1
+#define TTBR1_EL2		S3_4_C2_C0_1
 /*******************************************************************************
  * Generic timer memory mapped registers & offsets
@@ -1077,10 +1079,8 @@
 #define MPAMHCR_EL2		S3_4_C10_C4_0
 #define MPAM3_EL3		S3_6_C10_C5_0
-#define MPAMIDR_EL1_HAS_HCR_SHIFT	ULL(0x11)
-#define MPAMIDR_EL1_VPMR_MAX_SHIFT	ULL(0x12)
-#define MPAMIDR_EL1_VPMR_MAX_WIDTH	ULL(0x3)
-#define MPAMIDR_EL1_VPMR_MAX_POSSIBLE	ULL(0x7)
+#define MPAMIDR_EL1_VPMR_MAX_SHIFT	ULL(18)
+#define MPAMIDR_EL1_VPMR_MAX_MASK	ULL(0x7)
 /*******************************************************************************
  * Definitions for system register interface to AMU for FEAT_AMUv1
  ******************************************************************************/
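Note that ULL(0x12) and ULL(18) are the same shift value; the functional change is replacing the WIDTH/POSSIBLE pair with a MASK that C code can apply directly. A small self-contained example with a fabricated register value (nothing here touches real hardware; the constants are repeated locally so it compiles standalone):

#include <stdint.h>
#include <stdio.h>

#define MPAMIDR_EL1_VPMR_MAX_SHIFT	18ULL
#define MPAMIDR_EL1_VPMR_MAX_MASK	0x7ULL

int main(void)
{
	/* Fabricated MPAMIDR_EL1 value: HAS_HCR (bit 17) set and
	 * VPMR_MAX (bits [20:18]) equal to 3.
	 */
	uint64_t mpam_idr = (1ULL << 17) | (3ULL << 18);
	unsigned int vpmr_max =
		(unsigned int)((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) &
			       MPAMIDR_EL1_VPMR_MAX_MASK);

	/* Prints 3: MPAMVPM0_EL2..MPAMVPM3_EL2 are implemented, so four
	 * VPM registers take part in the EL2 context switch.
	 */
	printf("VPMR_MAX = %u\n", vpmr_max);
	return 0;
}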

include/arch/aarch64/arch_features.h (51 changed lines)

@@ -27,10 +27,22 @@ static inline bool is_armv8_1_pan_present(void)
 		ID_AA64MMFR1_EL1_PAN_MASK) != 0U;
 }
-static inline bool is_armv8_1_vhe_present(void)
+static inline unsigned int read_feat_vhe_id_field(void)
 {
-	return ((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_VHE_SHIFT) &
-		ID_AA64MMFR1_EL1_VHE_MASK) != 0U;
+	return ISOLATE_FIELD(read_id_aa64mmfr1_el1(), ID_AA64MMFR1_EL1_VHE);
+}
+
+static inline bool is_feat_vhe_supported(void)
+{
+	if (ENABLE_FEAT_VHE == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (ENABLE_FEAT_VHE == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	return read_feat_vhe_id_field() != 0U;
 }
 static inline bool is_armv8_2_ttcnp_present(void)
@@ -184,7 +196,7 @@ static inline bool is_armv8_6_feat_amuv1p1_present(void)
  *	0x11: v1.1 Armv8.4 or later
  *
  */
-static inline unsigned int get_mpam_version(void)
+static inline unsigned int read_feat_mpam_version(void)
 {
 	return (unsigned int)((((read_id_aa64pfr0_el1() >>
 		ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
@@ -192,6 +204,19 @@ static inline unsigned int get_mpam_version(void)
 		ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
 }
+static inline bool is_feat_mpam_supported(void)
+{
+	if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	return read_feat_mpam_version() != 0U;
+}
 static inline unsigned int read_feat_hcx_id_field(void)
 {
 	return ISOLATE_FIELD(read_id_aa64mmfr1_el1(), ID_AA64MMFR1_EL1_HCX);
@@ -249,10 +274,22 @@ static inline bool is_armv8_0_feat_csv2_2_present(void)
 /**********************************************************************************
  * Function to identify the presence of FEAT_SPE (Statistical Profiling Extension)
  *********************************************************************************/
-static inline bool is_armv8_2_feat_spe_present(void)
+static inline unsigned int read_feat_spe_id_field(void)
 {
-	return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
-		ID_AA64DFR0_PMS_MASK) != ID_AA64DFR0_SPE_NOT_SUPPORTED);
+	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_PMS);
+}
+
+static inline bool is_feat_spe_supported(void)
+{
+	if (ENABLE_SPE_FOR_NS == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (ENABLE_SPE_FOR_NS == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	return read_feat_spe_id_field() != 0U;
 }
 /*******************************************************************************
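Several of the new readers use ISOLATE_FIELD, defined earlier in this header. As a sketch of the idiom (the exact TF-A macro may differ in casts), it token-pastes the _SHIFT and _MASK suffixes onto the field name:

#define ISOLATE_FIELD(reg, feat) \
	((unsigned int)(((reg) >> (feat##_SHIFT)) & (feat##_MASK)))

So read_feat_spe_id_field() expands to a shift by ID_AA64DFR0_PMS_SHIFT and a mask with ID_AA64DFR0_PMS_MASK, matching the open-coded version it replaces.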

include/arch/aarch64/arch_helpers.h (26 changed lines)

@@ -522,11 +522,6 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
 DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
-DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
-DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
-DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
 DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
 DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
@@ -545,9 +540,28 @@ DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
 DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
 DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
-/* Armv8.2 Registers */
+/* Armv8.1 VHE Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(contextidr_el2, CONTEXTIDR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(ttbr1_el2, TTBR1_EL2)
+
+/* Armv8.2 ID Registers */
 DEFINE_RENAME_IDREG_READ_FUNC(id_aa64mmfr2_el1, ID_AA64MMFR2_EL1)
+/* Armv8.2 MPAM Registers */
+DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm0_el2, MPAMVPM0_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm1_el2, MPAMVPM1_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm2_el2, MPAMVPM2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm3_el2, MPAMVPM3_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm4_el2, MPAMVPM4_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm5_el2, MPAMVPM5_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm6_el2, MPAMVPM6_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm7_el2, MPAMVPM7_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpmv_el2, MPAMVPMV_EL2)
 /* Armv8.3 Pointer Authentication Registers */
 DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeyhi_el1, APIAKeyHi_EL1)
 DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeylo_el1, APIAKeyLo_EL1)
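For readers unfamiliar with these helpers: each DEFINE_RENAME_* invocation generates mrs/msr accessors for a register the assembler only knows by its S3_... encoding (hence the CONTEXTIDR_EL2/TTBR1_EL2 defines added to arch.h above). A simplified sketch of what the macros expand to; the real definitions live earlier in arch_helpers.h and may differ in detail:

#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name)	\
static inline u_register_t read_ ## _name(void)		\
{								\
	u_register_t v;						\
	__asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));	\
	return v;						\
}

#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name)	\
static inline void write_ ## _name(u_register_t v)		\
{								\
	__asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));\
}

#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name)		\
	DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name)	\
	DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name)

Moving the MPAM accessors here (and adding the mpamvpm* ones) is what lets context_mgmt.c below save and restore these registers from C instead of assembly.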

include/lib/el3_runtime/aarch64/context.h (13 changed lines)

@@ -193,7 +193,6 @@
 // Only if MTE registers in use
 #define CTX_TFSR_EL2		U(0x100)
-// Only if ENABLE_MPAM_FOR_LOWER_ELS==1
 #define CTX_MPAM2_EL2		U(0x108)
 #define CTX_MPAMHCR_EL2		U(0x110)
 #define CTX_MPAMVPM0_EL2	U(0x118)
@@ -514,26 +513,14 @@ void el1_sysregs_context_restore(el1_sysregs_t *regs);
 #if CTX_INCLUDE_EL2_REGS
 void el2_sysregs_context_save_common(el2_sysregs_t *regs);
 void el2_sysregs_context_restore_common(el2_sysregs_t *regs);
-#if ENABLE_SPE_FOR_LOWER_ELS
-void el2_sysregs_context_save_spe(el2_sysregs_t *regs);
-void el2_sysregs_context_restore_spe(el2_sysregs_t *regs);
-#endif /* ENABLE_SPE_FOR_LOWER_ELS */
 #if CTX_INCLUDE_MTE_REGS
 void el2_sysregs_context_save_mte(el2_sysregs_t *regs);
 void el2_sysregs_context_restore_mte(el2_sysregs_t *regs);
 #endif /* CTX_INCLUDE_MTE_REGS */
-#if ENABLE_MPAM_FOR_LOWER_ELS
-void el2_sysregs_context_save_mpam(el2_sysregs_t *regs);
-void el2_sysregs_context_restore_mpam(el2_sysregs_t *regs);
-#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
 #if ENABLE_FEAT_ECV
 void el2_sysregs_context_save_ecv(el2_sysregs_t *regs);
 void el2_sysregs_context_restore_ecv(el2_sysregs_t *regs);
 #endif /* ENABLE_FEAT_ECV */
-#if ENABLE_FEAT_VHE
-void el2_sysregs_context_save_vhe(el2_sysregs_t *regs);
-void el2_sysregs_context_restore_vhe(el2_sysregs_t *regs);
-#endif /* ENABLE_FEAT_VHE */
 #if RAS_EXTENSION
 void el2_sysregs_context_save_ras(el2_sysregs_t *regs);
 void el2_sysregs_context_restore_ras(el2_sysregs_t *regs);
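The CTX_* values above are byte offsets into the EL2 register frame. The C save/restore code reads and writes them through accessors along these lines (a sketch of the macros defined in this header; offsets are divided down to an index into an array of 64-bit slots):

#define DWORD_SHIFT			U(3)
#define read_ctx_reg(ctx, offset)	((ctx)->ctx_regs[(offset) >> DWORD_SHIFT])
#define write_ctx_reg(ctx, offset, val)	\
	(((ctx)->ctx_regs[(offset) >> DWORD_SHIFT]) = (val))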

include/lib/extensions/mpam.h (6 changed lines)

@@ -9,6 +9,12 @@
 #include <stdbool.h>
+#if ENABLE_MPAM_FOR_LOWER_ELS
 void mpam_enable(bool el2_unused);
+#else
+static inline void mpam_enable(bool el2_unused)
+{
+}
+#endif
 #endif /* MPAM_H */

include/lib/extensions/spe.h (10 changed lines)

@@ -9,8 +9,16 @@
 #include <stdbool.h>
-bool spe_supported(void);
+#if ENABLE_SPE_FOR_NS
 void spe_enable(bool el2_unused);
 void spe_disable(void);
+#else
+static inline void spe_enable(bool el2_unused)
+{
+}
+static inline void spe_disable(void)
+{
+}
+#endif
 #endif /* SPE_H */
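Note that the disabled-case stubs must be static inline, as header-defined plain functions would cause multiple-definition link errors. These empty stubs are what let call sites lose their preprocessor guards; the before/after at a typical call site (taken from the context_mgmt.c hunks further down):

/* Before: guarded at every call site. */
#if ENABLE_SPE_FOR_LOWER_ELS
	spe_enable(el2_unused);
#endif

/* After: plain C. With ENABLE_SPE_FOR_NS == 0, is_feat_spe_supported()
 * is constant false and the empty stub lets the whole call compile away.
 */
	if (is_feat_spe_supported()) {
		spe_enable(el2_unused);
	}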

include/lib/libc/cdefs.h (1 changed line)

@@ -15,6 +15,7 @@
 #define __maybe_unused	__attribute__((__unused__))
 #define __aligned(x)	__attribute__((__aligned__(x)))
 #define __section(x)	__attribute__((__section__(x)))
+#define __fallthrough	__attribute__((__fallthrough__))
 #if RECLAIM_INIT_CODE
 /*
  * Add each function to a section that is unique so the functions can still
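The new macro wraps the statement attribute that tells GCC and Clang a switch-case fall-through is deliberate, keeping -Wimplicit-fallthrough quiet. Minimal usage, with a hypothetical function (the real consumer is the MPAM context code in context_mgmt.c below):

static unsigned int vpm_regs_to_save(unsigned int vpmr_max)
{
	unsigned int count = 1U;	/* MPAMVPM0 is always present */

	switch (vpmr_max) {
	case 2:
		count++;
		__fallthrough;		/* deliberate: fall into case 1 */
	case 1:
		count++;
		break;
	default:
		break;
	}

	return count;
}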

lib/el3_runtime/aarch64/context.S (252 changed lines)

@@ -13,26 +13,14 @@
 #if CTX_INCLUDE_EL2_REGS
 	.global	el2_sysregs_context_save_common
 	.global	el2_sysregs_context_restore_common
-#if ENABLE_SPE_FOR_LOWER_ELS
-	.global	el2_sysregs_context_save_spe
-	.global	el2_sysregs_context_restore_spe
-#endif /* ENABLE_SPE_FOR_LOWER_ELS */
 #if CTX_INCLUDE_MTE_REGS
 	.global	el2_sysregs_context_save_mte
 	.global	el2_sysregs_context_restore_mte
 #endif /* CTX_INCLUDE_MTE_REGS */
-#if ENABLE_MPAM_FOR_LOWER_ELS
-	.global	el2_sysregs_context_save_mpam
-	.global	el2_sysregs_context_restore_mpam
-#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
 #if ENABLE_FEAT_ECV
 	.global	el2_sysregs_context_save_ecv
 	.global	el2_sysregs_context_restore_ecv
 #endif /* ENABLE_FEAT_ECV */
-#if ENABLE_FEAT_VHE
-	.global	el2_sysregs_context_save_vhe
-	.global	el2_sysregs_context_restore_vhe
-#endif /* ENABLE_FEAT_VHE */
 #if RAS_EXTENSION
 	.global	el2_sysregs_context_save_ras
 	.global	el2_sysregs_context_restore_ras
@@ -220,20 +208,6 @@ func el2_sysregs_context_restore_common
 	ret
 endfunc el2_sysregs_context_restore_common
-#if ENABLE_SPE_FOR_LOWER_ELS
-func el2_sysregs_context_save_spe
-	mrs	x13, PMSCR_EL2
-	str	x13, [x0, #CTX_PMSCR_EL2]
-	ret
-endfunc el2_sysregs_context_save_spe
-func el2_sysregs_context_restore_spe
-	ldr	x13, [x0, #CTX_PMSCR_EL2]
-	msr	PMSCR_EL2, x13
-	ret
-endfunc el2_sysregs_context_restore_spe
-#endif /* ENABLE_SPE_FOR_LOWER_ELS */
 #if CTX_INCLUDE_MTE_REGS
 func el2_sysregs_context_save_mte
 	mrs	x9, TFSR_EL2
@@ -248,208 +222,6 @@ func el2_sysregs_context_restore_mte
 endfunc el2_sysregs_context_restore_mte
 #endif /* CTX_INCLUDE_MTE_REGS */
-#if ENABLE_MPAM_FOR_LOWER_ELS
-func el2_sysregs_context_save_mpam
-	mrs	x10, MPAM2_EL2
-	str	x10, [x0, #CTX_MPAM2_EL2]
-	mrs	x10, MPAMIDR_EL1
-	/*
-	 * The context registers that we intend to save would be part of the
-	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
-	 */
-	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
-	/*
-	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
-	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to save
-	 * the context of these registers.
-	 */
-	mrs	x11, MPAMHCR_EL2
-	mrs	x12, MPAMVPM0_EL2
-	stp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
-	mrs	x13, MPAMVPMV_EL2
-	str	x13, [x0, #CTX_MPAMVPMV_EL2]
-	/*
-	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
-	 * VPMR value. Proceed to save the context of registers from
-	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
-	 * VPMR_MAX should not be zero if HAS_HCR == 1.
-	 */
-	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
-		#MPAMIDR_EL1_VPMR_MAX_WIDTH
-	/*
-	 * Once VPMR_MAX has been identified, calculate the offset relative to
-	 * PC to jump to so that relevant context can be saved. The offset is
-	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
-	 * saving one VPM register) + (absolute address of label "1").
-	 */
-	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
-	sub	w10, w11, w10
-	/* Calculate the size of one block of MPAMVPM*_EL2 save */
-	adr	x11, 1f
-	adr	x12, 2f
-	sub	x12, x12, x11
-	madd	x10, x10, x12, x11
-	br	x10
-	/*
-	 * The branch above would land properly on one of the blocks following
-	 * label "1". Make sure that the order of save is retained.
-	 */
-1:
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x10, MPAMVPM7_EL2
-	str	x10, [x0, #CTX_MPAMVPM7_EL2]
-2:
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x11, MPAMVPM6_EL2
-	str	x11, [x0, #CTX_MPAMVPM6_EL2]
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x12, MPAMVPM5_EL2
-	str	x12, [x0, #CTX_MPAMVPM5_EL2]
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x13, MPAMVPM4_EL2
-	str	x13, [x0, #CTX_MPAMVPM4_EL2]
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x14, MPAMVPM3_EL2
-	str	x14, [x0, #CTX_MPAMVPM3_EL2]
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x15, MPAMVPM2_EL2
-	str	x15, [x0, #CTX_MPAMVPM2_EL2]
-#if ENABLE_BTI
-	bti	j
-#endif
-	mrs	x16, MPAMVPM1_EL2
-	str	x16, [x0, #CTX_MPAMVPM1_EL2]
-3:	ret
-endfunc el2_sysregs_context_save_mpam
-func el2_sysregs_context_restore_mpam
-	ldr	x10, [x0, #CTX_MPAM2_EL2]
-	msr	MPAM2_EL2, x10
-	mrs	x10, MPAMIDR_EL1
-	/*
-	 * The context registers that we intend to restore would be part of the
-	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
-	 */
-	tbz	w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
-	/*
-	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
-	 * system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to restore
-	 * the context of these registers
-	 */
-	ldp	x11, x12, [x0, #CTX_MPAMHCR_EL2]
-	msr	MPAMHCR_EL2, x11
-	msr	MPAMVPM0_EL2, x12
-	ldr	x13, [x0, #CTX_MPAMVPMV_EL2]
-	msr	MPAMVPMV_EL2, x13
-	/*
-	 * MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
-	 * VPMR value. Proceed to restore the context of registers from
-	 * MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. from MPAM spec,
-	 * VPMR_MAX should not be zero if HAS_HCR == 1.
-	 */
-	ubfx	x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
-		#MPAMIDR_EL1_VPMR_MAX_WIDTH
-	/*
-	 * Once VPMR_MAX has been identified, calculate the offset relative to
-	 * PC to jump to so that relevant context can be restored. The offset is
-	 * calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
-	 * restoring one VPM register) + (absolute address of label "1").
-	 */
-	mov	w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
-	sub	w10, w11, w10
-	/* Calculate the size of one block of MPAMVPM*_EL2 restore */
-	adr	x11, 1f
-	adr	x12, 2f
-	sub	x12, x12, x11
-	madd	x10, x10, x12, x11
-	br	x10
-	/*
-	 * The branch above would land properly on one of the blocks following
-	 * label "1". Make sure that the order of restore is retained.
-	 */
-1:
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x10, [x0, #CTX_MPAMVPM7_EL2]
-	msr	MPAMVPM7_EL2, x10
-2:
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x11, [x0, #CTX_MPAMVPM6_EL2]
-	msr	MPAMVPM6_EL2, x11
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x12, [x0, #CTX_MPAMVPM5_EL2]
-	msr	MPAMVPM5_EL2, x12
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x13, [x0, #CTX_MPAMVPM4_EL2]
-	msr	MPAMVPM4_EL2, x13
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x14, [x0, #CTX_MPAMVPM3_EL2]
-	msr	MPAMVPM3_EL2, x14
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x15, [x0, #CTX_MPAMVPM2_EL2]
-	msr	MPAMVPM2_EL2, x15
-#if ENABLE_BTI
-	bti	j
-#endif
-	ldr	x16, [x0, #CTX_MPAMVPM1_EL2]
-	msr	MPAMVPM1_EL2, x16
-3:	ret
-endfunc el2_sysregs_context_restore_mpam
-#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
 #if ENABLE_FEAT_ECV
 func el2_sysregs_context_save_ecv
 	mrs	x11, CNTPOFF_EL2
@@ -464,30 +236,6 @@ func el2_sysregs_context_restore_ecv
 endfunc el2_sysregs_context_restore_ecv
 #endif /* ENABLE_FEAT_ECV */
-#if ENABLE_FEAT_VHE
-func el2_sysregs_context_save_vhe
-	/*
-	 * CONTEXTIDR_EL2 register is saved only when FEAT_VHE or
-	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
-	 */
-	mrs	x9, contextidr_el2
-	mrs	x10, ttbr1_el2
-	stp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
-	ret
-endfunc el2_sysregs_context_save_vhe
-func el2_sysregs_context_restore_vhe
-	/*
-	 * CONTEXTIDR_EL2 register is restored only when FEAT_VHE or
-	 * FEAT_Debugv8p2 (currently not in TF-A) is supported.
-	 */
-	ldp	x9, x10, [x0, #CTX_CONTEXTIDR_EL2]
-	msr	contextidr_el2, x9
-	msr	ttbr1_el2, x10
-	ret
-endfunc el2_sysregs_context_restore_vhe
-#endif /* ENABLE_FEAT_VHE */
 #if RAS_EXTENSION
 func el2_sysregs_context_save_ras
 	/*

lib/el3_runtime/aarch64/context_mgmt.c (136 changed lines)

@@ -482,9 +482,9 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
 {
 #if IMAGE_BL31
-#if ENABLE_SPE_FOR_LOWER_ELS
-	spe_enable(el2_unused);
-#endif
+	if (is_feat_spe_supported()) {
+		spe_enable(el2_unused);
+	}
 #if ENABLE_AMU
 	amu_enable(el2_unused, ctx);
@@ -498,9 +498,9 @@ static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
 	sve_enable(ctx);
 #endif
-#if ENABLE_MPAM_FOR_LOWER_ELS
-	mpam_enable(el2_unused);
-#endif
+	if (is_feat_mpam_supported()) {
+		mpam_enable(el2_unused);
+	}
 	if (is_feat_trbe_supported()) {
 		trbe_enable();
@@ -834,6 +834,96 @@ static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
 	write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
 }
+static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
+{
+	u_register_t mpam_idr = read_mpamidr_el1();
+
+	write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());
+
+	/*
+	 * The context registers that we intend to save would be part of the
+	 * PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
+	 */
+	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
+		return;
+	}
+
+	/*
+	 * MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
+	 * MPAMIDR_HAS_HCR_BIT == 1.
+	 */
+	write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
+	write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
+	write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());
+
+	/*
+	 * The number of MPAMVPM registers is implementation defined, their
+	 * number is stored in the MPAMIDR_EL1 register.
+	 */
+	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
+	case 7:
+		write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
+		__fallthrough;
+	case 6:
+		write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
+		__fallthrough;
+	case 5:
+		write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
+		__fallthrough;
+	case 4:
+		write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
+		__fallthrough;
+	case 3:
+		write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
+		__fallthrough;
+	case 2:
+		write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
+		__fallthrough;
+	case 1:
+		write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
+		break;
+	}
+}
+
+static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
+{
+	u_register_t mpam_idr = read_mpamidr_el1();
+
+	write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));
+
+	if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
+		return;
+	}
+
+	write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
+	write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
+	write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));
+
+	switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
+	case 7:
+		write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
+		__fallthrough;
+	case 6:
+		write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
+		__fallthrough;
+	case 5:
+		write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
+		__fallthrough;
+	case 4:
+		write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
+		__fallthrough;
+	case 3:
+		write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
+		__fallthrough;
+	case 2:
+		write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
+		__fallthrough;
+	case 1:
+		write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
+		break;
+	}
+}
 /*******************************************************************************
  * Save EL2 sysreg context
  ******************************************************************************/
@@ -856,15 +946,12 @@ void cm_el2_sysregs_context_save(uint32_t security_state)
 		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
 		el2_sysregs_context_save_common(el2_sysregs_ctx);
-#if ENABLE_SPE_FOR_LOWER_ELS
-		el2_sysregs_context_save_spe(el2_sysregs_ctx);
-#endif
 #if CTX_INCLUDE_MTE_REGS
 		el2_sysregs_context_save_mte(el2_sysregs_ctx);
 #endif
-#if ENABLE_MPAM_FOR_LOWER_ELS
-		el2_sysregs_context_save_mpam(el2_sysregs_ctx);
-#endif
+		if (is_feat_mpam_supported()) {
+			el2_sysregs_context_save_mpam(el2_sysregs_ctx);
+		}
 		if (is_feat_fgt_supported()) {
 			el2_sysregs_context_save_fgt(el2_sysregs_ctx);
@@ -873,9 +960,12 @@ void cm_el2_sysregs_context_save(uint32_t security_state)
 #if ENABLE_FEAT_ECV
 		el2_sysregs_context_save_ecv(el2_sysregs_ctx);
 #endif
-#if ENABLE_FEAT_VHE
-		el2_sysregs_context_save_vhe(el2_sysregs_ctx);
-#endif
+		if (is_feat_vhe_supported()) {
+			write_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2,
+				      read_contextidr_el2());
+			write_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2,
+				      read_ttbr1_el2());
+		}
 #if RAS_EXTENSION
 		el2_sysregs_context_save_ras(el2_sysregs_ctx);
 #endif
@@ -919,15 +1009,12 @@ void cm_el2_sysregs_context_restore(uint32_t security_state)
 		el2_sysregs_ctx = get_el2_sysregs_ctx(ctx);
 		el2_sysregs_context_restore_common(el2_sysregs_ctx);
-#if ENABLE_SPE_FOR_LOWER_ELS
-		el2_sysregs_context_restore_spe(el2_sysregs_ctx);
-#endif
 #if CTX_INCLUDE_MTE_REGS
 		el2_sysregs_context_restore_mte(el2_sysregs_ctx);
 #endif
-#if ENABLE_MPAM_FOR_LOWER_ELS
-		el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
-#endif
+		if (is_feat_mpam_supported()) {
+			el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
+		}
 		if (is_feat_fgt_supported()) {
 			el2_sysregs_context_restore_fgt(el2_sysregs_ctx);
@@ -936,9 +1023,10 @@ void cm_el2_sysregs_context_restore(uint32_t security_state)
 #if ENABLE_FEAT_ECV
 		el2_sysregs_context_restore_ecv(el2_sysregs_ctx);
 #endif
-#if ENABLE_FEAT_VHE
-		el2_sysregs_context_restore_vhe(el2_sysregs_ctx);
-#endif
+		if (is_feat_vhe_supported()) {
+			write_contextidr_el2(read_ctx_reg(el2_sysregs_ctx, CTX_CONTEXTIDR_EL2));
+			write_ttbr1_el2(read_ctx_reg(el2_sysregs_ctx, CTX_TTBR1_EL2));
+		}
 #if RAS_EXTENSION
 		el2_sysregs_context_restore_ras(el2_sysregs_ctx);
 #endif
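The descending switch reproduces the removed assembly's computed branch: both start executing at the entry for VPMR_MAX and run through to MPAMVPM1. A self-contained illustration of which registers a given VPMR_MAX touches; save_vpm() is a stand-in for the write_ctx_reg()/read_mpamvpmN_el2() pairs, not TF-A code:

#include <stdio.h>

static void save_vpm(unsigned int n)
{
	printf("saving MPAMVPM%u_EL2\n", n);
}

int main(void)
{
	unsigned int vpmr_max = 2U;	/* pretend MPAMIDR_EL1.VPMR_MAX == 2 */

	save_vpm(0U);			/* MPAMVPM0 is saved unconditionally */
	switch (vpmr_max) {
	case 7: save_vpm(7U); /* fall through */
	case 6: save_vpm(6U); /* fall through */
	case 5: save_vpm(5U); /* fall through */
	case 4: save_vpm(4U); /* fall through */
	case 3: save_vpm(3U); /* fall through */
	case 2: save_vpm(2U); /* fall through */
	case 1: save_vpm(1U);
		break;
	}

	/* Output: MPAMVPM0, MPAMVPM2, MPAMVPM1, the same set the removed
	 * assembly reached by branching (7 - VPMR_MAX) blocks past label 1.
	 */
	return 0;
}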

lib/extensions/mpam/mpam.c (5 changed lines)

@@ -13,11 +13,6 @@
 void mpam_enable(bool el2_unused)
 {
-	/* Check if MPAM is implemented */
-	if (get_mpam_version() == 0U) {
-		return;
-	}
 	/*
 	 * Enable MPAM, and disable trapping to EL3 when lower ELs access their
 	 * own MPAM registers.

lib/extensions/spe/spe.c (17 changed lines)

@@ -7,6 +7,7 @@
 #include <stdbool.h>
 #include <arch.h>
+#include <arch_features.h>
 #include <arch_helpers.h>
 #include <lib/el3_runtime/pubsub.h>
 #include <lib/extensions/spe.h>
@@ -20,21 +21,10 @@ static inline void psb_csync(void)
 	__asm__ volatile("hint #17");
 }
-bool spe_supported(void)
-{
-	uint64_t features;
-	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
-	return (features & ID_AA64DFR0_PMS_MASK) > 0ULL;
-}
 void spe_enable(bool el2_unused)
 {
 	uint64_t v;
-	if (!spe_supported())
-		return;
 	if (el2_unused) {
 		/*
 		 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
@@ -69,9 +59,6 @@ void spe_disable(void)
 {
 	uint64_t v;
-	if (!spe_supported())
-		return;
 	/* Drain buffered data */
 	psb_csync();
 	dsbnsh();
@@ -85,7 +72,7 @@ void spe_disable(void)
 static void *spe_drain_buffers_hook(const void *arg)
 {
-	if (!spe_supported())
+	if (!is_feat_spe_supported())
 		return (void *)-1;
 	/* Drain buffered data */

make_helpers/defaults.mk (4 changed lines)

@@ -355,11 +355,11 @@ V := 0
 WARMBOOT_ENABLE_DCACHE_EARLY := 0
 # Build option to enable/disable the Statistical Profiling Extensions
-ENABLE_SPE_FOR_LOWER_ELS := 1
+ENABLE_SPE_FOR_NS := 2
 # SPE is only supported on AArch64 so disable it on AArch32.
 ifeq (${ARCH},aarch32)
-override ENABLE_SPE_FOR_LOWER_ELS := 0
+override ENABLE_SPE_FOR_NS := 0
 endif
 # Include Memory Tagging Extension registers in cpu context. This must be set

plat/allwinner/common/allwinner-common.mk (2 changed lines)

@@ -87,7 +87,7 @@ endif
 COLD_BOOT_SINGLE_CPU := 1
 # Do not enable SPE (not supported on ARM v8.0).
-ENABLE_SPE_FOR_LOWER_ELS := 0
+ENABLE_SPE_FOR_NS := 0
 # Do not enable SVE (not supported on ARM v8.0).
 ENABLE_SVE_FOR_NS := 0

plat/arm/board/arm_fpga/fpga_bl31_setup.c (2 changed lines)

@@ -364,7 +364,7 @@ static void fpga_prepare_dtb(void)
 	fpga_dtb_update_clock(fdt, system_freq);
 	/* Check whether we support the SPE PMU. Remove the DT node if not. */
-	if (!spe_supported()) {
+	if (!is_feat_spe_supported()) {
 		int node = fdt_node_offset_by_compatible(fdt, 0,
 				"arm,statistical-profiling-extension-v1");

plat/arm/board/fvp/fvp_pm.c (7 changed lines)

@@ -6,6 +6,7 @@
 #include <assert.h>
+#include <arch_features.h>
 #include <arch_helpers.h>
 #include <common/debug.h>
 #include <drivers/arm/gicv3.h>
@@ -53,13 +54,13 @@ static void fvp_cluster_pwrdwn_common(void)
 {
 	uint64_t mpidr = read_mpidr_el1();
-#if ENABLE_SPE_FOR_LOWER_ELS
-	/*
-	 * On power down we need to disable statistical profiling extensions
-	 * before exiting coherency.
-	 */
-	spe_disable();
-#endif
+	if (is_feat_spe_supported()) {
+		/*
+		 * On power down we need to disable statistical profiling extensions
+		 * before exiting coherency.
+		 */
+		spe_disable();
+	}
/* Disable coherency if this cluster is to be turned off */
fvp_interconnect_disable();

plat/arm/board/fvp/platform.mk (3 changed lines)

@@ -470,6 +470,9 @@ ENABLE_FEAT_FGT := 2
 ENABLE_FEAT_HCX := 2
 ENABLE_FEAT_TCR2 := 2
 ENABLE_FEAT_VHE := 2
+ENABLE_MPAM_FOR_LOWER_ELS := 2
 ifeq (${SPMC_AT_EL3}, 1)
 PLAT_BL_COMMON_SOURCES += plat/arm/board/fvp/fvp_el3_spmc.c
 endif

plat/arm/board/tc/platform.mk (2 changed lines)

@@ -161,7 +161,7 @@ override CTX_INCLUDE_AARCH32_REGS := 0
 override CTX_INCLUDE_PAUTH_REGS := 1
-override ENABLE_SPE_FOR_LOWER_ELS := 0
+override ENABLE_SPE_FOR_NS := 0
 override ENABLE_AMU := 1
 override ENABLE_AMU_AUXILIARY_COUNTERS := 1

plat/qti/msm8916/platform.mk (2 changed lines)

@@ -44,7 +44,7 @@ WARMBOOT_ENABLE_DCACHE_EARLY := 1
 # Disable features unsupported in ARMv8.0
 ENABLE_AMU := 0
-ENABLE_SPE_FOR_LOWER_ELS := 0
+ENABLE_SPE_FOR_NS := 0
 ENABLE_SVE_FOR_NS := 0
 # MSM8916 uses ARM Cortex-A53 r0p0 so likely all the errata apply

services/std_svc/sdei/sdei_intr_mgmt.c (2 changed lines)

@@ -270,7 +270,7 @@ static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ct
 	 * HCR_EL2.E2H = 1 and HCR_EL2.TGE = 1
 	 */
 	u_register_t hcr_el2 = read_hcr();
-	bool el_is_in_host = is_armv8_1_vhe_present() &&
+	bool el_is_in_host = (read_feat_vhe_id_field() != 0U) &&
 			     (hcr_el2 & HCR_TGE_BIT) &&
 			     (hcr_el2 & HCR_E2H_BIT);
