
Merge changes from topic "spectre_bhb" into integration

* changes:
  fix(security): loop workaround for CVE-2022-23960 for Cortex-A76
  refactor(el3-runtime): change Cortex-A76 implementation of CVE-2018-3639
Branch: pull/1985/head
Authored by Madhukar Pappireddy, 3 years ago; committed by TrustedFirmware Code Review
Commit: a5d15b4c2d

Files changed:
  include/lib/cpus/aarch64/cortex_a76.h (29 lines changed)
  lib/cpus/aarch64/cortex_a76.S (225 lines changed)

include/lib/cpus/aarch64/cortex_a76.h (29 lines changed)

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,38 +10,41 @@
#include <lib/utils_def.h>
/* Cortex-A76 MIDR for revision 0 */
#define CORTEX_A76_MIDR U(0x410fd0b0)

/* Cortex-A76 loop count for CVE-2022-23960 mitigation */
#define CORTEX_A76_BHB_LOOP_COUNT U(24)

/*******************************************************************************
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CORTEX_A76_CPUPWRCTLR_EL1 S3_0_C15_C2_7
#define CORTEX_A76_CPUECTLR_EL1 S3_0_C15_C1_4
#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2 (ULL(3) << 24)
#define CORTEX_A76_CPUECTLR_EL1_BIT_51 (ULL(1) << 51)

/*******************************************************************************
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define CORTEX_A76_CPUACTLR_EL1 S3_0_C15_C1_0
#define CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION (ULL(1) << 6)
#define CORTEX_A76_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
#define CORTEX_A76_CPUACTLR2_EL1 S3_0_C15_C1_1
#define CORTEX_A76_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
#define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE (ULL(1) << 16)
#define CORTEX_A76_CPUACTLR3_EL1 S3_0_C15_C1_2
#define CORTEX_A76_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)

/* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
#define CORTEX_A76_CORE_PWRDN_EN_MASK U(0x1)
#endif /* CORTEX_A76_H */
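CORTEX_A76_MIDR above is the value matched against MIDR_EL1 to decide whether a Cortex-A76 workaround applies, and CORTEX_A76_BHB_LOOP_COUNT feeds the new CVE-2022-23960 loop workaround used in the assembly file that follows. As a quick reference, the standalone sketch below (not TF-A code; the macro names are local to the example) decodes 0x410fd0b0 using the architectural MIDR_EL1 layout: implementer 0x41 (Arm), part number 0xD0B (Cortex-A76), variant 0 and revision 0, i.e. r0p0.

#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPL(m)	(((m) >> 24) & 0xffU)	/* 0x41: Arm Ltd. */
#define MIDR_VAR(m)	(((m) >> 20) & 0xfU)	/* major revision, the "rN" in rNpM */
#define MIDR_PART(m)	(((m) >> 4) & 0xfffU)	/* 0xD0B: Cortex-A76 */
#define MIDR_REV(m)	((m) & 0xfU)		/* minor revision, the "pM" in rNpM */

int main(void)
{
	uint32_t midr = 0x410fd0b0U;		/* CORTEX_A76_MIDR, i.e. r0p0 */

	printf("impl=0x%x var=%u part=0x%x rev=%u\n",
	       MIDR_IMPL(midr), MIDR_VAR(midr), MIDR_PART(midr), MIDR_REV(midr));
	return 0;
}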

lib/cpus/aarch64/cortex_a76.S (225 lines changed)

@@ -7,11 +7,11 @@
#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <services/arm_arch_svc.h>
#include "wa_cve_2022_23960_bhb.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
@@ -35,59 +35,17 @@
*
* The macro saves x2-x3 to the context. In the fast path
* x0-x3 registers do not need to be restored as the calling
* context will have saved them.
* context will have saved them. The macro also saves
* x29-x30 to the context in the sync_exception path.
*/
.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
.if \_is_sync_exception
/*
* Ensure SMC is coming from A64/A32 state on #0
* with W0 = SMCCC_ARCH_WORKAROUND_2
*
* This sequence evaluates as:
* (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
* allowing use of a single branch operation
*/
orr w2, wzr, #SMCCC_ARCH_WORKAROUND_2
cmp x0, x2
mrs x3, esr_el3
mov_imm w2, \_esr_el3_val
ccmp w2, w3, #0, eq
/*
* Static predictor will predict a fall-through, optimizing
* the `SMCCC_ARCH_WORKAROUND_2` fast path.
*/
bne 1f
/*
* The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
* fast path.
*/
cmp x1, xzr /* enable/disable check */
/*
* When the calling context wants mitigation disabled,
* we program the mitigation disable function in the
* CPU context, which gets invoked on subsequent exits from
* EL3 via the `el3_exit` function. Otherwise NULL is
* programmed in the CPU context, which results in caller's
* inheriting the EL3 mitigation state (enabled) on subsequent
* `el3_exit`.
*/
mov x0, xzr
adr x1, cortex_a76_disable_wa_cve_2018_3639
csel x1, x1, x0, eq
str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
mrs x2, CORTEX_A76_CPUACTLR2_EL1
orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
csel x3, x3, x1, eq
msr CORTEX_A76_CPUACTLR2_EL1, x3
exception_return /* exception_return contains ISB */
stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
mov_imm w2, \_esr_el3_val
bl apply_cve_2018_3639_sync_wa
ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
.endif
1:
/*
* Always enable v4 mitigation during EL3 execution. This is not
* required for the fast path above because it does not perform any
@@ -105,8 +63,10 @@
*/
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
.endm
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
vector_base cortex_a76_wa_cve_vbar
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200
@@ -153,22 +113,54 @@ end_vector_entry cortex_a76_serror_sp_elx
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_aarch64
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64
vector_entry cortex_a76_irq_aarch64
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b irq_aarch64
end_vector_entry cortex_a76_irq_aarch64
vector_entry cortex_a76_fiq_aarch64
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64
vector_entry cortex_a76_serror_aarch64
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b serror_aarch64
end_vector_entry cortex_a76_serror_aarch64
@@ -177,24 +169,130 @@ end_vector_entry cortex_a76_serror_aarch64
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_aarch32
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32
vector_entry cortex_a76_irq_aarch32
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b irq_aarch32
end_vector_entry cortex_a76_irq_aarch32
vector_entry cortex_a76_fiq_aarch32
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32
vector_entry cortex_a76_serror_aarch32
#if WORKAROUND_CVE_2022_23960
apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */
#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
b serror_aarch32
end_vector_entry cortex_a76_serror_aarch32
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
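Each vector entry above begins with apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT, the CVE-2022-23960 (Spectre-BHB) loop workaround: a counted loop of taken branches followed by a speculation barrier, executed on entry to EL3 so that branch history trained at a lower EL cannot steer speculation inside EL3. The macro itself lives in wa_cve_2022_23960_bhb.S and is not part of this diff; the inline-assembly sketch below only illustrates the general shape of such a loop (24 iterations on Cortex-A76) and is not the exact TF-A sequence.

/* Conceptual sketch only -- not the TF-A macro. */
static inline void bhb_clearing_loop(void)
{
	__asm__ volatile(
		"	mov	x0, #24		\n"	/* CORTEX_A76_BHB_LOOP_COUNT */
		"1:	b	2f		\n"	/* taken branch */
		"2:	subs	x0, x0, #1	\n"
		"	bne	1b		\n"	/* taken until the count runs out */
		"	dsb	sy		\n"	/* speculation barrier */
		"	isb			\n"
		: : : "x0", "cc", "memory");
}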
#if DYNAMIC_WORKAROUND_CVE_2018_3639
/*
* -----------------------------------------------------------------
* This function applies the mitigation for CVE-2018-3639
* specifically for sync exceptions. It implements a fast path
* where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
* running in AArch64 will take the fast path and return early.
*
* In the fast path x0-x3 registers do not need to be restored as the
* calling context will have saved them.
*
* The caller must pass the ESR_EL3 value to compare against in x2, and
* must save and restore the clobbered registers to/from the context
* outside of this function, before jumping to the main runtime vector
* table entry.
*
* Shall clobber: x0-x3, x30
* -----------------------------------------------------------------
*/
func apply_cve_2018_3639_sync_wa
/*
* Ensure SMC is coming from A64/A32 state on #0
* with W0 = SMCCC_ARCH_WORKAROUND_2
*
* This sequence evaluates as:
* (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
* allowing use of a single branch operation
* X2 is populated outside this function with the expected ESR_EL3 value for SMC #0.
*/
orr w3, wzr, #SMCCC_ARCH_WORKAROUND_2
cmp x0, x3
mrs x3, esr_el3
ccmp w2, w3, #0, eq
/*
* Static predictor will predict a fall-through, optimizing
* the `SMCCC_ARCH_WORKAROUND_2` fast path.
*/
bne 1f
/*
* The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
* fast path.
*/
cmp x1, xzr /* enable/disable check */
/*
* When the calling context wants mitigation disabled,
* we program the mitigation disable function in the
* CPU context, which gets invoked on subsequent exits from
* EL3 via the `el3_exit` function. Otherwise NULL is
* programmed in the CPU context, which results in caller's
* inheriting the EL3 mitigation state (enabled) on subsequent
* `el3_exit`.
*/
mov x0, xzr
adr x1, cortex_a76_disable_wa_cve_2018_3639
csel x1, x1, x0, eq
str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
mrs x2, CORTEX_A76_CPUACTLR2_EL1
orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
csel x3, x3, x1, eq
msr CORTEX_A76_CPUACTLR2_EL1, x3
ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
/*
* `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
*/
exception_return /* exception_return contains ISB */
1:
ret
endfunc apply_cve_2018_3639_sync_wa
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
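Restating the fast path of apply_cve_2018_3639_sync_wa in C may help when reading the register-level sequence: the ccmp collapses the two checks (W0 equals SMCCC_ARCH_WORKAROUND_2 and ESR_EL3 matches an SMC #0 from the expected state) into a single conditional branch, and the csel pair then chooses between "disable requested" and "inherit the EL3-enabled state". The sketch below is an illustration with simplified types and locally defined names; the authoritative logic is the assembly above.

#include <stdbool.h>
#include <stdint.h>

#define SMCCC_ARCH_WORKAROUND_2			0x80007fffULL
#define CPUACTLR2_DISABLE_LOAD_PASS_STORE	(1ULL << 16)	/* stand-in name */

typedef void (*wa_disable_fn_t)(void);

/* Returns true when the SMC is handled entirely in the fast path. */
static bool cve_2018_3639_fast_path(uint64_t x0, uint64_t x1,
				    uint64_t esr_el3, uint64_t smc0_esr,
				    wa_disable_fn_t disable_fn,
				    uint64_t *ctx_disable_slot,
				    uint64_t *cpuactlr2)
{
	/* (W0 == SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3 == SMC#0) : NE */
	if (x0 != SMCCC_ARCH_WORKAROUND_2 || esr_el3 != smc0_esr)
		return false;			/* slow path: fall through to 1: */

	if (x1 == 0U) {
		/* Caller wants the mitigation disabled: arm the per-CPU
		 * disable hook for later el3_exit and clear the bit now. */
		*ctx_disable_slot = (uintptr_t)disable_fn;
		*cpuactlr2 &= ~CPUACTLR2_DISABLE_LOAD_PASS_STORE;
	} else {
		/* NULL hook: the caller inherits EL3's enabled state. */
		*ctx_disable_slot = 0U;
		*cpuactlr2 |= CPUACTLR2_DISABLE_LOAD_PASS_STORE;
	}
	return true;				/* exception_return to the caller */
}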
/* --------------------------------------------------
@@ -519,6 +617,15 @@ func check_errata_1165522
#endif
endfunc check_errata_1165522
func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
mov x0, #ERRATA_APPLIES
#else
mov x0, #ERRATA_MISSING
#endif /* WORKAROUND_CVE_2022_23960 */
ret
endfunc check_errata_cve_2022_23960
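check_errata_cve_2022_23960 follows the usual TF-A pattern in which a workaround's applicability is fixed at build time by a make-level flag rather than probed at runtime; cortex_a76_errata_report later feeds the result to report_errata for the boot-time errata summary. In C terms the function amounts to the sketch below (the ERRATA_* values here are placeholders, not TF-A's actual definitions).

/* Placeholder values for illustration; TF-A defines its own ERRATA_* codes. */
enum { ERRATA_APPLIES = 1, ERRATA_MISSING = 2 };

static int check_errata_cve_2022_23960(void)
{
#if WORKAROUND_CVE_2022_23960		/* build-time flag from the platform makefiles */
	return ERRATA_APPLIES;
#else
	return ERRATA_MISSING;
#endif
}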
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A76.
* Shall clobber: x0-x19
@@ -590,16 +697,31 @@ func cortex_a76_reset_func
* The Cortex-A76 generic vectors are overwritten to use the vectors
* defined above. This is required in order to apply mitigation
* against CVE-2018-3639 on exception entry from lower ELs.
* If the vector table below is installed, skip overriding it again for
* CVE-2022-23960, as both mitigations use the same VBAR.
*/
adr x0, cortex_a76_wa_cve_2018_3639_a76_vbar
adr x0, cortex_a76_wa_cve_vbar
msr vbar_el3, x0
isb
b 2f
#endif /* IMAGE_BL31 */
1:
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
#endif /* WORKAROUND_CVE_2018_3639 */
#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
/*
* The Cortex-A76 generic vectors are overridden to apply errata
* mitigation on exception entry from lower ELs. This will be bypassed
* if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
*/
adr x0, cortex_a76_wa_cve_vbar
msr vbar_el3, x0
isb
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
2:
#if ERRATA_DSU_798953
bl errata_dsu_798953_wa
#endif
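The two VBAR_EL3 writes in this hunk install the same table, cortex_a76_wa_cve_vbar, from two different paths: the dynamic CVE-2018-3639 path, which then branches to label 2: and skips the second block, and the standalone CVE-2022-23960 path. The C sketch below restates that selection; set_vbar_el3 and the boolean inputs stand in for the surrounding assembly and build flags and are not TF-A functions.

#include <stdbool.h>

extern const char cortex_a76_wa_cve_vbar[];	/* the vector table above */
extern void set_vbar_el3(const void *vectors);	/* stand-in for msr vbar_el3 + isb */

static void a76_install_el3_vectors(bool dynamic_cve_2018_3639_active,
				    bool workaround_cve_2022_23960)
{
	if (dynamic_cve_2018_3639_active) {
		/* CVE-2018-3639 dynamic path installs the shared table and
		 * branches over the CVE-2022-23960 block ("b 2f"). */
		set_vbar_el3(cortex_a76_wa_cve_vbar);
		return;
	}
	if (workaround_cve_2022_23960) {
		/* Same table, same VBAR: only needed if not installed above. */
		set_vbar_el3(cortex_a76_wa_cve_vbar);
	}
}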
@@ -656,6 +778,7 @@ func cortex_a76_errata_report
report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
report_errata WORKAROUND_CVE_2022_23960, cortex_a76, cve_2022_23960
ldp x8, x30, [sp], #16
ret
