
Merge changes from topic "mp/simd_ctxt_mgmt" into integration

* changes:
  feat(fvp): allow SIMD context to be put in TZC DRAM
  docs(simd): introduce CTX_INCLUDE_SVE_REGS build flag
  feat(fvp): add Cactus partition manifest for EL3 SPMC
  chore(simd): remove unused macros and utilities for FP
  feat(el3-spmc): support simd context management upon world switch
  feat(trusty): switch to simd_ctx_save/restore apis
  feat(pncd): switch to simd_ctx_save/restore apis
  feat(spm-mm): switch to simd_ctx_save/restore APIs
  feat(simd): add rules to rationalize simd ctxt mgmt
  feat(simd): introduce simd context helper APIs
  feat(simd): add routines to save, restore sve state
  feat(simd): add sve state to simd ctxt struct
  feat(simd): add data struct for simd ctxt management
Manish V Badarkhe, 3 months ago; committed by TrustedFirmware Code Review
Branch: pull/2005/merge
Parent commit: 4b6e4e618e
Makefile                                           |  66
bl31/bl31.mk                                       |   1
changelog.yaml                                     |   3
docs/getting_started/build-options.rst             |  47
include/lib/el3_runtime/aarch64/context.h          |  78
include/lib/el3_runtime/simd_ctx.h                 |  97
include/lib/extensions/sve.h                       |   8
lib/el3_runtime/aarch64/context.S                  | 303
lib/el3_runtime/simd_ctx.c                         |  81
make_helpers/defaults.mk                           |   7
plat/arm/board/fvp/fdts/fvp_cactus_sp_manifest.dts |  28
plat/arm/board/fvp/include/plat.ld.S               |  30
plat/arm/board/fvp/platform.mk                     |   4
services/spd/pncd/pncd_common.c                    |   6
services/spd/pncd/pncd_main.c                      |   6
services/spd/trusty/trusty.c                       |  17
services/std_svc/spm/spm_mm/spm_mm_main.c          |  23
services/std_svc/spmd/spmd_main.c                  |  26
18 files changed

Makefile (66 lines changed)

@@ -457,6 +457,9 @@ ifneq (${SPD},none)
ifeq ($(SPMC_AT_EL3),1)
$(error SPM cannot be enabled in both S-EL2 and EL3.)
endif
ifeq ($(CTX_INCLUDE_SVE_REGS),1)
$(error SVE context management not needed with Hafnium SPMC.)
endif
endif
ifeq ($(findstring optee_sp,$(ARM_SPMC_MANIFEST_DTS)),optee_sp)
@@ -975,25 +978,52 @@ ifeq (${ENABLE_SME_FOR_SWD},1)
endif
endif #(ENABLE_SME_FOR_SWD)
# Enabling SVE for SWD requires enabling SVE for NWD due to the ENABLE_FEAT
# mechanism.
ifeq (${ENABLE_SVE_FOR_SWD},1)
ifeq (${ENABLE_SVE_FOR_NS},0)
$(error "ENABLE_SVE_FOR_SWD requires ENABLE_SVE_FOR_NS")
endif
endif #(ENABLE_SVE_FOR_SWD)
ifeq (${ENABLE_SVE_FOR_NS},0)
$(error "ENABLE_SVE_FOR_SWD requires ENABLE_SVE_FOR_NS")
endif
endif
# SVE and SME cannot be used with CTX_INCLUDE_FPREGS since the secure manager
# does its own context management, including the FPU registers.
ifeq (${CTX_INCLUDE_FPREGS},1)
ifneq (${ENABLE_SME_FOR_NS},0)
$(error "ENABLE_SME_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
endif
# Enabling SVE for both worlds typically requires the context
# management of SVE registers. The only exception is the SPMC at S-EL2.
ifeq (${ENABLE_SVE_FOR_SWD}, 1)
ifneq (${ENABLE_SVE_FOR_NS}, 0)
ifeq (${CTX_INCLUDE_SVE_REGS}-$(SPMD_SPM_AT_SEL2),0-0)
$(warning "ENABLE_SVE_FOR_SWD and ENABLE_SVE_FOR_NS together require CTX_INCLUDE_SVE_REGS")
endif
endif
endif
ifeq (${ENABLE_SVE_FOR_NS},1)
# Warning instead of error due to CI dependency on this
$(warning "ENABLE_SVE_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
$(warning "Forced ENABLE_SVE_FOR_NS=0")
override ENABLE_SVE_FOR_NS := 0
endif
# Enabling SVE in either world while enabling CTX_INCLUDE_FPREGS requires
# CTX_INCLUDE_SVE_REGS to be enabled due to the architectural dependency between
# FP and SVE registers.
ifeq (${CTX_INCLUDE_FPREGS}, 1)
ifneq (${ENABLE_SVE_FOR_NS},0)
ifeq (${CTX_INCLUDE_SVE_REGS},0)
# Warning instead of error due to CI dependency on this
$(warning "CTX_INCLUDE_FPREGS and ENABLE_SVE_FOR_NS together require CTX_INCLUDE_SVE_REGS")
$(warning "Forced ENABLE_SVE_FOR_NS=0")
override ENABLE_SVE_FOR_NS := 0
endif
endif
endif #(CTX_INCLUDE_FPREGS)
# SVE context management is only required if the secure world has access to SVE/FP
# functionality.
ifeq (${CTX_INCLUDE_SVE_REGS},1)
ifeq (${ENABLE_SVE_FOR_SWD},0)
$(error "CTX_INCLUDE_SVE_REGS requires ENABLE_SVE_FOR_SWD to also be enabled")
endif
endif
# SME cannot be used with CTX_INCLUDE_FPREGS since the SPM does its own context
# management, including the FPU registers.
ifeq (${CTX_INCLUDE_FPREGS},1)
ifneq (${ENABLE_SME_FOR_NS},0)
$(error "ENABLE_SME_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
endif
endif #(CTX_INCLUDE_FPREGS)
ifeq ($(DRTM_SUPPORT),1)
@@ -1130,6 +1160,7 @@ $(eval $(call assert_booleans,\
CREATE_KEYS \
CTX_INCLUDE_AARCH32_REGS \
CTX_INCLUDE_FPREGS \
CTX_INCLUDE_SVE_REGS \
CTX_INCLUDE_EL2_REGS \
CTX_INCLUDE_MPAM_REGS \
DEBUG \
@@ -1168,6 +1199,7 @@ $(eval $(call assert_booleans,\
SEPARATE_CODE_AND_RODATA \
SEPARATE_BL2_NOLOAD_REGION \
SEPARATE_NOBITS_REGION \
SEPARATE_SIMD_SECTION \
SPIN_ON_BL1_EXIT \
SPM_MM \
SPMC_AT_EL3 \
@@ -1288,6 +1320,7 @@ $(eval $(call add_defines,\
COLD_BOOT_SINGLE_CPU \
CTX_INCLUDE_AARCH32_REGS \
CTX_INCLUDE_FPREGS \
CTX_INCLUDE_SVE_REGS \
CTX_INCLUDE_PAUTH_REGS \
CTX_INCLUDE_MPAM_REGS \
EL3_EXCEPTION_HANDLING \
@@ -1340,6 +1373,7 @@ $(eval $(call add_defines,\
SEPARATE_CODE_AND_RODATA \
SEPARATE_BL2_NOLOAD_REGION \
SEPARATE_NOBITS_REGION \
SEPARATE_SIMD_SECTION \
RECLAIM_INIT_CODE \
SPD_${SPD} \
SPIN_ON_BL1_EXIT \

bl31/bl31.mk (1 line changed)

@@ -47,6 +47,7 @@ BL31_SOURCES += bl31/bl31_main.c \
plat/common/aarch64/platform_mp_stack.S \
services/arm_arch_svc/arm_arch_svc_setup.c \
services/std_svc/std_svc_setup.c \
lib/el3_runtime/simd_ctx.c \
${PSCI_LIB_SOURCES} \
${SPMD_SOURCES} \
${SPM_MM_SOURCES} \

changelog.yaml (3 lines changed)

@@ -805,6 +805,9 @@ subsections:
- title: RAS
scope: ras
- title: SIMD
scope: simd
- title: FCONF
scope: fconf

docs/getting_started/build-options.rst (47 lines changed)

@@ -204,6 +204,13 @@ Common build options
Note that Pointer Authentication is enabled for Non-secure world irrespective
of the value of this flag if the CPU supports it.
- ``CTX_INCLUDE_SVE_REGS``: Boolean option that, when set to 1, will cause the
SVE registers to be included when saving and restoring the CPU context. Note
that this build option requires ``ENABLE_SVE_FOR_SWD`` to be enabled. In
general, it is recommended to perform SVE context management in lower ELs
and to skip it in EL3, due to the additional cost of maintaining large data
structures to track the SVE state. Hence, the default value is 0.
- ``DEBUG``: Chooses between a debug and release build. It can take either 0
(release) or 1 (debug) as values. 0 is the default.
@@ -505,21 +512,26 @@ Common build options
- ``ENABLE_SVE_FOR_NS``: Numeric value to enable Scalable Vector Extension
(SVE) for the Non-secure world only. SVE is an optional architectural feature
for AArch64. Note that when SVE is enabled for the Non-secure world, access
to SIMD and floating-point functionality from the Secure world is disabled by
default and controlled with ENABLE_SVE_FOR_SWD.
This is to avoid corruption of the Non-secure world data in the Z-registers
which are aliased by the SIMD and FP registers. The build option is not
compatible with the ``CTX_INCLUDE_FPREGS`` build option, and will raise an
assert on platforms where SVE is implemented and ``ENABLE_SVE_FOR_NS``
enabled. This flag can take the values 0 to 2, to align with the
``ENABLE_FEAT`` mechanism. At this time, this build option cannot be
used on systems that have SPM_MM enabled. The default is 1.
- ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE for the Secure world.
SVE is an optional architectural feature for AArch64. Note that this option
requires ENABLE_SVE_FOR_NS to be enabled. The default is 0 and it is
automatically disabled when the target architecture is AArch32.
for AArch64. This flag can take the values 0 to 2, to align with the
``ENABLE_FEAT`` mechanism. At this time, this build option cannot be used on
systems that have SPM_MM enabled. The default value is 2.
Note that when SVE is enabled for the Non-secure world, access
to SVE, SIMD and floating-point functionality from the Secure world is
independently controlled by build option ``ENABLE_SVE_FOR_SWD``. When enabling
``CTX_INCLUDE_FPREGS`` and ``ENABLE_SVE_FOR_NS`` together, it is mandatory to
enable ``CTX_INCLUDE_SVE_REGS``. This is to avoid corruption of the Non-secure
world data in the Z-registers which are aliased by the SIMD and FP registers.
- ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE and FPU/SIMD functionality
for the Secure world. SVE is an optional architectural feature for AArch64.
The default is 0 and it is automatically disabled when the target architecture
is AArch32.
.. note::
This build flag requires ``ENABLE_SVE_FOR_NS`` to be enabled. When enabling
``ENABLE_SVE_FOR_SWD``, a developer must carefully consider whether
``CTX_INCLUDE_SVE_REGS`` is also needed.
- ``ENABLE_STACK_PROTECTOR``: String option to enable the stack protection
checks in GCC. Allowed values are "all", "strong", "default" and "none". The
@@ -885,6 +897,11 @@ Common build options
flag is disabled by default and NOLOAD sections are placed in RAM immediately
following the loaded firmware image.
- ``SEPARATE_SIMD_SECTION``: Setting this option to ``1`` allows the SIMD context
data structures to be put in a dedicated memory region as decided by the
platform integrator. The default value is ``0``, which means the SIMD context
is placed in the BSS section of the EL3 firmware.
- ``SMC_PCI_SUPPORT``: This option allows platforms to handle PCI configuration
access requests via a standard SMCCC defined in `DEN0115`_. When combined with
UEFI+ACPI this can provide a certain amount of OS forward compatibility

include/lib/el3_runtime/aarch64/context.h (78 lines changed)

@@ -10,6 +10,7 @@
#include <lib/el3_runtime/context_el1.h>
#include <lib/el3_runtime/context_el2.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/simd_ctx.h>
#include <lib/utils_def.h>
/*******************************************************************************
@@ -82,60 +83,11 @@
#define CTX_EL3STATE_END U(0x50) /* Align to the next 16 byte boundary */
#endif /* FFH_SUPPORT */
/*******************************************************************************
* Constants that allow assembler code to access members of the 'fp_regs'
* structure at their correct offsets.
******************************************************************************/
# define CTX_FPREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#if CTX_INCLUDE_FPREGS
#define CTX_FP_Q0 U(0x0)
#define CTX_FP_Q1 U(0x10)
#define CTX_FP_Q2 U(0x20)
#define CTX_FP_Q3 U(0x30)
#define CTX_FP_Q4 U(0x40)
#define CTX_FP_Q5 U(0x50)
#define CTX_FP_Q6 U(0x60)
#define CTX_FP_Q7 U(0x70)
#define CTX_FP_Q8 U(0x80)
#define CTX_FP_Q9 U(0x90)
#define CTX_FP_Q10 U(0xa0)
#define CTX_FP_Q11 U(0xb0)
#define CTX_FP_Q12 U(0xc0)
#define CTX_FP_Q13 U(0xd0)
#define CTX_FP_Q14 U(0xe0)
#define CTX_FP_Q15 U(0xf0)
#define CTX_FP_Q16 U(0x100)
#define CTX_FP_Q17 U(0x110)
#define CTX_FP_Q18 U(0x120)
#define CTX_FP_Q19 U(0x130)
#define CTX_FP_Q20 U(0x140)
#define CTX_FP_Q21 U(0x150)
#define CTX_FP_Q22 U(0x160)
#define CTX_FP_Q23 U(0x170)
#define CTX_FP_Q24 U(0x180)
#define CTX_FP_Q25 U(0x190)
#define CTX_FP_Q26 U(0x1a0)
#define CTX_FP_Q27 U(0x1b0)
#define CTX_FP_Q28 U(0x1c0)
#define CTX_FP_Q29 U(0x1d0)
#define CTX_FP_Q30 U(0x1e0)
#define CTX_FP_Q31 U(0x1f0)
#define CTX_FP_FPSR U(0x200)
#define CTX_FP_FPCR U(0x208)
#if CTX_INCLUDE_AARCH32_REGS
#define CTX_FP_FPEXC32_EL2 U(0x210)
#define CTX_FPREGS_END U(0x220) /* Align to the next 16 byte boundary */
#else
#define CTX_FPREGS_END U(0x210) /* Align to the next 16 byte boundary */
#endif /* CTX_INCLUDE_AARCH32_REGS */
#else
#define CTX_FPREGS_END U(0)
#endif /* CTX_INCLUDE_FPREGS */
/*******************************************************************************
* Registers related to CVE-2018-3639
******************************************************************************/
#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END)
#define CTX_CVE_2018_3639_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
#define CTX_CVE_2018_3639_DISABLE U(0)
#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
@@ -230,9 +182,6 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#endif
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
@@ -252,15 +201,6 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
/*
* AArch64 floating point register context structure for preserving
* the floating point state during switches from one security state to
* another.
*/
#if CTX_INCLUDE_FPREGS
DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
#endif
/*
* Miscellaneous registers used by EL3 firmware to maintain its state
* across exception entries and exits
@@ -300,9 +240,6 @@ typedef struct cpu_context {
gp_regs_t gpregs_ctx;
el3_state_t el3state_ctx;
#if CTX_INCLUDE_FPREGS
fp_regs_t fpregs_ctx;
#endif
cve_2018_3639_t cve_2018_3639_ctx;
#if ERRATA_SPECULATIVE_AT
@@ -335,9 +272,6 @@ extern per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
/* Macros to access members of the 'cpu_context_t' structure */
#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx)
#if CTX_INCLUDE_FPREGS
# define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx)
#endif
#define get_el1_sysregs_ctx(h) (&((cpu_context_t *) h)->el1_sysregs_ctx)
#if CTX_INCLUDE_EL2_REGS
# define get_el2_sysregs_ctx(h) (&((cpu_context_t *) h)->el2_sysregs_ctx)
@@ -364,10 +298,6 @@ CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx),
CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
assert_core_context_el3state_offset_mismatch);
#if CTX_INCLUDE_FPREGS
CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
assert_core_context_fp_offset_mismatch);
#endif /* CTX_INCLUDE_FPREGS */
CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx),
assert_core_context_cve_2018_3639_offset_mismatch);
@@ -422,8 +352,8 @@ CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx),
* Function prototypes
******************************************************************************/
#if CTX_INCLUDE_FPREGS
void fpregs_context_save(fp_regs_t *regs);
void fpregs_context_restore(fp_regs_t *regs);
void fpregs_context_save(simd_regs_t *regs);
void fpregs_context_restore(simd_regs_t *regs);
#endif
#endif /* __ASSEMBLER__ */

include/lib/el3_runtime/simd_ctx.h (97 lines changed)

@@ -0,0 +1,97 @@
/*
* Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2022, Google LLC. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SIMD_CTX_H
#define SIMD_CTX_H
/*******************************************************************************
* Constants that allow assembler code to access members of the 'simd_context'
* structure at their correct offsets.
******************************************************************************/
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
#if CTX_INCLUDE_SVE_REGS
#define SIMD_VECTOR_LEN_BYTES (SVE_VECTOR_LEN / 8) /* Length of vector in bytes */
#elif CTX_INCLUDE_FPREGS
#define SIMD_VECTOR_LEN_BYTES U(16) /* 128 bits fixed vector length for FPU */
#endif /* CTX_INCLUDE_SVE_REGS */
#define CTX_SIMD_VECTORS U(0)
/* there are 32 vector registers, each of size SIMD_VECTOR_LEN_BYTES */
#define CTX_SIMD_FPSR (CTX_SIMD_VECTORS + (32 * SIMD_VECTOR_LEN_BYTES))
#define CTX_SIMD_FPCR (CTX_SIMD_FPSR + 8)
#if CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS
#define CTX_SIMD_FPEXC32 (CTX_SIMD_FPCR + 8)
#define CTX_SIMD_PREDICATES (CTX_SIMD_FPEXC32 + 16)
#else
#define CTX_SIMD_PREDICATES (CTX_SIMD_FPCR + 8)
#endif /* CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS */
/*
* Each predicate register is 1/8th the size of a vector register and there are 16
* predicate registers
*/
#define CTX_SIMD_FFR (CTX_SIMD_PREDICATES + (16 * (SIMD_VECTOR_LEN_BYTES / 8)))
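/*
 * Illustrative offsets (a sketch, assuming a hypothetical build with
 * SVE_VECTOR_LEN = 512 and no FPEXC32 slot):
 *   SIMD_VECTOR_LEN_BYTES = 512 / 8        = 64
 *   CTX_SIMD_VECTORS      =                  0x0
 *   CTX_SIMD_FPSR         = 32 * 64        = 0x800
 *   CTX_SIMD_FPCR         = 0x800 + 8      = 0x808
 *   CTX_SIMD_PREDICATES   = 0x808 + 8      = 0x810
 *   CTX_SIMD_FFR          = 0x810 + 16 * 8 = 0x890
 */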
#ifndef __ASSEMBLER__
#include <stdbool.h>
#include <stdint.h>
#include <lib/cassert.h>
/*
* Please don't change the order of fields in this struct, as that may violate
* alignment requirements and affect how assembly code accesses members of this
* struct.
*/
typedef struct {
uint8_t vectors[32][SIMD_VECTOR_LEN_BYTES];
uint8_t fpsr[8];
uint8_t fpcr[8];
#if CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS
/* 16 bytes to align to next 16 byte boundary when CTX_INCLUDE_SVE_REGS is 0 */
uint8_t fpexc32_el2[16];
#endif
#if CTX_INCLUDE_SVE_REGS
/* FFR and each of the predicates is one-eighth of the SVE vector length */
uint8_t predicates[16][SIMD_VECTOR_LEN_BYTES / 8];
uint8_t ffr[SIMD_VECTOR_LEN_BYTES / 8];
/* SMCCCv1.3 FID[16] hint bit state recorded on EL3 entry */
bool hint;
#endif /* CTX_INCLUDE_SVE_REGS */
} __aligned(16) simd_regs_t;
CASSERT(CTX_SIMD_VECTORS == __builtin_offsetof(simd_regs_t, vectors),
assert_vectors_mismatch);
CASSERT(CTX_SIMD_FPSR == __builtin_offsetof(simd_regs_t, fpsr),
assert_fpsr_mismatch);
CASSERT(CTX_SIMD_FPCR == __builtin_offsetof(simd_regs_t, fpcr),
assert_fpcr_mismatch);
#if CTX_INCLUDE_FPREGS && CTX_INCLUDE_AARCH32_REGS
CASSERT(CTX_SIMD_FPEXC32 == __builtin_offsetof(simd_regs_t, fpexc32_el2),
assert_fpex32_mismatch);
#endif
#if CTX_INCLUDE_SVE_REGS
CASSERT(CTX_SIMD_PREDICATES == __builtin_offsetof(simd_regs_t, predicates),
assert_predicates_mismatch);
CASSERT(CTX_SIMD_FFR == __builtin_offsetof(simd_regs_t, ffr),
assert_ffr_mismatch);
#endif
void simd_ctx_save(uint32_t security_state, bool hint_sve);
void simd_ctx_restore(uint32_t security_state);
#endif /* __ASSEMBLER__ */
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
#endif /* SIMD_CTX_H */
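
For illustration, here is a minimal sketch (not part of this change) of how a dispatcher could drive these helper APIs around a world switch; the function name is hypothetical, and the calling pattern mirrors the spmd_main.c hunks further below:

static void switch_simd_context(uint32_t state_in, uint32_t state_out)
{
	/*
	 * hint_sve = false: live SVE state is assumed present, so the
	 * incoming world's full FP/SVE state is saved.
	 */
	simd_ctx_save(state_in, false);
	/* Load the outgoing world's previously saved FP/SVE state. */
	simd_ctx_restore(state_out);
}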

include/lib/extensions/sve.h (8 lines changed)

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <context.h>
#if (ENABLE_SME_FOR_NS || ENABLE_SVE_FOR_NS)
void sve_init_el2_unused(void);
void sve_enable_per_world(per_world_context_t *per_world_ctx);
void sve_disable_per_world(per_world_context_t *per_world_ctx);
@@ -25,4 +26,9 @@ static inline void sve_disable_per_world(per_world_context_t *per_world_ctx)
}
#endif /* ( ENABLE_SME_FOR_NS | ENABLE_SVE_FOR_NS ) */
#if CTX_INCLUDE_SVE_REGS
void sve_context_save(simd_regs_t *regs);
void sve_context_restore(simd_regs_t *regs);
#endif
#endif /* SVE_H */

lib/el3_runtime/aarch64/context.S (303 lines changed)

@@ -9,12 +9,18 @@
#include <assert_macros.S>
#include <context.h>
#include <el3_common_macros.S>
#include <platform_def.h>
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
.global fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
#if CTX_INCLUDE_SVE_REGS
.global sve_context_save
.global sve_context_restore
#endif /* CTX_INCLUDE_SVE_REGS */
#if ERRATA_SPECULATIVE_AT
.global save_and_update_ptw_el1_sys_regs
#endif /* ERRATA_SPECULATIVE_AT */
@@ -23,6 +29,36 @@
.global restore_gp_pmcr_pauth_regs
.global el3_exit
/* The following macros are used if either CTX_INCLUDE_FPREGS or CTX_INCLUDE_SVE_REGS is enabled */
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
.macro fpregs_state_save base:req hold:req
mrs \hold, fpsr
str \hold, [\base, #CTX_SIMD_FPSR]
mrs \hold, fpcr
str \hold, [\base, #CTX_SIMD_FPCR]
#if CTX_INCLUDE_AARCH32_REGS && CTX_INCLUDE_FPREGS
mrs \hold, fpexc32_el2
str \hold, [\base, #CTX_SIMD_FPEXC32]
#endif
.endm
.macro fpregs_state_restore base:req hold:req
ldr \hold, [\base, #CTX_SIMD_FPSR]
msr fpsr, \hold
ldr \hold, [\base, #CTX_SIMD_FPCR]
msr fpcr, \hold
#if CTX_INCLUDE_AARCH32_REGS && CTX_INCLUDE_FPREGS
ldr \hold, [\base, #CTX_SIMD_FPEXC32]
msr fpexc32_el2, \hold
#endif
.endm
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
/* ------------------------------------------------------------------
* The following function follows the aapcs_64 strictly to use
* x9-x17 (temporary caller-saved registers according to AArch64 PCS)
@@ -39,33 +75,25 @@
*/
#if CTX_INCLUDE_FPREGS
func fpregs_context_save
stp q0, q1, [x0, #CTX_FP_Q0]
stp q2, q3, [x0, #CTX_FP_Q2]
stp q4, q5, [x0, #CTX_FP_Q4]
stp q6, q7, [x0, #CTX_FP_Q6]
stp q8, q9, [x0, #CTX_FP_Q8]
stp q10, q11, [x0, #CTX_FP_Q10]
stp q12, q13, [x0, #CTX_FP_Q12]
stp q14, q15, [x0, #CTX_FP_Q14]
stp q16, q17, [x0, #CTX_FP_Q16]
stp q18, q19, [x0, #CTX_FP_Q18]
stp q20, q21, [x0, #CTX_FP_Q20]
stp q22, q23, [x0, #CTX_FP_Q22]
stp q24, q25, [x0, #CTX_FP_Q24]
stp q26, q27, [x0, #CTX_FP_Q26]
stp q28, q29, [x0, #CTX_FP_Q28]
stp q30, q31, [x0, #CTX_FP_Q30]
mrs x9, fpsr
str x9, [x0, #CTX_FP_FPSR]
mrs x10, fpcr
str x10, [x0, #CTX_FP_FPCR]
#if CTX_INCLUDE_AARCH32_REGS
mrs x11, fpexc32_el2
str x11, [x0, #CTX_FP_FPEXC32_EL2]
#endif /* CTX_INCLUDE_AARCH32_REGS */
stp q0, q1, [x0], #32
stp q2, q3, [x0], #32
stp q4, q5, [x0], #32
stp q6, q7, [x0], #32
stp q8, q9, [x0], #32
stp q10, q11, [x0], #32
stp q12, q13, [x0], #32
stp q14, q15, [x0], #32
stp q16, q17, [x0], #32
stp q18, q19, [x0], #32
stp q20, q21, [x0], #32
stp q22, q23, [x0], #32
stp q24, q25, [x0], #32
stp q26, q27, [x0], #32
stp q28, q29, [x0], #32
stp q30, q31, [x0], #32
fpregs_state_save x0, x9
ret
endfunc fpregs_context_save
@@ -84,51 +112,196 @@ endfunc fpregs_context_save
* ------------------------------------------------------------------
*/
func fpregs_context_restore
ldp q0, q1, [x0, #CTX_FP_Q0]
ldp q2, q3, [x0, #CTX_FP_Q2]
ldp q4, q5, [x0, #CTX_FP_Q4]
ldp q6, q7, [x0, #CTX_FP_Q6]
ldp q8, q9, [x0, #CTX_FP_Q8]
ldp q10, q11, [x0, #CTX_FP_Q10]
ldp q12, q13, [x0, #CTX_FP_Q12]
ldp q14, q15, [x0, #CTX_FP_Q14]
ldp q16, q17, [x0, #CTX_FP_Q16]
ldp q18, q19, [x0, #CTX_FP_Q18]
ldp q20, q21, [x0, #CTX_FP_Q20]
ldp q22, q23, [x0, #CTX_FP_Q22]
ldp q24, q25, [x0, #CTX_FP_Q24]
ldp q26, q27, [x0, #CTX_FP_Q26]
ldp q28, q29, [x0, #CTX_FP_Q28]
ldp q30, q31, [x0, #CTX_FP_Q30]
ldr x9, [x0, #CTX_FP_FPSR]
msr fpsr, x9
ldr x10, [x0, #CTX_FP_FPCR]
msr fpcr, x10
#if CTX_INCLUDE_AARCH32_REGS
ldr x11, [x0, #CTX_FP_FPEXC32_EL2]
msr fpexc32_el2, x11
#endif /* CTX_INCLUDE_AARCH32_REGS */
/*
* No explicit ISB required here as ERET to
* switch to secure EL1 or non-secure world
* covers it
*/
ldp q0, q1, [x0], #32
ldp q2, q3, [x0], #32
ldp q4, q5, [x0], #32
ldp q6, q7, [x0], #32
ldp q8, q9, [x0], #32
ldp q10, q11, [x0], #32
ldp q12, q13, [x0], #32
ldp q14, q15, [x0], #32
ldp q16, q17, [x0], #32
ldp q18, q19, [x0], #32
ldp q20, q21, [x0], #32
ldp q22, q23, [x0], #32
ldp q24, q25, [x0], #32
ldp q26, q27, [x0], #32
ldp q28, q29, [x0], #32
ldp q30, q31, [x0], #32
fpregs_state_restore x0, x9
ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
#if CTX_INCLUDE_SVE_REGS
/*
* Helper macros for SVE predicate save/restore operations.
*/
.macro sve_predicate_op op:req reg:req
\op p0, [\reg, #0, MUL VL]
\op p1, [\reg, #1, MUL VL]
\op p2, [\reg, #2, MUL VL]
\op p3, [\reg, #3, MUL VL]
\op p4, [\reg, #4, MUL VL]
\op p5, [\reg, #5, MUL VL]
\op p6, [\reg, #6, MUL VL]
\op p7, [\reg, #7, MUL VL]
\op p8, [\reg, #8, MUL VL]
\op p9, [\reg, #9, MUL VL]
\op p10, [\reg, #10, MUL VL]
\op p11, [\reg, #11, MUL VL]
\op p12, [\reg, #12, MUL VL]
\op p13, [\reg, #13, MUL VL]
\op p14, [\reg, #14, MUL VL]
\op p15, [\reg, #15, MUL VL]
.endm
.macro sve_vectors_op op:req reg:req
\op z0, [\reg, #0, MUL VL]
\op z1, [\reg, #1, MUL VL]
\op z2, [\reg, #2, MUL VL]
\op z3, [\reg, #3, MUL VL]
\op z4, [\reg, #4, MUL VL]
\op z5, [\reg, #5, MUL VL]
\op z6, [\reg, #6, MUL VL]
\op z7, [\reg, #7, MUL VL]
\op z8, [\reg, #8, MUL VL]
\op z9, [\reg, #9, MUL VL]
\op z10, [\reg, #10, MUL VL]
\op z11, [\reg, #11, MUL VL]
\op z12, [\reg, #12, MUL VL]
\op z13, [\reg, #13, MUL VL]
\op z14, [\reg, #14, MUL VL]
\op z15, [\reg, #15, MUL VL]
\op z16, [\reg, #16, MUL VL]
\op z17, [\reg, #17, MUL VL]
\op z18, [\reg, #18, MUL VL]
\op z19, [\reg, #19, MUL VL]
\op z20, [\reg, #20, MUL VL]
\op z21, [\reg, #21, MUL VL]
\op z22, [\reg, #22, MUL VL]
\op z23, [\reg, #23, MUL VL]
\op z24, [\reg, #24, MUL VL]
\op z25, [\reg, #25, MUL VL]
\op z26, [\reg, #26, MUL VL]
\op z27, [\reg, #27, MUL VL]
\op z28, [\reg, #28, MUL VL]
\op z29, [\reg, #29, MUL VL]
\op z30, [\reg, #30, MUL VL]
\op z31, [\reg, #31, MUL VL]
.endm
/* ------------------------------------------------------------------
* The following function follows the aapcs_64 strictly to use x9-x17
* (temporary caller-saved registers according to AArch64 PCS) to
* save the SVE register context. It assumes that 'x0' is
* pointing to a 'simd_regs_t' structure to which the register context
* will be saved.
* ------------------------------------------------------------------
*/
func sve_context_save
.arch_extension sve
/* Temporarily enable SVE */
mrs x10, cptr_el3
orr x11, x10, #CPTR_EZ_BIT
bic x11, x11, #TFP_BIT
msr cptr_el3, x11
isb
/* Save zcr_el3 and program the build-time SVE vector length */
mrs x12, S3_6_C1_C2_0 /* zcr_el3 */
/* ZCR_EL3.LEN encodes the vector length as (length in bits / 128) - 1 */
mov x13, #((SVE_VECTOR_LEN >> 7) - 1)
msr S3_6_C1_C2_0, x13
isb
/* Predicate registers */
mov x13, #CTX_SIMD_PREDICATES
add x9, x0, x13
sve_predicate_op str, x9
/* Save FFR after predicates */
mov x13, #CTX_SIMD_FFR
add x9, x0, x13
rdffr p0.b
str p0, [x9]
/* Save vector registers */
mov x13, #CTX_SIMD_VECTORS
add x9, x0, x13
sve_vectors_op str, x9
/* Restore SVE enablement */
msr S3_6_C1_C2_0, x12 /* zcr_el3 */
msr cptr_el3, x10
isb
.arch_extension nosve
/* Save FPSR, FPCR and FPEXC32 */
fpregs_state_save x0, x9
ret
endfunc sve_context_save
/* ------------------------------------------------------------------
* The following function follows the aapcs_64 strictly to use x9-x17
* (temporary caller-saved registers according to AArch64 PCS) to
* restore SVE register context. It assumes that 'x0' is pointing to
* a 'simd_regs_t' structure from where the register context will be
* restored.
* ------------------------------------------------------------------
*/
func sve_context_restore
.arch_extension sve
/* Temporarily enable SVE for EL3 */
mrs x10, cptr_el3
orr x11, x10, #CPTR_EZ_BIT
bic x11, x11, #TFP_BIT
msr cptr_el3, x11
isb
/* Save zcr_el3 and program the build-time SVE vector length */
mrs x12, S3_6_C1_C2_0 /* zcr_el3 */
/* ZCR_EL3.LEN encodes the vector length as (length in bits / 128) - 1 */
mov x13, #((SVE_VECTOR_LEN >> 7) - 1)
msr S3_6_C1_C2_0, x13
isb
/* Restore FFR register before predicates */
mov x13, #CTX_SIMD_FFR
add x9, x0, x13
ldr p0, [x9]
wrffr p0.b
/* Restore predicate registers */
mov x13, #CTX_SIMD_PREDICATES
add x9, x0, x13
sve_predicate_op ldr, x9
/* Restore vector registers */
mov x13, #CTX_SIMD_VECTORS
add x9, x0, x13
sve_vectors_op ldr, x9
/* Restore SVE enablement */
msr S3_6_C1_C2_0, x12 /* zcr_el3 */
msr cptr_el3, x10
isb
.arch_extension nosve
/* Restore FPSR, FPCR and FPEXC32 */
fpregs_state_restore x0, x9
ret
endfunc sve_context_restore
#endif /* CTX_INCLUDE_SVE_REGS */
/*
* Set SCR_EL3.EA bit to enable SErrors at EL3
*/
.macro enable_serror_at_el3
mrs x8, scr_el3
orr x8, x8, #SCR_EA_BIT
msr scr_el3, x8
mrs x8, scr_el3
orr x8, x8, #SCR_EA_BIT
msr scr_el3, x8
.endm
/*
@@ -147,8 +320,8 @@ endfunc fpregs_context_restore
and x8, x8, #(ID_AA64PFR0_DIT_MASK << ID_AA64PFR0_DIT_SHIFT)
cbz x8, 1f
#endif
mov x8, #DIT_BIT
msr DIT, x8
mov x8, #DIT_BIT
msr DIT, x8
1:
#endif /* ENABLE_FEAT_DIT */
.endm /* set_unset_pstate_bits */

lib/el3_runtime/simd_ctx.c (81 lines changed)

@ -0,0 +1,81 @@
/*
* Copyright (c) 2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2022, Google LLC. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdint.h>
#include <common/debug.h>
#include <lib/el3_runtime/aarch64/context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/simd_ctx.h>
#include <lib/extensions/sve.h>
#include <plat/common/platform.h>
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
/* SIMD context managed for Secure and Normal Worlds. */
#define SIMD_CTXT_COUNT 2
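/* One SIMD context per world, per core; indexed by security state and core position. */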
#if SEPARATE_SIMD_SECTION
__section(".simd_context")
#else
__section(".bss.simd_context")
#endif
static simd_regs_t simd_context[SIMD_CTXT_COUNT][PLATFORM_CORE_COUNT];
void simd_ctx_save(uint32_t security_state, bool hint_sve)
{
simd_regs_t *regs;
if (security_state != NON_SECURE && security_state != SECURE) {
ERROR("Unsupported security state specified for SIMD context: %u\n",
security_state);
panic();
}
regs = &simd_context[security_state][plat_my_core_pos()];
#if CTX_INCLUDE_SVE_REGS
regs->hint = hint_sve;
if (hint_sve) {
/*
* Hint bit denoting absence of SVE live state. Hence, only
* save FP context.
*/
fpregs_context_save(regs);
} else {
sve_context_save(regs);
}
#elif CTX_INCLUDE_FPREGS
fpregs_context_save(regs);
#endif
}
void simd_ctx_restore(uint32_t security_state)
{
simd_regs_t *regs;
if (security_state != NON_SECURE && security_state != SECURE) {
ERROR("Unsupported security state specified for SIMD context: %u\n",
security_state);
panic();
}
regs = &simd_context[security_state][plat_my_core_pos()];
#if CTX_INCLUDE_SVE_REGS
if (regs->hint) {
fpregs_context_restore(regs);
} else {
sve_context_restore(regs);
}
#elif CTX_INCLUDE_FPREGS
fpregs_context_restore(regs);
#endif
}
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */

make_helpers/defaults.mk (7 lines changed)

@@ -63,6 +63,9 @@ CTX_INCLUDE_AARCH32_REGS := 1
# Include FP registers in cpu context
CTX_INCLUDE_FPREGS := 0
# Include SVE registers in cpu context
CTX_INCLUDE_SVE_REGS := 0
# Debug build
DEBUG := 0
@@ -237,6 +240,10 @@ SEPARATE_NOBITS_REGION := 0
# region, platform Makefile is free to override this value.
SEPARATE_BL2_NOLOAD_REGION := 0
# Put SIMD context data structures in a separate memory region. Platforms
# have the choice to put them outside of the default BSS region of EL3 firmware.
SEPARATE_SIMD_SECTION := 0
# If the BL31 image initialisation code is reclaimed after use for the secondary
# cores stack
RECLAIM_INIT_CODE := 0

plat/arm/board/fvp/fdts/fvp_cactus_sp_manifest.dts (28 lines changed)

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
* that will be consumed by the EL3 SPMC.
*
*/
/dts-v1/;
/ {
compatible = "arm,ffa-manifest-1.0";
#address-cells = <2>;
#size-cells = <1>;
/* Properties */
ffa-version = <0x00010001>; /* 31:16 - Major, 15:0 - Minor */
id = <0x8001>;
uuid = <0x1e67b5b4 0xe14f904a 0x13fb1fb8 0xcbdae1da>;
messaging-method = <3>; /* Direct messaging only */
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
execution-ctx-count = <8>;
/* Boot protocol */
gp-register-num = <0>;
};

plat/arm/board/fvp/include/plat.ld.S (30 lines changed)

@@ -1,12 +1,38 @@
/*
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PLAT_LD_S
#define PLAT_LD_S
#include <plat/arm/common/arm_tzc_dram.ld.S>
#include <lib/xlat_tables/xlat_tables_defs.h>
MEMORY {
EL3_SEC_DRAM (rw): ORIGIN = ARM_EL3_TZC_DRAM1_BASE, LENGTH = ARM_EL3_TZC_DRAM1_SIZE
}
SECTIONS
{
. = ARM_EL3_TZC_DRAM1_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"ARM_EL3_TZC_DRAM_BASE address is not aligned on a page boundary.")
.el3_tzc_dram (NOLOAD) : ALIGN(PAGE_SIZE) {
__PLAT_SPMC_SHMEM_DATASTORE_START__ = .;
*(.arm_spmc_shmem_datastore)
__PLAT_SPMC_SHMEM_DATASTORE_END__ = .;
__EL3_SEC_DRAM_START__ = .;
*(.arm_el3_tzc_dram)
#if SEPARATE_SIMD_SECTION
. = ALIGN(16);
*(.simd_context)
#endif
__EL3_SEC_DRAM_UNALIGNED_END__ = .;
. = ALIGN(PAGE_SIZE);
__EL3_SEC_DRAM_END__ = .;
} >EL3_SEC_DRAM
}
#if RECLAIM_INIT_CODE
#include <plat/arm/common/arm_reclaim_init.ld.S>

plat/arm/board/fvp/platform.mk (4 lines changed)

@@ -47,6 +47,10 @@ ifeq (${SPM_MM}, 0)
ifeq (${CTX_INCLUDE_FPREGS}, 0)
ENABLE_SME_FOR_NS := 2
ENABLE_SME2_FOR_NS := 2
else
ENABLE_SVE_FOR_NS := 0
ENABLE_SME_FOR_NS := 0
ENABLE_SME2_FOR_NS := 0
endif
endif

services/spd/pncd/pncd_common.c (6 lines changed)

@@ -67,8 +67,9 @@ uint64_t pncd_synchronous_sp_entry(pnc_context_t *pnc_ctx)
/* Apply the Secure EL1 system register context and switch to it */
assert(cm_get_context(SECURE) == &pnc_ctx->cpu_ctx);
cm_el1_sysregs_context_restore(SECURE);
#if CTX_INCLUDE_FPREGS
fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
simd_ctx_restore(SECURE);
#endif
cm_set_next_eret_context(SECURE);
@@ -90,8 +91,9 @@ void pncd_synchronous_sp_exit(pnc_context_t *pnc_ctx, uint64_t ret)
/* Save the Secure EL1 system register context */
assert(cm_get_context(SECURE) == &pnc_ctx->cpu_ctx);
cm_el1_sysregs_context_save(SECURE);
#if CTX_INCLUDE_FPREGS
fpregs_context_save(get_fpregs_ctx(cm_get_context(SECURE)));
simd_ctx_save(SECURE, false);
#endif
assert(pnc_ctx->c_rt_ctx != 0);

services/spd/pncd/pncd_main.c (6 lines changed)

@@ -55,8 +55,9 @@ static void context_save(unsigned long security_state)
assert(sec_state_is_valid(security_state));
cm_el1_sysregs_context_save((uint32_t) security_state);
#if CTX_INCLUDE_FPREGS
fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
simd_ctx_save((uint32_t)security_state, false);
#endif
}
@@ -72,8 +73,9 @@ static void *context_restore(unsigned long security_state)
/* Restore state */
cm_el1_sysregs_context_restore((uint32_t) security_state);
#if CTX_INCLUDE_FPREGS
fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
simd_ctx_restore((uint32_t)security_state);
#endif
cm_set_next_eret_context((uint32_t) security_state);

services/spd/trusty/trusty.c (17 lines changed)

@@ -118,8 +118,10 @@ static struct smc_args trusty_context_switch(uint32_t security_state, uint64_t r
* when it's needed the PSCI caller has preserved FP context before
* going here.
*/
if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state)));
if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME) {
simd_ctx_save(security_state, false);
}
cm_el1_sysregs_context_save(security_state);
ctx->saved_security_state = security_state;
@@ -128,8 +130,9 @@ static struct smc_args trusty_context_switch(uint32_t security_state, uint64_t r
assert(ctx->saved_security_state == ((security_state == 0U) ? 1U : 0U));
cm_el1_sysregs_context_restore(security_state);
if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state)));
if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME) {
simd_ctx_restore(security_state);
}
cm_set_next_eret_context(security_state);
@@ -320,7 +323,7 @@ static int32_t trusty_init(void)
ep_info = bl31_plat_get_next_image_ep_info(SECURE);
assert(ep_info != NULL);
fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
simd_ctx_save(NON_SECURE, false);
cm_el1_sysregs_context_save(NON_SECURE);
cm_set_context(&ctx->cpu_ctx, SECURE);
@@ -337,7 +340,7 @@ static int32_t trusty_init(void)
}
cm_el1_sysregs_context_restore(SECURE);
fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
simd_ctx_restore(SECURE);
cm_set_next_eret_context(SECURE);
ctx->saved_security_state = ~0U; /* initial saved state is invalid */
@@ -346,7 +349,7 @@ static int32_t trusty_init(void)
(void)trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
cm_el1_sysregs_context_restore(NON_SECURE);
fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
simd_ctx_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
return 1;

services/std_svc/spm/spm_mm/spm_mm_main.c (23 lines changed)

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,6 +13,7 @@
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/simd_ctx.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
@@ -190,13 +191,13 @@ uint64_t spm_mm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
uint64_t rc;
sp_context_t *sp_ptr = &sp_ctx;
#if CTX_INCLUDE_FPREGS
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
/*
* SP runs to completion, no need to restore FP registers of secure context.
* Save FP registers only for non secure context.
* SP runs to completion, no need to restore FP/SVE registers of secure context.
* Save FP/SVE registers only for non secure context.
*/
fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
#endif
simd_ctx_save(NON_SECURE, false);
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
/* Wait until the Secure Partition is idle and set it to busy. */
sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
@@ -216,13 +217,13 @@ uint64_t spm_mm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
assert(sp_ptr->state == SP_STATE_BUSY);
sp_state_set(sp_ptr, SP_STATE_IDLE);
#if CTX_INCLUDE_FPREGS
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
/*
* SP runs to completion, no need to save FP registers of secure context.
* Restore only non secure world FP registers.
* SP runs to completion, no need to save FP/SVE registers of secure context.
* Restore only non secure world FP/SVE registers.
*/
fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
#endif
simd_ctx_restore(NON_SECURE);
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */
return rc;
}

services/std_svc/spmd/spmd_main.c (26 lines changed)

@@ -215,6 +215,14 @@ static uint64_t spmd_secure_interrupt_handler(uint32_t id,
cm_el2_sysregs_context_save(NON_SECURE);
#else
cm_el1_sysregs_context_save(NON_SECURE);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
/*
* The hint bit denoting absence of SVE live state is effectively false
* in this scenario where execution was trapped to EL3 due to FIQ.
*/
simd_ctx_save(NON_SECURE, false);
#endif
#endif
/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
@@ -230,7 +238,14 @@ static uint64_t spmd_secure_interrupt_handler(uint32_t id,
/* Mark current core as handling a secure interrupt. */
ctx->secure_interrupt_ongoing = true;
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
simd_ctx_restore(SECURE);
#endif
rc = spmd_spm_core_sync_entry(ctx);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
simd_ctx_save(SECURE, false);
#endif
if (rc != 0ULL) {
ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
}
@@ -241,6 +256,10 @@ static uint64_t spmd_secure_interrupt_handler(uint32_t id,
cm_el2_sysregs_context_restore(NON_SECURE);
#else
cm_el1_sysregs_context_restore(NON_SECURE);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
simd_ctx_restore(NON_SECURE);
#endif
#endif
cm_set_next_eret_context(NON_SECURE);
@@ -678,6 +697,10 @@ uint64_t spmd_smc_switch_state(uint32_t smc_fid,
cm_el2_sysregs_context_save(secure_state_in);
#else
cm_el1_sysregs_context_save(secure_state_in);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
/* Forward the hint bit denoting the absence of SVE live state. */
simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
#endif
#endif
/* Restore outgoing security state */
@@ -685,6 +708,9 @@ uint64_t spmd_smc_switch_state(uint32_t smc_fid,
cm_el2_sysregs_context_restore(secure_state_out);
#else
cm_el1_sysregs_context_restore(secure_state_out);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
simd_ctx_restore(secure_state_out);
#endif
#endif
cm_set_next_eret_context(secure_state_out);
