From ffb7ce70b3413ac0eaae3469a6c89dcc3a45c268 Mon Sep 17 00:00:00 2001
From: Antonio Nino Diaz
Date: Tue, 30 Oct 2018 11:34:23 +0000
Subject: [PATCH] SPM: Map memory regions from RD

SPM needs to map a number of memory regions on behalf of the secure
partition. Previously, it obtained the list of regions from platform
code through the plat_get_secure_partition_mmap() API. Now it reads
them from the resource description structure.

The SPM<->SP shared buffer is mapped dynamically at EL3. This buffer is
used to pass information between SPM and the SP, so it must also be
mapped at EL3 for SPM to be able to use it.

Dynamic translation tables are now enabled when the Trusted Firmware is
compiled with SPM support.

Change-Id: I64ad335e931661812a0a60558e60372e1e5e6b72
Co-authored-by: Sandrine Bailleux
Signed-off-by: Antonio Nino Diaz
---
 plat/arm/board/fvp/include/platform_def.h |   2 +-
 services/std_svc/spm/sp_setup.c           |  13 +-
 services/std_svc/spm/sp_xlat.c            | 231 ++++++++++++++++++++++
 services/std_svc/spm/spm_private.h        |   7 +
 4 files changed, 240 insertions(+), 13 deletions(-)

diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index 70e51fd95..4e1ebd92a 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -13,7 +13,7 @@
 # define PLAT_XLAT_TABLES_DYNAMIC 1
 # endif
 #else
-# if defined(IMAGE_BL31) && RESET_TO_BL31
+# if defined(IMAGE_BL31) && (RESET_TO_BL31 || (ENABLE_SPM && !SPM_DEPRECATED))
 # define PLAT_XLAT_TABLES_DYNAMIC 1
 # endif
 #endif /* AARCH32 */
diff --git a/services/std_svc/spm/sp_setup.c b/services/std_svc/spm/sp_setup.c
index ecb8bd363..833907842 100644
--- a/services/std_svc/spm/sp_setup.c
+++ b/services/std_svc/spm/sp_setup.c
@@ -57,18 +57,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 	 * ------------------------
 	 */
 
-	/* This region contains the exception vectors used at S-EL1. */
-	const mmap_region_t sel1_exception_vectors =
-		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
-				SPM_SHIM_EXCEPTIONS_SIZE,
-				MT_CODE | MT_SECURE | MT_PRIVILEGED);
-	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
-			    &sel1_exception_vectors);
-
-	mmap_add_ctx(sp_ctx->xlat_ctx_handle,
-		     plat_get_secure_partition_mmap(NULL));
-
-	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
+	sp_map_memory_regions(sp_ctx);
 
 	/*
 	 * MMU-related registers
diff --git a/services/std_svc/spm/sp_xlat.c b/services/std_svc/spm/sp_xlat.c
index 881d97f35..77aa7be3a 100644
--- a/services/std_svc/spm/sp_xlat.c
+++ b/services/std_svc/spm/sp_xlat.c
@@ -8,14 +8,22 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
+#include
+#include
 #include
 
 #include "spm_private.h"
 #include "spm_shim_private.h"
 
+/*******************************************************************************
+ * Instantiation of translation table context
+ ******************************************************************************/
+
 /* Place translation tables by default along with the ones used by BL31. */
 #ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
 #define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
@@ -37,6 +45,229 @@ xlat_ctx_t *spm_get_sp_xlat_context(void)
 	return &sp_xlat_ctx;
 };
 
+/*******************************************************************************
+ * Functions to allocate memory for regions.
+ ******************************************************************************/
+
+/*
+ * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
+ * reserved for SPM to use as heap to allocate memory regions of Secure
+ * Partitions. This is only done at boot.
+ */
+static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
+		   PLAT_SPM_HEAP_SIZE);
+
+static uintptr_t spm_alloc_heap(size_t size)
+{
+	return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
+}
+
+/*******************************************************************************
+ * Functions to map memory regions described in the resource description.
+ ******************************************************************************/
+static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
+{
+	unsigned int index = attr & RD_MEM_MASK;
+
+	const unsigned int mmap_attr_arr[8] = {
+		MT_DEVICE | MT_RW | MT_SECURE,	/* RD_MEM_DEVICE */
+		MT_CODE | MT_SECURE,		/* RD_MEM_NORMAL_CODE */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_DATA */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_BSS */
+		MT_RO_DATA | MT_SECURE,		/* RD_MEM_NORMAL_RODATA */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
+		MT_MEMORY | MT_RW | MT_SECURE	/* RD_MEM_NORMAL_MISCELLANEOUS */
+	};
+
+	if (index >= ARRAY_SIZE(mmap_attr_arr)) {
+		ERROR("Unsupported RD memory attributes 0x%x\n", attr);
+		panic();
+	}
+
+	return mmap_attr_arr[index];
+}
+
+/*
+ * The data provided in the resource description structure is not directly
+ * compatible with a mmap_region structure. This function handles the conversion
+ * and maps it.
+ */
+static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
+{
+	int rc;
+	mmap_region_t mmap;
+
+	/* Location of the SP image */
+	uintptr_t sp_size = sp_ctx->image_size;
+	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
+	unsigned long long sp_base_pa = sp_ctx->image_base;
+
+	/* Location of the memory region to map */
+	size_t rd_size = rdmem->size;
+	uintptr_t rd_base_va = rdmem->base;
+	unsigned long long rd_base_pa;
+
+	unsigned int memtype = rdmem->attr & RD_MEM_MASK;
+
+	VERBOSE("Adding memory region '%s'\n", rdmem->name);
+
+	mmap.granularity = REGION_DEFAULT_GRANULARITY;
+
+	/* Check if the RD region is inside of the SP image or not */
+	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
+			 (sp_base_va + sp_size <= rd_base_va);
+
+	/* Set to 1 if it is needed to zero this region */
+	int zero_region = 0;
+
+	switch (memtype) {
+	case RD_MEM_DEVICE:
+		/* Device regions are mapped 1:1 */
+		rd_base_pa = rd_base_va;
+		break;
+
+	case RD_MEM_NORMAL_CODE:
+	case RD_MEM_NORMAL_RODATA:
+	{
+		if (is_outside == 1) {
+			ERROR("Code and rodata sections must be fully contained in the image.");
+			panic();
+		}
+
+		/* Get offset into the image */
+		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
+		break;
+	}
+	case RD_MEM_NORMAL_DATA:
+	{
+		if (is_outside == 1) {
+			ERROR("Data sections must be fully contained in the image.");
+			panic();
+		}
+
+		rd_base_pa = spm_alloc_heap(rd_size);
+
+		/* Get offset into the image */
+		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);
+
+		VERBOSE(" Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);
+
+		/* Map destination */
+		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
+				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
+		if (rc != 0) {
+			ERROR("Unable to map data region at EL3: %d\n", rc);
+			panic();
+		}
+
+		/* Copy original data to destination */
+		memcpy((void *)rd_base_pa, img_pa, rd_size);
+
+		/* Unmap destination region */
+		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
+		if (rc != 0) {
+			ERROR("Unable to remove data region at EL3: %d\n", rc);
+			panic();
+		}
+
+		break;
+	}
+
+	case RD_MEM_NORMAL_MISCELLANEOUS:
+		/* Allow SPM to change the attributes of the region. */
+		mmap.granularity = PAGE_SIZE;
+		rd_base_pa = spm_alloc_heap(rd_size);
+		zero_region = 1;
+		break;
+
+	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
+		if ((sp_ctx->spm_sp_buffer_base != 0) ||
+		    (sp_ctx->spm_sp_buffer_size != 0)) {
+			ERROR("A partition must have only one SPM<->SP buffer.\n");
+			panic();
+		}
+		rd_base_pa = spm_alloc_heap(rd_size);
+		zero_region = 1;
+		/* Save location of this buffer, it is needed by SPM */
+		sp_ctx->spm_sp_buffer_base = rd_base_pa;
+		sp_ctx->spm_sp_buffer_size = rd_size;
+		break;
+
+	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
+		/* Fallthrough */
+	case RD_MEM_NORMAL_BSS:
+		rd_base_pa = spm_alloc_heap(rd_size);
+		zero_region = 1;
+		break;
+
+	default:
+		panic();
+	}
+
+	mmap.base_pa = rd_base_pa;
+	mmap.base_va = rd_base_va;
+	mmap.size = rd_size;
+
+	/* Only S-EL0 mappings supported for now */
+	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;
+
+	VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
+		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);
+
+	/* Map region in the context of the Secure Partition */
+	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);
+
+	if (zero_region == 1) {
+		VERBOSE(" Zeroing region...\n");
+
+		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
+				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
+		if (rc != 0) {
+			ERROR("Unable to map memory at EL3 to zero: %d\n",
+			      rc);
+			panic();
+		}
+
+		zeromem((void *)mmap.base_pa, mmap.size);
+
+		/*
+		 * Unmap destination region unless it is the SPM<->SP buffer,
+		 * which must be used by SPM.
+		 */
+		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
+			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
+			if (rc != 0) {
+				ERROR("Unable to remove region at EL3: %d\n", rc);
+				panic();
+			}
+		}
+	}
+}
+
+void sp_map_memory_regions(sp_context_t *sp_ctx)
+{
+	/* This region contains the exception vectors used at S-EL1. */
+	const mmap_region_t sel1_exception_vectors =
+		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
+				SPM_SHIM_EXCEPTIONS_SIZE,
+				MT_CODE | MT_SECURE | MT_PRIVILEGED);
+
+	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
+			    &sel1_exception_vectors);
+
+	struct sp_rd_sect_mem_region *rdmem;
+
+	for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
+		map_rdmem(sp_ctx, rdmem);
+	}
+
+	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
+}
+
+/*******************************************************************************
+ * Functions to manipulate memory regions
+ ******************************************************************************/
+
 /*
  * Attributes are encoded using a different format in the SMC interface than in
  * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
diff --git a/services/std_svc/spm/spm_private.h b/services/std_svc/spm/spm_private.h
index ee13e94e7..7f9778ebf 100644
--- a/services/std_svc/spm/spm_private.h
+++ b/services/std_svc/spm/spm_private.h
@@ -54,15 +54,22 @@ typedef struct sp_context {
 
 	sp_state_t state;
 	spinlock_t state_lock;
+
+	/* Base and size of the shared SPM<->SP buffer */
+	uintptr_t spm_sp_buffer_base;
+	size_t spm_sp_buffer_size;
 } sp_context_t;
 
 /* Assembly helpers */
 uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
 void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
 
+/* Secure Partition setup */
 void spm_sp_setup(sp_context_t *sp_ctx);
 
+/* Functions related to the translation tables management */
 xlat_ctx_t *spm_get_sp_xlat_context(void);
+void sp_map_memory_regions(sp_context_t *sp_ctx);
 
 int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
 					      uintptr_t base_va);
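
Note on the mapping flow above: the new sp_xlat.c code carves Secure Partition
regions out of a platform-provided carveout (PLAT_SPM_HEAP_BASE /
PLAT_SPM_HEAP_SIZE, handed out byte-by-byte through TF-A's object pool at boot)
while walking the resource-description region list. The standalone sketch below
mirrors that flow so it can be compiled on a host; the names, types and sizes
used here (spm_heap, rd_region, the 4 KiB heap) are illustrative stand-ins, not
the TF-A definitions from this patch.

/* Host-side illustration only -- local stand-ins, not the TF-A types/macros. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical heap carveout; a platform would provide the real base/size. */
#define SPM_HEAP_SIZE	4096U
static uint8_t spm_heap[SPM_HEAP_SIZE];
static size_t spm_heap_used;

/* Boot-time bump allocator, similar in spirit to allocating from the pool. */
static uintptr_t spm_alloc_heap(size_t size)
{
	if (size > SPM_HEAP_SIZE - spm_heap_used) {
		fprintf(stderr, "SPM heap exhausted\n");
		return 0;
	}

	uintptr_t base = (uintptr_t)&spm_heap[spm_heap_used];

	spm_heap_used += size;
	return base;
}

/* Minimal stand-in for one resource-description memory region node. */
struct rd_region {
	const char *name;
	size_t size;
	struct rd_region *next;
};

int main(void)
{
	/* Example list: a BSS region followed by a SPM<->SP shared buffer. */
	struct rd_region shared = { "spm-sp-buffer", 512, NULL };
	struct rd_region bss = { "bss", 1024, &shared };

	/*
	 * Walk the list the way sp_map_memory_regions() iterates
	 * sp_ctx->rd.mem_region, backing each region with heap memory.
	 */
	for (struct rd_region *r = &bss; r != NULL; r = r->next) {
		uintptr_t pa = spm_alloc_heap(r->size);

		printf("region '%s': %zu bytes at PA 0x%lx\n",
		       r->name, r->size, (unsigned long)pa);
	}

	return 0;
}

In the patch itself each such allocation is additionally added to the
partition's translation context with S-EL0 attributes, and mapped temporarily
at EL3 whenever the region has to be copied from the image or zeroed.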