
fvp: Make use of the generic MMU translation table setup code

Change-Id: I559c5a4d86cad55ce3f6ad71285b538d3cfd76dc
Signed-off-by: Jon Medhurst <tixy@linaro.org>
Authored by Jon Medhurst (11 years ago); committed by Dan Handley
Commit: 38aa76a87f
Changed files:
  1. bl32/tsp/tsp-fvp.mk (1 line changed)
  2. plat/fvp/aarch64/bl32_setup_xlat.c (357 lines deleted)
  3. plat/fvp/aarch64/plat_common.c (70 lines changed)
  4. plat/fvp/aarch64/plat_setup_xlat.c (322 lines deleted)
  5. plat/fvp/platform.h (19 lines changed)
  6. plat/fvp/platform.mk (9 lines changed)

bl32/tsp/tsp-fvp.mk (1 line changed)

@@ -34,5 +34,4 @@ vpath %.S ${PLAT_BL2_S_VPATH}
# TSP source files specific to FVP platform
BL32_SOURCES += bl32_plat_setup.c \
bl32_setup_xlat.c \
plat_common.c

plat/fvp/aarch64/bl32_setup_xlat.c (357 lines deleted)

@@ -1,357 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
#define BL32_NUM_L3_PAGETABLES 3
#define BL32_TZRAM_PAGETABLE 0
#define BL32_TZDRAM_PAGETABLE 1
#define BL32_NSRAM_PAGETABLE 2
/*******************************************************************************
* Level 1 translation tables need 4 entries for the 4GB address space accessib-
* le by the secure firmware. Input address space will be restricted using the
* T0SZ settings in the TCR.
******************************************************************************/
static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30]
__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3)));
/*******************************************************************************
* Level 2 translation tables describe the first & second gb of the address
* space needed to address secure peripherals e.g. trusted ROM and RAM.
******************************************************************************/
static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB]
__attribute__ ((aligned(NUM_2MB_IN_GB << 3),
section("xlat_table")));
/*******************************************************************************
* Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM
* regions at a granularity of 4K.
******************************************************************************/
static unsigned long l3_xlation_table[BL32_NUM_L3_PAGETABLES][NUM_4K_IN_2MB]
__attribute__ ((aligned(NUM_4K_IN_2MB << 3),
section("xlat_table")));
/*******************************************************************************
* Create page tables as per the platform memory map. Certain aspects of page
* talble creating have been abstracted in the above routines. This can be impr-
* oved further.
* TODO: Move the page table setup helpers into the arch or lib directory
*******************************************************************************/
unsigned long fill_xlation_tables(meminfo *tzdram_layout,
unsigned long ro_start,
unsigned long ro_limit,
unsigned long coh_start,
unsigned long coh_limit)
{
unsigned long l2_desc, l3_desc;
unsigned long *xt_addr = 0, *pt_addr, off = 0;
unsigned long trom_start_index, trom_end_index;
unsigned long tzram_start_index, tzram_end_index;
unsigned long flash0_start_index, flash0_end_index;
unsigned long flash1_start_index, flash1_end_index;
unsigned long vram_start_index, vram_end_index;
unsigned long nsram_start_index, nsram_end_index;
unsigned long tzdram_start_index, tzdram_end_index;
unsigned long dram_start_index, dram_end_index;
unsigned long dev0_start_index, dev0_end_index;
unsigned long dev1_start_index, dev1_end_index;
unsigned int idx;
/*****************************************************************
* LEVEL1 PAGETABLE SETUP
*
* Find the start and end indices of the memory peripherals in the
* first level pagetables. These are the main areas we care about.
* Also bump the end index by one if its equal to the start to
* allow for regions which lie completely in a GB.
*****************************************************************/
trom_start_index = ONE_GB_INDEX(TZROM_BASE);
dev0_start_index = ONE_GB_INDEX(TZRNG_BASE);
dram_start_index = ONE_GB_INDEX(DRAM_BASE);
dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE);
if (dram_end_index == dram_start_index)
dram_end_index++;
/*
* Fill up the level1 translation table first
*/
for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) {
/*
* Fill up the entry for the TZROM. This will cover
* everything in the first GB.
*/
if (idx == trom_start_index) {
xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0];
l1_xlation_table[idx] = create_table_desc(xt_addr);
continue;
}
/*
* Mark the second gb as device
*/
if (idx == dev0_start_index) {
xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0];
l1_xlation_table[idx] = create_table_desc(xt_addr);
continue;
}
/*
* Fill up the block entry for the DRAM with Normal
* inner-WBWA outer-WBWA non-transient attributes.
* This will cover 2-4GB. Note that the acesses are
* marked as non-secure.
*/
if ((idx >= dram_start_index) && (idx < dram_end_index)) {
l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1,
NS);
continue;
}
assert(0);
}
/*****************************************************************
* LEVEL2 PAGETABLE SETUP
*
* Find the start and end indices of the memory & peripherals in the
* second level pagetables.
******************************************************************/
/* Initializations for the 1st GB */
trom_start_index = TWO_MB_INDEX(TZROM_BASE);
trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE);
if (trom_end_index == trom_start_index)
trom_end_index++;
tzdram_start_index = TWO_MB_INDEX(TZDRAM_BASE);
tzdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE);
if (tzdram_end_index == tzdram_start_index)
tzdram_end_index++;
flash0_start_index = TWO_MB_INDEX(FLASH0_BASE);
flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE);
if (flash0_end_index == flash0_start_index)
flash0_end_index++;
flash1_start_index = TWO_MB_INDEX(FLASH1_BASE);
flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE);
if (flash1_end_index == flash1_start_index)
flash1_end_index++;
vram_start_index = TWO_MB_INDEX(VRAM_BASE);
vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE);
if (vram_end_index == vram_start_index)
vram_end_index++;
dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE);
dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE);
if (dev0_end_index == dev0_start_index)
dev0_end_index++;
dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE);
dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE);
if (dev1_end_index == dev1_start_index)
dev1_end_index++;
/* Since the size is < 2M this is a single index */
tzram_start_index = TWO_MB_INDEX(TZRAM_BASE);
nsram_start_index = TWO_MB_INDEX(NSRAM_BASE);
/*
* Fill up the level2 translation table for the first GB next
*/
for (idx = 0; idx < NUM_2MB_IN_GB; idx++) {
l2_desc = INVALID_DESC;
xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx];
/* Block entries for 64M of trusted Boot ROM */
if ((idx >= trom_start_index) && (idx < trom_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Single L3 page table entry for 256K of TZRAM */
if (idx == tzram_start_index) {
pt_addr = &l3_xlation_table[BL32_TZRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
/*
* Single L3 page table entry for first 2MB of TZDRAM.
* TODO: We are assuming the BL32 image will not
* exceed this
*/
if (idx == tzdram_start_index) {
pt_addr = &l3_xlation_table[BL32_TZDRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
/* Block entries for 32M of trusted DRAM */
if ((idx >= tzdram_start_index + 1) && (idx <= tzdram_end_index))
l2_desc = create_rwmem_block(idx, LEVEL2, 0);
/* Block entries for 64M of aliased trusted Boot ROM */
if ((idx >= flash0_start_index) && (idx < flash0_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Block entries for 64M of flash1 */
if ((idx >= flash1_start_index) && (idx < flash1_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Block entries for 32M of VRAM */
if ((idx >= vram_start_index) && (idx < vram_end_index))
l2_desc = create_rwmem_block(idx, LEVEL2, 0);
/* Block entries for all the devices in the first gb */
if ((idx >= dev0_start_index) && (idx < dev0_end_index))
l2_desc = create_device_block(idx, LEVEL2, 0);
/* Block entries for all the devices in the first gb */
if ((idx >= dev1_start_index) && (idx < dev1_end_index))
l2_desc = create_device_block(idx, LEVEL2, 0);
/* Single L3 page table entry for 64K of NSRAM */
if (idx == nsram_start_index) {
pt_addr = &l3_xlation_table[BL32_NSRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
*xt_addr = l2_desc;
}
/*
* Initializations for the 2nd GB. Mark everything as device
* for the time being as the memory map is not final. Each
* index will need to be offset'ed to allow absolute values
*/
off = NUM_2MB_IN_GB;
for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) {
l2_desc = create_device_block(idx, LEVEL2, 0);
xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off];
*xt_addr = l2_desc;
}
/*****************************************************************
* LEVEL3 PAGETABLE SETUP
*****************************************************************/
/* Fill up the level3 pagetable for the trusted SRAM. */
tzram_start_index = FOUR_KB_INDEX(TZRAM_BASE);
tzram_end_index = FOUR_KB_INDEX(TZRAM_BASE + TZRAM_SIZE);
if (tzram_end_index == tzram_start_index)
tzram_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(TZRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[BL32_TZRAM_PAGETABLE][idx - off];
/*
* TODO: All TZRAM memory is being mapped as RW while
* earlier boot stages map parts of it as Device. This
* could lead to possible coherency issues. Ditto for
* the way earlier boot loader stages map TZDRAM
*/
if (idx >= tzram_start_index && idx < tzram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, 0);
*xt_addr = l3_desc;
}
/* Fill up the level3 pagetable for the trusted DRAM. */
tzdram_start_index = FOUR_KB_INDEX(tzdram_layout->total_base);
tzdram_end_index = FOUR_KB_INDEX(tzdram_layout->total_base +
tzdram_layout->total_size);
if (tzdram_end_index == tzdram_start_index)
tzdram_end_index++;
/* Reusing trom* to mark RO memory. */
trom_start_index = FOUR_KB_INDEX(ro_start);
trom_end_index = FOUR_KB_INDEX(ro_limit);
if (trom_end_index == trom_start_index)
trom_end_index++;
/* Reusing dev* to mark coherent device memory. */
dev0_start_index = FOUR_KB_INDEX(coh_start);
dev0_end_index = FOUR_KB_INDEX(coh_limit);
if (dev0_end_index == dev0_start_index)
dev0_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(TZDRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[BL32_TZDRAM_PAGETABLE][idx - off];
if (idx >= tzdram_start_index && idx < tzdram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, 0);
if (idx >= trom_start_index && idx < trom_end_index)
l3_desc = create_romem_block(idx, LEVEL3, 0);
if (idx >= dev0_start_index && idx < dev0_end_index)
l3_desc = create_device_block(idx, LEVEL3, 0);
*xt_addr = l3_desc;
}
/* Fill up the level3 pagetable for the non-trusted SRAM. */
nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE);
nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE);
if (nsram_end_index == nsram_start_index)
nsram_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(NSRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[BL32_NSRAM_PAGETABLE][idx - off];
if (idx >= nsram_start_index && idx < nsram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, NS);
*xt_addr = l3_desc;
}
return (unsigned long) l1_xlation_table;
}

plat/fvp/aarch64/plat_common.c (70 lines changed)

@@ -28,14 +28,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
/* Included only for error codes */
#include <psci.h>
#include "debug.h"
#include <platform.h>
#include <xlat_tables.h>
unsigned char platform_normal_stacks[PLATFORM_STACK_SIZE][PLATFORM_CORE_COUNT]
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
@@ -50,13 +47,6 @@ __attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
******************************************************************************/
static unsigned long platform_config[CONFIG_LIMIT];
/*******************************************************************************
* An internal global pointer of the level 1 translation tables which should not
* change once setup by the primary cpu during a cold boot.
*******************************************************************************/
unsigned long l1_xlation_table __aligned(PLATFORM_CACHE_LINE_SIZE)
__attribute__ ((section("tzfw_coherent_mem")));
/*******************************************************************************
* Enable the MMU assuming that the pagetables have already been created
*******************************************************************************/
@@ -64,10 +54,6 @@ void enable_mmu()
{
unsigned long mair, tcr, ttbr, sctlr;
unsigned long current_el = read_current_el();
#if DEBUG
unsigned int l1_table_desc_bits;
unsigned int l1_table_align;
#endif
/* Set the attributes in the right indices of the MAIR */
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -91,16 +77,7 @@
write_tcr(tcr);
/* Set TTBR bits. Ensure the alignment for level 1 page table */
#if DEBUG
#define BITS_PER_4K_L3DESC 12
#define BITS_PER_4K_L2DESC (9 + BITS_PER_4K_L3DESC)
#define BITS_PER_4K_L1DESC (9 + BITS_PER_4K_L2DESC)
l1_table_desc_bits = (64 - TCR_T0SZ_4GB - BITS_PER_4K_L1DESC);
l1_table_align = l1_table_desc_bits + 3;
assert(((unsigned long) l1_xlation_table &
((1 << l1_table_align) - 1)) == 0);
#endif
/* Set TTBR bits as well */
ttbr = (unsigned long) l1_xlation_table;
write_ttbr0(ttbr);
@@ -126,6 +103,26 @@ void disable_mmu(void)
return;
}
/*
* Table of regions to map using the MMU.
* This doesn't include TZRAM as the 'mem_layout' argument passed to
* configure_mmu() will give the available subset of that.
*/
const mmap_region mmap[] = {
{ TZROM_BASE, TZROM_SIZE, MT_MEMORY | MT_RO | MT_SECURE },
{ TZDRAM_BASE, TZDRAM_SIZE, MT_MEMORY | MT_RW | MT_SECURE },
{ FLASH0_BASE, FLASH0_SIZE, MT_MEMORY | MT_RO | MT_SECURE },
{ FLASH1_BASE, FLASH1_SIZE, MT_MEMORY | MT_RO | MT_SECURE },
{ VRAM_BASE, VRAM_SIZE, MT_MEMORY | MT_RW | MT_SECURE },
{ DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | MT_SECURE },
{ NSRAM_BASE, NSRAM_SIZE, MT_MEMORY | MT_RW | MT_NS },
{ DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_SECURE },
/* 2nd GB as device for now...*/
{ 0x40000000, 0x40000000, MT_DEVICE | MT_RW | MT_SECURE },
{ DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS },
{0}
};
/*******************************************************************************
* Setup the pagetables as per the platform memory map & initialize the mmu
*******************************************************************************/
@@ -135,16 +132,17 @@ void configure_mmu(meminfo *mem_layout,
unsigned long coh_start,
unsigned long coh_limit)
{
assert(IS_PAGE_ALIGNED(ro_start));
assert(IS_PAGE_ALIGNED(ro_limit));
assert(IS_PAGE_ALIGNED(coh_start));
assert(IS_PAGE_ALIGNED(coh_limit));
l1_xlation_table = fill_xlation_tables(mem_layout,
ro_start,
ro_limit,
coh_start,
coh_limit);
mmap_add_region(mem_layout->total_base, mem_layout->total_size,
MT_MEMORY | MT_RW | MT_SECURE);
mmap_add_region(ro_start, ro_limit - ro_start,
MT_MEMORY | MT_RO | MT_SECURE);
mmap_add_region(coh_start, coh_limit - coh_start,
MT_DEVICE | MT_RW | MT_SECURE);
mmap_add(mmap);
init_xlat_tables();
enable_mmu();
return;
}
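
The rewritten configure_mmu() above replaces the hand-built fill_xlation_tables() with the generic xlat_tables API: mmap_add_region(), mmap_add(), init_xlat_tables() and enable_mmu(). Below is a minimal sketch of how another platform port could drive the same API; the region names, base addresses and sizes are hypothetical placeholders, and only the call sequence and attribute flags are taken from this diff.

/*
 * Minimal sketch of using the generic xlat_tables API shown in the diff
 * above. MY_* bases/sizes are hypothetical placeholders.
 */
#include <xlat_tables.h>

#define MY_ROM_BASE	0x00000000ull
#define MY_ROM_SIZE	0x04000000ull
#define MY_SRAM_BASE	0x04000000ull
#define MY_SRAM_SIZE	0x00040000ull
#define MY_DEV_BASE	0x1a000000ull
#define MY_DEV_SIZE	0x06000000ull

/* Fixed regions, terminated by a zero entry as in the mmap[] table above */
static const mmap_region my_mmap[] = {
	{ MY_ROM_BASE, MY_ROM_SIZE, MT_MEMORY | MT_RO | MT_SECURE },
	{ MY_DEV_BASE, MY_DEV_SIZE, MT_DEVICE | MT_RW | MT_SECURE },
	{0}
};

void my_configure_mmu(unsigned long coh_start, unsigned long coh_limit)
{
	/* Regions whose extent is only known at runtime are added singly */
	mmap_add_region(MY_SRAM_BASE, MY_SRAM_SIZE,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(coh_start, coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);

	/* Add the fixed regions, build the tables, then turn the MMU on */
	mmap_add(my_mmap);
	init_xlat_tables();
	enable_mmu();
}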

plat/fvp/aarch64/plat_setup_xlat.c (322 lines deleted)

@@ -1,322 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
/*******************************************************************************
* TODO: Check page table alignment to avoid space wastage
******************************************************************************/
/*******************************************************************************
* Level 1 translation tables need 4 entries for the 4GB address space accessib-
* le by the secure firmware. Input address space will be restricted using the
* T0SZ settings in the TCR.
******************************************************************************/
static unsigned long l1_xlation_table[NUM_GB_IN_4GB]
__attribute__ ((aligned((NUM_GB_IN_4GB) << 3)));
/*******************************************************************************
* Level 2 translation tables describe the first & second gb of the address
* space needed to address secure peripherals e.g. trusted ROM and RAM.
******************************************************************************/
static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB]
__attribute__ ((aligned(NUM_2MB_IN_GB << 3),
section("xlat_table")));
/*******************************************************************************
* Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM
* regions at a granularity of 4K.
******************************************************************************/
static unsigned long l3_xlation_table[NUM_L3_PAGETABLES][NUM_4K_IN_2MB]
__attribute__ ((aligned(NUM_4K_IN_2MB << 3),
section("xlat_table")));
/*******************************************************************************
* Create page tables as per the platform memory map. Certain aspects of page
* talble creating have been abstracted in the above routines. This can be impr-
* oved further.
* TODO: Move the page table setup helpers into the arch or lib directory
*******************************************************************************/
unsigned long fill_xlation_tables(meminfo *tzram_layout,
unsigned long ro_start,
unsigned long ro_limit,
unsigned long coh_start,
unsigned long coh_limit)
{
unsigned long l2_desc, l3_desc;
unsigned long *xt_addr = 0, *pt_addr, off = 0;
unsigned long trom_start_index, trom_end_index;
unsigned long tzram_start_index, tzram_end_index;
unsigned long flash0_start_index, flash0_end_index;
unsigned long flash1_start_index, flash1_end_index;
unsigned long vram_start_index, vram_end_index;
unsigned long nsram_start_index, nsram_end_index;
unsigned long tdram_start_index, tdram_end_index;
unsigned long dram_start_index, dram_end_index;
unsigned long dev0_start_index, dev0_end_index;
unsigned long dev1_start_index, dev1_end_index;
unsigned int idx;
/*****************************************************************
* LEVEL1 PAGETABLE SETUP
*
* Find the start and end indices of the memory peripherals in the
* first level pagetables. These are the main areas we care about.
* Also bump the end index by one if its equal to the start to
* allow for regions which lie completely in a GB.
*****************************************************************/
trom_start_index = ONE_GB_INDEX(TZROM_BASE);
dev0_start_index = ONE_GB_INDEX(TZRNG_BASE);
dram_start_index = ONE_GB_INDEX(DRAM_BASE);
dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE);
if (dram_end_index == dram_start_index)
dram_end_index++;
/*
* Fill up the level1 translation table first
*/
for (idx = 0; idx < NUM_GB_IN_4GB; idx++) {
/*
* Fill up the entry for the TZROM. This will cover
* everything in the first GB.
*/
if (idx == trom_start_index) {
xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0];
l1_xlation_table[idx] = create_table_desc(xt_addr);
continue;
}
/*
* Mark the second gb as device
*/
if (idx == dev0_start_index) {
xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0];
l1_xlation_table[idx] = create_table_desc(xt_addr);
continue;
}
/*
* Fill up the block entry for the DRAM with Normal
* inner-WBWA outer-WBWA non-transient attributes.
* This will cover 2-4GB. Note that the acesses are
* marked as non-secure.
*/
if ((idx >= dram_start_index) && (idx < dram_end_index)) {
l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1,
NS);
continue;
}
assert(0);
}
/*****************************************************************
* LEVEL2 PAGETABLE SETUP
*
* Find the start and end indices of the memory & peripherals in the
* second level pagetables.
******************************************************************/
/* Initializations for the 1st GB */
trom_start_index = TWO_MB_INDEX(TZROM_BASE);
trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE);
if (trom_end_index == trom_start_index)
trom_end_index++;
tdram_start_index = TWO_MB_INDEX(TZDRAM_BASE);
tdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE);
if (tdram_end_index == tdram_start_index)
tdram_end_index++;
flash0_start_index = TWO_MB_INDEX(FLASH0_BASE);
flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE);
if (flash0_end_index == flash0_start_index)
flash0_end_index++;
flash1_start_index = TWO_MB_INDEX(FLASH1_BASE);
flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE);
if (flash1_end_index == flash1_start_index)
flash1_end_index++;
vram_start_index = TWO_MB_INDEX(VRAM_BASE);
vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE);
if (vram_end_index == vram_start_index)
vram_end_index++;
dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE);
dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE);
if (dev0_end_index == dev0_start_index)
dev0_end_index++;
dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE);
dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE);
if (dev1_end_index == dev1_start_index)
dev1_end_index++;
/* Since the size is < 2M this is a single index */
tzram_start_index = TWO_MB_INDEX(tzram_layout->total_base);
nsram_start_index = TWO_MB_INDEX(NSRAM_BASE);
/*
* Fill up the level2 translation table for the first GB next
*/
for (idx = 0; idx < NUM_2MB_IN_GB; idx++) {
l2_desc = INVALID_DESC;
xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx];
/* Block entries for 64M of trusted Boot ROM */
if ((idx >= trom_start_index) && (idx < trom_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Single L3 page table entry for 256K of TZRAM */
if (idx == tzram_start_index) {
pt_addr = &l3_xlation_table[TZRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
/* Block entries for 32M of trusted DRAM */
if ((idx >= tdram_start_index) && (idx <= tdram_end_index))
l2_desc = create_rwmem_block(idx, LEVEL2, 0);
/* Block entries for 64M of aliased trusted Boot ROM */
if ((idx >= flash0_start_index) && (idx < flash0_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Block entries for 64M of flash1 */
if ((idx >= flash1_start_index) && (idx < flash1_end_index))
l2_desc = create_romem_block(idx, LEVEL2, 0);
/* Block entries for 32M of VRAM */
if ((idx >= vram_start_index) && (idx < vram_end_index))
l2_desc = create_rwmem_block(idx, LEVEL2, 0);
/* Block entries for all the devices in the first gb */
if ((idx >= dev0_start_index) && (idx < dev0_end_index))
l2_desc = create_device_block(idx, LEVEL2, 0);
/* Block entries for all the devices in the first gb */
if ((idx >= dev1_start_index) && (idx < dev1_end_index))
l2_desc = create_device_block(idx, LEVEL2, 0);
/* Single L3 page table entry for 64K of NSRAM */
if (idx == nsram_start_index) {
pt_addr = &l3_xlation_table[NSRAM_PAGETABLE][0];
l2_desc = create_table_desc(pt_addr);
}
*xt_addr = l2_desc;
}
/*
* Initializations for the 2nd GB. Mark everything as device
* for the time being as the memory map is not final. Each
* index will need to be offset'ed to allow absolute values
*/
off = NUM_2MB_IN_GB;
for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) {
l2_desc = create_device_block(idx, LEVEL2, 0);
xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off];
*xt_addr = l2_desc;
}
/*****************************************************************
* LEVEL3 PAGETABLE SETUP
*****************************************************************/
/* Fill up the level3 pagetable for the trusted SRAM. */
tzram_start_index = FOUR_KB_INDEX(tzram_layout->total_base);
tzram_end_index = FOUR_KB_INDEX(tzram_layout->total_base +
tzram_layout->total_size);
if (tzram_end_index == tzram_start_index)
tzram_end_index++;
/* Reusing trom* to mark RO memory. */
trom_start_index = FOUR_KB_INDEX(ro_start);
trom_end_index = FOUR_KB_INDEX(ro_limit);
if (trom_end_index == trom_start_index)
trom_end_index++;
/* Reusing dev* to mark coherent device memory. */
dev0_start_index = FOUR_KB_INDEX(coh_start);
dev0_end_index = FOUR_KB_INDEX(coh_limit);
if (dev0_end_index == dev0_start_index)
dev0_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(TZRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[TZRAM_PAGETABLE][idx - off];
if (idx >= tzram_start_index && idx < tzram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, 0);
if (idx >= trom_start_index && idx < trom_end_index)
l3_desc = create_romem_block(idx, LEVEL3, 0);
if (idx >= dev0_start_index && idx < dev0_end_index)
l3_desc = create_device_block(idx, LEVEL3, 0);
*xt_addr = l3_desc;
}
/* Fill up the level3 pagetable for the non-trusted SRAM. */
nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE);
nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE);
if (nsram_end_index == nsram_start_index)
nsram_end_index++;
/* Each index will need to be offset'ed to allow absolute values */
off = FOUR_KB_INDEX(NSRAM_BASE);
for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {
l3_desc = INVALID_DESC;
xt_addr = &l3_xlation_table[NSRAM_PAGETABLE][idx - off];
if (idx >= nsram_start_index && idx < nsram_end_index)
l3_desc = create_rwmem_block(idx, LEVEL3, NS);
*xt_addr = l3_desc;
}
return (unsigned long) l1_xlation_table;
}

plat/fvp/platform.h (19 lines changed)

@@ -243,20 +243,10 @@
/*******************************************************************************
* Platform specific page table and MMU setup constants
******************************************************************************/
#define EL3_ADDR_SPACE_SIZE (1ull << 32)
#define EL3_NUM_PAGETABLES 2
#define EL3_TROM_PAGETABLE 0
#define EL3_TRAM_PAGETABLE 1
#define ADDR_SPACE_SIZE (1ull << 32)
#define MAX_XLAT_TABLES 3
#define MAX_MMAP_REGIONS 16
#define NUM_L2_PAGETABLES 2
#define GB1_L2_PAGETABLE 0
#define GB2_L2_PAGETABLE 1
#define NUM_L3_PAGETABLES 2
#define TZRAM_PAGETABLE 0
#define NSRAM_PAGETABLE 1
/*******************************************************************************
* CCI-400 related constants
@@ -345,11 +335,6 @@ extern void bl2_plat_arch_setup(void);
extern void bl31_plat_arch_setup(void);
extern int platform_setup_pm(plat_pm_ops **);
extern unsigned int platform_get_core_pos(unsigned long mpidr);
extern unsigned long fill_xlation_tables(meminfo *memory_layout,
unsigned long ro_start,
unsigned long ro_limit,
unsigned long coh_start,
unsigned long coh_limit);
extern void disable_mmu(void);
extern void enable_mmu(void);
extern void configure_mmu(meminfo *,
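
The new constants above (ADDR_SPACE_SIZE, MAX_XLAT_TABLES, MAX_MMAP_REGIONS) let the generic library size its static storage instead of the platform hard-coding per-region page tables. The sketch below shows one plausible way the library could consume them, assuming a 4KB granule; it is an illustration, not the verbatim xlat_tables.c source.

/*
 * Sketch (assumption, not xlat_tables.c verbatim): one L1 descriptor per GB
 * of ADDR_SPACE_SIZE, a pool of MAX_XLAT_TABLES lower-level tables, and
 * MAX_MMAP_REGIONS slots for the region list.
 */
#include <stdint.h>
#include <xlat_tables.h>	/* mmap_region */
#include <platform.h>		/* ADDR_SPACE_SIZE, MAX_XLAT_TABLES, MAX_MMAP_REGIONS */

#define XLAT_ENTRIES_PER_TABLE	512	/* 4KB granule: 512 eight-byte descriptors */
#define NUM_L1_ENTRIES		(ADDR_SPACE_SIZE >> 30)

/* One top-level table, aligned as required for TTBR0 */
static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
	__attribute__ ((aligned(NUM_L1_ENTRIES << 3)));

/* Fixed pool of L2/L3 tables handed out as regions are mapped */
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_ENTRIES_PER_TABLE]
	__attribute__ ((aligned(XLAT_ENTRIES_PER_TABLE << 3),
			section("xlat_table")));

/* Region list filled by mmap_add_region()/mmap_add(); zero entry terminates */
static mmap_region mmap_pool[MAX_MMAP_REGIONS + 1];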

plat/fvp/platform.mk (9 lines changed)

@@ -34,6 +34,7 @@ PLAT_INCLUDES := -Idrivers/arm/interconnect/cci-400 \
PLAT_BL1_C_VPATH := drivers/arm/interconnect/cci-400 \
drivers/arm/peripherals/pl011 \
lib/arch/${ARCH} \
lib/semihosting \
lib/stdlib \
drivers/io
@@ -42,6 +43,7 @@ PLAT_BL1_S_VPATH := lib/semihosting/${ARCH}
PLAT_BL2_C_VPATH := drivers/arm/interconnect/cci-400 \
drivers/arm/peripherals/pl011 \
lib/arch/${ARCH} \
lib/stdlib \
lib/semihosting \
drivers/io
@@ -50,6 +52,7 @@ PLAT_BL2_S_VPATH := lib/semihosting/${ARCH}
PLAT_BL31_C_VPATH := drivers/arm/interconnect/cci-400 \
drivers/arm/peripherals/pl011 \
lib/arch/${ARCH} \
lib/semihosting \
lib/stdlib \
drivers/power \
@@ -65,22 +68,20 @@ PLAT_BL_COMMON_SOURCES := semihosting_call.S \
plat_io_storage.c \
io_semihosting.c \
io_fip.c \
io_memmap.c
io_memmap.c \
xlat_tables.c
BL1_SOURCES += bl1_plat_setup.c \
bl1_plat_helpers.S \
plat_helpers.S \
plat_common.c \
plat_setup_xlat.c \
cci400.c
BL2_SOURCES += bl2_plat_setup.c \
plat_setup_xlat.c \
plat_common.c
BL31_SOURCES += bl31_plat_setup.c \
plat_helpers.S \
plat_setup_xlat.c \
plat_common.c \
plat_pm.c \
plat_topology.c \
