
style: normalize linker script code style

There are a variety of code styles in use across the linker scripts in the
codebase. This change brings them in line with one another and attempts to
make the scripts more friendly to skim-readers.

Change-Id: Ibee2afad0d543129c9ba5a8a22e3ec17d77e36ea
Signed-off-by: Chris Kay <chris.kay@arm.com>
pull/1994/head
Chris Kay, 2 years ago
parent commit f90fe02f06
  1. bl1/bl1.ld.S (71)
  2. bl2/bl2.ld.S (61)
  3. bl2/bl2_el3.ld.S (131)
  4. bl2u/bl2u.ld.S (66)
  5. bl31/bl31.ld.S (125)
  6. bl32/sp_min/sp_min.ld.S (90)
  7. bl32/tsp/tsp.ld.S (63)
  8. lib/romlib/romlib.ld.S (59)
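Before the per-file diffs, it may help skim-readers to see the conventions this change settles on in one place. The fragment below is an illustrative sketch only, not part of the commit, and IMAGE_BASE is a placeholder name rather than a symbol taken from any of the scripts; it shows the brace kept on the SECTIONS line, ASSERT messages indented as continuation lines, and #else/#endif directives annotated with their condition.

/*
 * Illustrative sketch of the normalized linker script style.
 * IMAGE_BASE is a placeholder; this block is not part of the commit.
 */
SECTIONS {
    . = IMAGE_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "IMAGE_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /* ... separate .text and .rodata output sections ... */
#else /* SEPARATE_CODE_AND_RODATA */
    /* ... a single read-only "ro" output section ... */
#endif /* SEPARATE_CODE_AND_RODATA */
}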

bl1/bl1.ld.S (71)

@@ -5,9 +5,8 @@
*/
/*
* The .data section gets copied from ROM to RAM at runtime.
* Its LMA should be 16-byte aligned to allow efficient copying of 16-bytes
* aligned regions in it.
* The .data section gets copied from ROM to RAM at runtime. Its LMA should be
* 16-byte aligned to allow efficient copying of 16-bytes aligned regions in it.
* Its VMA must be page-aligned as it marks the first read/write page.
*/
#define DATA_ALIGN 16
@@ -24,23 +23,26 @@ MEMORY {
RAM (rwx): ORIGIN = BL1_RW_BASE, LENGTH = BL1_RW_LIMIT - BL1_RW_BASE
}
SECTIONS
{
SECTIONS {
. = BL1_RO_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL1_RO_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*bl1_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >ROM
/* .ARM.extab and .ARM.exidx are only added because Clang need them */
/* .ARM.extab and .ARM.exidx are only added because Clang needs them */
.ARM.extab . : {
*(.ARM.extab* .gnu.linkonce.armextab.*)
} >ROM
@@ -51,51 +53,57 @@ SECTIONS
.rodata . : {
__RODATA_START__ = .;
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
/*
* No need to pad out the .rodata section to a page boundary. Next is
* the .data section, which can mapped in ROM with the same memory
* attributes as the .rodata section.
*
* Pad out to 16 bytes though as .data section needs to be 16 byte
* aligned and lld does not align the LMA to the aligment specified
* Pad out to 16 bytes though as .data section needs to be 16-byte
* aligned and lld does not align the LMA to the alignment specified
* on the .data section.
*/
__RODATA_END__ = .;
. = ALIGN(16);
} >ROM
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
*bl1_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
*(.vectors)
__RO_END__ = .;
/*
* Pad out to 16 bytes as .data section needs to be 16 byte aligned and
* lld does not align the LMA to the aligment specified on the .data
* section.
* Pad out to 16 bytes as the .data section needs to be 16-byte aligned
* and lld does not align the LMA to the alignment specified on the
* .data section.
*/
. = ALIGN(16);
} >ROM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
. = BL1_RW_BASE;
ASSERT(BL1_RW_BASE == ALIGN(PAGE_SIZE),
"BL1_RW_BASE address is not aligned on a page boundary.")
DATA_SECTION >RAM AT>ROM
__DATA_RAM_START__ = __DATA_START__;
__DATA_RAM_END__ = __DATA_END__;
@@ -105,24 +113,26 @@ SECTIONS
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct memory
* attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure the rest of
* the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
#endif /* USE_COHERENT_MEM */
__BL1_RAM_START__ = ADDR(.data);
__BL1_RAM_END__ = .;
@@ -135,15 +145,16 @@ SECTIONS
* of BL1's actual content in Trusted ROM.
*/
__BL1_ROM_END__ = __DATA_ROM_START__ + __DATA_SIZE__;
ASSERT(__BL1_ROM_END__ <= BL1_RO_LIMIT,
"BL1's ROM content has exceeded its limit.")
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
#endif /* USE_COHERENT_MEM */
ASSERT(. <= BL1_RW_LIMIT, "BL1's RW section has exceeded its limit.")
}
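The comment at the top of bl1.ld.S explains the constraint being preserved here: .data runs from RAM (its VMA) but its initial values are stored in and copied from ROM (its LMA), so the LMA needs 16-byte alignment for efficient copying and the VMA must be page-aligned because it marks the first read/write page. The real script expresses this through the shared DATA_SECTION helper; the following is only a minimal sketch of the same VMA/LMA split, reusing region and symbol names that appear in the script above.

/*
 * Minimal sketch of a .data section that runs from RAM but is loaded from
 * ROM; illustrative only, not the DATA_SECTION helper used by the script.
 */
.data : ALIGN(16) {
    __DATA_RAM_START__ = .;
    *(.data*)
    __DATA_RAM_END__ = .;
} >RAM AT>ROM                           /* VMA in RAM, LMA in ROM */

__DATA_ROM_START__ = LOADADDR(.data);   /* ROM copy of the initial values */
__DATA_SIZE__ = SIZEOF(.data);          /* bytes to copy at boot */

This split is also why the script computes __BL1_ROM_END__ as __DATA_ROM_START__ + __DATA_SIZE__: with .data as the last PROGBITS section, the end of its ROM copy marks the end of BL1's content in Trusted ROM.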

bl2/bl2.ld.S (61)

@@ -15,28 +15,31 @@ MEMORY {
RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
}
SECTIONS
{
SECTIONS {
. = BL2_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL2_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
#if ENABLE_RME
*bl2_rme_entrypoint.o(.text*)
#else /* ENABLE_RME */
*bl2_entrypoint.o(.text*)
#endif /* ENABLE_RME */
*(SORT_BY_ALIGNMENT(.text*))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
/* .ARM.extab and .ARM.exidx are only added because Clang need them */
/* .ARM.extab and .ARM.exidx are only added because Clang needs them */
.ARM.extab . : {
*(.ARM.extab* .gnu.linkonce.armextab.*)
} >RAM
@@ -47,39 +50,41 @@ SECTIONS
.rodata . : {
__RODATA_START__ = .;
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
*bl2_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as read-only,
* executable. No RW data from the next section must creep in. Ensure
* that the rest of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
__RW_START__ = .;
DATA_SECTION >RAM
STACK_SECTION >RAM
@@ -88,29 +93,27 @@ SECTIONS
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure the rest of
* the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
#endif /* USE_COHERENT_MEM */
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL2_END__ = .;
@@ -119,7 +122,7 @@ SECTIONS
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
#endif /* USE_COHERENT_MEM */
ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
}

bl2/bl2_el3.ld.S (131)

@@ -15,140 +15,158 @@ MEMORY {
#if BL2_IN_XIP_MEM
ROM (rx): ORIGIN = BL2_RO_BASE, LENGTH = BL2_RO_LIMIT - BL2_RO_BASE
RAM (rwx): ORIGIN = BL2_RW_BASE, LENGTH = BL2_RW_LIMIT - BL2_RW_BASE
#else
#else /* BL2_IN_XIP_MEM */
RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
#endif
#endif /* BL2_IN_XIP_MEM */
#if SEPARATE_BL2_NOLOAD_REGION
RAM_NOLOAD (rw!a): ORIGIN = BL2_NOLOAD_START, LENGTH = BL2_NOLOAD_LIMIT - BL2_NOLOAD_START
#else
#define RAM_NOLOAD RAM
#endif
#else /* SEPARATE_BL2_NOLOAD_REGION */
# define RAM_NOLOAD RAM
#endif /* SEPARATE_BL2_NOLOAD_REGION */
}
#if !BL2_IN_XIP_MEM
#define ROM RAM
#endif
# define ROM RAM
#endif /* !BL2_IN_XIP_MEM */
SECTIONS
{
SECTIONS {
#if BL2_IN_XIP_MEM
. = BL2_RO_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL2_RO_BASE address is not aligned on a page boundary.")
#else
#else /* BL2_IN_XIP_MEM */
. = BL2_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL2_BASE address is not aligned on a page boundary.")
#endif
#endif /* BL2_IN_XIP_MEM */
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
__TEXT_RESIDENT_START__ = .;
*bl2_el3_entrypoint.o(.text*)
*(.text.asm.*)
__TEXT_RESIDENT_END__ = .;
*(SORT_BY_ALIGNMENT(.text*))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >ROM
.rodata . : {
__RODATA_START__ = .;
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
} >ROM
ASSERT(__TEXT_RESIDENT_END__ - __TEXT_RESIDENT_START__ <= PAGE_SIZE,
"Resident part of BL2 has exceeded its limit.")
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
__TEXT_RESIDENT_START__ = .;
*bl2_el3_entrypoint.o(.text*)
*(.text.asm.*)
__TEXT_RESIDENT_END__ = .;
*(SORT_BY_ALIGNMENT(.text*))
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as read-only,
* executable. No RW data from the next section must creep in. Ensure
* that the rest of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
} >ROM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
#if BL2_IN_XIP_MEM
. = BL2_RW_BASE;
ASSERT(BL2_RW_BASE == ALIGN(PAGE_SIZE),
"BL2_RW_BASE address is not aligned on a page boundary.")
#endif
#endif /* BL2_IN_XIP_MEM */
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
__RW_START__ = .;
DATA_SECTION >RAM AT>ROM
__DATA_RAM_START__ = __DATA_START__;
__DATA_RAM_END__ = __DATA_END__;
RELA_SECTION >RAM
#if SEPARATE_BL2_NOLOAD_REGION
SAVED_ADDR = .;
. = BL2_NOLOAD_START;
__BL2_NOLOAD_START__ = .;
#endif
#endif /* SEPARATE_BL2_NOLOAD_REGION */
STACK_SECTION >RAM_NOLOAD
BSS_SECTION >RAM_NOLOAD
XLAT_TABLE_SECTION >RAM_NOLOAD
#if SEPARATE_BL2_NOLOAD_REGION
__BL2_NOLOAD_END__ = .;
. = SAVED_ADDR;
#endif
#endif /* SEPARATE_BL2_NOLOAD_REGION */
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure the rest of
* the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
#endif /* USE_COHERENT_MEM */
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL2_END__ = .;
@@ -165,23 +183,24 @@ SECTIONS
/*
* The .data section is the last PROGBITS section so its end marks the end
* of BL2's RO content in XIP memory..
* of BL2's RO content in XIP memory.
*/
__BL2_ROM_END__ = __DATA_ROM_START__ + __DATA_SIZE__;
ASSERT(__BL2_ROM_END__ <= BL2_RO_LIMIT,
"BL2's RO content has exceeded its limit.")
#endif
#endif /* BL2_IN_XIP_MEM */
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
#endif /* USE_COHERENT_MEM */
#if BL2_IN_XIP_MEM
ASSERT(. <= BL2_RW_LIMIT, "BL2's RW content has exceeded its limit.")
#else
#else /* BL2_IN_XIP_MEM */
ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
#endif
#endif /* BL2_IN_XIP_MEM */
}
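One detail of bl2_el3.ld.S worth calling out for skim-readers is the location-counter handling around SEPARATE_BL2_NOLOAD_REGION: the stacks, BSS and translation tables are laid out in a region at a different address, so the script parks the current position, moves to the no-load base, and restores it afterwards. The commented sketch below reuses the names from the script above and is illustrative only, not a replacement for the script.

#if SEPARATE_BL2_NOLOAD_REGION
    SAVED_ADDR = .;           /* remember how far the loaded image has got */
    . = BL2_NOLOAD_START;     /* continue layout in the separate no-load region */
    __BL2_NOLOAD_START__ = .;
#endif /* SEPARATE_BL2_NOLOAD_REGION */

    STACK_SECTION >RAM_NOLOAD
    BSS_SECTION >RAM_NOLOAD
    XLAT_TABLE_SECTION >RAM_NOLOAD

#if SEPARATE_BL2_NOLOAD_REGION
    __BL2_NOLOAD_END__ = .;
    . = SAVED_ADDR;           /* resume the loaded image where it left off */
#endif /* SEPARATE_BL2_NOLOAD_REGION */

When the option is disabled, the MEMORY block above simply defines RAM_NOLOAD as RAM, so the same three placements fall back to ordinary RAM with no further conditionals.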

bl2u/bl2u.ld.S (66)

@@ -17,67 +17,69 @@ MEMORY {
RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
}
SECTIONS
{
SECTIONS {
. = BL2U_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL2U_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*bl2u_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
/* .ARM.extab and .ARM.exidx are only added because Clang need them */
/* .ARM.extab and .ARM.exidx are only added because Clang needs them */
.ARM.extab . : {
*(.ARM.extab* .gnu.linkonce.armextab.*)
} >RAM
.ARM.exidx . : {
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
*bl2u_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as read-only,
* executable. No RW data from the next section must creep in. Ensure
* that the rest of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
__RW_START__ = .;
DATA_SECTION >RAM
STACK_SECTION >RAM
@@ -86,29 +88,27 @@ SECTIONS
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure the rest of
* the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
#endif /* USE_COHERENT_MEM */
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL2U_END__ = .;

bl31/bl31.ld.S (125)

@@ -11,137 +11,145 @@ OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)
MEMORY {
RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
#define NOBITS RAM
#endif
#else /* SEPARATE_NOBITS_REGION */
# define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif
# include <plat.ld.S>
#endif /* PLAT_EXTRA_LD_SCRIPT */
SECTIONS
{
SECTIONS {
. = BL31_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL31_BASE address is not aligned on a page boundary.")
__BL31_START__ = .;
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*bl31_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(SORT(.text*)))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(SORT_BY_ALIGNMENT(.rodata*))
#if PLAT_EXTRA_RODATA_INCLUDES
#include <plat.ld.rodata.inc>
#endif
# if PLAT_EXTRA_RODATA_INCLUDES
# include <plat.ld.rodata.inc>
# endif /* PLAT_EXTRA_RODATA_INCLUDES */
RODATA_COMMON
/* Place pubsub sections for events */
. = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>
# include <lib/el3_runtime/pubsub_events.h>
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
*bl31_entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
/* Place pubsub sections for events */
. = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>
# include <lib/el3_runtime/pubsub_events.h>
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as read-only,
* executable. No RW data from the next section must creep in.
* Ensure the rest of the current memory page is unused.
* executable. No RW data from the next section must creep in. Ensure
* that the rest of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA RAM
#endif
# ifndef SPM_SHIM_EXCEPTIONS_VMA
# define SPM_SHIM_EXCEPTIONS_VMA RAM
# endif /* SPM_SHIM_EXCEPTIONS_VMA */
/*
* Exception vectors of the SPM shim layer. They must be aligned to a 2K
* address, but we need to place them in a separate page so that we can set
* individual permissions to them, so the actual alignment needed is 4K.
* address but we need to place them in a separate page so that we can set
* individual permissions on them, so the actual alignment needed is the
* page size.
*
* There's no need to include this into the RO section of BL31 because it
* doesn't need to be accessed by BL31.
*/
spm_shim_exceptions : ALIGN(PAGE_SIZE) {
__SPM_SHIM_EXCEPTIONS_START__ = .;
*(.spm_shim_exceptions)
. = ALIGN(PAGE_SIZE);
__SPM_SHIM_EXCEPTIONS_END__ = .;
} >SPM_SHIM_EXCEPTIONS_VMA AT>RAM
PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
. = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif
#endif /* SPM_MM */
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
__RW_START__ = .;
DATA_SECTION >RAM
RELA_SECTION >RAM
#ifdef BL31_PROGBITS_LIMIT
ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif
#endif /* BL31_PROGBITS_LIMIT */
#if SEPARATE_NOBITS_REGION
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
. = ALIGN(PAGE_SIZE);
__RW_END__ = .;
__BL31_END__ = .;
ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
. = BL31_NOBITS_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL31 NOBITS base address is not aligned on a page boundary.")
__NOBITS_START__ = .;
#endif
#endif /* SEPARATE_NOBITS_REGION */
STACK_SECTION >NOBITS
BSS_SECTION >NOBITS
@@ -149,49 +157,44 @@ SECTIONS
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
/*
* Bakery locks are stored in coherent memory
*
* Each lock's data is contiguous and fully allocated by the compiler
* Bakery locks are stored in coherent memory. Each lock's data is
* contiguous and fully allocated by the compiler.
*/
*(bakery_lock)
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure the rest of
* the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >NOBITS
#endif
#endif /* USE_COHERENT_MEM */
#if SEPARATE_NOBITS_REGION
/*
* Define a linker symbol to mark end of the NOBITS memory area for this
* image.
*/
__NOBITS_END__ = .;
ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
#else /* SEPARATE_NOBITS_REGION */
__RW_END__ = .;
__BL31_END__ = .;
ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
#endif /* SEPARATE_NOBITS_REGION */
/DISCARD/ : {
*(.dynsym .dynstr .hash .gnu.hash)

bl32/sp_min/sp_min.ld.S (90)

@@ -16,130 +16,132 @@ MEMORY {
}
#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#include <plat_sp_min.ld.S>
#endif
# include <plat_sp_min.ld.S>
#endif /* PLAT_SP_MIN_EXTRA_LD_SCRIPT */
SECTIONS
{
SECTIONS {
. = BL32_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL32_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
/* .ARM.extab and .ARM.exidx are only added because Clang need them */
/* .ARM.extab and .ARM.exidx are only added because Clang needs them */
.ARM.extab . : {
*(.ARM.extab* .gnu.linkonce.armextab.*)
} >RAM
.ARM.exidx . : {
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
/* Place pubsub sections for events */
. = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>
# include <lib/el3_runtime/pubsub_events.h>
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
*entrypoint.o(.text*)
*(SORT_BY_ALIGNMENT(.text*))
*(SORT_BY_ALIGNMENT(.rodata*))
RODATA_COMMON
/* Place pubsub sections for events */
. = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>
# include <lib/el3_runtime/pubsub_events.h>
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure that the rest
* of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
__RW_START__ = .;
DATA_SECTION >RAM
RELA_SECTION >RAM
#ifdef BL32_PROGBITS_LIMIT
ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif
#endif /* BL32_PROGBITS_LIMIT */
STACK_SECTION >RAM
BSS_SECTION >RAM
XLAT_TABLE_SECTION >RAM
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
/*
* Bakery locks are stored in coherent memory
*
* Each lock's data is contiguous and fully allocated by the compiler
* Bakery locks are stored in coherent memory. Each lock's data is
* contiguous and fully allocated by the compiler.
*/
*(bakery_lock)
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure that the rest
* of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
#endif /* USE_COHERENT_MEM */
/*
* Define a linker symbol to mark the end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL32_END__ = .;
/DISCARD/ : {

bl32/tsp/tsp.ld.S (63)

@@ -11,71 +11,73 @@ OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tsp_entrypoint)
MEMORY {
RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}
SECTIONS
{
SECTIONS {
. = BL32_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL32_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*tsp_entrypoint.o(.text*)
*(.text*)
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(.rodata*)
RODATA_COMMON
RODATA_COMMON
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
#else
#else /* SEPARATE_CODE_AND_RODATA */
ro . : {
__RO_START__ = .;
*tsp_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
RODATA_COMMON
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as read-only,
* executable. No RW data from the next section must creep in. Ensure
* that the rest of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
#endif /* SEPARATE_CODE_AND_RODATA */
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
__RW_START__ = .;
DATA_SECTION >RAM
RELA_SECTION >RAM
#ifdef TSP_PROGBITS_LIMIT
ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif
#endif /* TSP_PROGBITS_LIMIT */
STACK_SECTION >RAM
BSS_SECTION >RAM
@@ -83,29 +85,27 @@ SECTIONS
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
* The base address of the coherent memory section must be page-aligned to
* guarantee that the coherent data are stored on their own pages and are
* not mixed with normal data. This is required to set up the correct memory
* attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
* Memory page(s) mapped to this section will be marked as device
* memory. No other unexpected data must creep in. Ensure that the rest
* of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
#endif /* USE_COHERENT_MEM */
/*
* Define a linker symbol to mark the end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL32_END__ = .;
@@ -114,10 +114,11 @@ SECTIONS
}
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
#endif /* USE_COHERENT_MEM */
ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
}

lib/romlib/romlib.ld.S (59)

@@ -8,37 +8,42 @@
#include <platform_def.h>
MEMORY {
ROM (rx): ORIGIN = ROMLIB_RO_BASE, LENGTH = ROMLIB_RO_LIMIT - ROMLIB_RO_BASE
RAM (rwx): ORIGIN = ROMLIB_RW_BASE, LENGTH = ROMLIB_RW_END - ROMLIB_RW_BASE
}
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(jmptbl)
SECTIONS
{
SECTIONS {
. = ROMLIB_RO_BASE;
.text : {
*jmptbl.o(.text)
*(.text*)
*(.rodata*)
} >ROM
__DATA_ROM_START__ = LOADADDR(.data);
.data : {
__DATA_RAM_START__ = .;
*(.data*)
__DATA_RAM_END__ = .;
} >RAM AT>ROM
__DATA_SIZE__ = SIZEOF(.data);
.bss : {
__BSS_START__ = .;
*(.bss*)
__BSS_END__ = .;
} >RAM
__BSS_SIZE__ = SIZEOF(.bss);
}
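Unlike the BL images above, romlib.ld.S writes its .text, .data and .bss output sections out by hand, while the other scripts lean on shared helpers such as DATA_SECTION, STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION. Purely for orientation, a hypothetical expansion of a DATA_SECTION-style helper is sketched below; it is not the actual TF-A macro, only the shape implied by the DATA_ALIGN, __DATA_START__ and __DATA_END__ names used in the diffs above.

/*
 * Hypothetical DATA_SECTION-style helper (NOT the real TF-A macro).
 * The .ld.S scripts are run through the C preprocessor, so a helper like
 * this expands at its point of use and the caller appends the memory
 * regions, as in "DATA_SECTION >RAM AT>ROM" in bl1.ld.S above.
 */
#define DATA_SECTION                    \
    .data . : ALIGN(DATA_ALIGN) {       \
        __DATA_START__ = .;             \
        *(.data*)                       \
        __DATA_END__ = .;               \
    }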
