
xlat v2: Support the EL2 translation regime

The translation library is also used in other projects. Even though this
repository doesn't exercise the library's EL2 support, carrying that
support here as well makes the library easier to maintain.

enable_mmu_secure() and enable_mmu_direct() have been deprecated. The
functions are still present, but they are guarded by #if !ERROR_DEPRECATED
and simply forward to the new functions enable_mmu_svc_mon() and
enable_mmu_direct_svc_mon().
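
Concretely, the deprecated entry points reduce to forwarders; this is the
shape of the shims added in lib/xlat_tables_v2/xlat_tables_context.c (see
the diff below):

#if !ERROR_DEPRECATED
/* Deprecated names kept for backwards compatibility. */
void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}

void enable_mmu_direct(unsigned int flags)
{
	enable_mmu_direct_svc_mon(flags);
}
#endif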

Change-Id: I13ad10cd048d9cc2d55e0fff9a5133671b67dcba
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
Branch: pull/1519/head
Author: Antonio Nino Diaz
Commit: 1a92a0e00a
15 changed files (lines changed in parentheses):

  1. bl1/aarch32/bl1_exceptions.S (2)
  2. include/lib/aarch32/arch.h (26)
  3. include/lib/aarch32/arch_helpers.h (7)
  4. include/lib/aarch64/arch.h (2)
  5. include/lib/xlat_tables/xlat_mmu_helpers.h (11)
  6. include/lib/xlat_tables/xlat_tables_v2.h (1)
  7. lib/xlat_tables/aarch32/xlat_tables.c (16)
  8. lib/xlat_tables_v2/aarch32/enable_mmu.S (60)
  9. lib/xlat_tables_v2/aarch32/xlat_tables_arch.c (73)
  10. lib/xlat_tables_v2/aarch64/enable_mmu.S (28)
  11. lib/xlat_tables_v2/aarch64/xlat_tables_arch.c (13)
  12. lib/xlat_tables_v2/xlat_tables_context.c (32)
  13. lib/xlat_tables_v2/xlat_tables_core.c (4)
  14. lib/xlat_tables_v2/xlat_tables_utils.c (7)
  15. plat/common/aarch32/plat_common.c (2)

--- a/bl1/aarch32/bl1_exceptions.S
+++ b/bl1/aarch32/bl1_exceptions.S
@@ -116,7 +116,7 @@ func smc_handler
 	/* Turn on the MMU */
 	mov	r0, #DISABLE_DCACHE
-	bl	enable_mmu_secure
+	bl	enable_mmu_svc_mon

 	/* Enable the data cache. */
 	ldcopr	r9, SCTLR

--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -313,6 +313,28 @@
 #define TTBCR_T0SZ_SHIFT	U(0)
 #define TTBCR_T0SZ_MASK		U(0x7)

+/*
+ * HTCR definitions
+ */
+#define HTCR_RES1	((U(1) << 31) | (U(1) << 23))
+
+#define HTCR_SH0_NON_SHAREABLE		(U(0x0) << 12)
+#define HTCR_SH0_OUTER_SHAREABLE	(U(0x2) << 12)
+#define HTCR_SH0_INNER_SHAREABLE	(U(0x3) << 12)
+
+#define HTCR_RGN0_OUTER_NC	(U(0x0) << 10)
+#define HTCR_RGN0_OUTER_WBA	(U(0x1) << 10)
+#define HTCR_RGN0_OUTER_WT	(U(0x2) << 10)
+#define HTCR_RGN0_OUTER_WBNA	(U(0x3) << 10)
+
+#define HTCR_RGN0_INNER_NC	(U(0x0) << 8)
+#define HTCR_RGN0_INNER_WBA	(U(0x1) << 8)
+#define HTCR_RGN0_INNER_WT	(U(0x2) << 8)
+#define HTCR_RGN0_INNER_WBNA	(U(0x3) << 8)
+
+#define HTCR_T0SZ_SHIFT		U(0)
+#define HTCR_T0SZ_MASK		U(0x7)
+
 #define MODE_RW_SHIFT		U(0x4)
 #define MODE_RW_MASK		U(0x1)
 #define MODE_RW_32		U(0x1)
@@ -433,6 +455,7 @@
 #define TLBIMVA		p15, 0, c8, c7, 1
 #define TLBIMVAA	p15, 0, c8, c7, 3
 #define TLBIMVAAIS	p15, 0, c8, c3, 3
+#define TLBIMVAHIS	p15, 4, c8, c3, 1
 #define BPIALLIS	p15, 0, c7, c1, 6
 #define BPIALL		p15, 0, c7, c5, 6
 #define ICIALLU		p15, 0, c7, c5, 0
@@ -448,6 +471,8 @@
 #define CLIDR		p15, 1, c0, c0, 1
 #define CSSELR		p15, 2, c0, c0, 0
 #define CCSIDR		p15, 1, c0, c0, 0
+#define HTCR		p15, 4, c2, c0, 2
+#define HMAIR0		p15, 4, c10, c2, 0
 #define DBGOSDLR	p14, 0, c1, c3, 4

 /* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
@@ -487,6 +512,7 @@
 #define CNTVOFF_64	p15, 4, c14
 #define VTTBR_64	p15, 6, c2
 #define CNTPCT_64	p15, 0, c14
+#define HTTBR_64	p15, 4, c2

 /* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */
 #define ICC_SGI1R_EL1_64	p15, 0, c12
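
For reference, the HTCR bits ORed together in the EL2 branch of
setup_mmu_cfg() later in this patch (HTCR_RES1 | HTCR_SH0_INNER_SHAREABLE |
HTCR_RGN0_OUTER_WBA | HTCR_RGN0_INNER_WBA) work out to 0x80803500. A small
standalone check, illustrative only and not part of the patch:

#include <assert.h>
#include <stdint.h>

#define U(n)	(n ## U)

/* Encodings copied from the hunk above. */
#define HTCR_RES1			((U(1) << 31) | (U(1) << 23))
#define HTCR_SH0_INNER_SHAREABLE	(U(0x3) << 12)
#define HTCR_RGN0_OUTER_WBA		(U(0x1) << 10)
#define HTCR_RGN0_INNER_WBA		(U(0x1) << 8)

int main(void)
{
	/* 0x80800000 | 0x00003000 | 0x00000400 | 0x00000100 */
	uint32_t htcr = HTCR_RES1 | HTCR_SH0_INNER_SHAREABLE |
			HTCR_RGN0_OUTER_WBA | HTCR_RGN0_INNER_WBA;

	assert(htcr == 0x80803500U);
	return 0;
}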

--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -300,6 +300,7 @@ DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
 DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
 DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
 DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)

 /*
  * BPI operation prototypes.
@@ -320,6 +321,10 @@ DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
 #define IS_IN_SECURE() \
 	(GET_NS_BIT(read_scr()) == 0)

+#define IS_IN_HYP()	(GET_M32(read_cpsr()) == MODE32_hyp)
+#define IS_IN_SVC()	(GET_M32(read_cpsr()) == MODE32_svc)
+#define IS_IN_MON()	(GET_M32(read_cpsr()) == MODE32_mon)
+#define IS_IN_EL2()	IS_IN_HYP()
+
 /*
  * If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3
  */

--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -364,7 +364,9 @@
  * TCR defintions
  */
 #define TCR_EL3_RES1		((U(1) << 31) | (U(1) << 23))
+#define TCR_EL2_RES1		((ULL(1) << 31) | (ULL(1) << 23))
 #define TCR_EL1_IPS_SHIFT	U(32)
+#define TCR_EL2_PS_SHIFT	U(16)
 #define TCR_EL3_PS_SHIFT	U(16)

 #define TCR_TxSZ_MIN		ULL(16)

--- a/include/lib/xlat_tables/xlat_mmu_helpers.h
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -67,15 +67,24 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 #ifdef AARCH32
 /* AArch32 specific translation table API */
+#if !ERROR_DEPRECATED
 void enable_mmu_secure(unsigned int flags);
 void enable_mmu_direct(unsigned int flags);
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
 #else
 /* AArch64 specific translation table APIs */
 void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el2(unsigned int flags);
 void enable_mmu_el3(unsigned int flags);

 void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el2(unsigned int flags);
 void enable_mmu_direct_el3(unsigned int flags);
 #endif /* AARCH32 */
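
A platform that can boot in either Hyp or Secure PL1 mode can pick between
the two AArch32 entry points at runtime with the mode-check macros added to
arch_helpers.h above. A minimal sketch; plat_enable_mmu() is a hypothetical
wrapper, not part of this patch:

#include <arch_helpers.h>
#include <xlat_mmu_helpers.h>

void plat_enable_mmu(unsigned int flags)	/* hypothetical */
{
	if (IS_IN_HYP())
		enable_mmu_hyp(flags);		/* EL2 regime */
	else
		enable_mmu_svc_mon(flags);	/* PL1&0 regime */
}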

--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -125,6 +125,7 @@ typedef struct mmap_region {
  * library to detect it at runtime.
  */
 #define EL1_EL0_REGIME		1
+#define EL2_REGIME		2
 #define EL3_REGIME		3
 #define EL_REGIME_INVALID	-1

--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -65,7 +65,19 @@ void init_xlat_tables(void)
  * Function for enabling the MMU in Secure PL1, assuming that the
  * page-tables have already been created.
  ******************************************************************************/
+#if !ERROR_DEPRECATED
 void enable_mmu_secure(unsigned int flags)
+{
+	enable_mmu_svc_mon(flags);
+}
+
+void enable_mmu_direct(unsigned int flags)
+{
+	enable_mmu_direct_svc_mon(flags);
+}
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags)
 {
 	unsigned int mair0, ttbcr, sctlr;
 	uint64_t ttbr0;
@@ -131,7 +143,7 @@ void enable_mmu_secure(unsigned int flags)
 	isb();
 }

-void enable_mmu_direct(unsigned int flags)
+void enable_mmu_direct_svc_mon(unsigned int flags)
 {
-	enable_mmu_secure(flags);
+	enable_mmu_svc_mon(flags);
 }

--- a/lib/xlat_tables_v2/aarch32/enable_mmu.S
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -8,9 +8,11 @@
 #include <assert_macros.S>
 #include <xlat_tables_v2.h>

-	.global	enable_mmu_direct
+	.global	enable_mmu_direct_svc_mon
+	.global	enable_mmu_direct_hyp

-func enable_mmu_direct
+/* void enable_mmu_direct_svc_mon(unsigned int flags) */
+func enable_mmu_direct_svc_mon
 	/* Assert that MMU is turned off */
 #if ENABLE_ASSERTIONS
 	ldcopr	r1, SCTLR
@@ -63,4 +65,56 @@ func enable_mmu_direct
 	isb

 	bx	lr
-endfunc enable_mmu_direct
+endfunc enable_mmu_direct_svc_mon
+
+/* void enable_mmu_direct_hyp(unsigned int flags) */
+func enable_mmu_direct_hyp
+	/* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+	ldcopr	r1, HSCTLR
+	tst	r1, #HSCTLR_M_BIT
+	ASM_ASSERT(eq)
+#endif
+
+	/* Invalidate TLB entries */
+	TLB_INVALIDATE(r0, TLBIALL)
+
+	mov	r3, r0
+	ldr	r0, =mmu_cfg_params
+
+	/* HMAIR0 */
+	ldr	r1, [r0, #(MMU_CFG_MAIR << 3)]
+	stcopr	r1, HMAIR0
+
+	/* HTCR */
+	ldr	r2, [r0, #(MMU_CFG_TCR << 3)]
+	stcopr	r2, HTCR
+
+	/* HTTBR */
+	ldr	r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+	ldr	r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+	stcopr16	r1, r2, HTTBR_64
+
+	/*
+	 * Ensure all translation table writes have drained into memory, the
+	 * TLB invalidation is complete, and translation register writes are
+	 * committed before enabling the MMU.
+	 */
+	dsb	ish
+	isb
+
+	/* Enable the MMU, honoring the flags */
+	ldcopr	r1, HSCTLR
+	ldr	r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT)
+	orr	r1, r1, r2
+
+	/* Clear C bit if requested */
+	tst	r3, #DISABLE_DCACHE
+	bicne	r1, r1, #HSCTLR_C_BIT
+
+	stcopr	r1, HSCTLR
+	isb
+
+	bx	lr
+endfunc enable_mmu_direct_hyp
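
One note on the parameter block accessed above: setup_mmu_cfg() stores each
value in mmu_cfg_params as a 64-bit entry, which is why the assembly scales
every index by 8 (<< 3) and reads the 64-bit HTTBR as two 32-bit words.
Sketched below; the MMU_CFG_PARAM_MAX bound is assumed, as it is not shown
in this patch:

/* C side: filled by setup_mmu_cfg() before the MMU is enabled. */
extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];	/* bound assumed */

/*
 * AArch32 assembly side: entry i lives at byte offset (i << 3); the low
 * word of a 64-bit entry such as TTBR0 is at (i << 3) and the high word
 * at (i << 3) + 4, matching the HTTBR_64 load in enable_mmu_direct_hyp.
 */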

--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -43,22 +43,38 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
 }
 #endif /* ENABLE_ASSERTIONS*/

-bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 {
-	return (read_sctlr() & SCTLR_M_BIT) != 0;
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() == 1U);
+		return (read_sctlr() & SCTLR_M_BIT) != 0U;
+	} else {
+		assert(ctx->xlat_regime == EL2_REGIME);
+		assert(xlat_arch_current_el() == 2U);
+		return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
+	}
 }

 bool is_dcache_enabled(void)
 {
-	return (read_sctlr() & SCTLR_C_BIT) != 0;
+	if (IS_IN_EL2()) {
+		return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
+	} else {
+		return (read_sctlr() & SCTLR_C_BIT) != 0U;
+	}
 }

-uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
 {
-	return UPPER_ATTRS(XN);
+	if (xlat_regime == EL1_EL0_REGIME) {
+		return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
+	} else {
+		assert(xlat_regime == EL2_REGIME);
+		return UPPER_ATTRS(XN);
+	}
 }

-void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
@@ -66,7 +82,12 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
 	 */
 	dsbishst();

-	tlbimvaais(TLBI_ADDR(va));
+	if (xlat_regime == EL1_EL0_REGIME) {
+		tlbimvaais(TLBI_ADDR(va));
+	} else {
+		assert(xlat_regime == EL2_REGIME);
+		tlbimvahis(TLBI_ADDR(va));
+	}
 }

 void xlat_arch_tlbi_va_sync(void)
@@ -97,19 +118,25 @@ void xlat_arch_tlbi_va_sync(void)

 unsigned int xlat_arch_current_el(void)
 {
-	/*
-	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
-	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
-	 *
-	 * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime
-	 * in AArch64 except for the XN bits, but we set and unset them at the
-	 * same time, so there's no difference in practice.
-	 */
-	return 1U;
+	if (IS_IN_HYP()) {
+		return 2U;
+	} else {
+		assert(IS_IN_SVC() || IS_IN_MON());
+		/*
+		 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
+		 * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+		 *
+		 * The PL1&0 translation regime in AArch32 behaves like the
+		 * EL1&0 regime in AArch64 except for the XN bits, but we set
+		 * and unset them at the same time, so there's no difference in
+		 * practice.
+		 */
+		return 1U;
+	}
 }

 /*******************************************************************************
- * Function for enabling the MMU in Secure PL1, assuming that the page tables
+ * Function for enabling the MMU in PL1 or PL2, assuming that the page tables
  * have already been created.
  ******************************************************************************/
 void setup_mmu_cfg(uint64_t *params, unsigned int flags,
@@ -119,8 +146,6 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 	uint64_t mair, ttbr0;
 	uint32_t ttbcr;

-	assert(IS_IN_SECURE());
-
 	/* Set attributes in the right indices of the MAIR */
 	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
 	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
@@ -129,18 +154,32 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 		       ATTR_NON_CACHEABLE_INDEX);

 	/*
-	 * Configure the control register for stage 1 of the PL1&0 translation
-	 * regime.
+	 * Configure the control register for stage 1 of the PL1&0 or EL2
+	 * translation regimes.
 	 */

 	/* Use the Long-descriptor translation table format. */
 	ttbcr = TTBCR_EAE_BIT;

-	/*
-	 * Disable translation table walk for addresses that are translated
-	 * using TTBR1. Therefore, only TTBR0 is used.
-	 */
-	ttbcr |= TTBCR_EPD1_BIT;
+	if (xlat_regime == EL1_EL0_REGIME) {
+		assert(IS_IN_SVC() || IS_IN_MON());
+		/*
+		 * Disable translation table walk for addresses that are
+		 * translated using TTBR1. Therefore, only TTBR0 is used.
+		 */
+		ttbcr |= TTBCR_EPD1_BIT;
+	} else {
+		assert(xlat_regime == EL2_REGIME);
+		assert(IS_IN_HYP());
+
+		/*
+		 * Set HTCR bits as well. Set HTTBR table properties
+		 * as Inner & outer WBWA & shareable.
+		 */
+		ttbcr |= HTCR_RES1 |
+			 HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
+			 HTCR_RGN0_INNER_WBA;
+	}

 	/*
 	 * Limit the input address ranges and memory region sizes translated

--- a/lib/xlat_tables_v2/aarch64/enable_mmu.S
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -9,6 +9,7 @@
 #include <xlat_tables_v2.h>

 	.global	enable_mmu_direct_el1
+	.global	enable_mmu_direct_el2
 	.global	enable_mmu_direct_el3

 	/* Macros to read and write to system register for a given EL. */
@@ -20,6 +21,19 @@
 		mrs	\gp_reg, \reg_name\()_el\()\el
 	.endm

+	.macro tlbi_invalidate_all el
+	.if \el == 1
+		TLB_INVALIDATE(vmalle1)
+	.elseif \el == 2
+		TLB_INVALIDATE(alle2)
+	.elseif \el == 3
+		TLB_INVALIDATE(alle3)
+	.else
+		.error "EL must be 1, 2 or 3"
+	.endif
+	.endm
+
+	/* void enable_mmu_direct_el<x>(unsigned int flags) */
 	.macro define_mmu_enable_func el
 	func enable_mmu_direct_\()el\el
 #if ENABLE_ASSERTIONS
@@ -27,17 +41,8 @@
 		tst	x1, #SCTLR_M_BIT
 		ASM_ASSERT(eq)
 #endif
-
-	/* Invalidate TLB entries */
-	.if \el == 1
-		TLB_INVALIDATE(vmalle1)
-	.else
-	.if \el == 3
-		TLB_INVALIDATE(alle3)
-	.else
-	.error "EL must be 1 or 3"
-	.endif
-	.endif
+		/* Invalidate all TLB entries */
+		tlbi_invalidate_all \el

 		mov	x7, x0
 		ldr	x0, =mmu_cfg_params
@@ -86,4 +91,5 @@
  * enable_mmu_direct_el3
  */
 define_mmu_enable_func 1
+define_mmu_enable_func 2
 define_mmu_enable_func 3

--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -105,6 +105,9 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
 		assert(xlat_arch_current_el() >= 1U);
 		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+	} else if (ctx->xlat_regime == EL2_REGIME) {
+		assert(xlat_arch_current_el() >= 2U);
+		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
 	} else {
 		assert(ctx->xlat_regime == EL3_REGIME);
 		assert(xlat_arch_current_el() >= 3U);
@@ -118,6 +121,8 @@ bool is_dcache_enabled(void)
 	if (el == 1U) {
 		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+	} else if (el == 2U) {
+		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
 	} else {
 		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
 	}
@@ -128,7 +133,8 @@ uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
 	if (xlat_regime == EL1_EL0_REGIME) {
 		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
 	} else {
-		assert(xlat_regime == EL3_REGIME);
+		assert((xlat_regime == EL2_REGIME) ||
+		       (xlat_regime == EL3_REGIME));
 		return UPPER_ATTRS(XN);
 	}
 }
@@ -151,6 +157,9 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
 	if (xlat_regime == EL1_EL0_REGIME) {
 		assert(xlat_arch_current_el() >= 1U);
 		tlbivaae1is(TLBI_ADDR(va));
+	} else if (xlat_regime == EL2_REGIME) {
+		assert(xlat_arch_current_el() >= 2U);
+		tlbivae2is(TLBI_ADDR(va));
 	} else {
 		assert(xlat_regime == EL3_REGIME);
 		assert(xlat_arch_current_el() >= 3U);
@@ -245,6 +254,8 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
 		 * that are translated using TTBR1_EL1.
 		 */
 		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+	} else if (xlat_regime == EL2_REGIME) {
+		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
 	} else {
 		assert(xlat_regime == EL3_REGIME);
 		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);

--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -82,6 +82,8 @@ void init_xlat_tables(void)
 	if (current_el == 1U) {
 		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+	} else if (current_el == 2U) {
+		tf_xlat_ctx.xlat_regime = EL2_REGIME;
 	} else {
 		assert(current_el == 3U);
 		tf_xlat_ctx.xlat_regime = EL3_REGIME;
@@ -119,12 +121,32 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)

 #ifdef AARCH32

+#if !ERROR_DEPRECATED
 void enable_mmu_secure(unsigned int flags)
+{
+	enable_mmu_svc_mon(flags);
+}
+
+void enable_mmu_direct(unsigned int flags)
+{
+	enable_mmu_direct_svc_mon(flags);
+}
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
 		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
 		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
-	enable_mmu_direct(flags);
+	enable_mmu_direct_svc_mon(flags);
+}
+
+void enable_mmu_hyp(unsigned int flags)
+{
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+		      tf_xlat_ctx.va_max_address, EL2_REGIME);
+	enable_mmu_direct_hyp(flags);
 }

 #else
@@ -137,6 +159,14 @@ void enable_mmu_el1(unsigned int flags)
 	enable_mmu_direct_el1(flags);
 }

+void enable_mmu_el2(unsigned int flags)
+{
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+		      tf_xlat_ctx.va_max_address, EL2_REGIME);
+	enable_mmu_direct_el2(flags);
+}
+
 void enable_mmu_el3(unsigned int flags)
 {
 	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
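
With these hooks in place, an AArch64 image running at EL2 initializes and
enables its regime the same way EL1 and EL3 images do. A hedged sketch
(region mapping omitted; the zero flags value means no flags such as
DISABLE_DCACHE are passed, so the data cache stays enabled):

#include <xlat_mmu_helpers.h>
#include <xlat_tables_v2.h>

void el2_image_setup_mmu(void)	/* hypothetical caller */
{
	/* ... mmap regions added beforehand ... */
	init_xlat_tables();	/* detects current EL == 2, picks EL2_REGIME */
	enable_mmu_el2(0U);
}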

--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -142,7 +142,8 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
 			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
 		}
 	} else {
-		assert(ctx->xlat_regime == EL3_REGIME);
+		assert((ctx->xlat_regime == EL2_REGIME) ||
+		       (ctx->xlat_regime == EL3_REGIME));
 		desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
 	}
@@ -1016,6 +1017,7 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 	assert(ctx != NULL);
 	assert(!ctx->initialized);
 	assert((ctx->xlat_regime == EL3_REGIME) ||
+	       (ctx->xlat_regime == EL2_REGIME) ||
 	       (ctx->xlat_regime == EL1_EL0_REGIME));
 	assert(!is_mmu_enabled_ctx(ctx));

--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -60,8 +60,8 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
 		tf_printf("DEV");
 	}

-	if (xlat_regime == EL3_REGIME) {
-		/* For EL3 only check the AP[2] and XN bits. */
+	if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
+		/* For EL3 and EL2 only check the AP[2] and XN bits. */
 		tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
 		tf_printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
 	} else {
@@ -200,6 +200,8 @@ void xlat_tables_print(xlat_ctx_t *ctx)
 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
 		xlat_regime_str = "1&0";
+	} else if (ctx->xlat_regime == EL2_REGIME) {
+		xlat_regime_str = "2";
 	} else {
 		assert(ctx->xlat_regime == EL3_REGIME);
 		xlat_regime_str = "3";
@@ -329,6 +331,7 @@ static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
 	assert(ctx != NULL);
 	assert(ctx->initialized);
 	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+	       (ctx->xlat_regime == EL2_REGIME) ||
 	       (ctx->xlat_regime == EL3_REGIME));

 	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;

--- a/plat/common/aarch32/plat_common.c
+++ b/plat/common/aarch32/plat_common.c
@@ -17,5 +17,5 @@
 void bl32_plat_enable_mmu(uint32_t flags)
 {
-	enable_mmu_secure(flags);
+	enable_mmu_svc_mon(flags);
 }
