@@ -1,15 +1,27 @@
/*
* @ : Copyright (c) 2021 Phytium Information Technology, Inc.
* Copyright : (C) 2022 Phytium Information Technology, Inc.
* All Rights Reserved.
*
* This program is OPEN SOURCE software: you can redistribute it and/or modify it
* under the terms of the Phytium Public License as published by the Phytium Technology Co., Ltd,
* either version 1.0 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the Phytium Public License for more details.
*
* SPDX-License-Identifier: Apache-2.0.
*
* @Date: 2021-11-29 09:13:18
* @LastEditTime: 2021-11-29 16:00:23
* @Description: Description of file
* @Modify History:
*  * * Ver   Who        Date         Changes
*  * ----- ------     --------    --------------------------------------
* FilePath: mmu.c
* Date: 2022-02-10 14:53:41
* LastEditTime: 2022-02-17 17:33:35
* Description: This files is for
*
* Modify History:
*  Ver   Who        Date         Changes
* ----- ------     --------    --------------------------------------
*/
# include "aarch64.h"
# include "cache.h"
# include <sys/errno.h>
@ -17,6 +29,7 @@
# include "ft_assert.h"
# include "mmu.h"
# include "kernel.h"
# include "l3cache.h"
/**************************** Type Definitions *******************************/
@@ -52,23 +65,23 @@
#define L3_XLAT_VA_SIZE_SHIFT PAGE_SIZE_SHIFT
/* Number of VA bits to assign to each table (9 bits) */
#define Ln_XLAT_VA_SIZE_SHIFT (PAGE_SIZE_SHIFT - 3)
#define LN_XLAT_VA_SIZE_SHIFT (PAGE_SIZE_SHIFT - 3)
/* Starting bit in the VA address for each level */
#define L2_XLAT_VA_SIZE_SHIFT (L3_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
#define L1_XLAT_VA_SIZE_SHIFT (L2_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
#define L0_XLAT_VA_SIZE_SHIFT (L1_XLAT_VA_SIZE_SHIFT + Ln_XLAT_VA_SIZE_SHIFT)
#define L2_XLAT_VA_SIZE_SHIFT (L3_XLAT_VA_SIZE_SHIFT + LN_XLAT_VA_SIZE_SHIFT) /* 21 */
#define L1_XLAT_VA_SIZE_SHIFT (L2_XLAT_VA_SIZE_SHIFT + LN_XLAT_VA_SIZE_SHIFT) /* 30 */
#define L0_XLAT_VA_SIZE_SHIFT (L1_XLAT_VA_SIZE_SHIFT + LN_XLAT_VA_SIZE_SHIFT) /* 39 */
#define LEVEL_TO_VA_SIZE_SHIFT(level) \
(PAGE_SIZE_SHIFT + (Ln_XLAT_VA_SIZE_SHIFT * \
(XLAT_LAST_LEVEL - (level)))) // 12 + (9*(3-level))
(PAGE_SIZE_SHIFT + (LN_XLAT_VA_SIZE_SHIFT * \
(XLAT_LAST_LEVEL - (level)))) /* 12 + (9*(3-level)) */
/* Number of entries for each table (512) */
#define Ln_XLAT_NUM_ENTRIES ((1U << PAGE_SIZE_SHIFT) / 8U)
#define LN_XLAT_NUM_ENTRIES ((1U << PAGE_SIZE_SHIFT) / 8U)
/* Virtual Address Index within a given translation table level */
#define XLAT_TABLE_VA_IDX(va_addr, level) \
((va_addr >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (Ln_XLAT_NUM_ENTRIES - 1))
((va_addr >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (LN_XLAT_NUM_ENTRIES - 1))
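/*
 * Worked example (illustrative, not from the original source): with a 4K
 * granule PAGE_SIZE_SHIFT is 12, each level indexes 9 VA bits, and
 * LEVEL_TO_VA_SIZE_SHIFT() evaluates to 39/30/21/12 for levels 0..3.
 * For va_addr = 0x80102000:
 *   XLAT_TABLE_VA_IDX(va_addr, 0) = (va_addr >> 39) & 0x1FF = 0
 *   XLAT_TABLE_VA_IDX(va_addr, 1) = (va_addr >> 30) & 0x1FF = 2
 *   XLAT_TABLE_VA_IDX(va_addr, 2) = (va_addr >> 21) & 0x1FF = 0
 *   XLAT_TABLE_VA_IDX(va_addr, 3) = (va_addr >> 12) & 0x1FF = 0x102
 */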
/*
* Calculate the initial translation table level from CONFIG_ARM64_VA_BITS
@@ -182,11 +195,11 @@
/************************** Variable Definitions *****************************/
static u64 xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES] __aligned(Ln_XLAT_NUM_ENTRIES * sizeof(u64));
static u64 xlat_tables[CONFIG_MAX_XLAT_TABLES * LN_XLAT_NUM_ENTRIES] __aligned(LN_XLAT_NUM_ENTRIES * sizeof(u64));
static u16 xlat_use_count[CONFIG_MAX_XLAT_TABLES];
/************************** Function Prototypes ******************************/
extern void __asm_invalidate_tlb_all();
extern void AsmInvalidateTlbAll();
/* Returns a reference to a free table */
static u64 *NewTable(void)
{
@@ -198,7 +211,7 @@ static u64 *NewTable(void)
if (xlat_use_count[i] == 0U)
{
xlat_use_count[i] = 1U;
return &xlat_tables[i * Ln_XLAT_NUM_ENTRIES];
return &xlat_tables[i * LN_XLAT_NUM_ENTRIES];
}
}
@@ -208,9 +221,9 @@ static u64 *NewTable(void)
static inline unsigned int TableIndex(u64 *pte)
{
unsigned int i = (pte - xlat_tables) / Ln_XLAT_NUM_ENTRIES;
unsigned int i = (pte - xlat_tables) / LN_XLAT_NUM_ENTRIES;
FT_ASSERT(i < CONFIG_MAX_XLAT_TABLES, "table %x out of range", pte);
FASSERT_MSG(i < CONFIG_MAX_XLAT_TABLES, "table %x out of range", pte);
return i;
}
@@ -220,7 +233,7 @@ static void FreeTable(u64 *table)
unsigned int i = TableIndex(table);
MMU_DEBUG("freeing table [%d]%x\r\n", i, table);
FT_ASSERT(xlat_use_count[i] == 1U, "table still in use");
FASSERT_MSG(xlat_use_count[i] == 1U, "table still in use");
xlat_use_count[i] = 0U;
}
@@ -230,7 +243,7 @@ static int TableUsage(u64 *table, int adjustment)
unsigned int i = TableIndex(table);
xlat_use_count[i] += adjustment;
FT_ASSERT(xlat_use_count[i] > 0, "usage count underflow");
FASSERT_MSG(xlat_use_count[i] > 0, "usage count underflow");
return xlat_use_count[i];
}
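/*
 * Note on the helpers above: xlat_use_count[] is a small per-table reference
 * count. NewTable() claims a free slot by setting it to 1, TableUsage()
 * adjusts it as entries are added or removed, and FreeTable() expects it to
 * be back at 1 before releasing the slot.
 */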
@@ -270,9 +283,10 @@ static inline int IsDescSuperset(u64 desc1, u64 desc2,
return (desc1 & mask) == (desc2 & mask);
}
#if DUMP_PTE
static void DebugShowPte(u64 *pte, unsigned int level)
{
#if DUMP_PTE
MMU_DEBUG("%d ", level);
MMU_DEBUG("%.*s", level * 2U, ". . . ");
MMU_DEBUG("[%d]%x: ", TableIndex(pte), pte);
@@ -309,12 +323,9 @@ static void DebugShowPte(u64 *pte, unsigned int level)
MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX");
MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX");
MMU_DEBUG("\r\n");
}
#else
static inline void DebugShowPte(u64 *pte, unsigned int level)
{
}
#endif
return;
}
static void SetPteTableDesc(u64 *pte, u64 *table, unsigned int level)
{
@@ -337,7 +348,7 @@ static u64 *ExpandToTable(u64 *pte, unsigned int level)
{
u64 *table;
FT_ASSERT(level < XLAT_LAST_LEVEL, "can't expand last level");
FASSERT_MSG(level < XLAT_LAST_LEVEL, "can't expand last level");
table = NewTable();
if (!table)
@@ -356,7 +367,7 @@ static u64 *ExpandToTable(u64 *pte, unsigned int level)
MMU_DEBUG("expanding PTE 0x%016llx into table [%d]%x\r\n",
desc, TableIndex(table), table);
FT_ASSERT(IsBlockDesc(desc), "");
FASSERT_MSG(IsBlockDesc(desc), "");
if (level + 1 == XLAT_LAST_LEVEL)
{
@@ -364,11 +375,11 @@ static u64 *ExpandToTable(u64 *pte, unsigned int level)
}
stride_shift = LEVEL_TO_VA_SIZE_SHIFT(level + 1);
for (i = 0U; i < Ln_XLAT_NUM_ENTRIES; i++)
for (i = 0U; i < LN_XLAT_NUM_ENTRIES; i++)
{
table[i] = desc | (i << stride_shift);
}
TableUsage(table, Ln_XLAT_NUM_ENTRIES);
TableUsage(table, LN_XLAT_NUM_ENTRIES);
}
else
{
@@ -386,8 +397,9 @@ static u64 *ExpandToTable(u64 *pte, unsigned int level)
return table;
}
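/*
 * ExpandToTable() above splits a block mapping into a next-level table: the
 * LN_XLAT_NUM_ENTRIES (512) new entries all inherit the original descriptor
 * and differ only in their output-address bits, so the effective mapping is
 * unchanged until individual entries are later rewritten.
 */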
static int SetMapping(struct ArmMmuPtables *ptables,
uintptr_t virt, size_t size,
uintptr virt, fsize_t size,
u64 desc, int may_overwrite)
{
u64 *pte, *ptes[XLAT_LAST_LEVEL + 1];
@@ -398,7 +410,7 @@ static int SetMapping(struct ArmMmuPtables *ptables,
while (size)
{
FT_ASSERT(level <= XLAT_LAST_LEVEL,
FASSERT_MSG(level <= XLAT_LAST_LEVEL,
"max translation table level exceeded\r\n");
/* Locate PTE for given virtual address and page table level */
@@ -435,7 +447,7 @@ static int SetMapping(struct ArmMmuPtables *ptables,
}
goto move_on;
}
if ((size < level_size) || (virt & (level_size - 1)))
{
/* Range doesn't fit, create subtable */
@@ -485,7 +497,7 @@ static int SetMapping(struct ArmMmuPtables *ptables,
return ret;
}
static u64 GetRegionDesc(uint32_t attrs)
static u64 GetRegionDesc(u32 attrs)
{
unsigned int mem_type;
u64 desc = 0U;
@@ -519,8 +531,8 @@ static u64 GetRegionDesc(uint32_t attrs)
switch (mem_type)
{
case MT_DEVICE_nGnRnE:
case MT_DEVICE_nGnRE:
case MT_DEVICE_NGNRNE:
case MT_DEVICE_NGNRE:
case MT_DEVICE_GRE:
/* Access to Device memory and non-cacheable memory are coherent
* for all observers in the system and are treated as
@@ -543,7 +555,7 @@ static u64 GetRegionDesc(uint32_t attrs)
desc |= PTE_BLOCK_DESC_UXN;
if (mem_type == MT_NORMAL)
desc |= PTE_BLOCK_DESC_INNER_SHARE;
desc |= PTE_BLOCK_DESC_OUTER_SHARE;
else
desc |= PTE_BLOCK_DESC_OUTER_SHARE;
}
@@ -551,36 +563,29 @@ static u64 GetRegionDesc(uint32_t attrs)
return desc;
}
static int __AddMap(struct ArmMmuPtables *ptables, const char *name,
uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
static int AddMap(struct ArmMmuPtables *ptables, const char *name,
uintptr phys, uintptr virt, fsize_t size, u32 attrs)
{
u64 desc = GetRegionDesc(attrs);
int may_overwrite = !(attrs & MT_NO_OVERWRITE);
MMU_DEBUG("mmap [%s]: virt %p phys %p size %p attr %p\r\n",
name, virt, phys, size, desc);
FT_ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
FASSERT_MSG(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
"address/size are not page aligned\r\n");
desc |= phys;
return SetMapping(ptables, virt, size, desc, may_overwrite);
}
// static int AddMap(struct ArmMmuPtables *ptables, const char *name,
// uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
// {
// int ret;
// ret = __AddMap(ptables, name, phys, virt, size, attrs);
// return ret;
// }
static int RemoveMap(struct ArmMmuPtables *ptables, const char *name,
uintptr_t virt, size_t size)
uintptr virt, fsize_t size)
{
int ret;
MMU_DEBUG("unmmap [%s]: virt %p size %p\r\n", name, virt, size);
FT_ASSERT(((virt | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
FASSERT_MSG(((virt | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0,
"address/size are not page aligned\r\n");
ret = SetMapping(ptables, virt, size, 0, 1);
@@ -589,12 +594,12 @@ static int RemoveMap(struct ArmMmuPtables *ptables, const char *name,
static inline void AddArmMmuRegion(struct ArmMmuPtables *ptables,
const struct ArmMmuRegion *region,
uint32_t extra_flags)
u32 extra_flags)
{
if (region->size || region->attrs)
{
/* MMU not yet active: must use unlocked version */
__AddMap(ptables, region->name, region->base_pa, region->base_va,
AddMap(ptables, region->name, region->base_pa, region->base_va,
region->size, region->attrs | extra_flags);
}
}
@@ -603,12 +608,13 @@ static void SetupPageTables(struct ArmMmuPtables *ptables)
{
unsigned int index;
const struct ArmMmuRegion *region;
uintptr_t max_va = 0, max_pa = 0;
uintptr max_va = 0, max_pa = 0;
MMU_DEBUG("xlat tables:\r\n");
for (index = 0U; index < CONFIG_MAX_XLAT_TABLES; index++)
MMU_DEBUG("%d: %x\r\n", index, xlat_tables + index * Ln_XLAT_NUM_ENTRIES);
MMU_DEBUG("%d: %x\r\n", index, xlat_tables + index * LN_XLAT_NUM_ENTRIES);
/* Get the address ranges of the memory map from the board-specific configuration */
for (index = 0U; index < mmu_config.num_regions; index++)
{
region = &mmu_config.mmu_regions[index];
@@ -616,19 +622,19 @@ static void SetupPageTables(struct ArmMmuPtables *ptables)
max_pa = max(max_pa, region->base_pa + region->size);
}
FT_ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS),
FASSERT_MSG(max_va <= (1ULL << CONFIG_ARM64_VA_BITS),
"Maximum VA not supported\r\n");
FT_ASSERT(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS),
FASSERT_MSG(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS),
"Maximum PA not supported\r\n");
/* setup translation table for zephyr execution regions */
/* setup translation table for execution regions */
for (index = 0U; index < mmu_config.num_regions; index++)
{
region = &mmu_config.mmu_regions[index];
AddArmMmuRegion(ptables, region, 0);
}
__asm_invalidate_tlb_all();
AsmInvalidateTlbAll();
}
/* Translation table control register settings */
@@ -696,120 +702,109 @@ static void EnableMmuEl1(struct ArmMmuPtables *ptables, unsigned int flags)
static struct ArmMmuPtables kernel_ptables;
/*
* @brief MMU default configuration
*
* This function provides the default configuration mechanism for the Memory
* Management Unit (MMU).
/**
* @name: MmuInit
* @msg: This function provides the default configuration mechanism for the Memory
* Management Unit (MMU)
* @return {*}
*/
void MmuInit(void)
{
unsigned int flags = 0U;
u32 val = 0U;
FT_ASSERT(CONFIG_MMU_PAGE_SIZE == KB(4),
u64 val = 0U;
FCacheL3CacheFlush();
/* Additionally check the translation granule supported by the hardware */
val = AARCH64_READ_SYSREG(ID_AA64MMFR0_EL1);
FASSERT_MSG((CONFIG_MMU_PAGE_SIZE == KB(4)) && (!(val & ID_AA64MMFR0_EL1_4K_NO_SURPOORT)),
"Only 4K page size is supported\r\n");
/* Current MMU code supports only EL1 */
__asm__ volatile("mrs %0, CurrentEL"
: "=r"(val));
val = AARCH64_READ_SYSREG(CurrentEL);
FT_ASSERT(GET_EL(val) == MODE_EL1, "Exception level not EL1, MMU not enabled!\n");
FASSERT_MSG(GET_EL(val) == MODE_EL1, "Exception level not EL1, MMU not enabled!\n");
/* Ensure that MMU is already not enabled */
__asm__ volatile("mrs %0, sctlr_el1"
: "=r"(val));
FT_ASSERT((val & SCTLR_ELx_M) == 0, "MMU is already enabled\n");
val = AARCH64_READ_SYSREG(sctlr_el1);
FASSERT_MSG((val & SCTLR_ELx_M) == 0, "MMU is already enabled\n");
/*
* Only the booting core sets up the page tables.
*/
kernel_ptables.base_xlat_table = NewTable();
SetupPageTables(&kernel_ptables);
FCacheL3CacheDisable();
/* currently only EL1 is supported */
EnableMmuEl1(&kernel_ptables, flags);
}
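/*
 * Usage note (illustrative; the real call site lives in the SoC/board startup
 * code): the booting core is expected to call MmuInit() exactly once, at EL1
 * and with the MMU still disabled, before any code relies on these translation
 * tables; later attribute changes then go through FSetTlbAttributes().
 */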
static int __ArchMemMap(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
struct ArmMmuPtables *ptables;
uint32_t entry_flags = MT_SECURE | MT_P_RX_U_NA;
/* Always map in the kernel page tables */
ptables = &kernel_ptables;
/* Translate flags argument into HW-recognized entry flags. */
switch (flags & K_MEM_CACHE_MASK)
static void ArchMemMap(uintptr virt, uintptr phys, fsize_t size, u32 flags)
{
int ret = AddMap(&kernel_ptables, "dynamic", phys, virt, size, flags);
if (ret)
{
/*
* K_MEM_CACHE_NONE => MT_DEVICE_nGnRnE
* (Device memory nGnRnE)
* K_MEM_CACHE_WB => MT_NORMAL
* (Normal memory Outer WB + Inner WB)
* K_MEM_CACHE_WT => MT_NORMAL_WT
* (Normal memory Outer WT + Inner WT)
*/
case K_MEM_CACHE_NONE:
entry_flags |= MT_DEVICE_nGnRnE;
break;
case K_MEM_CACHE_WT:
entry_flags |= MT_NORMAL_WT;
break;
case K_MEM_CACHE_WB:
entry_flags |= MT_NORMAL;
break;
default:
return -ENOTSUP;
MMU_WRNING("warning AddMap() returned %d", ret);
}
if ((flags & K_MEM_PERM_RW) != 0U)
else
{
entry_flags |= MT_RW;
AsmInvalidateTlbAll();
}
}
if ((flags & K_MEM_PERM_EXEC) == 0U)
{
entry_flags |= MT_P_EXECUTE_NEVER;
}
static fsize_t MemRegionAlign(uintptr *aligned_addr, fsize_t *aligned_size,
uintptr addr, fsize_t size, fsize_t align)
{
fsize_t addr_offset;
if ((flags & K_MEM_PERM_USER) != 0U)
{
return -ENOTSUP;
}
*aligned_addr = rounddown(addr, align);
addr_offset = addr - *aligned_addr;
*aligned_size = roundup(size + addr_offset, align);
return __AddMap(ptables, "generic", phys, (uintptr_t)virt, size, entry_flags);
return addr_offset;
}
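/*
 * Worked example for MemRegionAlign() (illustrative): addr = 0x80001234,
 * size = 0x100 and align = 0x1000 yield aligned_addr = 0x80001000,
 * addr_offset = 0x234 and aligned_size = roundup(0x100 + 0x234, 0x1000) =
 * 0x1000, i.e. the smallest page-granular region covering the requested bytes.
 */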
void ArchMemMap(void *virt, uintptr_t phys, size_t size, uint32_t flags)
/**
* @name: FSetTlbAttributes
* @msg: This function sets the memory attributes for a section
* @param {uintptr} addr is the address for which the attributes need to be set.
* @param {fsize_t} size is the size of the mapped memory region in bytes
* @param {u32} attrib for the specified memory region. mmu.h contains commonly used memory attribute definitions which can be
* utilized for this function.
* @return {*}
*/
void FSetTlbAttributes(uintptr addr, fsize_t size, u32 attrib)
{
int ret = __ArchMemMap(virt, phys, size, flags);
uintptr_t aligned_phys, addr_offset;
size_t aligned_size;
MemRegionAlign(&aligned_phys, &aligned_size,
addr, size, CONFIG_MMU_PAGE_SIZE);
if (ret)
{
MMU_DEBUG("__ArchMemMap() returned %d", ret);
}
else
{
__asm_invalidate_tlb_all();
}
FASSERT_MSG(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys);
FASSERT_MSG(aligned_phys < (aligned_phys + (aligned_size - 1)),
"wraparound for physical address 0x%lx (size %zu)",
aligned_phys, aligned_size);
MMU_DEBUG("addr %p,size %d,aligned_phys %p,aligned_size %d\r\n", addr, size, aligned_phys, aligned_size);
ArchMemMap(aligned_phys, aligned_phys, aligned_size, attrib);
}
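/*
 * Example call (hypothetical address, size and attribute combination; the
 * MT_* flags are the ones used elsewhere in this file): map a 2 MB device
 * window at 0x28000000 as Device-nGnRnE, read/write, never executable:
 *
 *     FSetTlbAttributes(0x28000000UL, 0x200000UL,
 *                       MT_DEVICE_NGNRNE | MT_RW | MT_P_EXECUTE_NEVER);
 */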
void ArchMemUnmap(void *addr, size_t size)
void ArchMemUnmap(uintptr addr, fsize_t size)
{
int ret = RemoveMap(&kernel_ptables, "generic", (uintptr_t)addr, size);
int ret = RemoveMap(&kernel_ptables, "dynamic", (uintptr)addr, size);
if (ret)
{
MMU_DEBUG("RemoveMap() returned %d", ret);
MMU_WRNING("RemoveMap() returned %d", ret);
}
else
{
__asm_invalidate_tlb_all();
AsmInvalidateTlbAll();
}
}
int ArchPagePhysGet(void *virt, uintptr_t *phys)
int ArchPagePhysGet(uintptr virt, uintptr *phys)
{
u64 par;
int key;