Commit 99f60798 authored by Sandrine Bailleux's avatar Sandrine Bailleux
Browse files

xlat lib v2: Remove init_xlat_tables_arch() function



In both the AArch32 and AArch64 versions, this function used to check
the sanity of PLAT_PHY_ADDR_SPACE_SIZE with regard to the
architectural maximum value. Instead, export the
xlat_arch_get_max_supported_pa() function and move the debug
assertion into AArch-agnostic code.

The AArch64 version also used to precalculate the TCR.PS field value,
based on the size of the physical address space. This is now done
directly by enable_mmu_arch(), which now receives the physical address
space size as an argument.

Change-Id: Ie77ea92eb06db586f28784fdb479c6e27dd1acc1
Signed-off-by: default avatarSandrine Bailleux <sandrine.bailleux@arm.com>
parent a9ad848c
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include "../xlat_tables_private.h" #include "../xlat_tables_private.h"
#if ENABLE_ASSERTIONS #if ENABLE_ASSERTIONS
static unsigned long long xlat_arch_get_max_supported_pa(void) unsigned long long xlat_arch_get_max_supported_pa(void)
{ {
/* Physical address space size for long descriptor format. */ /* Physical address space size for long descriptor format. */
return (1ull << 40) - 1ull; return (1ull << 40) - 1ull;
...@@ -81,18 +81,11 @@ uint64_t xlat_arch_get_xn_desc(int el __unused) ...@@ -81,18 +81,11 @@ uint64_t xlat_arch_get_xn_desc(int el __unused)
return UPPER_ATTRS(XN); return UPPER_ATTRS(XN);
} }
void init_xlat_tables_arch(unsigned long long max_pa)
{
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
xlat_arch_get_max_supported_pa());
}
/******************************************************************************* /*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the * Function for enabling the MMU in Secure PL1, assuming that the
* page-tables have already been created. * page-tables have already been created.
******************************************************************************/ ******************************************************************************/
void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table) void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
{ {
u_register_t mair0, ttbcr, sctlr; u_register_t mair0, ttbcr, sctlr;
uint64_t ttbr0; uint64_t ttbr0;
...@@ -158,7 +151,9 @@ void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table) ...@@ -158,7 +151,9 @@ void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
isb(); isb();
} }
void enable_mmu_arch(unsigned int flags, uint64_t *base_table) void enable_mmu_arch(unsigned int flags,
uint64_t *base_table,
unsigned long long max_pa)
{ {
enable_mmu_internal_secure(flags, base_table); enable_mmu_internal_secure(flags, base_table);
} }
...@@ -60,7 +60,7 @@ static const unsigned int pa_range_bits_arr[] = { ...@@ -60,7 +60,7 @@ static const unsigned int pa_range_bits_arr[] = {
PARANGE_0101 PARANGE_0101
}; };
static unsigned long long xlat_arch_get_max_supported_pa(void) unsigned long long xlat_arch_get_max_supported_pa(void)
{ {
u_register_t pa_range = read_id_aa64mmfr0_el1() & u_register_t pa_range = read_id_aa64mmfr0_el1() &
ID_AA64MMFR0_EL1_PARANGE_MASK; ID_AA64MMFR0_EL1_PARANGE_MASK;
...@@ -146,24 +146,6 @@ uint64_t xlat_arch_get_xn_desc(int el) ...@@ -146,24 +146,6 @@ uint64_t xlat_arch_get_xn_desc(int el)
} }
} }
void init_xlat_tables_arch(unsigned long long max_pa)
{
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
xlat_arch_get_max_supported_pa());
/*
* If dynamic allocation of new regions is enabled the code can't make
* assumptions about the max physical address because it could change
* after adding new regions. If this functionality is disabled it is
* safer to restrict the max physical address as much as possible.
*/
#ifdef PLAT_XLAT_TABLES_DYNAMIC
tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}
/******************************************************************************* /*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given * Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created. * exception level, assuming that the pagetables have already been created.
...@@ -247,8 +229,16 @@ DEFINE_ENABLE_MMU_EL(3, ...@@ -247,8 +229,16 @@ DEFINE_ENABLE_MMU_EL(3,
tlbialle3) tlbialle3)
#endif #endif
void enable_mmu_arch(unsigned int flags, uint64_t *base_table) void enable_mmu_arch(unsigned int flags,
uint64_t *base_table,
unsigned long long max_pa)
{ {
/*
* It is safer to restrict the max physical address accessible by the
* hardware as much as possible.
*/
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#if IMAGE_EL == 1 #if IMAGE_EL == 1
assert(IS_IN_EL(1)); assert(IS_IN_EL(1));
enable_mmu_internal_el1(flags, base_table); enable_mmu_internal_el1(flags, base_table);
......
...@@ -1178,14 +1178,14 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx) ...@@ -1178,14 +1178,14 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
mm++; mm++;
} }
ctx->initialized = 1; assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
xlat_arch_get_max_supported_pa());
xlat_tables_print(ctx);
assert(ctx->max_va <= ctx->va_max_address); assert(ctx->max_va <= ctx->va_max_address);
assert(ctx->max_pa <= ctx->pa_max_address); assert(ctx->max_pa <= ctx->pa_max_address);
init_xlat_tables_arch(ctx->max_pa); ctx->initialized = 1;
xlat_tables_print(ctx);
} }
void init_xlat_tables(void) void init_xlat_tables(void)
...@@ -1193,23 +1193,40 @@ void init_xlat_tables(void) ...@@ -1193,23 +1193,40 @@ void init_xlat_tables(void)
init_xlat_tables_ctx(&tf_xlat_ctx); init_xlat_tables_ctx(&tf_xlat_ctx);
} }
/*
* If dynamic allocation of new regions is disabled then by the time we call the
* function enabling the MMU, we'll have registered all the memory regions to
* map for the system's lifetime. Therefore, at this point we know the maximum
* physical address that will ever be mapped.
*
* If dynamic allocation is enabled then we can't make any such assumption
* because the maximum physical address could get pushed while adding a new
* region. Therefore, in this case we have to assume that the whole address
* space size might be mapped.
*/
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR PLAT_PHY_ADDR_SPACE_SIZE
#else
#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
#endif
#ifdef AARCH32 #ifdef AARCH32
void enable_mmu_secure(unsigned int flags) void enable_mmu_secure(unsigned int flags)
{ {
enable_mmu_arch(flags, tf_xlat_ctx.base_table); enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
} }
#else #else
void enable_mmu_el1(unsigned int flags) void enable_mmu_el1(unsigned int flags)
{ {
enable_mmu_arch(flags, tf_xlat_ctx.base_table); enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
} }
void enable_mmu_el3(unsigned int flags) void enable_mmu_el3(unsigned int flags)
{ {
enable_mmu_arch(flags, tf_xlat_ctx.base_table); enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
} }
#endif /* AARCH32 */ #endif /* AARCH32 */
...@@ -72,11 +72,16 @@ int xlat_arch_current_el(void); ...@@ -72,11 +72,16 @@ int xlat_arch_current_el(void);
*/ */
uint64_t xlat_arch_get_xn_desc(int el); uint64_t xlat_arch_get_xn_desc(int el);
/* Execute architecture-specific translation table initialization code. */ /*
void init_xlat_tables_arch(unsigned long long max_pa); * Return the maximum physical address supported by the hardware.
* This value depends on the execution state (AArch32/AArch64).
*/
unsigned long long xlat_arch_get_max_supported_pa(void);
/* Enable MMU and configure it to use the specified translation tables. */ /* Enable MMU and configure it to use the specified translation tables. */
void enable_mmu_arch(unsigned int flags, uint64_t *base_table); void enable_mmu_arch(unsigned int flags,
uint64_t *base_table,
unsigned long long max_pa);
/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */ /* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
int is_mmu_enabled(void); int is_mmu_enabled(void);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment