Commit 347621bb authored by Sandrine Bailleux

xlat lib v2: Remove hard-coded virtual address space size



Previous patches have made it possible to specify the physical and
virtual address space sizes for each translation context. However,
there are still some places in the code where the physical (resp.
virtual) address space size is assumed to be PLAT_PHY_ADDR_SPACE_SIZE
(resp. PLAT_VIRT_ADDR_SPACE_SIZE).

This patch removes these assumptions and instead reads the relevant
address space size from the translation context itself. This
information is now passed as an argument to the enable_mmu_arch()
function, which needs it to configure the TCR_ELx.T0SZ field (in
AArch64) or the TTBCR.T0SZ field (in AArch32) appropriately.

Change-Id: I20b0e68b03a143e998695d42911d9954328a06aa
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
parent d83f3579
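For readers unfamiliar with the translation controls involved: TCR_ELx.T0SZ
encodes the size of the region translated through TTBR0 as 2^(64 - T0SZ)
bytes on AArch64 (2^(32 - T0SZ) for TTBCR.T0SZ on AArch32), which is why the
patch derives the field from the number of trailing zero bits of the
power-of-two address space size. A minimal standalone sketch of that
computation (t0sz_aarch64 is a hypothetical helper, not code from this
patch):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Size of the region translated via TTBR0 is 2^(64 - T0SZ) on
	 * AArch64, so T0SZ is 64 minus the position of the single set
	 * bit of a power-of-two address space size. */
	static unsigned int t0sz_aarch64(uint64_t virtual_addr_space_size)
	{
		/* __builtin_ctzll(0) is undefined; size must be non-zero. */
		assert(virtual_addr_space_size != 0U);
		/* Must be a power of two, i.e. exactly one bit set. */
		assert((virtual_addr_space_size &
			(virtual_addr_space_size - 1U)) == 0U);
		return 64U - __builtin_ctzll(virtual_addr_space_size);
	}

	int main(void)
	{
		/* A 4GB VA space (max_va = 0xFFFFFFFF) yields T0SZ = 32. */
		printf("T0SZ = %u\n", t0sz_aarch64(1ULL << 32));
		return 0;
	}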
@@ -87,7 +87,8 @@ uint64_t xlat_arch_get_xn_desc(int el __unused)
  ******************************************************************************/
 void enable_mmu_arch(unsigned int flags,
 		uint64_t *base_table,
-		unsigned long long max_pa)
+		unsigned long long max_pa,
+		uintptr_t max_va)
 {
 	u_register_t mair0, ttbcr, sctlr;
 	uint64_t ttbr0;
@@ -123,9 +124,18 @@ void enable_mmu_arch(unsigned int flags,
 	/*
 	 * Limit the input address ranges and memory region sizes translated
-	 * using TTBR0 to the given virtual address space size.
+	 * using TTBR0 to the given virtual address space size, if smaller than
+	 * 32 bits.
 	 */
-	ttbcr |= 32 - __builtin_ctzl((uintptr_t) PLAT_VIRT_ADDR_SPACE_SIZE);
+	if (max_va != UINT32_MAX) {
+		uintptr_t virtual_addr_space_size = max_va + 1;
+		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+		/*
+		 * __builtin_ctzl(0) is undefined but here we are guaranteed
+		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
+		 */
+		ttbcr |= 32 - __builtin_ctzl(virtual_addr_space_size);
+	}

 	/*
 	 * Set the cacheability and shareability attributes for memory
...
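Note the guard on max_va: on AArch32, uintptr_t is 32 bits wide, so for a
full 4GB space max_va + 1 would wrap to zero and __builtin_ctzl(0) is
undefined; a T0SZ of 0 already selects the full 32-bit input range, so the
field can simply be left alone. A tiny standalone demonstration of the
wraparound (illustrative only, not code from this patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t max_va = UINT32_MAX;	/* full 32-bit VA space */
		uint32_t size = max_va + 1U;	/* wraps to 0 (unsigned) */
		/* Prints 0: ctz of this value would be undefined, which
		 * is why the patch special-cases max_va == UINT32_MAX. */
		printf("size = %u\n", (unsigned)size);
		return 0;
	}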
@@ -196,7 +196,8 @@ DEFINE_ENABLE_MMU_EL(3, tlbialle3)
 void enable_mmu_arch(unsigned int flags,
 		uint64_t *base_table,
-		unsigned long long max_pa)
+		unsigned long long max_pa,
+		uintptr_t max_va)
 {
 	uint64_t mair, ttbr, tcr;
@@ -215,7 +216,14 @@ void enable_mmu_arch(unsigned int flags,
 	 * Limit the input address ranges and memory region sizes translated
 	 * using TTBR0 to the given virtual address space size.
 	 */
-	tcr = 64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE);
+	assert(max_va < UINTPTR_MAX);
+	uintptr_t virtual_addr_space_size = max_va + 1;
+	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+	/*
+	 * __builtin_ctzl(0) is undefined but here we are guaranteed that
+	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
+	 */
+	tcr = 64 - __builtin_ctzl(virtual_addr_space_size);

 	/*
 	 * Set the cacheability and shareability attributes for memory
...
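The CHECK_VIRT_ADDR_SPACE_SIZE() assertion is what makes the
__builtin_ctzl() trick safe: subtracting the trailing-zero count only
yields the right T0SZ when the size is a single power of two within the
architectural bounds. A sketch of that kind of check is shown below; the
macro names and bound values here are assumptions for illustration, not
the library's own definitions:

	/* Sketch of a power-of-two range check in the spirit of
	 * CHECK_VIRT_ADDR_SPACE_SIZE(); bounds are assumed values. */
	#define IS_POWER_OF_TWO(x)	(((x) & ((x) - 1U)) == 0U)
	#define ASSUMED_MIN_VA_SIZE	(1ULL << 12)	/* one 4KB page */
	#define ASSUMED_MAX_VA_SIZE	(1ULL << 48)	/* assumed VA limit */

	#define CHECK_VA_SPACE_SIZE(size)			\
		(((size) >= ASSUMED_MIN_VA_SIZE) &&		\
		 ((size) <= ASSUMED_MAX_VA_SIZE) &&		\
		 IS_POWER_OF_TWO(size))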
@@ -1178,8 +1178,7 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 		mm++;
 	}

-	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
-			xlat_arch_get_max_supported_pa());
+	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
 	assert(ctx->max_va <= ctx->va_max_address);
 	assert(ctx->max_pa <= ctx->pa_max_address);
@@ -1205,7 +1204,7 @@ void init_xlat_tables(void)
 	 * space size might be mapped.
 	 */
 #ifdef PLAT_XLAT_TABLES_DYNAMIC
-#define MAX_PHYS_ADDR	PLAT_PHY_ADDR_SPACE_SIZE
+#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
 #else
 #define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
 #endif
@@ -1214,19 +1213,22 @@ void init_xlat_tables(void)
 void enable_mmu_secure(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }

 #else

 void enable_mmu_el1(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }

 void enable_mmu_el3(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }

 #endif /* AARCH32 */
...
@@ -79,9 +79,8 @@ uint64_t xlat_arch_get_xn_desc(int el);
 unsigned long long xlat_arch_get_max_supported_pa(void);

 /* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags,
-		uint64_t *base_table,
-		unsigned long long max_pa);
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+		unsigned long long pa, uintptr_t max_va);

 /* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
 int is_mmu_enabled(void);
...
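Platform code is unaffected by the new parameter, since the public entry
points forward the context's va_max_address themselves. A hypothetical
AArch64 setup sequence might look like this (the mapped region and its
attributes are made up for illustration):

	#include <xlat_tables_v2.h>

	void plat_setup_mmu(void)
	{
		/* Map a device region, build the tables, then turn the
		 * MMU on. enable_mmu_el3() now forwards
		 * tf_xlat_ctx.va_max_address to enable_mmu_arch()
		 * internally. */
		mmap_add_region(0x10000000, 0x10000000, 0x1000,
				MT_DEVICE | MT_RW | MT_SECURE);
		init_xlat_tables();
		enable_mmu_el3(0);
	}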