Commit a90bfa33 authored by Sandrine Bailleux's avatar Sandrine Bailleux
Browse files

juno: Provide per-EL MMU setup functions

Instead of having a single version of the MMU setup functions for all
bootloader images that can execute either in EL3 or in EL1, provide
separate functions for EL1 and EL3. Each bootloader image can then
call the appropriate version of these functions. The aim is to reduce
the amount of code compiled in each BL image by embedding only what's
needed (e.g. BL1 to embed only EL3 variants).

Change-Id: Ie3f6fb58f7d9ea4e4085b5069e27d6b9dceaa286
parent fe23b15d
...@@ -36,75 +36,70 @@ ...@@ -36,75 +36,70 @@
#include <xlat_tables.h> #include <xlat_tables.h>
void enable_mmu() /*******************************************************************************
{ * Macro generating the code for the function enabling the MMU in the given
unsigned long mair, tcr, ttbr, sctlr; * exception level, assuming that the pagetables have already been created.
unsigned long current_el = read_current_el(); *
* _el: Exception level at which the function will run
/* Set the attributes in the right indices of the MAIR */ * _tcr_extra: Extra bits to set in the TCR register. This mask will
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); * be OR'ed with the default TCR value.
mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, * _tlbi_fct: Function to invalidate the TLBs at the current
ATTR_IWBWA_OWBWA_NTR_INDEX); * exception level
******************************************************************************/
/* #define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct) \
* Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32 void enable_mmu_el##_el(void) \
*/ { \
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | uint64_t mair, tcr, ttbr; \
TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; uint32_t sctlr; \
\
/* Set TTBR bits as well */ assert(IS_IN_EL(_el)); \
ttbr = (unsigned long) l1_xlation_table; assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
\
if (GET_EL(current_el) == MODE_EL3) { /* Set attributes in the right indices of the MAIR */ \
assert((read_sctlr_el3() & SCTLR_M_BIT) == 0); mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, \
write_mair_el3(mair); ATTR_IWBWA_OWBWA_NTR_INDEX); \
tcr |= TCR_EL3_RES1; write_mair_el##_el(mair); \
/* Invalidate EL3 TLBs */ \
tlbialle3(); /* Invalidate TLBs at the current exception level */ \
_tlbi_fct(); \
write_tcr_el3(tcr); \
write_ttbr0_el3(ttbr); /* Set TCR bits as well. */ \
/* Inner & outer WBWA & shareable + T0SZ = 32 */ \
/* ensure all translation table writes have drained into memory, tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
* the TLB invalidation is complete, and translation register TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; \
* writes are committed before enabling the MMU tcr |= _tcr_extra; \
*/ write_tcr_el##_el(tcr); \
dsb(); \
isb(); /* Set TTBR bits as well */ \
ttbr = (uint64_t) l1_xlation_table; \
sctlr = read_sctlr_el3(); write_ttbr0_el##_el(ttbr); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; \
sctlr |= SCTLR_A_BIT | SCTLR_C_BIT; /* Ensure all translation table writes have drained */ \
write_sctlr_el3(sctlr); /* into memory, the TLB invalidation is complete, */ \
} else { /* and translation register writes are committed */ \
assert((read_sctlr_el1() & SCTLR_M_BIT) == 0); /* before enabling the MMU */ \
dsb(); \
write_mair_el1(mair); isb(); \
/* Invalidate EL1 TLBs */ \
tlbivmalle1(); sctlr = read_sctlr_el##_el(); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; \
write_tcr_el1(tcr); sctlr |= SCTLR_A_BIT | SCTLR_C_BIT; \
write_ttbr0_el1(ttbr); write_sctlr_el##_el(sctlr); \
\
/* ensure all translation table writes have drained into memory, /* Ensure the MMU enable takes effect immediately */ \
* the TLB invalidation is complete, and translation register isb(); \
* writes are committed before enabling the MMU
*/
dsb();
isb();
sctlr = read_sctlr_el1();
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
write_sctlr_el1(sctlr);
} }
/* ensure the MMU enable takes effect immediately */
isb();
return; /* Define EL1 and EL3 variants of the function enabling the MMU */
} DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
/*
* Table of regions to map using the MMU.
* This doesn't include Trusted RAM as the 'mem_layout' argument passed to
 * configure_mmu_elx() will give the available subset of that.
*/
static const mmap_region_t juno_mmap[] = { static const mmap_region_t juno_mmap[] = {
{ TZROM_BASE, TZROM_SIZE, MT_MEMORY | MT_RO | MT_SECURE }, { TZROM_BASE, TZROM_SIZE, MT_MEMORY | MT_RO | MT_SECURE },
{ MHU_SECURE_BASE, MHU_SECURE_SIZE, (MHU_PAYLOAD_CACHED ? MT_MEMORY : MT_DEVICE) { MHU_SECURE_BASE, MHU_SECURE_SIZE, (MHU_PAYLOAD_CACHED ? MT_MEMORY : MT_DEVICE)
...@@ -122,26 +117,34 @@ static const mmap_region_t juno_mmap[] = { ...@@ -122,26 +117,34 @@ static const mmap_region_t juno_mmap[] = {
{0} {0}
}; };
void configure_mmu(meminfo_t *mem_layout, /*******************************************************************************
unsigned long ro_start, + * Macro generating the code for the function setting up the pagetables as per
unsigned long ro_limit, + * the platform memory map & initialize the mmu, for the given exception level
unsigned long coh_start, + ******************************************************************************/
unsigned long coh_limit) #define DEFINE_CONFIGURE_MMU_EL(_el) \
{ void configure_mmu_el##_el(meminfo_t *mem_layout, \
mmap_add_region(mem_layout->total_base, mem_layout->total_size, unsigned long ro_start, \
MT_MEMORY | MT_RW | MT_SECURE); unsigned long ro_limit, \
mmap_add_region(ro_start, ro_limit - ro_start, unsigned long coh_start, \
MT_MEMORY | MT_RO | MT_SECURE); unsigned long coh_limit) \
mmap_add_region(coh_start, coh_limit - coh_start, { \
MT_DEVICE | MT_RW | MT_SECURE); mmap_add_region(mem_layout->total_base, \
mem_layout->total_size, \
mmap_add(juno_mmap); MT_MEMORY | MT_RW | MT_SECURE); \
mmap_add_region(ro_start, ro_limit - ro_start, \
MT_MEMORY | MT_RO | MT_SECURE); \
mmap_add_region(coh_start, coh_limit - coh_start, \
MT_DEVICE | MT_RW | MT_SECURE); \
mmap_add(juno_mmap); \
init_xlat_tables(); \
\
enable_mmu_el##_el(); \
}
/* Define EL1 and EL3 variants of the function initialising the MMU */
DEFINE_CONFIGURE_MMU_EL(1)
DEFINE_CONFIGURE_MMU_EL(3)
init_xlat_tables();
enable_mmu();
return;
}
unsigned long plat_get_ns_image_entrypoint(void) unsigned long plat_get_ns_image_entrypoint(void)
{ {
......
...@@ -228,9 +228,9 @@ void bl1_plat_arch_setup(void) ...@@ -228,9 +228,9 @@ void bl1_plat_arch_setup(void)
*/ */
cci_enable_coherency(read_mpidr()); cci_enable_coherency(read_mpidr());
configure_mmu(&bl1_tzram_layout, configure_mmu_el3(&bl1_tzram_layout,
TZROM_BASE, TZROM_BASE,
TZROM_BASE + TZROM_SIZE, TZROM_BASE + TZROM_SIZE,
BL1_COHERENT_RAM_BASE, BL1_COHERENT_RAM_BASE,
BL1_COHERENT_RAM_LIMIT); BL1_COHERENT_RAM_LIMIT);
} }
...@@ -199,9 +199,9 @@ void bl2_platform_setup() ...@@ -199,9 +199,9 @@ void bl2_platform_setup()
******************************************************************************/ ******************************************************************************/
void bl2_plat_arch_setup() void bl2_plat_arch_setup()
{ {
configure_mmu(&bl2_tzram_layout, configure_mmu_el1(&bl2_tzram_layout,
BL2_RO_BASE, BL2_RO_BASE,
BL2_RO_LIMIT, BL2_RO_LIMIT,
BL2_COHERENT_RAM_BASE, BL2_COHERENT_RAM_BASE,
BL2_COHERENT_RAM_LIMIT); BL2_COHERENT_RAM_LIMIT);
} }
...@@ -140,9 +140,9 @@ void bl31_platform_setup(void) ...@@ -140,9 +140,9 @@ void bl31_platform_setup(void)
******************************************************************************/ ******************************************************************************/
void bl31_plat_arch_setup() void bl31_plat_arch_setup()
{ {
configure_mmu(&bl2_to_bl31_args.bl31_meminfo, configure_mmu_el3(&bl2_to_bl31_args.bl31_meminfo,
BL31_RO_BASE, BL31_RO_BASE,
BL31_RO_LIMIT, BL31_RO_LIMIT,
BL31_COHERENT_RAM_BASE, BL31_COHERENT_RAM_BASE,
BL31_COHERENT_RAM_LIMIT); BL31_COHERENT_RAM_LIMIT);
} }
...@@ -108,9 +108,9 @@ void bl32_platform_setup(void) ...@@ -108,9 +108,9 @@ void bl32_platform_setup(void)
******************************************************************************/ ******************************************************************************/
void bl32_plat_arch_setup(void) void bl32_plat_arch_setup(void)
{ {
configure_mmu(&bl32_tzdram_layout, configure_mmu_el1(&bl32_tzdram_layout,
BL32_RO_BASE, BL32_RO_BASE,
BL32_RO_LIMIT, BL32_RO_LIMIT,
BL32_COHERENT_RAM_BASE, BL32_COHERENT_RAM_BASE,
BL32_COHERENT_RAM_LIMIT); BL32_COHERENT_RAM_LIMIT);
} }
...@@ -341,12 +341,18 @@ extern void bl2_plat_arch_setup(void); ...@@ -341,12 +341,18 @@ extern void bl2_plat_arch_setup(void);
extern void bl31_plat_arch_setup(void); extern void bl31_plat_arch_setup(void);
extern int platform_setup_pm(const struct plat_pm_ops **); extern int platform_setup_pm(const struct plat_pm_ops **);
extern unsigned int platform_get_core_pos(unsigned long mpidr); extern unsigned int platform_get_core_pos(unsigned long mpidr);
extern void enable_mmu(void); extern void enable_mmu_el1(void);
extern void configure_mmu(struct meminfo *, extern void enable_mmu_el3(void);
unsigned long, extern void configure_mmu_el1(meminfo_t *mem_layout,
unsigned long, unsigned long ro_start,
unsigned long, unsigned long ro_limit,
unsigned long); unsigned long coh_start,
unsigned long coh_limit);
extern void configure_mmu_el3(meminfo_t *mem_layout,
unsigned long ro_start,
unsigned long ro_limit,
unsigned long coh_start,
unsigned long coh_limit);
extern void plat_report_exception(unsigned long); extern void plat_report_exception(unsigned long);
extern unsigned long plat_get_ns_image_entrypoint(void); extern unsigned long plat_get_ns_image_entrypoint(void);
extern unsigned long platform_get_stack(unsigned long mpidr); extern unsigned long platform_get_stack(unsigned long mpidr);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment