Unverified Commit 224e1aba authored by danh-arm, committed by GitHub

Merge pull request #1481 from antonio-nino-diaz-arm/an/xlat-refactor

xlat: More refactoring
parents e4686fd8 1dd6c051
...
@@ -41,10 +41,28 @@
  */
 #define XLAT_TABLE_NC (U(1) << 1)
 
+/*
+ * Offsets into a mmu_cfg_params array generated by setup_mmu_cfg(). All
+ * parameters are 64 bits wide.
+ */
+#define MMU_CFG_MAIR 0
+#define MMU_CFG_TCR 1
+#define MMU_CFG_TTBR0 2
+#define MMU_CFG_PARAM_MAX 3
+
 #ifndef __ASSEMBLY__
 
 #include <sys/types.h>
 
+/*
+ * Return the values that the MMU configuration registers must contain for the
+ * specified translation context. `params` must be a pointer to an array of
+ * size MMU_CFG_PARAM_MAX.
+ */
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+		   const uint64_t *base_table, unsigned long long max_pa,
+		   uintptr_t max_va, int xlat_regime);
+
 #ifdef AARCH32
 
 /* AArch32 specific translation table API */
 void enable_mmu_secure(unsigned int flags);
...
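To make the new contract concrete, here is a minimal caller sketch assuming only the declarations above; `my_base_table`, `my_prepare_mmu` and the address-space limits are hypothetical placeholders, not part of this change.

```c
#include <stdint.h>
#include <xlat_tables_v2.h>

extern uint64_t my_base_table[];	/* hypothetical base translation table */

static void my_prepare_mmu(unsigned int flags)
{
	/* One 64-bit slot per register, indexed by the MMU_CFG_* offsets. */
	uint64_t params[MMU_CFG_PARAM_MAX];

	setup_mmu_cfg(params, flags, my_base_table,
		      0xFFFFFFFFULL,	/* max_pa, made up */
		      0xFFFFFFFFUL,	/* max_va, made up */
		      EL3_REGIME);

	/*
	 * params[MMU_CFG_MAIR], params[MMU_CFG_TCR] and params[MMU_CFG_TTBR0]
	 * now hold the values to program into MAIR_ELx, TCR_ELx and TTBR0_ELx
	 * before the MMU is enabled.
	 */
}
```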
...
@@ -62,7 +62,7 @@
 /*
  * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
- * 64KB. However, TF only supports the 4KB case at the moment.
+ * 64KB. However, only 4KB are supported at the moment.
  */
 #define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
 #define PAGE_SIZE (U(1) << PAGE_SIZE_SHIFT)
...
@@ -121,10 +121,12 @@ typedef struct mmap_region {
 } mmap_region_t;
 
 /*
- * Translation regimes supported by this library.
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect it at runtime.
  */
 #define EL1_EL0_REGIME 1
 #define EL3_REGIME 3
+#define EL_REGIME_INVALID -1
 
 /*
  * Declare the translation context type.
...
@@ -165,8 +167,7 @@ typedef struct xlat_ctx xlat_ctx_t;
 		(_xlat_tables_count), \
 		(_virt_addr_space_size), \
 		(_phy_addr_space_size), \
-		IMAGE_XLAT_DEFAULT_REGIME, \
-		"xlat_table")
+		EL_REGIME_INVALID, "xlat_table")
 
 /*
  * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
...
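With `EL_REGIME_INVALID` as the registered value, the regime is bound late instead of at build time. A sketch of the resolution step, mirroring the `init_xlat_tables()` hunk further down in this diff; `my_resolve_regime` is hypothetical and `xlat_arch_current_el()` is a library-internal helper:

```c
#include <assert.h>
#include <xlat_tables_v2.h>

static void my_resolve_regime(xlat_ctx_t *ctx)
{
	assert(ctx->xlat_regime == EL_REGIME_INVALID);

	if (xlat_arch_current_el() == 1) {
		ctx->xlat_regime = EL1_EL0_REGIME;
	} else {
		assert(xlat_arch_current_el() == 3);
		ctx->xlat_regime = EL3_REGIME;
	}
}
```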
...
@@ -16,13 +16,6 @@
 #error "Do not include this header file directly. Include xlat_tables_v2.h instead."
 #endif
 
-/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
-#define MMU_CFG_MAIR0 0
-#define MMU_CFG_TCR 1
-#define MMU_CFG_TTBR0_LO 2
-#define MMU_CFG_TTBR0_HI 3
-#define MMU_CFG_PARAM_MAX 4
-
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
...
@@ -31,9 +24,6 @@
 #include <xlat_tables_arch.h>
 #include <xlat_tables_defs.h>
 
-/* Parameters of register values required when enabling MMU */
-extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
 /* Forward declaration */
 struct mmap_region;
...
@@ -172,29 +162,4 @@ struct xlat_ctx {
 #endif /*__ASSEMBLY__*/
 
-#if AARCH64
-/*
- * This IMAGE_EL macro must not to be used outside the library, and it is only
- * used in AArch64.
- */
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
-# define IMAGE_EL 3
-# define IMAGE_XLAT_DEFAULT_REGIME EL3_REGIME
-#else
-# define IMAGE_EL 1
-# define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
-#endif
-
-#else /* if AARCH32 */
-
-/*
- * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime in
- * AArch64 except for the XN bits, but we set and unset them at the same time,
- * so there's no difference in practice.
- */
-#define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
-
-#endif /* AARCH64 */
-
 #endif /* __XLAT_TABLES_V2_HELPERS_H__ */
...
@@ -195,6 +195,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
 	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
 	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
 	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+	/*
+	 * Always set the access flag, as this library assumes access flag
+	 * faults aren't managed.
+	 */
 	desc |= LOWER_ATTRS(ACCESS_FLAG);
 	desc |= ap1_mask;
...
@@ -222,9 +226,10 @@ static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
 	} else { /* Normal memory */
 		/*
 		 * Always map read-write normal memory as execute-never.
-		 * (Trusted Firmware doesn't self-modify its code, therefore
-		 * R/W memory is reserved for data storage, which must not be
-		 * executable.)
+		 * This library assumes that it is used by software that does
+		 * not self-modify its code, therefore R/W memory is reserved
+		 * for data storage, which must not be executable.
+		 *
 		 * Note that setting the XN bit here is for consistency only.
 		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
 		 * which makes any writable memory region to be treated as
...
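The two comment changes above encode one policy: the access flag is always set because the library never handles access-flag faults, and read-write normal memory is execute-never both in the descriptor and, redundantly, via SCTLR_ELx.WXN. A self-contained illustration of where those bits land; the shifts and bit positions (AF at descriptor bit 10, XN at bit 54) are assumed from TF-A's xlat_tables_defs.h encodings, and the `MY_*` macros are stand-ins, not the library's:

```c
#include <assert.h>
#include <stdint.h>

#define MY_LOWER_ATTRS(x)	(((uint64_t)(x) & 0xFFFULL) << 2)
#define MY_UPPER_ATTRS(x)	(((uint64_t)(x) & 0x7ULL) << 52)
#define MY_ACCESS_FLAG		(1U << 8)	/* descriptor bit 10 */
#define MY_XN			(1ULL << 2)	/* descriptor bit 54 */

int main(void)
{
	uint64_t desc = 0ULL;

	desc |= MY_LOWER_ATTRS(MY_ACCESS_FLAG);	/* AF: no AF faults taken */
	desc |= MY_UPPER_ATTRS(MY_XN);		/* XN: R/W memory is data */

	assert((desc >> 10) & 1ULL);	/* access flag in place */
	assert((desc >> 54) & 1ULL);	/* execute-never in place */
	return 0;
}
```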
...
@@ -24,17 +24,17 @@ func enable_mmu_direct
 	mov	r3, r0
 	ldr	r0, =mmu_cfg_params
 
-	/* MAIR0 */
-	ldr	r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+	/* MAIR0. Only the lower 32 bits are used. */
+	ldr	r1, [r0, #(MMU_CFG_MAIR << 3)]
 	stcopr	r1, MAIR0
 
-	/* TTBCR */
-	ldr	r2, [r0, #(MMU_CFG_TCR << 2)]
+	/* TTBCR. Only the lower 32 bits are used. */
+	ldr	r2, [r0, #(MMU_CFG_TCR << 3)]
 	stcopr	r2, TTBCR
 
 	/* TTBR0 */
-	ldr	r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
-	ldr	r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+	ldr	r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+	ldr	r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
 	stcopr16	r1, r2, TTBR0_64
 
 	/* TTBR1 is unused right now; set it to 0. */
...
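The offset arithmetic changes because each parameter is now a 64-bit slot: the byte offset of entry `i` is `i << 3` instead of `i << 2`, and on a little-endian AArch32 target the low word of TTBR0 sits at that offset with the high word 4 bytes above it, which is exactly what the two `ldr`s read. A host-side sketch of the same math, assuming little-endian as TF targets:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t params[3] = { 0 };	/* MMU_CFG_PARAM_MAX slots */
	uint32_t lo, hi;

	params[2] = 0x1122334455667788ULL;	/* MMU_CFG_TTBR0 == 2 */

	memcpy(&lo, (uint8_t *)params + (2 << 3), sizeof(lo));
	memcpy(&hi, (uint8_t *)params + (2 << 3) + 4, sizeof(hi));

	assert(lo == 0x55667788U);	/* word at (index << 3) */
	assert(hi == 0x11223344U);	/* word at (index << 3) + 4 */
	return 0;
}
```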
...
@@ -18,16 +18,14 @@
 #error ARMv7 target does not support LPAE MMU descriptors
 #endif
 
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
 int xlat_arch_is_granule_size_supported(size_t size)
 {
 	/*
-	 * The Trusted Firmware uses long descriptor translation table format,
-	 * which supports 4 KiB pages only.
+	 * The library uses the long descriptor translation table format, which
+	 * supports 4 KiB pages only.
 	 */
 	return (size == (4U * 1024U));
 }
...
@@ -50,18 +48,12 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
 	return (read_sctlr() & SCTLR_M_BIT) != 0;
 }
 
-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
 {
-	/*
-	 * Ensure the translation table write has drained into memory before
-	 * invalidating the TLB entry.
-	 */
-	dsbishst();
-	tlbimvaais(TLBI_ADDR(va));
+	return UPPER_ATTRS(XN);
 }
 
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
...
@@ -103,29 +95,32 @@ int xlat_arch_current_el(void)
 	/*
 	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
 	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+	 *
+	 * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime
+	 * in AArch64 except for the XN bits, but we set and unset them at the
+	 * same time, so there's no difference in practice.
 	 */
-	return 3;
+	return 1;
 }
 
 /*******************************************************************************
  * Function for enabling the MMU in Secure PL1, assuming that the page tables
  * have already been created.
  ******************************************************************************/
-void setup_mmu_cfg(unsigned int flags,
-		   const uint64_t *base_table,
-		   unsigned long long max_pa,
-		   uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+		   const uint64_t *base_table, unsigned long long max_pa,
+		   uintptr_t max_va, __unused int xlat_regime)
 {
-	u_register_t mair0, ttbcr;
-	uint64_t ttbr0;
+	uint64_t mair, ttbr0;
+	uint32_t ttbcr;
 
 	assert(IS_IN_SECURE());
 
 	/* Set attributes in the right indices of the MAIR */
-	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
-			ATTR_IWBWA_OWBWA_NTR_INDEX);
-	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
-			ATTR_NON_CACHEABLE_INDEX);
+	mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
+			ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
 
 	/*
...
@@ -173,17 +168,17 @@ void setup_mmu_cfg(unsigned int flags,
 	/* Set TTBR0 bits as well */
 	ttbr0 = (uint64_t)(uintptr_t) base_table;
 
 #if ARM_ARCH_AT_LEAST(8, 2)
 	/*
-	 * Enable CnP bit so as to share page tables with all PEs.
-	 * Mandatory for ARMv8.2 implementations.
+	 * Enable CnP bit so as to share page tables with all PEs. This
+	 * is mandatory for ARMv8.2 implementations.
 	 */
 	ttbr0 |= TTBR_CNP_BIT;
 #endif
 
 	/* Now populate MMU configuration */
-	mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
-	mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
-	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
-	mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
+	params[MMU_CFG_MAIR] = mair;
+	params[MMU_CFG_TCR] = (uint64_t) ttbcr;
+	params[MMU_CFG_TTBR0] = ttbr0;
 }
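For reference, the MAIR0 accumulation above packs one 8-bit memory attribute per attribute index into the register's byte lanes; MAIR0_ATTR_SET is assumed to reduce to a shift by `index * 8`. A stand-alone sketch with illustrative attribute values (0x04 for Device-nGnRE, 0xFF for Normal WBWA); the `MY_*` names are placeholders:

```c
#include <assert.h>
#include <stdint.h>

/* Assumed shape of MAIR0_ATTR_SET: attribute byte shifted to its lane. */
#define MY_MAIR0_ATTR_SET(attr, index)	((uint32_t)(attr) << ((index) << 3))

int main(void)
{
	uint32_t mair0 = MY_MAIR0_ATTR_SET(0x04U, 0U) |	/* index 0, byte 0 */
			 MY_MAIR0_ATTR_SET(0xFFU, 1U);	/* index 1, byte 1 */

	assert(mair0 == 0x0000FF04U);
	return 0;
}
```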
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
-#define __XLAT_TABLES_ARCH_PRIVATE_H__
-
-#include <xlat_tables_defs.h>
-#include <xlat_tables_v2.h>
-
-/*
- * Return the execute-never mask that will prevent instruction fetch at the
- * given translation regime.
- */
-static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
-{
-	return UPPER_ATTRS(XN);
-}
-
-#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
...
@@ -43,17 +43,15 @@
 	ldr	x0, =mmu_cfg_params
 
 	/* MAIR */
-	ldr	w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+	ldr	x1, [x0, #(MMU_CFG_MAIR << 3)]
 	_msr	mair, \el, x1
 
 	/* TCR */
-	ldr	w2, [x0, #(MMU_CFG_TCR << 2)]
+	ldr	x2, [x0, #(MMU_CFG_TCR << 3)]
 	_msr	tcr, \el, x2
 
 	/* TTBR */
-	ldr	w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
-	ldr	w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
-	orr	x3, x3, x4, lsl #32
+	ldr	x3, [x0, #(MMU_CFG_TTBR0 << 3)]
 	_msr	ttbr0, \el, x3
 
 	/*
...
...
@@ -13,8 +13,6 @@
 #include <xlat_tables_v2.h>
 #include "../xlat_tables_private.h"
 
-uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
-
 /*
  * Returns 1 if the provided granule size is supported, 0 otherwise.
  */
...
@@ -113,19 +111,17 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 	}
 }
 
-void xlat_arch_tlbi_va(uintptr_t va)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
 {
-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
-#endif
+	if (xlat_regime == EL1_EL0_REGIME) {
+		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+	} else {
+		assert(xlat_regime == EL3_REGIME);
+		return UPPER_ATTRS(XN);
+	}
 }
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
...
@@ -182,12 +178,11 @@ int xlat_arch_current_el(void)
 	return el;
 }
 
-void setup_mmu_cfg(unsigned int flags,
-		   const uint64_t *base_table,
-		   unsigned long long max_pa,
-		   uintptr_t max_va)
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+		   const uint64_t *base_table, unsigned long long max_pa,
+		   uintptr_t max_va, int xlat_regime)
 {
-	uint64_t mair, ttbr, tcr;
+	uint64_t mair, ttbr0, tcr;
 	uintptr_t virtual_addr_space_size;
 
 	/* Set attributes in the right indices of the MAIR. */
...
@@ -195,8 +190,6 @@ void setup_mmu_cfg(unsigned int flags,
 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
 	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
 
-	ttbr = (uint64_t) base_table;
-
 	/*
 	 * Limit the input address ranges and memory region sizes translated
 	 * using TTBR0 to the given virtual address space size.
...
@@ -232,30 +225,29 @@
 	 */
 	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
 
-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	/*
-	 * TCR_EL1.EPD1: Disable translation table walk for addresses that are
-	 * translated using TTBR1_EL1.
-	 */
-	tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-#endif
-
-	mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
-	mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
-
-	/* Set TTBR bits as well */
-	if (ARM_ARCH_AT_LEAST(8, 2)) {
-		/*
-		 * Enable CnP bit so as to share page tables with all PEs. This
-		 * is mandatory for ARMv8.2 implementations.
-		 */
-		ttbr |= TTBR_CNP_BIT;
-	}
-
-	mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
-	mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
+	if (xlat_regime == EL1_EL0_REGIME) {
+		/*
+		 * TCR_EL1.EPD1: Disable translation table walk for addresses
+		 * that are translated using TTBR1_EL1.
+		 */
+		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+	} else {
+		assert(xlat_regime == EL3_REGIME);
+		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+	}
+
+	/* Set TTBR bits as well */
+	ttbr0 = (uint64_t) base_table;
+
+#if ARM_ARCH_AT_LEAST(8, 2)
+	/*
+	 * Enable CnP bit so as to share page tables with all PEs. This
+	 * is mandatory for ARMv8.2 implementations.
+	 */
+	ttbr0 |= TTBR_CNP_BIT;
+#endif
+
+	params[MMU_CFG_MAIR] = mair;
+	params[MMU_CFG_TCR] = tcr;
+	params[MMU_CFG_TTBR0] = ttbr0;
 }
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
-#define __XLAT_TABLES_ARCH_PRIVATE_H__
-
-#include <assert.h>
-#include <xlat_tables_defs.h>
-#include <xlat_tables_v2.h>
-
-/*
- * Return the execute-never mask that will prevent instruction fetch at all ELs
- * that are part of the given translation regime.
- */
-static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
-{
-	if (regime == EL1_EL0_REGIME) {
-		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
-	} else {
-		assert(regime == EL3_REGIME);
-		return UPPER_ATTRS(XN);
-	}
-}
-
-#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
...
@@ -10,5 +10,3 @@ XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
 			xlat_tables_context.c \
 			xlat_tables_core.c \
 			xlat_tables_utils.c)
-
-INCLUDES += -Ilib/xlat_tables_v2/${ARCH}
...
@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
+#include <assert.h>
 #include <debug.h>
 #include <platform_def.h>
 #include <xlat_tables_defs.h>
...
@@ -11,6 +12,12 @@
 #include "xlat_tables_private.h"
 
+/*
+ * MMU configuration register values for the active translation context. Used
+ * from the MMU assembly helpers.
+ */
+uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
 /*
  * Each platform can define the size of its physical and virtual address spaces.
  * If the platform hasn't defined one or both of them, default to
...
@@ -69,6 +76,17 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
 
 void init_xlat_tables(void)
 {
+	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
+
+	int current_el = xlat_arch_current_el();
+
+	if (current_el == 1) {
+		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+	} else {
+		assert(current_el == 3);
+		tf_xlat_ctx.xlat_regime = EL3_REGIME;
+	}
+
 	init_xlat_tables_ctx(&tf_xlat_ctx);
 }
...
@@ -93,8 +111,9 @@ void init_xlat_tables(void)
 
 void enable_mmu_secure(unsigned int flags)
 {
-	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
 	enable_mmu_direct(flags);
 }
...
@@ -102,15 +121,17 @@ void enable_mmu_secure(unsigned int flags)
 
 void enable_mmu_el1(unsigned int flags)
 {
-	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
 	enable_mmu_direct_el1(flags);
 }
 
 void enable_mmu_el3(unsigned int flags)
 {
-	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+		      tf_xlat_ctx.va_max_address, EL3_REGIME);
 	enable_mmu_direct_el3(flags);
 }
...
...
@@ -12,7 +12,6 @@
 #include <string.h>
 #include <types.h>
 #include <utils_def.h>
-#include <xlat_tables_arch_private.h>
 #include <xlat_tables_defs.h>
 #include <xlat_tables_v2.h>
...
@@ -104,12 +103,14 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
 	 */
 	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
 
 	/*
-	 * Always set the access flag, as TF doesn't manage access flag faults.
+	 * Always set the access flag, as this library assumes access flag
+	 * faults aren't managed.
+	 */
+	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+	/*
 	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
 	 * memory region attributes.
 	 */
-	desc |= LOWER_ATTRS(ACCESS_FLAG);
 	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
 	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
...
@@ -155,9 +156,10 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
 	} else { /* Normal memory */
 		/*
 		 * Always map read-write normal memory as execute-never.
-		 * (Trusted Firmware doesn't self-modify its code, therefore
-		 * R/W memory is reserved for data storage, which must not be
-		 * executable.)
+		 * This library assumes that it is used by software that does
+		 * not self-modify its code, therefore R/W memory is reserved
+		 * for data storage, which must not be executable.
+		 *
 		 * Note that setting the XN bit here is for consistency only.
 		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
 		 * which makes any writable memory region to be treated as
...
@@ -311,7 +313,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
 
 			table_base[table_idx] = INVALID_DESC;
-			xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
+			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
 
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
...
@@ -327,8 +329,8 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
 			 */
 			if (xlat_table_is_empty(ctx, subtable)) {
 				table_base[table_idx] = INVALID_DESC;
-				xlat_arch_tlbi_va_regime(table_idx_va,
-						ctx->xlat_regime);
+				xlat_arch_tlbi_va(table_idx_va,
+						  ctx->xlat_regime);
 			}
 		} else {
...
...
@@ -35,22 +35,24 @@
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime);
+
 /*
  * Invalidate all TLB entries that match the given virtual address. This
  * operation applies to all PEs in the same Inner Shareable domain as the PE
  * that executes this function. This function must be called for every
- * translation table entry that is modified.
- *
- * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
- * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
- * the given translation regime.
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
  *
  * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
  * pertaining to a higher exception level, e.g. invalidating EL3 entries from
 * S-EL1.
  */
-void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
 
 /*
  * This function has to be called at the end of any code that uses the function
...
@@ -86,10 +88,6 @@ int xlat_arch_current_el(void);
  */
 unsigned long long xlat_arch_get_max_supported_pa(void);
 
-/* Enable MMU and configure it to use the specified translation tables. */
-void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
-		   unsigned long long max_pa, uintptr_t max_va);
-
 /*
  * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
 * is enabled, 0 otherwise.
...
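The invalidation discipline that this comment and the single remaining prototype imply is the one the unmap and change-attributes paths in this diff already follow: clear the entry, invalidate by VA for the context's regime, then synchronise. As a minimal sketch (`my_unmap_one` is hypothetical; the three calls are the library-internal API shown elsewhere in this diff):

```c
#include <xlat_tables_v2.h>

static void my_unmap_one(uint64_t *entry, uintptr_t va, const xlat_ctx_t *ctx)
{
	*entry = INVALID_DESC;				/* remove the mapping */
	xlat_arch_tlbi_va(va, ctx->xlat_regime);	/* TLBI by VA, this regime */
	xlat_arch_tlbi_va_sync();			/* wait for completion */
}
```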
...
@@ -11,7 +11,6 @@
 #include <platform_def.h>
 #include <types.h>
 #include <utils_def.h>
-#include <xlat_tables_arch_private.h>
 #include <xlat_tables_defs.h>
 #include <xlat_tables_v2.h>
...
@@ -544,7 +543,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
 		*entry = INVALID_DESC;
 
 		/* Invalidate any cached copy of this mapping in the TLBs. */
-		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
 
 		/* Ensure completion of the invalidation. */
 		xlat_arch_tlbi_va_sync();
...
...
@@ -107,38 +107,22 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 	 * MMU-related registers
 	 * ---------------------
 	 */
+	xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;
+
+	uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
 
-	/* Set attributes in the right indices of the MAIR */
-	u_register_t mair_el1 =
-		MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
-		MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
-		MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
-
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);
-
-	/* Setup TCR_EL1. */
-	u_register_t tcr_ps_bits = tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
-	u_register_t tcr_el1 =
-		/* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
-		(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE)) |
-		/* Inner and outer WBWA, shareable. */
-		TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |
-		/* Set the granularity to 4KB. */
-		TCR_TG0_4K |
-		/* Limit Intermediate Physical Address Size. */
-		tcr_ps_bits << TCR_EL1_IPS_SHIFT |
-		/* Disable translations using TBBR1_EL1. */
-		TCR_EPD1_BIT
-		/* The remaining fields related to TBBR1_EL1 are left as zero. */
-	;
-
-	tcr_el1 &= ~(
-		/* Enable translations using TBBR0_EL1 */
-		TCR_EPD0_BIT
-	);
-
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+		      xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+		      EL1_EL0_REGIME);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+		      mmu_cfg_params[MMU_CFG_MAIR]);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+		      mmu_cfg_params[MMU_CFG_TCR]);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+		      mmu_cfg_params[MMU_CFG_TTBR0]);
 
 	/* Setup SCTLR_EL1 */
 	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
...
@@ -174,13 +158,6 @@ void spm_sp_setup(sp_context_t *sp_ctx)
 	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
 
-	uint64_t *xlat_base =
-			((xlat_ctx_t *)sp_ctx->xlat_ctx_handle)->base_table;
-
-	/* Point TTBR0_EL1 at the tables of the context created for the SP. */
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
-			(u_register_t)xlat_base);
-
 	/*
 	 * Setup other system registers
 	 * ----------------------------
...