Commit 78b4c5b0 authored by danh-arm, committed by GitHub

Merge pull request #778 from antonio-nino-diaz-arm/an/xlat-fixes

Fixes and improvements to translation tables library
parents 17612953 2240f45b
@@ -443,7 +443,19 @@ constant must also be defined:
* **#define : ADDR_SPACE_SIZE**
Defines the total size of the address space in bytes. For example, for a 32
bit address space, this value should be `(1ull << 32)`.
bit address space, this value should be `(1ull << 32)`. This definition is
now deprecated; platforms should use `PLAT_PHY_ADDR_SPACE_SIZE` and
`PLAT_VIRT_ADDR_SPACE_SIZE` instead.
* **#define : PLAT_VIRT_ADDR_SPACE_SIZE**
Defines the total size of the virtual address space in bytes. For example,
for a 32 bit virtual address space, this value should be `(1ull << 32)`.
* **#define : PLAT_PHY_ADDR_SPACE_SIZE**
Defines the total size of the physical address space in bytes. For example,
for a 32 bit physical address space, this value should be `(1ull << 32)`.
If the platform port uses the IO storage framework, the following constants
must also be defined:
......
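As an illustration of the porting guide change above: a platform port moving off the deprecated macro defines the two new constants in its platform_def.h. This is only a sketch; the 32-bit sizes are an example (they match what the ARM standard platform header sets later in this diff):
#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)	/* example value */
#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)	/* example value */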
@@ -134,6 +134,16 @@
#define ID_AA64PFR0_GIC_WIDTH 4
#define ID_AA64PFR0_GIC_MASK ((1 << ID_AA64PFR0_GIC_WIDTH) - 1)
/* ID_AA64MMFR0_EL1 definitions */
#define ID_AA64MMFR0_EL1_PARANGE_MASK 0xf
#define PARANGE_0000 32
#define PARANGE_0001 36
#define PARANGE_0010 40
#define PARANGE_0011 42
#define PARANGE_0100 44
#define PARANGE_0101 48
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT 12
#define ID_PFR1_VIRTEXT_MASK 0xf
......
@@ -196,6 +196,7 @@ void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
******************************************************************************/
DEFINE_SYSREG_READ_FUNC(midr_el1)
DEFINE_SYSREG_READ_FUNC(mpidr_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
DEFINE_SYSREG_RW_FUNCS(scr_el3)
DEFINE_SYSREG_RW_FUNCS(hcr_el2)
......
@@ -93,6 +93,11 @@
#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L0_XLAT_ADDRESS_SHIFT (L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
#define XLAT_BLOCK_SIZE(level) ((u_register_t)1 << XLAT_ADDR_SHIFT(level))
#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - 1)
/*
* AP[1] bit is ignored by hardware and is
......
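A quick worked check of the XLAT_ADDR_SHIFT / XLAT_BLOCK_SIZE / XLAT_BLOCK_MASK helpers added above, assuming the library's 4 KB granule (PAGE_SIZE_SHIFT == 12, XLAT_TABLE_ENTRIES_SHIFT == 9, XLAT_TABLE_LEVEL_MAX == 3):
/*
 * XLAT_ADDR_SHIFT(3) == 12  ->  XLAT_BLOCK_SIZE(3) == 4 KB (one page)
 * XLAT_ADDR_SHIFT(2) == 21  ->  XLAT_BLOCK_SIZE(2) == 2 MB
 * XLAT_ADDR_SHIFT(1) == 30  ->  XLAT_BLOCK_SIZE(1) == 1 GB
 *
 * XLAT_BLOCK_MASK(level) is the alignment mask for that block size; it is
 * what the new granularity check in mmap_desc() below relies on:
 *     assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
 */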
@@ -205,7 +205,8 @@
* Required platform porting definitions common to all ARM standard platforms
*****************************************************************************/
#define ADDR_SPACE_SIZE (1ull << 32)
#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32)
/*
* This macro defines the deepest retention state possible. A higher state
......
@@ -39,49 +39,60 @@
/*
* Each platform can define the size of the virtual address space, which is
* defined in ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus the width
* of said address space. The value of TTBCR.TxSZ must be in the range 0 to
* 7 [1], which means that the virtual address space width must be in the range
* 32 to 25 bits.
* defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
* the width of said address space. The value of TTBCR.TxSZ must be in the
* range 0 to 7 [1], which means that the virtual address space width must be
* in the range 32 to 25 bits.
*
* Here we calculate the initial lookup level from the value of ADDR_SPACE_SIZE.
* For a 4 KB page size, level 1 supports virtual address spaces of widths 32
* to 31 bits, and level 2 from 30 to 25. Wider or narrower address spaces are
* not supported. As a result, level 3 cannot be used as initial lookup level
* with 4 KB granularity [1].
* Here we calculate the initial lookup level from the value of
* PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
* address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
* narrower address spaces are not supported. As a result, level 3 cannot be
* used as initial lookup level with 4 KB granularity [1].
*
* For example, for a 31-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 31),
* TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table G4-5 in
* the ARM ARM, the initial lookup level for such an address space is 1.
* For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
* 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
* G4-5 in the ARM ARM, the initial lookup level for an address space like that
* is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* [1] Section G4.6.5
*/
#if ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
# error "ADDR_SPACE_SIZE is too big."
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
#elif ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
# error "ADDR_SPACE_SIZE is too small."
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
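For the 31-bit example used in the comment above, the preprocessor chain resolves as follows (4 KB granule, so L1_XLAT_ADDRESS_SHIFT is 30):
/*
 * PLAT_VIRT_ADDR_SPACE_SIZE == (1ull << 31)
 *   not > (1ULL << 32)                    -> not "too big" (TxSZ == 1)
 *   but > (1 << L1_XLAT_ADDRESS_SHIFT)    -> level 1 branch taken
 *
 *   => XLAT_TABLE_LEVEL_BASE  == 1
 *      NUM_BASE_LEVEL_ENTRIES == (1ull << 31) >> 30 == 2
 */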
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
#if DEBUG
static unsigned long long get_max_supported_pa(void)
{
/* Physical address space size for long descriptor format. */
return (1ULL << 40) - 1ULL;
}
#endif
void init_xlat_tables(void)
{
unsigned long long max_pa;
@@ -89,7 +100,10 @@ void init_xlat_tables(void)
print_mmap();
init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
assert(max_va < ADDR_SPACE_SIZE);
assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
}
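Worked values for the three new asserts above, assuming the 32-bit address spaces defined by the ARM standard platforms: the AArch32 long-descriptor format supports at most 40-bit physical addresses, which is what get_max_supported_pa() returns here.
/*
 * max_va <= 0xFFFFFFFF             (PLAT_VIRT_ADDR_SPACE_SIZE - 1)
 * max_pa <= 0xFFFFFFFF             (PLAT_PHY_ADDR_SPACE_SIZE - 1)
 * 0xFFFFFFFF <= (1ULL << 40) - 1   (get_max_supported_pa())
 *
 * A platform defining PLAT_PHY_ADDR_SPACE_SIZE larger than (1ull << 40)
 * would trip the last assert in DEBUG builds.
 */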
/*******************************************************************************
@@ -122,7 +136,7 @@ void enable_mmu_secure(unsigned int flags)
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
TTBCR_RGN0_INNER_WBA |
(32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE));
(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
ttbcr |= TTBCR_EPD1_BIT;
write_ttbcr(ttbcr);
......
@@ -31,28 +31,33 @@
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
/*
* Each platform can define the size of the virtual address space, which is
* defined in ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the width of
* said address space. The value of TCR.TxSZ must be in the range 16 to 39 [1],
* which means that the virtual address space width must be in the range 48 to
* 25 bits.
* defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
* width of said address space. The value of TCR.TxSZ must be in the range 16
* to 39 [1], which means that the virtual address space width must be in the
* range 48 to 25 bits.
*
* Here we calculate the initial lookup level from the value of ADDR_SPACE_SIZE.
* For a 4 KB page size, level 0 supports virtual address spaces of widths 48 to
* 40 bits, level 1 from 39 to 31, and level 2 from 30 to 25. Wider or narrower
* address spaces are not supported. As a result, level 3 cannot be used as
* initial lookup level with 4 KB granularity. [2]
* Here we calculate the initial lookup level from the value of
* PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
* address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
* from 30 to 25. Wider or narrower address spaces are not supported. As a
* result, level 3 cannot be used as initial lookup level with 4 KB
* granularity. [2]
*
* For example, for a 35-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 35),
* TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table D4-11 in
* the ARM ARM, the initial lookup level for such an address space is 1.
* For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
* 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
* D4-11 in the ARM ARM, the initial lookup level for an address space like
* that is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
@@ -60,28 +65,31 @@
* [2] Section D4.2.5
*/
#if ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
# error "ADDR_SPACE_SIZE is too big."
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
#elif ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 0
# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
#elif ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
# define NUM_BASE_LEVEL_ENTRIES \
(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
# error "ADDR_SPACE_SIZE is too small."
# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
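Continuing the 35-bit example from the comment above, the AArch64 chain resolves like this (4 KB granule, so the L0/L1 shifts are 39 and 30):
/*
 * PLAT_VIRT_ADDR_SPACE_SIZE == (1ull << 35)
 *   not > (1ULL << 48)                       -> not "too big" (TxSZ == 29)
 *   not > (1ULL << L0_XLAT_ADDRESS_SHIFT)    -> level 0 not needed
 *   but > (1 << L1_XLAT_ADDRESS_SHIFT)       -> level 1 branch taken
 *
 *   => XLAT_TABLE_LEVEL_BASE  == 1
 *      NUM_BASE_LEVEL_ENTRIES == (1ull << 35) >> 30 == 32
 *
 * which matches the initial lookup level of 1 given in the comment.
 */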
@@ -119,6 +127,25 @@ static unsigned long long calc_physical_addr_size_bits(
return TCR_PS_BITS_4GB;
}
#if DEBUG
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
PARANGE_0101
};
static unsigned long long get_max_supported_pa(void)
{
u_register_t pa_range = read_id_aa64mmfr0_el1() &
ID_AA64MMFR0_EL1_PARANGE_MASK;
/* All other values are reserved */
assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
#endif
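Unlike the AArch32 port, the AArch64 limit comes from ID_AA64MMFR0_EL1.PARange rather than a fixed value. A sketch with an assumed register reading:
/*
 * Example: a core reporting PARange == 0b0010 supports 40-bit physical
 * addresses (PARANGE_0010 == 40 in the new arch.h definitions), so
 *
 *     get_max_supported_pa() == (1ULL << 40) - 1ULL
 *
 * and a PLAT_PHY_ADDR_SPACE_SIZE of (1ull << 32) satisfies the debug-only
 * assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa()).
 */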
void init_xlat_tables(void)
{
unsigned long long max_pa;
@@ -126,8 +153,12 @@ void init_xlat_tables(void)
print_mmap();
init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
assert(max_va < ADDR_SPACE_SIZE);
}
/*******************************************************************************
@@ -165,7 +196,7 @@ void init_xlat_tables(void)
/* Set T0SZ to (64 - width of virtual address space) */ \
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
TCR_RGN_INNER_WBA | \
(64 - __builtin_ctzl(ADDR_SPACE_SIZE)); \
(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
\
......
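One more worked value for the TCR/TTBCR programming above, for the example 32-bit virtual address space:
/*
 * PLAT_VIRT_ADDR_SPACE_SIZE == (1ull << 32)
 *   AArch64: __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE) == 32,
 *            so TCR.T0SZ is programmed to 64 - 32 = 32 (valid range 16..39).
 *   AArch32: TTBCR.T0SZ is likewise 32 minus the width, i.e. 0
 *            (valid range 0..7).
 */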
@@ -32,12 +32,14 @@
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "xlat_tables_private.h"
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
@@ -102,6 +104,11 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
assert(base_pa < end_pa); /* Check for overflows */
assert(base_va < end_va);
assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
(PLAT_VIRT_ADDR_SPACE_SIZE - 1));
assert((base_pa + (unsigned long long)size - 1ULL) <=
(PLAT_PHY_ADDR_SPACE_SIZE - 1));
#if DEBUG
/* Check for PAs and VAs overlaps with all other regions */
@@ -198,6 +205,9 @@ static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
uint64_t desc;
int mem_type;
/* Make sure that the granularity is fine enough to map this address. */
assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
desc = addr_pa;
/*
* There are different translation table descriptors for level 3 and the
@@ -343,7 +353,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
if (mm->base_va > base_va + level_size - 1) {
/* Next region is after this area. Nothing to map yet */
desc = INVALID_DESC;
} else {
/* Make sure that the current level allows block descriptors */
} else if (level >= XLAT_BLOCK_LEVEL_MIN) {
/*
* Try to get attributes of this area. It will fail if
* there are partially overlapping regions. On success,
@@ -372,7 +383,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
*table++ = desc;
base_va += level_size;
} while ((base_va & level_index_mask) && (base_va - 1 < ADDR_SPACE_SIZE - 1));
} while ((base_va & level_index_mask) &&
(base_va - 1 < PLAT_VIRT_ADDR_SPACE_SIZE - 1));
return mm;
}
......
@@ -32,10 +32,61 @@
#define __XLAT_TABLES_PRIVATE_H__
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
/* The virtual address space size must be a power of two. */
CASSERT(IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
/*
* If the platform hasn't defined a physical and a virtual address space size
* default to ADDR_SPACE_SIZE.
*/
#if ERROR_DEPRECATED
# ifdef ADDR_SPACE_SIZE
# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
# endif
#endif
/* The virtual and physical address space sizes must be powers of two. */
CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
assert_valid_virt_addr_space_size);
CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
assert_valid_phy_addr_space_size);
/*
* In AArch32 state, the MMU only supports 4KB page granularity, which means
* that the first translation table level is either 1 or 2. Both of them are
* allowed to have block and table descriptors. See section G4.5.6 of the
* ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
*
* In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
* granularity. For 4KB granularity, a level 0 table descriptor doesn't support
* block translation. For 16KB, the same thing happens to levels 0 and 1. For
* 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
* Reference Manual (DDI 0487A.k) for more information.
*
* The define below specifies the first table level that allows block
* descriptors.
*/
#ifdef AARCH32
# define XLAT_BLOCK_LEVEL_MIN 1
#else /* if AArch64 */
# if PAGE_SIZE == (4*1024) /* 4KB */
# define XLAT_BLOCK_LEVEL_MIN 1
# else /* 16KB or 64KB */
# define XLAT_BLOCK_LEVEL_MIN 2
# endif
#endif /* AARCH32 */
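To make the new XLAT_BLOCK_LEVEL_MIN limit concrete (the values follow from the comment above):
/*
 * 4 KB granule, AArch64: XLAT_BLOCK_LEVEL_MIN == 1
 *   - a level 0 entry may only point to a level 1 table
 *   - a 1 GB-aligned, 1 GB region can be mapped by a single level 1 block
 * 4 KB granule, AArch32: XLAT_BLOCK_LEVEL_MIN == 1
 *   - the walk starts at level 1 or 2, and both allow block descriptors
 *
 * init_xlation_table_inner() only considers a block descriptor once
 * "level >= XLAT_BLOCK_LEVEL_MIN", as shown in the common code hunk above.
 */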
void print_mmap(void);
void init_xlation_table(uintptr_t base_va, uint64_t *table,
......