Commit 3ca9928d authored by Soby Mathew

Refactor the xlat_tables library code

The AArch32 long descriptor format and the AArch64 descriptor format
correspond to each other, which makes it possible to share xlat_tables
library code between AArch64 and AArch32. This patch refactors the
xlat_tables library code to separate the common functionality from the
architecture-specific code. Prior to this patch, all of the xlat_tables
library code was in the `lib/aarch64/xlat_tables.c` file. The refactored
code is now in the `lib/xlat_tables/` directory: the AArch64-specific
programming for xlat_tables is in `lib/xlat_tables/aarch64/xlat_tables.c`
and the code common to AArch64 and AArch32 is in
`lib/xlat_tables/xlat_tables_common.c`. The data types used in the
xlat_tables library APIs are also reworked to make them compatible with
both AArch64 and AArch32.

The `lib/aarch64/xlat_tables.c` file now includes the new xlat_tables
library files to retain compatibility for existing platform ports. The
macros related to the xlat_tables library are also moved from
`include/lib/aarch64/arch.h` to the header `include/lib/xlat_tables.h`.

NOTE: THE `lib/aarch64/xlat_tables.c` FILE IS DEPRECATED AND PLATFORM PORTS
ARE EXPECTED TO INCLUDE THE NEW XLAT_TABLES LIBRARY FILES IN THEIR MAKEFILES.

Change-Id: I3d17217d24aaf3a05a4685d642a31d4d56255a0f
parent 72c1dc14
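
As a quick illustration of the reworked, architecture-neutral API
(signatures as in the header diff below), a platform port might describe
and enable its memory map as sketched here. This is a minimal sketch, not
platform code: the regions, sizes and the `plat_configure_mmu` name are
hypothetical; only the types, attribute flags and functions visible in the
diff are used.

```c
#include <xlat_tables.h>

/* Hypothetical platform memory map, using the reworked types:
 * 64-bit physical addresses, pointer-width virtual addresses,
 * size_t sizes. The list is terminated by a zero-size entry. */
static const mmap_region_t plat_mmap[] = {
	{ 0x00000000ULL, 0x00000000, 0x10000000, MT_DEVICE | MT_RW },
	{ 0x80000000ULL, 0x80000000, 0x10000000, MT_MEMORY | MT_RW | MT_NS },
	{ 0 }
};

void plat_configure_mmu(void)
{
	mmap_add(plat_mmap);	/* common code: record the regions */
	init_xlat_tables();	/* common entry point: build the tables */
	enable_mmu_el3(0);	/* AArch64-specific: enable the MMU at EL3 */
}
```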
@@ -38,6 +38,7 @@
#include <io_storage.h>
#include <platform.h>
#include <string.h>
#include <xlat_tables.h>
unsigned long page_align(unsigned long value, unsigned dir)
{
@@ -359,80 +359,6 @@
#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT))
/* Miscellaneous MMU related constants */
#define NUM_2MB_IN_GB (1 << 9)
#define NUM_4K_IN_2MB (1 << 9)
#define NUM_GB_IN_4GB (1 << 2)
#define TWO_MB_SHIFT 21
#define ONE_GB_SHIFT 30
#define FOUR_KB_SHIFT 12
#define ONE_GB_INDEX(x) ((x) >> ONE_GB_SHIFT)
#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
#define INVALID_DESC 0x0
#define BLOCK_DESC 0x1
#define TABLE_DESC 0x3
#define FIRST_LEVEL_DESC_N ONE_GB_SHIFT
#define SECOND_LEVEL_DESC_N TWO_MB_SHIFT
#define THIRD_LEVEL_DESC_N FOUR_KB_SHIFT
#define LEVEL1 1
#define LEVEL2 2
#define LEVEL3 3
#define XN (1ull << 2)
#define PXN (1ull << 1)
#define CONT_HINT (1ull << 0)
#define UPPER_ATTRS(x) (x & 0x7) << 52
#define NON_GLOBAL (1 << 9)
#define ACCESS_FLAG (1 << 8)
#define NSH (0x0 << 6)
#define OSH (0x2 << 6)
#define ISH (0x3 << 6)
#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
#define PAGE_SIZE (1 << PAGE_SIZE_SHIFT)
#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
#define XLAT_ENTRY_SIZE_SHIFT 3 /* Each MMU table entry is 8 bytes (1 << 3) */
#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT
#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SIZE_SHIFT)
/* Values for number of entries in each MMU translation table */
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES (1 << XLAT_TABLE_ENTRIES_SHIFT)
#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
/* Values to convert a memory address to an index into a translation table */
#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
/*
* AP[1] bit is ignored by hardware and is
* treated as if it is One in EL2/EL3
*/
#define AP_RO (0x1 << 5)
#define AP_RW (0x0 << 5)
#define NS (0x1 << 3)
#define ATTR_NON_CACHEABLE_INDEX 0x2
#define ATTR_DEVICE_INDEX 0x1
#define ATTR_IWBWA_OWBWA_NTR_INDEX 0x0
#define LOWER_ATTRS(x) (((x) & 0xfff) << 2)
#define ATTR_NON_CACHEABLE (0x44)
#define ATTR_DEVICE (0x4)
#define ATTR_IWBWA_OWBWA_NTR (0xff)
#define MAIR_ATTR_SET(attr, index) (attr << (index << 3))
/* Exception Syndrome register bits and bobs */
#define ESR_EC_SHIFT 26
#define ESR_EC_MASK 0x3f
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,6 +31,79 @@
#ifndef __XLAT_TABLES_H__
#define __XLAT_TABLES_H__
/* Miscellaneous MMU related constants */
#define NUM_2MB_IN_GB (1 << 9)
#define NUM_4K_IN_2MB (1 << 9)
#define NUM_GB_IN_4GB (1 << 2)
#define TWO_MB_SHIFT 21
#define ONE_GB_SHIFT 30
#define FOUR_KB_SHIFT 12
#define ONE_GB_INDEX(x) ((x) >> ONE_GB_SHIFT)
#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
#define INVALID_DESC 0x0
#define BLOCK_DESC 0x1
#define TABLE_DESC 0x3
#define FIRST_LEVEL_DESC_N ONE_GB_SHIFT
#define SECOND_LEVEL_DESC_N TWO_MB_SHIFT
#define THIRD_LEVEL_DESC_N FOUR_KB_SHIFT
#define LEVEL1 1
#define LEVEL2 2
#define LEVEL3 3
#define XN (1ull << 2)
#define PXN (1ull << 1)
#define CONT_HINT (1ull << 0)
#define UPPER_ATTRS(x) (x & 0x7) << 52
#define NON_GLOBAL (1 << 9)
#define ACCESS_FLAG (1 << 8)
#define NSH (0x0 << 6)
#define OSH (0x2 << 6)
#define ISH (0x3 << 6)
#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
#define PAGE_SIZE (1 << PAGE_SIZE_SHIFT)
#define PAGE_SIZE_MASK (PAGE_SIZE - 1)
#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
#define XLAT_ENTRY_SIZE_SHIFT 3 /* Each MMU table entry is 8 bytes (1 << 3) */
#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT
#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SIZE_SHIFT)
/* Values for number of entries in each MMU translation table */
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES (1 << XLAT_TABLE_ENTRIES_SHIFT)
#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
/* Values to convert a memory address to an index into a translation table */
#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
/*
* AP[1] bit is ignored by hardware and is
* treated as if it is One in EL2/EL3
*/
#define AP_RO (0x1 << 5)
#define AP_RW (0x0 << 5)
#define NS (0x1 << 3)
#define ATTR_NON_CACHEABLE_INDEX 0x2
#define ATTR_DEVICE_INDEX 0x1
#define ATTR_IWBWA_OWBWA_NTR_INDEX 0x0
#define LOWER_ATTRS(x) (((x) & 0xfff) << 2)
#define ATTR_NON_CACHEABLE (0x44)
#define ATTR_DEVICE (0x4)
#define ATTR_IWBWA_OWBWA_NTR (0xff)
#define MAIR_ATTR_SET(attr, index) (attr << (index << 3))
/*
* Flags to override default values used to program system registers while
@@ -39,6 +112,7 @@
#define DISABLE_DCACHE (1 << 0)
#ifndef __ASSEMBLY__
#include <stddef.h>
#include <stdint.h>
/* Helper macro to define entries for mmap_region_t. It creates
@@ -93,20 +167,21 @@ typedef enum {
* Structure for specifying a single region of memory.
*/
typedef struct mmap_region {
unsigned long base_pa;
unsigned long base_va;
unsigned long size;
mmap_attr_t attr;
unsigned long long base_pa;
uintptr_t base_va;
size_t size;
mmap_attr_t attr;
} mmap_region_t;
void mmap_add_region(unsigned long base_pa, unsigned long base_va,
unsigned long size, unsigned attr);
void mmap_add(const mmap_region_t *mm);
/* Generic translation table APIs */
void init_xlat_tables(void);
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
void mmap_add(const mmap_region_t *mm);
void enable_mmu_el1(uint32_t flags);
void enable_mmu_el3(uint32_t flags);
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el3(unsigned int flags);
#endif /*__ASSEMBLY__*/
#endif /* __XLAT_TABLES_H__ */
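
The translation-table geometry these constants encode can be
sanity-checked numerically. The following standalone sketch re-states the
relevant definitions from the header above so it compiles on any host;
it adds nothing beyond what the macros already say.

```c
#include <assert.h>
#include <stdio.h>

/* Re-stated from xlat_tables.h above */
#define FOUR_KB_SHIFT            12
#define PAGE_SIZE_SHIFT          FOUR_KB_SHIFT
#define XLAT_ENTRY_SIZE_SHIFT    3	/* 8-byte descriptors */
#define XLAT_TABLE_SIZE_SHIFT    PAGE_SIZE_SHIFT
#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
#define XLAT_TABLE_ENTRIES       (1 << XLAT_TABLE_ENTRIES_SHIFT)
#define L3_XLAT_ADDRESS_SHIFT    PAGE_SIZE_SHIFT
#define L2_XLAT_ADDRESS_SHIFT    (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
#define L1_XLAT_ADDRESS_SHIFT    (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)

int main(void)
{
	/* A 4 KB table of 8-byte entries holds 512 descriptors... */
	assert(XLAT_TABLE_ENTRIES == 512);
	/* ...so each level resolves 9 address bits: L3 indexes at bit 12
	 * (4 KB pages), L2 at bit 21 (2 MB blocks), L1 at bit 30 (1 GB
	 * blocks). */
	assert(L3_XLAT_ADDRESS_SHIFT == 12);
	assert(L2_XLAT_ADDRESS_SHIFT == 21);
	assert(L1_XLAT_ADDRESS_SHIFT == 30);
	printf("xlat geometry OK\n");
	return 0;
}
```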
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,366 +28,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <xlat_tables.h>
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
#define LVL1_SPACER " "
#define LVL2_SPACER " "
#define LVL3_SPACER " "
#define get_level_spacer(level) \
(((level) == 0) ? LVL0_SPACER : \
(((level) == 1) ? LVL1_SPACER : \
(((level) == 2) ? LVL2_SPACER : LVL3_SPACER)))
#define debug_print(...) tf_printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif
#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
/*
* The virtual address space size must be a power of two (as set in TCR.T0SZ).
* As we start the initial lookup at level 1, it must also be between 2 GB and
* 512 GB (with the virtual address size therefore 31 to 39 bits). See section
* D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.i) for more
* information.
*/
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 39) &&
IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
#define UNSET_DESC ~0ul
#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
static unsigned next_xlat;
static unsigned long max_pa;
static unsigned long max_va;
static unsigned long tcr_ps_bits;
/*
* Array of all memory regions stored in order of ascending base address.
* The list is terminated by the first entry with size == 0.
* This file is deprecated and is retained here only for compatibility.
* The xlat_tables library can be found in `lib/xlat_tables` directory.
*/
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
static void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
debug_print("mmap:\n");
mmap_region_t *mm = mmap;
while (mm->size) {
debug_print(" VA:0x%lx PA:0x%lx size:0x%lx attr:0x%x\n",
mm->base_va, mm->base_pa, mm->size, mm->attr);
++mm;
};
debug_print("\n");
#if !ERROR_DEPRECATED
#include "../xlat_tables/xlat_tables_common.c"
#include "../xlat_tables/aarch64/xlat_tables.c"
#endif
}
void mmap_add_region(unsigned long base_pa, unsigned long base_va,
unsigned long size, unsigned attr)
{
mmap_region_t *mm = mmap;
mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
unsigned long pa_end = base_pa + size - 1;
unsigned long va_end = base_va + size - 1;
assert(IS_PAGE_ALIGNED(base_pa));
assert(IS_PAGE_ALIGNED(base_va));
assert(IS_PAGE_ALIGNED(size));
if (!size)
return;
/* Find correct place in mmap to insert new region */
while (mm->base_va < base_va && mm->size)
++mm;
/* Make room for new region by moving other regions up by one place */
memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check we haven't lost the empty sentinel from the end of the array */
assert(mm_last->size == 0);
mm->base_pa = base_pa;
mm->base_va = base_va;
mm->size = size;
mm->attr = attr;
if (pa_end > max_pa)
max_pa = pa_end;
if (va_end > max_va)
max_va = va_end;
}
void mmap_add(const mmap_region_t *mm)
{
while (mm->size) {
mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
++mm;
}
}
static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
unsigned level)
{
unsigned long desc = addr_pa;
int mem_type;
desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;
desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;
desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
desc |= LOWER_ATTRS(ACCESS_FLAG);
mem_type = MT_TYPE(attr);
if (mem_type == MT_MEMORY) {
desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
if (attr & MT_RW)
desc |= UPPER_ATTRS(XN);
} else if (mem_type == MT_NON_CACHEABLE) {
desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
if (attr & MT_RW)
desc |= UPPER_ATTRS(XN);
} else {
assert(mem_type == MT_DEVICE);
desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
desc |= UPPER_ATTRS(XN);
}
debug_print((mem_type == MT_MEMORY) ? "MEM" :
((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
debug_print(attr & MT_RW ? "-RW" : "-RO");
debug_print(attr & MT_NS ? "-NS" : "-S");
return desc;
}
static int mmap_region_attr(mmap_region_t *mm, unsigned long base_va,
unsigned long size)
{
int attr = mm->attr;
int old_mem_type, new_mem_type;
for (;;) {
++mm;
if (!mm->size)
return attr; /* Reached end of list */
if (mm->base_va >= base_va + size)
return attr; /* Next region is after area so end */
if (mm->base_va + mm->size <= base_va)
continue; /* Next region has already been overtaken */
if ((mm->attr & attr) == attr)
continue; /* Region doesn't override attribs so skip */
/*
* Update memory mapping attributes in 2 steps:
* 1) Update access permissions and security state flags
* 2) Update memory type.
*
* See xlat_tables.h for details about the attributes priority
* system and the rules dictating whether attributes should be
* updated.
*/
old_mem_type = MT_TYPE(attr);
new_mem_type = MT_TYPE(mm->attr);
attr &= mm->attr;
if (new_mem_type < old_mem_type)
attr = (attr & ~MT_TYPE_MASK) | new_mem_type;
if (mm->base_va > base_va ||
mm->base_va + mm->size < base_va + size)
return -1; /* Region doesn't fully cover our area */
}
}
static mmap_region_t *init_xlation_table(mmap_region_t *mm,
unsigned long base_va,
unsigned long *table, unsigned level)
{
unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
XLAT_TABLE_ENTRIES_SHIFT;
unsigned level_size = 1 << level_size_shift;
unsigned long level_index_mask = XLAT_TABLE_ENTRIES_MASK << level_size_shift;
assert(level <= 3);
debug_print("New xlat table:\n");
do {
unsigned long desc = UNSET_DESC;
if (!mm->size) {
/* Done mapping regions; finish zeroing the table */
desc = INVALID_DESC;
} else if (mm->base_va + mm->size <= base_va) {
/* Area now after the region so skip it */
++mm;
continue;
}
debug_print("%s VA:0x%lx size:0x%x ", get_level_spacer(level),
base_va, level_size);
if (mm->base_va >= base_va + level_size) {
/* Next region is after area so nothing to map yet */
desc = INVALID_DESC;
} else if (mm->base_va <= base_va && mm->base_va + mm->size >=
base_va + level_size) {
/* Next region covers all of area */
int attr = mmap_region_attr(mm, base_va, level_size);
if (attr >= 0)
desc = mmap_desc(attr,
base_va - mm->base_va + mm->base_pa,
level);
}
/* else Next region only partially covers area, so need */
if (desc == UNSET_DESC) {
/* Area not covered by a region so need finer table */
unsigned long *new_table = xlat_tables[next_xlat++];
assert(next_xlat <= MAX_XLAT_TABLES);
desc = TABLE_DESC | (unsigned long)new_table;
/* Recurse to fill in new table */
mm = init_xlation_table(mm, base_va,
new_table, level+1);
}
debug_print("\n");
*table++ = desc;
base_va += level_size;
} while ((base_va & level_index_mask) && (base_va < ADDR_SPACE_SIZE));
return mm;
}
static unsigned int calc_physical_addr_size_bits(unsigned long max_addr)
{
/* Physical address can't exceed 48 bits */
assert((max_addr & ADDR_MASK_48_TO_63) == 0);
/* 48 bits address */
if (max_addr & ADDR_MASK_44_TO_47)
return TCR_PS_BITS_256TB;
/* 44 bits address */
if (max_addr & ADDR_MASK_42_TO_43)
return TCR_PS_BITS_16TB;
/* 42 bits address */
if (max_addr & ADDR_MASK_40_TO_41)
return TCR_PS_BITS_4TB;
/* 40 bits address */
if (max_addr & ADDR_MASK_36_TO_39)
return TCR_PS_BITS_1TB;
/* 36 bits address */
if (max_addr & ADDR_MASK_32_TO_35)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
}
void init_xlat_tables(void)
{
print_mmap();
init_xlation_table(mmap, 0, l1_xlation_table, 1);
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
assert(max_va < ADDR_SPACE_SIZE);
}
/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
*
* _el: Exception level at which the function will run
* _tcr_extra: Extra bits to set in the TCR register. This mask will
* be OR'ed with the default TCR value.
* _tlbi_fct: Function to invalidate the TLBs at the current
* exception level
******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct) \
void enable_mmu_el##_el(uint32_t flags) \
{ \
uint64_t mair, tcr, ttbr; \
uint32_t sctlr; \
\
assert(IS_IN_EL(_el)); \
assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
\
/* Set attributes in the right indices of the MAIR */ \
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, \
ATTR_IWBWA_OWBWA_NTR_INDEX); \
mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, \
ATTR_NON_CACHEABLE_INDEX); \
write_mair_el##_el(mair); \
\
/* Invalidate TLBs at the current exception level */ \
_tlbi_fct(); \
\
/* Set TCR bits as well. */ \
/* Inner & outer WBWA & shareable + T0SZ = 32 */ \
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
TCR_RGN_INNER_WBA | \
(64 - __builtin_ctzl(ADDR_SPACE_SIZE)); \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
\
/* Set TTBR bits as well */ \
ttbr = (uint64_t) l1_xlation_table; \
write_ttbr0_el##_el(ttbr); \
\
/* Ensure all translation table writes have drained */ \
/* into memory, the TLB invalidation is complete, */ \
/* and translation register writes are committed */ \
/* before enabling the MMU */ \
dsb(); \
isb(); \
\
sctlr = read_sctlr_el##_el(); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
\
if (flags & DISABLE_DCACHE) \
sctlr &= ~SCTLR_C_BIT; \
else \
sctlr |= SCTLR_C_BIT; \
\
write_sctlr_el##_el(sctlr); \
\
/* Ensure the MMU enable takes effect immediately */ \
isb(); \
}
/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
tlbialle3)
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
/*
* The virtual address space size must be a power of two (as set in TCR.T0SZ).
* As we start the initial lookup at level 1, it must also be between 2 GB and
* 512 GB (with the virtual address size therefore 31 to 39 bits). See section
* D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.i) for more
* information.
*/
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 39) &&
IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
#define UNSET_DESC ~0ul
#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
static uint64_t l1_xlation_table[NUM_L1_ENTRIES]
__aligned(NUM_L1_ENTRIES * sizeof(uint64_t));
static unsigned long long tcr_ps_bits;
static unsigned long long calc_physical_addr_size_bits(
unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
assert((max_addr & ADDR_MASK_48_TO_63) == 0);
/* 48 bits address */
if (max_addr & ADDR_MASK_44_TO_47)
return TCR_PS_BITS_256TB;
/* 44 bits address */
if (max_addr & ADDR_MASK_42_TO_43)
return TCR_PS_BITS_16TB;
/* 42 bits address */
if (max_addr & ADDR_MASK_40_TO_41)
return TCR_PS_BITS_4TB;
/* 40 bits address */
if (max_addr & ADDR_MASK_36_TO_39)
return TCR_PS_BITS_1TB;
/* 36 bits address */
if (max_addr & ADDR_MASK_32_TO_35)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
}
void init_xlat_tables(void)
{
unsigned long long max_pa;
uintptr_t max_va;
print_mmap();
init_xlation_table(0, l1_xlation_table, 1, &max_va, &max_pa);
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
assert(max_va < ADDR_SPACE_SIZE);
}
/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
*
* _el: Exception level at which the function will run
* _tcr_extra: Extra bits to set in the TCR register. This mask will
* be OR'ed with the default TCR value.
* _tlbi_fct: Function to invalidate the TLBs at the current
* exception level
******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct) \
void enable_mmu_el##_el(unsigned int flags) \
{ \
uint64_t mair, tcr, ttbr; \
uint32_t sctlr; \
\
assert(IS_IN_EL(_el)); \
assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0); \
\
/* Set attributes in the right indices of the MAIR */ \
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, \
ATTR_IWBWA_OWBWA_NTR_INDEX); \
mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, \
ATTR_NON_CACHEABLE_INDEX); \
write_mair_el##_el(mair); \
\
/* Invalidate TLBs at the current exception level */ \
_tlbi_fct(); \
\
/* Set TCR bits as well. */ \
/* Inner & outer WBWA & shareable + T0SZ = 32 */ \
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
TCR_RGN_INNER_WBA | \
(64 - __builtin_ctzl(ADDR_SPACE_SIZE)); \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
\
/* Set TTBR bits as well */ \
ttbr = (uint64_t) l1_xlation_table; \
write_ttbr0_el##_el(ttbr); \
\
/* Ensure all translation table writes have drained */ \
/* into memory, the TLB invalidation is complete, */ \
/* and translation register writes are committed */ \
/* before enabling the MMU */ \
dsb(); \
isb(); \
\
sctlr = read_sctlr_el##_el(); \
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
\
if (flags & DISABLE_DCACHE) \
sctlr &= ~SCTLR_C_BIT; \
else \
sctlr |= SCTLR_C_BIT; \
\
write_sctlr_el##_el(sctlr); \
\
/* Ensure the MMU enable takes effect immediately */ \
isb(); \
}
/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
tlbialle3)
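
To make the TCR arithmetic above concrete: with a 4 GB address space,
`64 - __builtin_ctzl(ADDR_SPACE_SIZE)` yields T0SZ = 32, and
`calc_physical_addr_size_bits()` selects the smallest PS encoding that
covers the highest mapped physical address. Below is a host-compilable
sketch under those assumptions; the `TCR_PS_BITS_*` values are re-stated
from `arch.h` and should be treated as illustrative, and the one-line PS
check is a simplification of the full ladder in the function above.

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical platform configuration */
#define ADDR_SPACE_SIZE (1ull << 32)	/* 4 GB virtual address space */

/* PS encodings, re-stated from arch.h (illustrative) */
#define TCR_PS_BITS_4GB  0x0
#define TCR_PS_BITS_64GB 0x1

int main(void)
{
	/* TCR.T0SZ = 64 - log2(address space size), as in the macro above */
	unsigned int t0sz = 64 - __builtin_ctzll(ADDR_SPACE_SIZE);
	assert(t0sz == 32);

	/* With the highest mapped PA below 4 GB the PS field stays at the
	 * 32-bit encoding; a PA with bits 32-35 set would bump it to 64 GB. */
	unsigned long long max_pa = 0xFFFFFFFFULL;
	unsigned int ps = (max_pa & ~0xFFFFFFFFULL) ? TCR_PS_BITS_64GB
						    : TCR_PS_BITS_4GB;
	printf("T0SZ=%u PS=0x%x\n", t0sz, ps);
	return 0;
}
```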
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <xlat_tables.h>
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
#define LVL1_SPACER " "
#define LVL2_SPACER " "
#define LVL3_SPACER " "
#define get_level_spacer(level) \
(((level) == 0) ? LVL0_SPACER : \
(((level) == 1) ? LVL1_SPACER : \
(((level) == 2) ? LVL2_SPACER : LVL3_SPACER)))
#define debug_print(...) tf_printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif
#define UNSET_DESC ~0ul
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
static unsigned next_xlat;
static unsigned long long xlat_max_pa;
static uintptr_t xlat_max_va;
/*
* Array of all memory regions stored in order of ascending base address.
* The list is terminated by the first entry with size == 0.
*/
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
debug_print("mmap:\n");
mmap_region_t *mm = mmap;
while (mm->size) {
debug_print(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
(void *)mm->base_va, mm->base_pa,
mm->size, mm->attr);
++mm;
};
debug_print("\n");
#endif
}
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr)
{
mmap_region_t *mm = mmap;
mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
unsigned long long pa_end = base_pa + size - 1;
uintptr_t va_end = base_va + size - 1;
assert(IS_PAGE_ALIGNED(base_pa));
assert(IS_PAGE_ALIGNED(base_va));
assert(IS_PAGE_ALIGNED(size));
if (!size)
return;
/* Find correct place in mmap to insert new region */
while (mm->base_va < base_va && mm->size)
++mm;
/* Make room for new region by moving other regions up by one place */
memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check we haven't lost the empty sentinel from the end of the array */
assert(mm_last->size == 0);
mm->base_pa = base_pa;
mm->base_va = base_va;
mm->size = size;
mm->attr = attr;
if (pa_end > xlat_max_pa)
xlat_max_pa = pa_end;
if (va_end > xlat_max_va)
xlat_max_va = va_end;
}
void mmap_add(const mmap_region_t *mm)
{
while (mm->size) {
mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
++mm;
}
}
static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
int level)
{
uint64_t desc = addr_pa;
int mem_type;
desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;
desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0;
desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
desc |= LOWER_ATTRS(ACCESS_FLAG);
mem_type = MT_TYPE(attr);
if (mem_type == MT_MEMORY) {
desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
if (attr & MT_RW)
desc |= UPPER_ATTRS(XN);
} else if (mem_type == MT_NON_CACHEABLE) {
desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
if (attr & MT_RW)
desc |= UPPER_ATTRS(XN);
} else {
assert(mem_type == MT_DEVICE);
desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
desc |= UPPER_ATTRS(XN);
}
debug_print((mem_type == MT_MEMORY) ? "MEM" :
((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
debug_print(attr & MT_RW ? "-RW" : "-RO");
debug_print(attr & MT_NS ? "-NS" : "-S");
return desc;
}
static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
size_t size)
{
int attr = mm->attr;
int old_mem_type, new_mem_type;
for (;;) {
++mm;
if (!mm->size)
return attr; /* Reached end of list */
if (mm->base_va >= base_va + size)
return attr; /* Next region is after area so end */
if (mm->base_va + mm->size <= base_va)
continue; /* Next region has already been overtaken */
if ((mm->attr & attr) == attr)
continue; /* Region doesn't override attribs so skip */
/*
* Update memory mapping attributes in 2 steps:
* 1) Update access permissions and security state flags
* 2) Update memory type.
*
* See xlat_tables.h for details about the attributes priority
* system and the rules dictating whether attributes should be
* updated.
*/
old_mem_type = MT_TYPE(attr);
new_mem_type = MT_TYPE(mm->attr);
attr &= mm->attr;
if (new_mem_type < old_mem_type)
attr = (attr & ~MT_TYPE_MASK) | new_mem_type;
if (mm->base_va > base_va ||
mm->base_va + mm->size < base_va + size)
return -1; /* Region doesn't fully cover our area */
}
}
static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
uintptr_t base_va,
uint64_t *table,
int level)
{
unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
XLAT_TABLE_ENTRIES_SHIFT;
unsigned level_size = 1 << level_size_shift;
unsigned long long level_index_mask = XLAT_TABLE_ENTRIES_MASK <<
level_size_shift;
assert(level > 0 && level <= 3);
debug_print("New xlat table:\n");
do {
uint64_t desc = UNSET_DESC;
if (!mm->size) {
/* Done mapping regions; finish zeroing the table */
desc = INVALID_DESC;
} else if (mm->base_va + mm->size <= base_va) {
/* Area now after the region so skip it */
++mm;
continue;
}
debug_print("%s VA:%p size:0x%x ", get_level_spacer(level),
(void *)base_va, level_size);
if (mm->base_va >= base_va + level_size) {
/* Next region is after area so nothing to map yet */
desc = INVALID_DESC;
} else if (mm->base_va <= base_va && mm->base_va + mm->size >=
base_va + level_size) {
/* Next region covers all of area */
int attr = mmap_region_attr(mm, base_va, level_size);
if (attr >= 0)
desc = mmap_desc(attr,
base_va - mm->base_va + mm->base_pa,
level);
}
/* else Next region only partially covers area, so need */
if (desc == UNSET_DESC) {
/* Area not covered by a region so need finer table */
uint64_t *new_table = xlat_tables[next_xlat++];
assert(next_xlat <= MAX_XLAT_TABLES);
desc = TABLE_DESC | (unsigned long)new_table;
/* Recurse to fill in new table */
mm = init_xlation_table_inner(mm, base_va,
new_table, level+1);
}
debug_print("\n");
*table++ = desc;
base_va += level_size;
} while ((base_va & level_index_mask) && (base_va < ADDR_SPACE_SIZE));
return mm;
}
void init_xlation_table(uintptr_t base_va, uint64_t *table,
int level, uintptr_t *max_va,
unsigned long long *max_pa)
{
init_xlation_table_inner(mmap, base_va, table, level);
*max_va = xlat_max_va;
*max_pa = xlat_max_pa;
}
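
For reference, the descriptor that `mmap_desc()` composes for, say,
secure read-write device memory mapped as a level-2 block can be
reproduced in isolation. This sketch re-states the relevant attribute
macros from `xlat_tables.h` (with grouping parentheses added) and is
purely illustrative; the physical address is hypothetical.

```c
#include <stdio.h>
#include <stdint.h>

/* Re-stated from xlat_tables.h above */
#define BLOCK_DESC        0x1
#define XN                (1ull << 2)
#define UPPER_ATTRS(x)    (((x) & 0x7) << 52)
#define ACCESS_FLAG       (1 << 8)
#define OSH               (0x2 << 6)
#define AP_RW             (0x0 << 5)
#define ATTR_DEVICE_INDEX 0x1
#define LOWER_ATTRS(x)    (((x) & 0xfff) << 2)

int main(void)
{
	/* Compose the level-2 block descriptor mmap_desc() would build
	 * for secure, read-write device memory at PA 0x40000000. */
	uint64_t desc = 0x40000000ULL;

	desc |= BLOCK_DESC;			/* level < 3: block entry */
	desc |= LOWER_ATTRS(AP_RW);		/* read-write */
	desc |= LOWER_ATTRS(ACCESS_FLAG);	/* AF set: no access faults */
	desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
	desc |= UPPER_ATTRS(XN);		/* device memory: never executable */

	printf("L2 device descriptor: 0x%016llx\n", (unsigned long long)desc);
	return 0;
}
```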
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __XLAT_TABLES_PRIVATE_H__
#define __XLAT_TABLES_PRIVATE_H__
void print_mmap(void);
void init_xlation_table(uintptr_t base_va, uint64_t *table,
int level, uintptr_t *max_va,
unsigned long long *max_pa);
#endif /* __XLAT_TABLES_PRIVATE_H__ */