Commit 0b64f4ef authored by Antonio Nino Diaz

Add dynamic region support to xlat tables lib v2



Added APIs to dynamically add regions to and remove regions from the
translation tables while the MMU is enabled. Only static regions are
allowed to overlap other static ones (for backwards compatibility).

A new private attribute (MT_DYNAMIC / MT_STATIC) has been added to
flag each region as such.

The dynamic mapping functionality can be enabled or disabled at build
time by setting the build option PLAT_XLAT_TABLES_DYNAMIC to 1 or 0.
This can be done per-image.

TLB maintenance code for the dynamic mapping and unmapping of tables
has also been added.

Fixes ARM-software/tf-issues#310

Change-Id: I19e8992005c4292297a382824394490c5387aa3b
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
parent f10644c5
@@ -420,14 +420,23 @@ platform, the following constants must also be defined:
TSP's interrupt handling code.
If the platform port uses the translation table library code, the following
constants must also be defined:
* **#define : PLAT_XLAT_TABLES_DYNAMIC**
Optional flag that can be set per-image to enable the dynamic allocation of
regions even when the MMU is enabled. If not defined, only static
functionality will be available; if defined and set to 1, the dynamic
functionality will also be included.
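For example, a platform that only wants dynamic mapping in BL31 might add
something like the following to its `platform_def.h` (an illustrative
sketch, not part of this patch):

    #if defined(IMAGE_BL31)
    /* Enable dynamic regions for BL31 only (hypothetical choice). */
    # define PLAT_XLAT_TABLES_DYNAMIC 1
    #endif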
* **#define : MAX_XLAT_TABLES**
Defines the maximum number of translation tables that are allocated by the
translation table library code. To minimize the amount of runtime memory
used, choose the smallest value needed to map the required virtual addresses
for each BL stage. If the `PLAT_XLAT_TABLES_DYNAMIC` flag is enabled for a BL
image, `MAX_XLAT_TABLES` must be defined to accommodate the dynamic regions
as well.
* **#define : MAX_MMAP_REGIONS**
@@ -438,7 +447,9 @@ constants must also be defined:
that should be mapped. Then, the translation table library will create the
corresponding tables and descriptors at runtime. To minimize the amount of
runtime memory used, choose the smallest value needed to register the
required regions for each BL stage. If the `PLAT_XLAT_TABLES_DYNAMIC` flag is
enabled for a BL image, `MAX_MMAP_REGIONS` must be defined to accommodate
the dynamic regions as well.
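As a hypothetical sizing example, a BL image that registers 10 static
regions and expects at most 4 dynamic regions mapped at any one time
could use:

    /* 10 static + 4 dynamic regions (hypothetical numbers) */
    #define MAX_MMAP_REGIONS (10 + 4)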
* **#define : ADDR_SPACE_SIZE**
...
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -331,6 +331,15 @@
#define PMCR_N_MASK 0x1f
#define PMCR_N_BITS (PMCR_N_MASK << PMCR_N_SHIFT)
/*******************************************************************************
* Definitions of register offsets, fields and macros for CPU system
* instructions.
******************************************************************************/
#define TLBI_ADDR_SHIFT 0
#define TLBI_ADDR_MASK 0xFFFFF000
#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
/*******************************************************************************
 * Definitions of register offsets and fields in the CNTCTLBase Frame of the
 * system level implementation of the Generic Timer.
@@ -378,6 +387,8 @@
#define TLBIALLIS p15, 0, c8, c3, 0
#define TLBIMVA p15, 0, c8, c7, 1
#define TLBIMVAA p15, 0, c8, c7, 3
#define TLBIMVAAIS p15, 0, c8, c3, 3
#define BPIALLIS p15, 0, c7, c1, 6
#define HSCTLR p15, 4, c1, c0, 0
#define HCR p15, 4, c1, c1, 0
#define HCPTR p15, 4, c1, c1, 2
...
@@ -131,6 +131,13 @@ static inline void tlbi##_op(void) \
__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
}
#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
static inline void bpi##_op(void) \
{ \
u_register_t v = 0; \
__asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
}
#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2) \
static inline void tlbi##_op(u_register_t v) \
{ \
@@ -145,6 +152,10 @@ static inline void tlbi##_op(u_register_t v) \
#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...) \
_DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
/* Define function for simple BPI operation */
#define DEFINE_BPIOP_FUNC(_op, ...) \
_DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
/**********************************************************************
 * Macros to create inline functions for DC operations
 *********************************************************************/
@@ -199,6 +210,7 @@ DEFINE_SYSOP_FUNC(sev)
DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
@@ -263,6 +275,12 @@ DEFINE_TLBIOP_FUNC(all, TLBIALL)
DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
/*
* BPI operation prototypes.
*/
DEFINE_BPIOP_FUNC(allis, BPIALLIS)
/*
 * DC operation prototypes
...
@@ -31,6 +31,7 @@
#ifndef __ARCH_H__
#define __ARCH_H__
#include <utils.h>
/*******************************************************************************
 * MIDR bit definitions
@@ -417,6 +418,15 @@
#define EC_BITS(x) (x >> ESR_EC_SHIFT) & ESR_EC_MASK
/*******************************************************************************
* Definitions of register offsets, fields and macros for CPU system
* instructions.
******************************************************************************/
#define TLBI_ADDR_SHIFT 12
#define TLBI_ADDR_MASK ULL(0x00000FFFFFFFFFFF)
#define TLBI_ADDR(x) (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
/*******************************************************************************
 * Definitions of register offsets and fields in the CNTCTLBase Frame of the
 * system level implementation of the Generic Timer.
...
@@ -124,6 +124,13 @@ DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
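As an illustration of how these accessors combine with the TLBI_ADDR()
macro defined in arch.h (a sketch for reference, not part of the patch;
the VA is hypothetical):

/* Invalidate the cached EL1 translation for one VA, Inner Shareable. */
uintptr_t va = 0x80400000;   /* hypothetical virtual address */
tlbivaae1is(TLBI_ADDR(va));  /* operand carries VA[55:12] in bits [43:0] */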
/*******************************************************************************
 * Cache maintenance accessor prototypes
 ******************************************************************************/
@@ -181,6 +188,7 @@ DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, st)
DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
...
@@ -59,6 +59,7 @@
#define MT_SEC_SHIFT 4
/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
#define MT_EXECUTE_SHIFT 5
/* All other bits are reserved */
/*
 * Memory mapping attributes
@@ -115,6 +116,20 @@ void init_xlat_tables(void);
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
/*
* Add a region with defined base PA and base VA. This type of region can be
* added and removed even if the MMU is enabled.
*
* Returns:
* 0: Success.
* EINVAL: Invalid values were used as arguments.
* ERANGE: Memory limits were surpassed.
* ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
* EPERM: It overlaps another region in an invalid way.
*/
int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
size_t size, unsigned int attr);
/*
 * Add an array of static regions with defined base PA and base VA. This type
 * of region can only be added before initializing the MMU and cannot be
@@ -122,5 +137,16 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
 */
void mmap_add(const mmap_region_t *mm);
/*
* Remove a region with the specified base VA and size. Only dynamic regions can
* be removed, and they can be removed even if the MMU is enabled.
*
* Returns:
* 0: Success.
* EINVAL: The specified region wasn't found.
* EPERM: Trying to remove a static region.
*/
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
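A minimal usage sketch of this pair of APIs (assumes an image built with
PLAT_XLAT_TABLES_DYNAMIC set to 1; the address, size and attributes are
hypothetical):

int rc;

/* Map a 4 KiB secure device page at an identity VA while the MMU is on. */
rc = mmap_add_dynamic_region(0xF0000000ULL, 0xF0000000,
			     0x1000, MT_DEVICE | MT_RW | MT_SECURE);
if (rc != 0)
	ERROR("Failed to map region: %d\n", rc);

/* ... access the device through the new mapping ... */

rc = mmap_remove_dynamic_region(0xF0000000, 0x1000);
if (rc != 0)
	ERROR("Failed to unmap region: %d\n", rc);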
#endif /*__ASSEMBLY__*/
#endif /* __XLAT_TABLES_V2_H__ */
/*
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -72,6 +72,9 @@
 * Provide relatively optimised values for the runtime images (BL31 and BL32).
 * Optimisation is less important for the other, transient boot images so a
 * common, maximum value is used across these images.
*
* They are also used for the dynamically mapped regions in the images that
* enable dynamic memory mapping.
 */
#if defined(IMAGE_BL31) || defined(IMAGE_BL32)
# define PLAT_ARM_MMAP_ENTRIES 6
...
@@ -50,6 +50,47 @@ int is_mmu_enabled(void)
return (read_sctlr() & SCTLR_M_BIT) != 0;
}
#if PLAT_XLAT_TABLES_DYNAMIC
void xlat_arch_tlbi_va(uintptr_t va)
{
/*
* Ensure the translation table write has drained into memory before
* invalidating the TLB entry.
*/
dsbishst();
tlbimvaais(TLBI_ADDR(va));
}
void xlat_arch_tlbi_va_sync(void)
{
/* Invalidate all entries from branch predictors. */
bpiallis();
/*
* A TLB maintenance instruction can complete at any time after
* it is issued, but is only guaranteed to be complete after the
* execution of DSB by the PE that executed the TLB maintenance
* instruction. After the TLB invalidate instruction is
* complete, no new memory accesses using the invalidated TLB
* entries will be observed by any observer of the system
* domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
* "Ordering and completion of TLB maintenance instructions".
*/
dsbish();
/*
* The effects of a completed TLB maintenance instruction are
* only guaranteed to be visible on the PE that executed the
* instruction after the execution of an ISB instruction by the
* PE that executed the TLB maintenance instruction.
*/
isb();
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
void init_xlat_tables_arch(unsigned long long max_pa)
{
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
...
@@ -107,12 +107,66 @@ int is_mmu_enabled(void)
#endif
}
#if PLAT_XLAT_TABLES_DYNAMIC
void xlat_arch_tlbi_va(uintptr_t va)
{
/*
* Ensure the translation table write has drained into memory before
* invalidating the TLB entry.
*/
dsbishst();
#if IMAGE_EL == 1
assert(IS_IN_EL(1));
tlbivaae1is(TLBI_ADDR(va));
#elif IMAGE_EL == 3
assert(IS_IN_EL(3));
tlbivae3is(TLBI_ADDR(va));
#endif
}
void xlat_arch_tlbi_va_sync(void)
{
/*
* A TLB maintenance instruction can complete at any time after
* it is issued, but is only guaranteed to be complete after the
* execution of DSB by the PE that executed the TLB maintenance
* instruction. After the TLB invalidate instruction is
* complete, no new memory accesses using the invalidated TLB
* entries will be observed by any observer of the system
* domain. See section D4.8.2 of the ARMv8 (issue k), paragraph
* "Ordering and completion of TLB maintenance instructions".
*/
dsbish();
/*
* The effects of a completed TLB maintenance instruction are
* only guaranteed to be visible on the PE that executed the
* instruction after the execution of an ISB instruction by the
* PE that executed the TLB maintenance instruction.
*/
isb();
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
void init_xlat_tables_arch(unsigned long long max_pa)
{
assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
xlat_arch_get_max_supported_pa());
/*
* If dynamic allocation of new regions is enabled the code can't make
* assumptions about the max physical address because it could change
* after adding new regions. If this functionality is disabled it is
* safer to restrict the max physical address as much as possible.
*/
#if PLAT_XLAT_TABLES_DYNAMIC
tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
#else
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
#endif
}
/*******************************************************************************
...
@@ -60,6 +60,10 @@ static uint64_t tf_base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];
#if PLAT_XLAT_TABLES_DYNAMIC
static int xlat_tables_mapped_regions[MAX_XLAT_TABLES];
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
xlat_ctx_t tf_xlat_ctx = {
.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
@@ -70,6 +74,9 @@ xlat_ctx_t tf_xlat_ctx = {
.tables = tf_xlat_tables,
.tables_num = MAX_XLAT_TABLES,
#if PLAT_XLAT_TABLES_DYNAMIC
.tables_mapped_regions = xlat_tables_mapped_regions,
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
.base_table = tf_base_xlat_table,
.base_table_entries = NUM_BASE_LEVEL_ENTRIES,
@@ -104,6 +111,27 @@ void mmap_add(const mmap_region_t *mm)
}
}
#if PLAT_XLAT_TABLES_DYNAMIC
int mmap_add_dynamic_region(unsigned long long base_pa,
uintptr_t base_va, size_t size, unsigned int attr)
{
mmap_region_t mm = {
.base_va = base_va,
.base_pa = base_pa,
.size = size,
.attr = attr,
};
return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
void init_xlat_tables(void)
{
assert(!is_mmu_enabled());
...
@@ -47,6 +47,63 @@
#endif
#include "xlat_tables_private.h"
#if PLAT_XLAT_TABLES_DYNAMIC
/*
* The following functions assume that they will be called using subtables only.
* The base table can't be unmapped, so it is not needed to do any special
* handling for it.
*/
/*
* Returns the index of the array corresponding to the specified translation
* table.
*/
static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
{
for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables[i] == table)
return i;
/*
* Maybe we were asked to get the index of the base level table, which
* should never happen.
*/
assert(0);
return -1;
}
/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables_mapped_regions[i] == 0)
return ctx->tables[i];
return NULL;
}
/* Increments region count for a given table. */
static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
}
/* Decrements region count for a given table. */
static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}
/* Returns 0 if the specified table isn't empty, otherwise 1. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
}
#else /* PLAT_XLAT_TABLES_DYNAMIC */
/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
@@ -55,6 +112,8 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
return ctx->tables[ctx->next_table++];
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa,
int level)
@@ -156,6 +215,142 @@ typedef enum {
} action_t;
#if PLAT_XLAT_TABLES_DYNAMIC
/*
* Recursive function that writes to the translation tables and unmaps the
* specified region.
*/
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
const int table_entries,
const int level)
{
assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
uint64_t *subtable;
uint64_t desc;
uintptr_t table_idx_va;
uintptr_t table_idx_end_va; /* End VA of this entry */
uintptr_t region_end_va = mm->base_va + mm->size - 1;
int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
table_idx = (table_idx_va - table_base_va) >>
XLAT_ADDR_SHIFT(level);
assert(table_idx < table_entries);
} else {
/* Start from the beginning of the table. */
table_idx_va = table_base_va;
table_idx = 0;
}
while (table_idx < table_entries) {
table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
desc = table_base[table_idx];
uint64_t desc_type = desc & DESC_MASK;
action_t action = ACTION_NONE;
if ((mm->base_va <= table_idx_va) &&
(region_end_va >= table_idx_end_va)) {
/* Region covers all block */
if (level == 3) {
/*
* Last level, only page descriptors allowed,
* erase it.
*/
assert(desc_type == PAGE_DESC);
action = ACTION_WRITE_BLOCK_ENTRY;
} else {
/*
* Other levels can have table descriptors. If
* so, recurse into it and erase descriptors
* inside it as needed. If there is a block
* descriptor, just erase it. If an invalid
* descriptor is found, this table isn't
* actually mapped, which shouldn't happen.
*/
if (desc_type == TABLE_DESC) {
action = ACTION_RECURSE_INTO_TABLE;
} else {
assert(desc_type == BLOCK_DESC);
action = ACTION_WRITE_BLOCK_ENTRY;
}
}
} else if ((mm->base_va <= table_idx_end_va) ||
(region_end_va >= table_idx_va)) {
/*
* Region partially covers block.
*
* It can't happen in level 3.
*
* There must be a table descriptor here, if not there
* was a problem when mapping the region.
*/
assert(level < 3);
assert(desc_type == TABLE_DESC);
action = ACTION_RECURSE_INTO_TABLE;
}
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va);
} else if (action == ACTION_RECURSE_INTO_TABLE) {
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
xlat_tables_unmap_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1);
/*
* If the subtable is now empty, remove its reference.
*/
if (xlat_table_is_empty(ctx, subtable)) {
table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va);
}
} else {
assert(action == ACTION_NONE);
}
table_idx++;
table_idx_va += XLAT_BLOCK_SIZE(level);
/* If reached the end of the region, exit */
if (region_end_va <= table_idx_va)
break;
}
if (level > ctx->base_level)
xlat_table_dec_regions_count(ctx, table_base);
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
@@ -287,9 +482,11 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
const int table_entries,
@@ -321,6 +518,11 @@ static void xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
table_idx = 0;
}
#if PLAT_XLAT_TABLES_DYNAMIC
if (level > ctx->base_level)
xlat_table_inc_regions_count(ctx, table_base);
#endif
while (table_idx < table_entries) {
desc = table_base[table_idx];
@@ -338,20 +540,30 @@ static void xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
} else if (action == ACTION_CREATE_NEW_TABLE) {
subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
/* Not enough free tables to map this region */
return table_idx_va;
}
/* Point to new subtable from this one. */
table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
/* Recurse to write into subtable */
uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1);
if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1);
if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
return end_va;
} else {
@@ -366,6 +578,8 @@ static void xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
if (mm_end_va <= table_idx_va)
break;
}
return table_idx_va - 1;
}
void print_mmap(mmap_region_t *const mmap)
@@ -436,10 +650,14 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
 * Full VA overlaps are only allowed if both regions are
 * identity mapped (zero offset) or have the same VA to PA
 * offset. Also, make sure that it's not the exact same area.
 * This can only be done with static regions.
 */
if (fully_overlapped_va) {
#if PLAT_XLAT_TABLES_DYNAMIC
if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
return -EPERM;
@@ -481,6 +699,9 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
if (!mm->size)
return;
/* Static regions must be added before initializing the xlat tables. */
assert(!ctx->initialized);
ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
mm->attr);
if (ret != 0) {
@@ -509,6 +730,8 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 * regions with the loop in xlat_tables_init_internal because the outer
 * ones won't overwrite block or page descriptors of regions added
 * previously.
*
* Overlapping is only allowed for static regions.
 */
while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
@@ -541,6 +764,179 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
ctx->max_va = end_va;
}
#if PLAT_XLAT_TABLES_DYNAMIC
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
mmap_region_t *mm_cursor = ctx->mmap;
mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
unsigned long long end_pa = mm->base_pa + mm->size - 1;
uintptr_t end_va = mm->base_va + mm->size - 1;
int ret;
/* Nothing to do */
if (!mm->size)
return 0;
ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size, mm->attr | MT_DYNAMIC);
if (ret != 0)
return ret;
/*
* Find the adequate entry in the mmap array in the same way done for
* static regions in mmap_add_region_ctx().
*/
while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va && mm_cursor->size)
++mm_cursor;
while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) && (mm_cursor->size < mm->size))
++mm_cursor;
/* Make room for new region by moving other regions up by one place */
memmove(mm_cursor + 1, mm_cursor, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
 * Check we haven't lost the empty sentinel from the end of the array.
* This shouldn't happen as we have checked in mmap_add_region_check
* that there is free space.
*/
assert(mm_last->size == 0);
mm_cursor->base_pa = mm->base_pa;
mm_cursor->base_va = mm->base_va;
mm_cursor->size = mm->size;
mm_cursor->attr = mm->attr | MT_DYNAMIC;
/*
* Update the translation tables if the xlat tables are initialized. If
* not, this region will be mapped when they are initialized.
*/
if (ctx->initialized) {
uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0, ctx->base_table,
ctx->base_table_entries, ctx->base_level);
/* Failed to map, remove mmap entry, unmap and return error. */
if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
memmove(mm_cursor, mm_cursor + 1, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
* Check if the mapping function actually managed to map
* anything. If not, just return now.
*/
if (mm_cursor->base_va >= end_va)
return -ENOMEM;
/*
* Something went wrong after mapping some table entries,
* undo every change done up to this point.
*/
mmap_region_t unmap_mm = {
.base_pa = 0,
.base_va = mm->base_va,
.size = end_va - mm->base_va,
.attr = 0
};
xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
ctx->base_table_entries, ctx->base_level);
return -ENOMEM;
}
/*
* Make sure that all entries are written to the memory. There
* is no need to invalidate entries when mapping dynamic regions
* because new table/block/page descriptors only replace old
* invalid descriptors, that aren't TLB cached.
*/
dsbishst();
}
if (end_pa > ctx->max_pa)
ctx->max_pa = end_pa;
if (end_va > ctx->max_va)
ctx->max_va = end_va;
return 0;
}
/*
* Removes the region with given base Virtual Address and size from the given
* context.
*
* Returns:
* 0: Success.
* EINVAL: Invalid values were used as arguments (region not found).
* EPERM: Tried to remove a static region.
*/
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
size_t size)
{
mmap_region_t *mm = ctx->mmap;
mmap_region_t *mm_last = mm + ctx->mmap_num;
int update_max_va_needed = 0;
int update_max_pa_needed = 0;
/* Check sanity of mmap array. */
assert(mm[ctx->mmap_num].size == 0);
while (mm->size) {
if ((mm->base_va == base_va) && (mm->size == size))
break;
++mm;
}
/* Check that the region was found */
if (mm->size == 0)
return -EINVAL;
/* If the region is static it can't be removed */
if (!(mm->attr & MT_DYNAMIC))
return -EPERM;
/* Check if this region is using the top VAs or PAs. */
if ((mm->base_va + mm->size - 1) == ctx->max_va)
update_max_va_needed = 1;
if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
update_max_pa_needed = 1;
/* Update the translation tables if needed */
if (ctx->initialized) {
xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
ctx->base_table_entries,
ctx->base_level);
xlat_arch_tlbi_va_sync();
}
/* Remove this region by moving the rest down by one place. */
memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check if we need to update the max VAs and PAs */
if (update_max_va_needed) {
ctx->max_va = 0;
mm = ctx->mmap;
while (mm->size) {
if ((mm->base_va + mm->size - 1) > ctx->max_va)
ctx->max_va = mm->base_va + mm->size - 1;
++mm;
}
}
if (update_max_pa_needed) {
ctx->max_pa = 0;
mm = ctx->mmap;
while (mm->size) {
if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
ctx->max_pa = mm->base_pa + mm->size - 1;
++mm;
}
}
return 0;
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
@@ -681,13 +1077,26 @@ void init_xlation_table(xlat_ctx_t *ctx)
ctx->base_table[i] = INVALID_DESC;
for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
ctx->tables_mapped_regions[j] = 0;
#endif
for (int i = 0; i < XLAT_TABLE_ENTRIES; i++)
ctx->tables[j][i] = INVALID_DESC;
}
while (mm->size) {
uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
ctx->base_table_entries, ctx->base_level);
if (end_va != mm->base_va + mm->size - 1) {
ERROR("Not enough memory to map region:\n"
" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
(void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
panic();
}
mm++;
}
ctx->initialized = 1;
}
@@ -82,6 +82,13 @@ typedef struct {
 */
uint64_t (*tables)[XLAT_TABLE_ENTRIES];
int tables_num;
/*
 * Keep track of how many regions are mapped in each table. The base
 * table can't be unmapped, so there is no need to keep track of it.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
int *tables_mapped_regions;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
int next_table;
@@ -103,6 +110,52 @@ typedef struct {
} xlat_ctx_t;
#if PLAT_XLAT_TABLES_DYNAMIC
/*
* Shifts and masks to access fields of an mmap_attr_t
*/
/* Dynamic or static */
#define MT_DYN_SHIFT 30 /* 31 would cause undefined behaviours */
/*
* Memory mapping private attributes
*
* Private attributes not exposed in the mmap_attr_t enum.
*/
typedef enum {
/*
 * Regions mapped before enabling the MMU can't be unmapped dynamically
 * (they are static), while regions mapped with the MMU enabled can be
 * unmapped. This behaviour can't be overridden.
*
* Static regions can overlap each other, dynamic regions can't.
*/
MT_STATIC = 0 << MT_DYN_SHIFT,
MT_DYNAMIC = 1 << MT_DYN_SHIFT
} mmap_priv_attr_t;
/*
* Function used to invalidate all levels of the translation walk for a given
* virtual address. It must be called for every translation table entry that is
* modified.
*/
void xlat_arch_tlbi_va(uintptr_t va);
/*
* This function has to be called at the end of any code that uses the function
* xlat_arch_tlbi_va().
*/
void xlat_arch_tlbi_va_sync(void);
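Illustrative call pattern for these two functions (a sketch of the
contract described above, not code from this patch; the table, index and
VA are hypothetical):

/* After changing one or more live translation table entries... */
table[idx] = INVALID_DESC;	/* hypothetical entry update */
xlat_arch_tlbi_va(va);		/* invalidate cached walks for that VA */
/* ...repeat for every modified entry, then synchronise once: */
xlat_arch_tlbi_va_sync();	/* ensure invalidations complete before reuse */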
/* Add a dynamic region to the specified context. */
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
/* Remove a dynamic region from the specified context. */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
size_t size);
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/* Print VA, PA, size and attributes of all regions in the mmap array. */
void print_mmap(mmap_region_t *const mmap);
...