Commit 8b9f419e authored by davidcunado-arm, committed by GitHub

Merge pull request #1136 from antonio-nino-diaz-arm/an/xlat-get-set-attr

Add APIs to get and modify attributes of memory regions
parents 37b5614f ec0c8fda
@@ -163,7 +163,9 @@ void inv_dcache_range(uintptr_t addr, size_t size);
void dcsw_op_louis(u_register_t op_type);
void dcsw_op_all(u_register_t op_type);
void disable_mmu_el1(void);
void disable_mmu_el3(void);
void disable_mmu_icache_el1(void);
void disable_mmu_icache_el3(void);
/*******************************************************************************
@@ -24,9 +24,19 @@
#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
#define INVALID_DESC U(0x0)
/*
* A block descriptor points to a region of memory bigger than the granule size
* (e.g. a 2MB region when the granule size is 4KB).
*/
#define BLOCK_DESC U(0x1) /* Table levels 0-2 */
/* A table descriptor points to the next level of translation table. */
#define TABLE_DESC U(0x3) /* Table levels 0-2 */
/*
* A page descriptor points to a page, i.e. a memory region whose size is the
* translation granule size (e.g. 4KB).
*/
#define PAGE_DESC U(0x3) /* Table level 3 */
#define DESC_MASK U(0x3)
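As a hedged illustration of how these constants combine (a sketch, not part of this patch; the helper name is made up), the type of a descriptor is given by its two low bits once masked with DESC_MASK, and the 0x3 encoding means either "table" or "page" depending on the lookup level:

/* Sketch only: classify a descriptor read during a 4KB-granule table walk. */
static const char *desc_type_name(uint64_t desc, unsigned int level)
{
	uint64_t type = desc & DESC_MASK;

	if (type == INVALID_DESC)
		return "invalid";

	if (level == 3U) {
		/* At the last lookup level (3), the 0x3 encoding is a page. */
		return (type == PAGE_DESC) ? "page" : "reserved";
	}

	/* At levels 0 to 2, 0x1 is a block and 0x3 is a table. */
	return (type == BLOCK_DESC) ? "block" : "table";
}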
#define FIRST_LEVEL_DESC_N ONE_GB_SHIFT
@@ -84,10 +94,22 @@
#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - 1)
/* Mask to get the address bits common to a block of a certain table level */
#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
/*
* Extract from the given virtual address the index into the given lookup level.
* This macro assumes the system is using the 4KB translation granule.
*/
#define XLAT_TABLE_IDX(virtual_addr, level) \
(((virtual_addr) >> XLAT_ADDR_SHIFT(level)) & ULL(0x1FF))
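As a worked example (illustrative only; it assumes the 4KB granule, for which XLAT_ADDR_SHIFT() evaluates to 30, 21 and 12 at levels 1, 2 and 3), the virtual address 0x40201000 has bits [38:30], [29:21] and [20:12] all equal to 1, so it selects entry 1 of the level 1, level 2 and level 3 tables:

/* Illustration only: index selection for VA 0x40201000 with a 4KB granule. */
uintptr_t va = 0x40201000;

assert(XLAT_TABLE_IDX(va, 1) == 1);	/* VA bits [38:30] */
assert(XLAT_TABLE_IDX(va, 2) == 1);	/* VA bits [29:21] */
assert(XLAT_TABLE_IDX(va, 3) == 1);	/* VA bits [20:12] */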
/*
* AP[1] bit is ignored by hardware and is
* treated as if it is One in EL2/EL3
* The ARMv8 translation table descriptor format defines AP[2:1] as the Access
* Permissions bits, and does not define an AP[0] bit.
*
* AP[1] is valid only for a stage 1 translation that supports two VA ranges
* (i.e. in the ARMv8.0-A architecture, that is the S-EL1&0 regime).
*
* AP[1] is RES0 for stage 1 translations that support only one VA range
* (e.g. EL3).
*/
#define AP2_SHIFT U(0x7)
#define AP2_RO U(0x1)
@@ -121,6 +143,28 @@
#define ATTR_INDEX_MASK U(0x3)
#define ATTR_INDEX_GET(attr) (((attr) >> 2) & ATTR_INDEX_MASK)
/*
* Shift values for the attributes fields in a block or page descriptor.
* See section D4.3.3 in the ARMv8-A ARM (issue B.a).
*/
/* Memory attributes index field, AttrIndx[2:0]. */
#define ATTR_INDEX_SHIFT 2
/* Non-secure bit, NS. */
#define NS_SHIFT 5
/* Shareability field, SH[1:0] */
#define SHAREABILITY_SHIFT 8
/* The Access Flag, AF. */
#define ACCESS_FLAG_SHIFT 10
/* The not global bit, nG. */
#define NOT_GLOBAL_SHIFT 11
/* Contiguous hint bit. */
#define CONT_HINT_SHIFT 52
/* Execute-never bits, XN. */
#define PXN_SHIFT 53
#define XN_SHIFT 54
#define UXN_SHIFT XN_SHIFT
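For illustration (not part of the patch; the helper name is made up and 'desc' stands for a block or page descriptor obtained elsewhere), these shifts are meant to pick individual attribute fields out of the 64-bit descriptor, much as the new get_mem_attributes_internal() code further down does:

/* Sketch only: decode a few attribute fields of a block/page descriptor. */
static void print_desc_fields(uint64_t desc)
{
	unsigned int attr_idx = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
	unsigned int ns = (desc >> NS_SHIFT) & 1U;		/* 1 = Non-secure */
	unsigned int af = (desc >> ACCESS_FLAG_SHIFT) & 1U;	/* 0 = access faults */
	unsigned int xn = (desc >> XN_SHIFT) & 1U;		/* 1 = execute-never */

	VERBOSE("AttrIndx=%u NS=%u AF=%u XN=%u\n", attr_idx, ns, af, xn);
}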
/*
* Flags to override default values used to program system registers while
* enabling the MMU.
@@ -251,5 +251,66 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/*
* Change the memory attributes of the memory region starting from a given
* virtual address in a set of translation tables.
*
* This function can only be used after the translation tables have been
* initialized.
*
* The base address of the memory region must be aligned on a page boundary.
* The size of this memory region must be a multiple of the page size.
* The memory region must already be mapped by the given translation tables,
* and it must be mapped at the granularity of a page.
*
* Return 0 on success, a negative value on error.
*
* In case of error, the memory attributes remain unchanged and this function
* has no effect.
*
* ctx
* Translation context to work on.
* base_va:
* Virtual address of the 1st page to change the attributes of.
* size:
* Size in bytes of the memory region.
* attr:
* New attributes of the memory pages. The attributes that can be changed are
* data access (MT_RO/MT_RW), instruction access (MT_EXECUTE_NEVER/MT_EXECUTE)
* and, for contexts used in the EL1&0 translation regime, user/privileged
* access (MT_USER/MT_PRIVILEGED). Also, note that this function does not allow
* remapping a region as read-write and executable, or remapping device memory
* as executable.
*
* NOTE: The caller of this function must be able to write to the translation
* tables, i.e. the memory where they are stored must be mapped with read-write
* access permissions. This function assumes that this is the case; if it is
* not, it may trigger a data abort exception.
*
* NOTE2: The caller is responsible for making sure that the targeted
* translation tables are not modified by any other code while this function is
* executing.
*/
int change_mem_attributes(xlat_ctx_t *ctx, uintptr_t base_va, size_t size,
mmap_attr_t attr);
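For instance, a caller might use it as in the following sketch (illustrative only; 'ctx', 'code_base' and 'code_size' are placeholders for a translation context and a page-aligned region already mapped at page granularity) to remap a region as read-only and executable once its contents have been written:

/* Sketch: lock down a code region after copying it into place. */
int ret = change_mem_attributes(ctx, code_base, code_size, MT_RO | MT_EXECUTE);

if (ret != 0)
	WARN("Could not change memory attributes (%i)\n", ret);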
/*
* Query the memory attributes of a memory page in a set of translation tables.
*
* Return 0 on success, a negative error code on error.
* On success, the attributes are stored into *attributes.
*
* ctx
* Translation context to work on.
* base_va
* Virtual address of the page to get the attributes of.
* There are no alignment restrictions on this address. The attributes of the
* memory page it lies within are returned.
* attributes
* Output parameter where to store the attributes of the targeted memory page.
*/
int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
mmap_attr_t *attributes);
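A corresponding query could look like this sketch ('ctx' and 'va' are again placeholders):

/* Sketch: inspect the attributes of the page containing 'va'. */
mmap_attr_t attrs;

if (get_mem_attributes(ctx, va, &attrs) == 0) {
	if ((attrs & MT_RW) != 0)
		INFO("Page containing %p is writable\n", (void *)va);
	if ((attrs & MT_EXECUTE_NEVER) != 0)
		INFO("Page containing %p is execute-never\n", (void *)va);
}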
#endif /*__ASSEMBLY__*/
#endif /* __XLAT_TABLES_V2_H__ */
@@ -162,8 +162,12 @@ struct xlat_ctx {
.initialized = 0, \
}
#if AARCH64
/* This IMAGE_EL macro must not be used outside the library */
/*
* This IMAGE_EL macro must not be used outside the library, and it is only
* used in AArch64.
*/
#if IMAGE_BL1 || IMAGE_BL31
# define IMAGE_EL 3
# define IMAGE_XLAT_DEFAULT_REGIME EL3_REGIME
@@ -172,6 +176,17 @@
# define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
#endif
#else /* if AARCH32 */
/*
* The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime in
* AArch64 except for the XN bits, but we set and unset them at the same time,
* so there's no difference in practice.
*/
#define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
#endif /* AARCH64 */
#endif /*__ASSEMBLY__*/
#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
@@ -18,7 +18,9 @@
.globl zeromem16
.globl memcpy16
.globl disable_mmu_el1
.globl disable_mmu_el3
.globl disable_mmu_icache_el1
.globl disable_mmu_icache_el3
#if SUPPORT_VFP
@@ -451,11 +453,11 @@ endfunc memcpy16
func disable_mmu_el3
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
do_disable_mmu_el3:
mrs x0, sctlr_el3
bic x0, x0, x1
msr sctlr_el3, x0
isb // ensure MMU is off
isb /* ensure MMU is off */
dsb sy
ret
endfunc disable_mmu_el3
@@ -463,9 +465,31 @@ endfunc disable_mmu_el3
func disable_mmu_icache_el3
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
b do_disable_mmu
b do_disable_mmu_el3
endfunc disable_mmu_icache_el3
/* ---------------------------------------------------------------------------
* Disable the MMU at EL1
* ---------------------------------------------------------------------------
*/
func disable_mmu_el1
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
mrs x0, sctlr_el1
bic x0, x0, x1
msr sctlr_el1, x0
isb /* ensure MMU is off */
dsb sy
ret
endfunc disable_mmu_el1
func disable_mmu_icache_el1
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
b do_disable_mmu_el1
endfunc disable_mmu_icache_el1
/* ---------------------------------------------------------------------------
* Enable the use of VFP at EL3
* ---------------------------------------------------------------------------
@@ -27,8 +27,6 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
return (read_sctlr() & SCTLR_M_BIT) != 0;
}
#if PLAT_XLAT_TABLES_DYNAMIC
void xlat_arch_tlbi_va(uintptr_t va)
{
/*
@@ -77,8 +75,6 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
int xlat_arch_current_el(void)
{
/*
@@ -1022,7 +1022,7 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(xlat_ctx_t *ctx, uint64_t desc)
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
int mem_type_index = ATTR_INDEX_GET(desc);
xlat_regime_t xlat_regime = ctx->xlat_regime;
@@ -1315,3 +1315,348 @@ void enable_mmu_el3(unsigned int flags)
}
#endif /* AARCH32 */
/*
* Do a translation table walk to find the block or page descriptor that maps
* virtual_addr.
*
* On success, return the address of the descriptor within the translation
* table. Its lookup level is stored in '*out_level'.
* On error, return NULL.
*
* xlat_table_base
* Base address for the initial lookup level.
* xlat_table_base_entries
* Number of entries in the translation table for the initial lookup level.
* virt_addr_space_size
* Size in bytes of the virtual address space.
*/
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
void *xlat_table_base,
int xlat_table_base_entries,
unsigned long long virt_addr_space_size,
int *out_level)
{
unsigned int start_level;
uint64_t *table;
int entries;
VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
VERBOSE("Starting translation table walk from level %i\n", start_level);
table = xlat_table_base;
entries = xlat_table_base_entries;
for (unsigned int level = start_level;
level <= XLAT_TABLE_LEVEL_MAX;
++level) {
int idx;
uint64_t desc;
uint64_t desc_type;
VERBOSE("Table address: %p\n", (void *)table);
idx = XLAT_TABLE_IDX(virtual_addr, level);
VERBOSE("Index into level %i table: %i\n", level, idx);
if (idx >= entries) {
VERBOSE("Invalid address\n");
return NULL;
}
desc = table[idx];
desc_type = desc & DESC_MASK;
VERBOSE("Descriptor at level %i: 0x%llx\n", level,
(unsigned long long)desc);
if (desc_type == INVALID_DESC) {
VERBOSE("Invalid entry (memory not mapped)\n");
return NULL;
}
if (level == XLAT_TABLE_LEVEL_MAX) {
/*
* There can't be table entries at the final lookup
* level.
*/
assert(desc_type == PAGE_DESC);
VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
(unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
*out_level = level;
return &table[idx];
}
if (desc_type == BLOCK_DESC) {
VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
(unsigned long long)XLAT_BLOCK_SIZE(level));
*out_level = level;
return &table[idx];
}
assert(desc_type == TABLE_DESC);
VERBOSE("Table descriptor, continuing xlat table walk...\n");
table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
entries = XLAT_TABLE_ENTRIES;
}
/*
* This shouldn't be reached; the translation table walk should end at
* level XLAT_TABLE_LEVEL_MAX at the latest and return from inside the loop.
*/
assert(0);
return NULL;
}
static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
mmap_attr_t *attributes, uint64_t **table_entry,
unsigned long long *addr_pa, int *table_level)
{
uint64_t *entry;
uint64_t desc;
int level;
unsigned long long virt_addr_space_size;
/*
* Sanity-check arguments.
*/
assert(ctx != NULL);
assert(ctx->initialized);
assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
assert(virt_addr_space_size > 0);
entry = find_xlat_table_entry(base_va,
ctx->base_table,
ctx->base_table_entries,
virt_addr_space_size,
&level);
if (entry == NULL) {
WARN("Address %p is not mapped.\n", (void *)base_va);
return -EINVAL;
}
if (addr_pa != NULL) {
*addr_pa = *entry & TABLE_ADDR_MASK;
}
if (table_entry != NULL) {
*table_entry = entry;
}
if (table_level != NULL) {
*table_level = level;
}
desc = *entry;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
VERBOSE("Attributes: ");
xlat_desc_print(ctx, desc);
tf_printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
assert(attributes != NULL);
*attributes = 0;
int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
*attributes |= MT_MEMORY;
} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
*attributes |= MT_NON_CACHEABLE;
} else {
assert(attr_index == ATTR_DEVICE_INDEX);
*attributes |= MT_DEVICE;
}
int ap2_bit = (desc >> AP2_SHIFT) & 1;
if (ap2_bit == AP2_RW)
*attributes |= MT_RW;
if (ctx->xlat_regime == EL1_EL0_REGIME) {
int ap1_bit = (desc >> AP1_SHIFT) & 1;
if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
*attributes |= MT_USER;
}
int ns_bit = (desc >> NS_SHIFT) & 1;
if (ns_bit == 1)
*attributes |= MT_NS;
uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
if ((desc & xn_mask) == xn_mask) {
*attributes |= MT_EXECUTE_NEVER;
} else {
assert((desc & xn_mask) == 0);
}
return 0;
}
int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
mmap_attr_t *attributes)
{
return get_mem_attributes_internal(ctx, base_va, attributes,
NULL, NULL, NULL);
}
int change_mem_attributes(xlat_ctx_t *ctx,
uintptr_t base_va,
size_t size,
mmap_attr_t attr)
{
/* Note: This implementation isn't optimized. */
assert(ctx != NULL);
assert(ctx->initialized);
unsigned long long virt_addr_space_size =
(unsigned long long)ctx->va_max_address + 1;
assert(virt_addr_space_size > 0);
if (!IS_PAGE_ALIGNED(base_va)) {
WARN("%s: Address %p is not aligned on a page boundary.\n",
__func__, (void *)base_va);
return -EINVAL;
}
if (size == 0) {
WARN("%s: Size is 0.\n", __func__);
return -EINVAL;
}
if ((size % PAGE_SIZE) != 0) {
WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
__func__, size);
return -EINVAL;
}
if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
WARN("%s() doesn't allow to remap memory as read-write and executable.\n",
__func__);
return -EINVAL;
}
int pages_count = size / PAGE_SIZE;
VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
pages_count, (void *)base_va);
uintptr_t base_va_original = base_va;
/*
* Sanity checks.
*/
for (int i = 0; i < pages_count; ++i) {
uint64_t *entry;
uint64_t desc;
int level;
entry = find_xlat_table_entry(base_va,
ctx->base_table,
ctx->base_table_entries,
virt_addr_space_size,
&level);
if (entry == NULL) {
WARN("Address %p is not mapped.\n", (void *)base_va);
return -EINVAL;
}
desc = *entry;
/*
* Check that all the required pages are mapped at page
* granularity.
*/
if (((desc & DESC_MASK) != PAGE_DESC) ||
(level != XLAT_TABLE_LEVEL_MAX)) {
WARN("Address %p is not mapped at the right granularity.\n",
(void *)base_va);
WARN("Granularity is 0x%llx, should be 0x%x.\n",
(unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
return -EINVAL;
}
/*
* If the region type is device, it shouldn't be executable.
*/
int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_DEVICE_INDEX) {
if ((attr & MT_EXECUTE_NEVER) == 0) {
WARN("Setting device memory as executable at address %p.",
(void *)base_va);
return -EINVAL;
}
}
base_va += PAGE_SIZE;
}
/* Restore original value. */
base_va = base_va_original;
VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
__func__);
for (int i = 0; i < pages_count; ++i) {
mmap_attr_t old_attr, new_attr;
uint64_t *entry;
int level;
unsigned long long addr_pa;
get_mem_attributes_internal(ctx, base_va, &old_attr,
&entry, &addr_pa, &level);
VERBOSE("Old attributes: 0x%x\n", old_attr);
/*
* From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
* MT_USER/MT_PRIVILEGED are taken into account. Any other
* information is ignored.
*/
/* Clean the old attributes so that they can be rebuilt. */
new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
/*
* Update attributes, but filter out the ones this function
* isn't allowed to change.
*/
new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
VERBOSE("New attributes: 0x%x\n", new_attr);
/*
* The break-before-make sequence requires writing an invalid
* descriptor and making sure that the system sees the change
* before writing the new descriptor.
*/
*entry = INVALID_DESC;
/* Invalidate any cached copy of this mapping in the TLBs. */
xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
/* Ensure completion of the invalidation. */
xlat_arch_tlbi_va_sync();
/* Write new descriptor */
*entry = xlat_desc(ctx, new_attr, addr_pa, level);
base_va += PAGE_SIZE;
}
/* Ensure that the last descriptor written is seen by the system. */
dsbish();
return 0;
}