Unverified commit fc3e1591 authored by Antonio Niño Díaz, committed by GitHub

Merge pull request #1916 from antonio-nino-diaz-arm/an/spm-xlat

SPM: Some improvements to xlat handling code
parents 18d4d113 75f364b3
@@ -433,6 +433,9 @@
#define TCR_TxSZ_MAX ULL(39)
#define TCR_TxSZ_MAX_TTST ULL(48)
#define TCR_T0SZ_SHIFT U(0)
#define TCR_T1SZ_SHIFT U(16)
/* (internal) physical address size bits in EL3/EL1 */
#define TCR_PS_BITS_4GB ULL(0x0)
#define TCR_PS_BITS_64GB ULL(0x1)
@@ -462,12 +465,32 @@
#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
#define TCR_RGN1_INNER_NC (ULL(0x0) << 24)
#define TCR_RGN1_INNER_WBA (ULL(0x1) << 24)
#define TCR_RGN1_INNER_WT (ULL(0x2) << 24)
#define TCR_RGN1_INNER_WBNA (ULL(0x3) << 24)
#define TCR_RGN1_OUTER_NC (ULL(0x0) << 26)
#define TCR_RGN1_OUTER_WBA (ULL(0x1) << 26)
#define TCR_RGN1_OUTER_WT (ULL(0x2) << 26)
#define TCR_RGN1_OUTER_WBNA (ULL(0x3) << 26)
#define TCR_SH1_NON_SHAREABLE (ULL(0x0) << 28)
#define TCR_SH1_OUTER_SHAREABLE (ULL(0x2) << 28)
#define TCR_SH1_INNER_SHAREABLE (ULL(0x3) << 28)
#define TCR_TG0_SHIFT U(14)
#define TCR_TG0_MASK ULL(3)
#define TCR_TG0_4K (ULL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K (ULL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K (ULL(2) << TCR_TG0_SHIFT)
#define TCR_TG1_SHIFT U(30)
#define TCR_TG1_MASK ULL(3)
#define TCR_TG1_16K (ULL(1) << TCR_TG1_SHIFT)
#define TCR_TG1_4K (ULL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K (ULL(3) << TCR_TG1_SHIFT)
#define TCR_EPD0_BIT (ULL(1) << 7)
#define TCR_EPD1_BIT (ULL(1) << 23)
......
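For reference, the new T1SZ/RGN1/SH1/TG1/EPD1 defines cover the TTBR1_EL1 side of TCR_EL1 and are consumed further down in this change. A small decoding sketch, illustrative only; the helper names are invented for this note and are not part of the patch:

	static inline int ttbr1_walks_enabled(uint64_t tcr)
	{
		/* EPD1 == 0 means table walks via TTBR1_EL1 are enabled. */
		return (tcr & TCR_EPD1_BIT) == 0ULL;
	}

	static inline uint64_t tcr_t1sz(uint64_t tcr)
	{
		/* T1SZ occupies bits [21:16] of TCR_EL1 (6 bits wide). */
		return (tcr >> TCR_T1SZ_SHIFT) & 0x3fULL;
	}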
/*
* Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -174,12 +174,12 @@ void init_xlat_tables(void)
/* Inner & outer non-cacheable non-shareable. */\
tcr = TCR_SH_NON_SHAREABLE | \
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
(uint64_t) t0sz; \
((uint64_t)t0sz << TCR_T0SZ_SHIFT); \
} else { \
/* Inner & outer WBWA & shareable. */ \
tcr = TCR_SH_INNER_SHAREABLE | \
TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
(uint64_t) t0sz; \
((uint64_t)t0sz << TCR_T0SZ_SHIFT); \
} \
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
......
@@ -248,7 +248,7 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
*/
int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
tcr = (uint64_t) t0sz;
tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;
/*
* Set the cacheability and shareability attributes for memory
......
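For reference, T0SZ encodes the size of the TTBR0_EL1 address space as 64 minus its width in bits, and TCR_T0SZ_SHIFT is 0, so the explicit shift added above does not change the generated value; it only makes the field placement explicit. A worked sketch for a hypothetical 4GB address space, not part of the patch:

	/* T0SZ = 64 - log2(size); the size must be a power of two. */
	uint64_t virtual_addr_space_size = 1ULL << 32;
	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size); /* 64 - 32 = 32 */
	uint64_t tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT; /* T0SZ lives in bits [5:0] */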
@@ -300,6 +300,9 @@ int32_t spm_setup(void)
panic();
}
/* Setup shim layer */
spm_exceptions_xlat_init_context();
/*
* Setup all Secure Partitions.
*/
@@ -325,9 +328,6 @@ int32_t spm_setup(void)
/* Initialize context of the SP */
INFO("Secure Partition %u context setup start...\n", i);
/* Assign translation tables context. */
ctx->xlat_ctx_handle = spm_sp_xlat_context_alloc();
/* Save location of the image in physical memory */
ctx->image_base = (uintptr_t)sp_base;
ctx->image_size = sp_size;
......
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -95,8 +95,12 @@ void spm_sp_request_increase(sp_context_t *sp_ctx);
void spm_sp_request_decrease(sp_context_t *sp_ctx);
int spm_sp_request_increase_if_zero(sp_context_t *sp_ctx);
/* Functions related to the shim layer translation tables */
void spm_exceptions_xlat_init_context(void);
uint64_t *spm_exceptions_xlat_get_base_table(void);
/* Functions related to the translation tables management */
xlat_ctx_t *spm_sp_xlat_context_alloc(void);
void spm_sp_xlat_context_alloc(sp_context_t *sp_ctx);
void sp_map_memory_regions(sp_context_t *sp_ctx);
/* Functions to handle Secure Partition contexts */
......
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -60,6 +60,9 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* ------------------------
*/
/* Assign translation tables context. */
spm_sp_xlat_context_alloc(sp_ctx);
sp_map_memory_regions(sp_ctx);
/*
@@ -77,12 +80,24 @@ void spm_sp_setup(sp_context_t *sp_ctx)
write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
mmu_cfg_params[MMU_CFG_MAIR]);
/* Enable translations using TTBR1_EL1 */
int t1sz = 64 - __builtin_ctzll(SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE);
mmu_cfg_params[MMU_CFG_TCR] &= ~TCR_EPD1_BIT;
mmu_cfg_params[MMU_CFG_TCR] |=
((uint64_t)t1sz << TCR_T1SZ_SHIFT) |
TCR_SH1_INNER_SHAREABLE |
TCR_RGN1_OUTER_WBA | TCR_RGN1_INNER_WBA |
TCR_TG1_4K;
write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
mmu_cfg_params[MMU_CFG_TCR]);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
mmu_cfg_params[MMU_CFG_TTBR0]);
write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR1_EL1,
(uint64_t)spm_exceptions_xlat_get_base_table());
/* Setup SCTLR_EL1 */
u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
@@ -122,9 +137,14 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* ----------------------------
*/
/* Shim Exception Vector Base Address */
/*
* Shim exception vector base address. It is mapped at the start of the
* address space accessed by TTBR1_EL1, which means that the base
* address of the exception vectors depends on the size of the address
* space specified in TCR_EL1.T1SZ.
*/
write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
SPM_SHIM_EXCEPTIONS_PTR);
UINT64_MAX - (SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE - 1ULL));
/*
* FPEN: Allow the Secure Partition to access FP/SIMD registers.
......
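With SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE set to 1ULL << 25 (see the next file), the values written above work out as follows; a worked example, not additional patch content:

	/* T1SZ = 64 - 25 = 39, so TTBR1_EL1 covers the top 32MB of the
	 * virtual address space, 0xFFFFFFFFFE000000 .. 0xFFFFFFFFFFFFFFFF.
	 * The shim vectors sit at offset 0 of that region. */
	int t1sz = 64 - __builtin_ctzll(SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE); /* 39 */
	uint64_t vbar = UINT64_MAX - (SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE - 1ULL);
	/* vbar == 0xFFFFFFFFFE000000ULL */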
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -23,4 +23,12 @@ IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_END__, SPM_SHIM_EXCEPTIONS_END);
#define SPM_SHIM_EXCEPTIONS_SIZE \
(SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START)
/*
* Use the smallest virtual address space size allowed in ARMv8.0 for
* compatibility.
*/
#define SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 25)
#define SPM_SHIM_MMAP_REGIONS 1
#define SPM_SHIM_XLAT_TABLES 1
#endif /* SPM_SHIM_PRIVATE_H */
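The 1ULL << 25 value follows from the maximum TxSZ of 39 allowed without ARMv8.4-TTST (TCR_TxSZ_MAX above): the smallest configurable translation region is 2^(64 - 39) = 2^25 bytes (32MB). A compile-time check along these lines, using TF-A's CASSERT macro, could capture the relationship; shown only as an illustration and assuming arch.h is in scope:

	/* Smallest ARMv8.0 address space: 2^(64 - TCR_TxSZ_MAX) bytes. */
	CASSERT(SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE == (1ULL << (64 - TCR_TxSZ_MAX)),
		assert_spm_shim_va_size_is_v8_0_minimum);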
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
@@ -50,17 +51,6 @@ static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
XLAT_TABLE_ENTRIES * sizeof(uint64_t),
(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);
/* Allocate base translation tables. */
static uint64_t sp_xlat_base_tables
[GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
[PLAT_SPM_MAX_PARTITIONS]
__aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
* sizeof(uint64_t))
__section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
PLAT_SPM_MAX_PARTITIONS);
/* Allocate arrays. */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
[PLAT_SPM_MAX_PARTITIONS];
@@ -73,27 +63,108 @@ static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
PLAT_SPM_MAX_PARTITIONS);
/* Get handle of Secure Partition translation context */
xlat_ctx_t *spm_sp_xlat_context_alloc(void)
void spm_sp_xlat_context_alloc(sp_context_t *sp_ctx)
{
/* Allocate xlat context elements */
xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
uint64_t *base_table = pool_alloc(&sp_xlat_tables_pool);
uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
PLAT_SP_IMAGE_MAX_XLAT_TABLES);
int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
/* Calculate the size of the virtual address space needed */
uintptr_t va_size = 0U;
struct sp_rd_sect_mem_region *rdmem;
for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
uintptr_t end_va = (uintptr_t)rdmem->base +
(uintptr_t)rdmem->size;
if (end_va > va_size)
va_size = end_va;
}
if (va_size == 0U) {
ERROR("No regions in resource description.\n");
panic();
}
/*
* Get the power of two that is greater or equal to the top VA. The
* values of base and size in the resource description are 32-bit wide
* so the values will never overflow when using a uintptr_t.
*/
if (!IS_POWER_OF_TWO(va_size)) {
va_size = 1ULL <<
((sizeof(va_size) * 8) - __builtin_clzll(va_size));
}
if (va_size > PLAT_VIRT_ADDR_SPACE_SIZE) {
ERROR("Resource description requested too much virtual memory.\n");
panic();
}
uintptr_t min_va_size;
/* The following sizes are only valid for 4KB pages */
assert(PAGE_SIZE == (4U * 1024U));
if (is_armv8_4_ttst_present()) {
VERBOSE("Using ARMv8.4-TTST\n");
min_va_size = 1ULL << (64 - TCR_TxSZ_MAX_TTST);
} else {
min_va_size = 1ULL << (64 - TCR_TxSZ_MAX);
}
if (va_size < min_va_size) {
va_size = min_va_size;
}
/* Initialize xlat context */
xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1ULL,
va_size - 1ULL, mmap,
PLAT_SP_IMAGE_MMAP_REGIONS, tables,
PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
EL1_EL0_REGIME, mapped_regions);
return ctx;
sp_ctx->xlat_ctx_handle = ctx;
};
/*******************************************************************************
* Translation table context used for S-EL1 exception vectors
******************************************************************************/
REGISTER_XLAT_CONTEXT2(spm_sel1, SPM_SHIM_MMAP_REGIONS, SPM_SHIM_XLAT_TABLES,
SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
void spm_exceptions_xlat_init_context(void)
{
/* This region contains the exception vectors used at S-EL1. */
mmap_region_t sel1_exception_vectors =
MAP_REGION(SPM_SHIM_EXCEPTIONS_PTR,
0x0UL,
SPM_SHIM_EXCEPTIONS_SIZE,
MT_CODE | MT_SECURE | MT_PRIVILEGED);
mmap_add_region_ctx(&spm_sel1_xlat_ctx,
&sel1_exception_vectors);
init_xlat_tables_ctx(&spm_sel1_xlat_ctx);
}
uint64_t *spm_exceptions_xlat_get_base_table(void)
{
return spm_sel1_xlat_ctx.base_table;
}
/*******************************************************************************
* Functions to allocate memory for regions.
******************************************************************************/
@@ -159,6 +230,11 @@ static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
unsigned int memtype = rdmem->attr & RD_MEM_MASK;
if (rd_size == 0U) {
VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem->name);
return;
}
VERBOSE("Adding memory region '%s'\n", rdmem->name);
mmap.granularity = REGION_DEFAULT_GRANULARITY;
@@ -295,15 +371,6 @@ static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
void sp_map_memory_regions(sp_context_t *sp_ctx)
{
/* This region contains the exception vectors used at S-EL1. */
const mmap_region_t sel1_exception_vectors =
MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
SPM_SHIM_EXCEPTIONS_SIZE,
MT_CODE | MT_SECURE | MT_PRIVILEGED);
mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
&sel1_exception_vectors);
struct sp_rd_sect_mem_region *rdmem;
for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
......
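The sizing logic added to spm_sp_xlat_context_alloc() scans the resource description for the highest end address, rounds it up to a power of two, and raises it to the architectural minimum if it is smaller. A standalone sketch of the rounding step with a worked value; the helper name is invented for this note and is not part of the patch:

	/* Round a non-zero 64-bit value up to the next power of two, as done
	 * for va_size in spm_sp_xlat_context_alloc(). */
	static uintptr_t round_up_pow2(uintptr_t va_size)
	{
		if (!IS_POWER_OF_TWO(va_size)) {
			va_size = 1ULL <<
				((sizeof(va_size) * 8) - __builtin_clzll(va_size));
		}
		return va_size;
	}

	/* Example: a resource description whose highest region ends at 0x24000
	 * (144KB) yields 0x40000 (256KB) here, before the 2^25-byte ARMv8.0
	 * minimum is applied. */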