Unverified Commit edcd266e authored by Dimitris Papastamos, committed by GitHub

Merge pull request #1395 from antonio-nino-diaz-arm/an/spm-refactor

SPM: Refactor codebase
parents 0d018306 e829a379
......@@ -126,8 +126,7 @@ the rest of this document.
To enable SPM support in TF-A, the source code must be compiled with the build
flag ``ENABLE_SPM=1``. On Arm platforms the build option ``ARM_BL31_IN_DRAM``
can be used to select the location of BL31; both SRAM and DRAM are supported.
Also, the location of the binary that contains the BL32 image
must be set to 1. Also, the location of the binary that contains the BL32 image
(``BL32=path/to/image.bin``) must be specified.
First, build the Standalone MM Secure Partition. To build it, refer to the
......@@ -139,7 +138,7 @@ image in the FIP:
::
BL32=path/to/standalone/mm/sp BL33=path/to/bl33.bin \
make PLAT=fvp ENABLE_SPM=1 fip all
make PLAT=fvp ENABLE_SPM=1 ARM_BL31_IN_DRAM=1 fip all
Describing Secure Partition resources
-------------------------------------
......
......@@ -7,11 +7,11 @@
#ifndef __CM_H__
#define __CM_H__
#ifndef AARCH32
#include <arch.h>
#include <assert.h>
#include <context.h>
#include <context_mgmt.h>
#include <stdint.h>
#endif
/*******************************************************************************
* Forward declarations
......@@ -32,6 +32,7 @@ void cm_set_context(void *context, uint32_t security_state);
void cm_init_my_context(const struct entry_point_info *ep);
void cm_init_context_by_index(unsigned int cpu_idx,
const struct entry_point_info *ep);
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep);
void cm_prepare_el3_exit(uint32_t security_state);
#ifndef AARCH32
......
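/*
 * A minimal caller-side sketch (hypothetical service code, not part of this
 * patch): with cm_setup_context() now exported, a dispatcher can initialise a
 * privately owned cpu_context_t for a secure payload instead of going through
 * the per-CPU slots used by cm_init_my_context(). The context name and entry
 * point values below are illustrative only.
 */
static cpu_context_t payload_ctx;

static void payload_ctx_init(uintptr_t payload_pc)
{
	entry_point_info_t ep_info = {0};

	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
	ep_info.pc = payload_pc;
	ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);

	cm_setup_context(&payload_ctx, &ep_info);
}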
......@@ -30,7 +30,9 @@
#elif defined(IMAGE_BL2U)
# define PLATFORM_STACK_SIZE 0x200
#elif defined(IMAGE_BL31)
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#if ENABLE_SPM
# define PLATFORM_STACK_SIZE 0x500
#elif PLAT_XLAT_TABLES_DYNAMIC
# define PLATFORM_STACK_SIZE 0x800
#else
# define PLATFORM_STACK_SIZE 0x400
......@@ -94,7 +96,11 @@
* PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
* little space for growth.
*/
#define PLAT_ARM_MAX_BL31_SIZE 0x20000
#if ENABLE_SPM
# define PLAT_ARM_MAX_BL31_SIZE 0x40000
#else
# define PLAT_ARM_MAX_BL31_SIZE 0x20000
#endif
#ifdef AARCH32
/*
......
......@@ -98,12 +98,6 @@
/* Total number of memory regions with distinct properties */
#define ARM_SP_IMAGE_NUM_MEM_REGIONS 6
/*
* Name of the section to put the translation tables used by the S-EL1/S-EL0
* context of a Secure Partition.
*/
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "arm_el3_tzc_dram"
/* Cookies passed to the Secure Partition at boot. Not used by ARM platforms. */
#define PLAT_SPM_COOKIE_0 ULL(0)
#define PLAT_SPM_COOKIE_1 ULL(0)
......
......@@ -55,8 +55,4 @@ typedef struct secure_partition_boot_info {
secure_partition_mp_info_t *mp_info;
} secure_partition_boot_info_t;
/* Setup function for secure partitions context. */
void secure_partition_setup(void);
#endif /* __SECURE_PARTITION_H__ */
......@@ -41,8 +41,7 @@ void cm_init(void)
* entry_point_info structure.
*
* The security state to initialize is determined by the SECURE attribute
* of the entry_point_info. The function returns a pointer to the initialized
* context and sets this as the next context to return to.
* of the entry_point_info.
*
* The EE and ST attributes are used to configure the endianness and secure
* timer availability for the new execution context.
......@@ -51,7 +50,7 @@ void cm_init(void)
* el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
* cm_e1_sysreg_context_restore().
******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
unsigned int security_state;
uint32_t scr, sctlr;
......@@ -149,7 +148,7 @@ void cm_init_context_by_index(unsigned int cpu_idx,
{
cpu_context_t *ctx;
ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
cm_setup_context(ctx, ep);
}
/*******************************************************************************
......@@ -161,7 +160,7 @@ void cm_init_my_context(const entry_point_info_t *ep)
{
cpu_context_t *ctx;
ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
cm_setup_context(ctx, ep);
}
/*******************************************************************************
......
......@@ -49,8 +49,7 @@ void cm_init(void)
* entry_point_info structure.
*
* The security state to initialize is determined by the SECURE attribute
* of the entry_point_info. The function returns a pointer to the initialized
* context and sets this as the next context to return to.
* of the entry_point_info.
*
 * The EE and ST attributes are used to configure the endianness and secure
* timer availability for the new execution context.
......@@ -59,7 +58,7 @@ void cm_init(void)
* el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
* cm_e1_sysreg_context_restore().
******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
unsigned int security_state;
uint32_t scr_el3, pmcr_el0;
......@@ -258,7 +257,7 @@ void cm_init_context_by_index(unsigned int cpu_idx,
{
cpu_context_t *ctx;
ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
cm_setup_context(ctx, ep);
}
/*******************************************************************************
......@@ -270,7 +269,7 @@ void cm_init_my_context(const entry_point_info_t *ep)
{
cpu_context_t *ctx;
ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
cm_setup_context(ctx, ep);
}
/*******************************************************************************
......
......@@ -225,6 +225,12 @@ ifneq (${BL2_AT_EL3}, 0)
override BL1_SOURCES =
endif
ifeq (${ENABLE_SPM},1)
ifneq (${ARM_BL31_IN_DRAM},1)
$(error "Error: SPM needs BL31 to be located in DRAM.")
endif
endif
include plat/arm/board/common/board_common.mk
include plat/arm/common/arm_common.mk
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -15,46 +15,28 @@
#include <platform.h>
#include <secure_partition.h>
#include <string.h>
#include <types.h>
#include <xlat_tables_v2.h>
#include "spm_private.h"
#include "spm_shim_private.h"
/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif
/* Allocate and initialise the translation context for the secure partition. */
REGISTER_XLAT_CONTEXT2(secure_partition,
PLAT_SP_IMAGE_MMAP_REGIONS,
PLAT_SP_IMAGE_MAX_XLAT_TABLES,
PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
/* Export a handle on the secure partition translation context */
xlat_ctx_t *secure_partition_xlat_ctx_handle = &secure_partition_xlat_ctx;
/* Setup context of the Secure Partition */
void secure_partition_setup(void)
void spm_sp_setup(sp_context_t *sp_ctx)
{
VERBOSE("S-EL1/S-EL0 context setup start...\n");
cpu_context_t *ctx = cm_get_context(SECURE);
cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
/* Make sure that we got a Secure context. */
assert(ctx != NULL);
/*
* Initialize CPU context
* ----------------------
*/
/* Assert we are in Secure state. */
assert((read_scr_el3() & SCR_NS_BIT) == 0);
entry_point_info_t ep_info = {0};
/* Disable MMU at EL1. */
disable_mmu_icache_el1();
SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
ep_info.pc = BL32_BASE;
ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
/* Invalidate TLBs at EL1. */
tlbivmalle1();
dsbish();
cm_setup_context(ctx, &ep_info);
/*
* General-Purpose registers
......@@ -143,13 +125,13 @@ void secure_partition_setup(void)
MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
SPM_SHIM_EXCEPTIONS_SIZE,
MT_CODE | MT_SECURE | MT_PRIVILEGED);
mmap_add_region_ctx(&secure_partition_xlat_ctx,
mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
&sel1_exception_vectors);
mmap_add_ctx(&secure_partition_xlat_ctx,
mmap_add_ctx(sp_ctx->xlat_ctx_handle,
plat_get_secure_partition_mmap(NULL));
init_xlat_tables_ctx(&secure_partition_xlat_ctx);
init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
/*
* MMU-related registers
......@@ -222,9 +204,12 @@ void secure_partition_setup(void)
write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
uint64_t *xlat_base =
((xlat_ctx_t *)sp_ctx->xlat_ctx_handle)->base_table;
/* Point TTBR0_EL1 at the tables of the context created for the SP. */
write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
(u_register_t)secure_partition_base_xlat_table);
(u_register_t)xlat_base);
/*
* Setup other system registers
......@@ -312,6 +297,4 @@ void secure_partition_setup(void)
if (plat_my_core_pos() == sp_mp_info[index].linear_id)
sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
}
VERBOSE("S-EL1/S-EL0 context setup end.\n");
}
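/*
 * Hypothetical platform-side sketch (not part of this patch): the regions
 * mapped above through plat_get_secure_partition_mmap() are typically
 * supplied by the platform as an mmap_region_t array terminated by a
 * zero-sized entry. SP_IMAGE_BASE and SP_IMAGE_SIZE are placeholder names.
 */
static const mmap_region_t plat_sp_mmap[] = {
	MAP_REGION_FLAT(SP_IMAGE_BASE, SP_IMAGE_SIZE,
			MT_CODE | MT_SECURE | MT_USER),
	{0}
};

const mmap_region_t *plat_get_secure_partition_mmap(void *cookie)
{
	return plat_sp_mmap;
}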
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <platform_def.h>
#include <platform.h>
#include <secure_partition.h>
#include <spm_svc.h>
#include <xlat_tables_v2.h>
#include "spm_private.h"
#include "spm_shim_private.h"
/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif
/* Allocate and initialise the translation context for the secure partitions. */
REGISTER_XLAT_CONTEXT2(sp,
PLAT_SP_IMAGE_MMAP_REGIONS,
PLAT_SP_IMAGE_MAX_XLAT_TABLES,
PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
static spinlock_t mem_attr_smc_lock;
/* Get handle of Secure Partition translation context */
xlat_ctx_t *spm_get_sp_xlat_context(void)
{
return &sp_xlat_ctx;
};
/*
* Attributes are encoded using a different format in the SMC interface than in
* the Trusted Firmware, where the mmap_attr_t enum type is used. This function
* converts an attributes value from the SMC format to the mmap_attr_t format by
* setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
* The other fields are left as 0 because they are ignored by the function
* change_mem_attributes().
*/
static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
{
unsigned int tf_attr = 0U;
unsigned int access = (attributes & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
>> SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
tf_attr |= MT_RW | MT_USER;
} else if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
tf_attr |= MT_RO | MT_USER;
} else {
/* Other values are reserved. */
assert(access == SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
/* The only requirement is that there's no access from EL0 */
tf_attr |= MT_RO | MT_PRIVILEGED;
}
if ((attributes & SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
tf_attr |= MT_EXECUTE;
} else {
tf_attr |= MT_EXECUTE_NEVER;
}
return tf_attr;
}
/*
* This function converts attributes from the Trusted Firmware format into the
* SMC interface format.
*/
static unsigned int smc_mmap_to_smc_attr(unsigned int attr)
{
unsigned int smc_attr = 0U;
unsigned int data_access;
if ((attr & MT_USER) == 0) {
/* No access from EL0. */
data_access = SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
} else {
if ((attr & MT_RW) != 0) {
assert(MT_TYPE(attr) != MT_DEVICE);
data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RW;
} else {
data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RO;
}
}
smc_attr |= (data_access & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
<< SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
if ((attr & MT_EXECUTE_NEVER) != 0U) {
smc_attr |= SP_MEMORY_ATTRIBUTES_NON_EXEC;
}
return smc_attr;
}
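/*
 * Worked example (illustrative, derived from the two helpers above): a
 * read-write, non-executable data page is encoded on the SMC interface as
 * (SP_MEMORY_ATTRIBUTES_ACCESS_RW << SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT) |
 * SP_MEMORY_ATTRIBUTES_NON_EXEC, which smc_attr_to_mmap_attr() turns into
 * MT_RW | MT_USER | MT_EXECUTE_NEVER; smc_mmap_to_smc_attr() performs the
 * inverse conversion for SP_MEMORY_ATTRIBUTES_GET.
 */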
int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
uintptr_t base_va)
{
uint32_t attributes;
spin_lock(&mem_attr_smc_lock);
int rc = get_mem_attributes(sp_ctx->xlat_ctx_handle,
base_va, &attributes);
spin_unlock(&mem_attr_smc_lock);
/* Convert error codes of get_mem_attributes() into SPM ones. */
assert((rc == 0) || (rc == -EINVAL));
if (rc == 0) {
return (int32_t) smc_mmap_to_smc_attr(attributes);
} else {
return SPM_INVALID_PARAMETER;
}
}
int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
u_register_t page_address,
u_register_t pages_count,
u_register_t smc_attributes)
{
uintptr_t base_va = (uintptr_t) page_address;
size_t size = (size_t) (pages_count * PAGE_SIZE);
uint32_t attributes = (uint32_t) smc_attributes;
INFO(" Start address : 0x%lx\n", base_va);
INFO(" Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
INFO(" Attributes : 0x%x\n", attributes);
spin_lock(&mem_attr_smc_lock);
int ret = change_mem_attributes(sp_ctx->xlat_ctx_handle,
base_va, size,
smc_attr_to_mmap_attr(attributes));
spin_unlock(&mem_attr_smc_lock);
/* Convert error codes of change_mem_attributes() into SPM ones. */
assert((ret == 0) || (ret == -EINVAL));
return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
}
#
# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
......@@ -11,14 +11,12 @@ ifneq (${ARCH},aarch64)
$(error "Error: SPM is only supported on aarch64.")
endif
# SPM sources
SPM_SOURCES := $(addprefix services/std_svc/spm/, \
spm_main.c \
${ARCH}/spm_helpers.S \
secure_partition_setup.c \
${ARCH}/spm_shim_exceptions.S)
${ARCH}/spm_shim_exceptions.S \
spm_main.c \
sp_setup.c \
sp_xlat.c)
# Let the top-level Makefile know that we intend to include a BL32 image
......
......@@ -23,320 +23,229 @@
#include "spm_private.h"
/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
static spinlock_t mem_attr_smc_lock;
/*******************************************************************************
* Secure Partition context information.
******************************************************************************/
static secure_partition_context_t sp_ctx;
static sp_context_t sp_ctx;
/*******************************************************************************
* Replace the S-EL1 re-entry information with S-EL0 re-entry
* information
* Set state of a Secure Partition context.
******************************************************************************/
static void spm_setup_next_eret_into_sel0(const cpu_context_t *secure_context)
void sp_state_set(sp_context_t *sp_ptr, sp_state_t state)
{
assert(secure_context == cm_get_context(SECURE));
cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
spin_lock(&(sp_ptr->state_lock));
sp_ptr->state = state;
spin_unlock(&(sp_ptr->state_lock));
}
/*******************************************************************************
* This function takes an SP context pointer and:
* 1. Applies the S-EL1 system register context from sp_ctx->cpu_ctx.
* 2. Saves the current C runtime state (callee-saved registers) on the stack
* frame and saves a reference to this state.
* 3. Calls el3_exit() so that the EL3 system and general purpose registers
* from the sp_ctx->cpu_ctx are used to enter the secure partition image.
* Wait until the state of a Secure Partition is the specified one and change it
* to the desired state.
******************************************************************************/
static uint64_t spm_synchronous_sp_entry(secure_partition_context_t *sp_ctx_ptr)
void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
{
uint64_t rc;
int success = 0;
assert(sp_ctx_ptr != NULL);
assert(sp_ctx_ptr->c_rt_ctx == 0);
assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);
while (success == 0) {
spin_lock(&(sp_ptr->state_lock));
/* Apply the Secure EL1 system register context and switch to it */
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
if (sp_ptr->state == from) {
sp_ptr->state = to;
VERBOSE("%s: We're about to enter the Secure partition...\n", __func__);
rc = spm_secure_partition_enter(&sp_ctx_ptr->c_rt_ctx);
#if ENABLE_ASSERTIONS
sp_ctx_ptr->c_rt_ctx = 0;
#endif
success = 1;
}
return rc;
spin_unlock(&(sp_ptr->state_lock));
}
}
/*******************************************************************************
* This function takes a Secure partition context pointer and:
* 1. Saves the S-EL1 system register context to sp_ctx->cpu_ctx.
* 2. Restores the current C runtime state (callee saved registers) from the
* stack frame using the reference to this state saved in
* spm_secure_partition_enter().
* 3. It does not need to save any general purpose or EL3 system register state
* as the generic smc entry routine should have saved those.
* Check if the state of a Secure Partition is the specified one and, if so,
* change it to the desired state. Returns 0 on success, -1 on error.
******************************************************************************/
static void __dead2 spm_synchronous_sp_exit(
const secure_partition_context_t *sp_ctx_ptr, uint64_t ret)
int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
{
assert(sp_ctx_ptr != NULL);
/* Save the Secure EL1 system register context */
assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);
cm_el1_sysregs_context_save(SECURE);
int ret = -1;
assert(sp_ctx_ptr->c_rt_ctx != 0U);
spm_secure_partition_exit(sp_ctx_ptr->c_rt_ctx, ret);
spin_lock(&(sp_ptr->state_lock));
if (sp_ptr->state == from) {
sp_ptr->state = to;
ret = 0;
}
/* Should never reach here */
assert(0);
spin_unlock(&(sp_ptr->state_lock));
return ret;
}
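/*
 * Usage note (summarising code later in this patch): the MM_COMMUNICATE path
 * serialises requests by calling sp_state_wait_switch(ctx, SP_STATE_IDLE,
 * SP_STATE_BUSY) before entering the Secure Partition, and the
 * SP_EVENT_COMPLETE handler releases the context again with
 * sp_state_set(ctx, SP_STATE_IDLE).
 */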
/*******************************************************************************
* This function passes control to the Secure Partition image (BL32) for the
* first time on the primary cpu after a cold boot. It assumes that a valid
* secure context has already been created by spm_setup() which can be directly
* used. This function performs a synchronous entry into the Secure partition.
* The SP passes control back to this routine through a SMC.
* This function takes an SP context pointer and prepares the CPU to enter.
******************************************************************************/
static int32_t spm_init(void)
static void spm_sp_prepare_enter(sp_context_t *sp_ctx)
{
entry_point_info_t *secure_partition_ep_info;
uint64_t rc;
VERBOSE("%s entry\n", __func__);
/*
* Get information about the Secure Partition (BL32) image. Its
* absence is a critical failure.
*/
secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
assert(secure_partition_ep_info != NULL);
assert(sp_ctx != NULL);
/*
* Initialise the common context and then overlay the S-EL0 specific
* context on top of it.
*/
cm_init_my_context(secure_partition_ep_info);
secure_partition_setup();
/* Assign the context of the SP to this CPU */
cm_set_context(&(sp_ctx->cpu_ctx), SECURE);
/*
* Make all CPUs use the same secure context.
*/
for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
cm_set_context_by_index(i, &sp_ctx.cpu_ctx, SECURE);
}
/* Restore the context assigned above */
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
/*
* Arrange for an entry into the secure partition.
*/
sp_ctx.sp_init_in_progress = 1;
rc = spm_synchronous_sp_entry(&sp_ctx);
assert(rc == 0);
sp_ctx.sp_init_in_progress = 0;
VERBOSE("SP_MEMORY_ATTRIBUTES_SET_AARCH64 availability has been revoked\n");
/* Invalidate TLBs at EL1. */
tlbivmalle1();
dsbish();
}
return rc;
/*******************************************************************************
* Enter SP after preparing it with spm_sp_prepare_enter().
******************************************************************************/
static uint64_t spm_sp_enter(sp_context_t *sp_ctx)
{
/* Enter Secure Partition */
return spm_secure_partition_enter(&sp_ctx->c_rt_ctx);
}
/*******************************************************************************
* Given a secure partition entrypoint info pointer, entry point PC & pointer to
* a context data structure, this function will initialize the SPM context and
* entry point info for the secure partition.
* Jump to each Secure Partition for the first time.
******************************************************************************/
void spm_init_sp_ep_state(struct entry_point_info *sp_ep_info,
uint64_t pc,
secure_partition_context_t *sp_ctx_ptr)
static int32_t spm_init(void)
{
uint32_t ep_attr;
uint64_t rc = 0;
sp_context_t *ctx;
INFO("Secure Partition init...\n");
ctx = &sp_ctx;
assert(sp_ep_info != NULL);
assert(pc != 0U);
assert(sp_ctx_ptr != NULL);
ctx->state = SP_STATE_RESET;
cm_set_context(&sp_ctx_ptr->cpu_ctx, SECURE);
spm_sp_prepare_enter(ctx);
rc |= spm_sp_enter(ctx);
assert(rc == 0);
/* initialise an entrypoint to set up the CPU context */
ep_attr = SECURE | EP_ST_ENABLE;
if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U)
ep_attr |= EP_EE_BIG;
SET_PARAM_HEAD(sp_ep_info, PARAM_EP, VERSION_1, ep_attr);
ctx->state = SP_STATE_IDLE;
sp_ep_info->pc = pc;
/* The secure partition runs in S-EL0. */
sp_ep_info->spsr = SPSR_64(MODE_EL0,
MODE_SP_EL0,
DISABLE_ALL_EXCEPTIONS);
INFO("Secure Partition initialized.\n");
zeromem(&sp_ep_info->args, sizeof(sp_ep_info->args));
return rc;
}
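/*
 * Note on the boot-time handshake (illustrative summary, inferred from the
 * code in this file): spm_sp_enter() saves the callee-saved C runtime state
 * via spm_secure_partition_enter() and ERETs into the Secure Partition; when
 * the partition signals SP_EVENT_COMPLETE while its context is still in
 * SP_STATE_RESET, sp_event_complete() calls spm_secure_partition_exit(),
 * which makes the spm_sp_enter() call in spm_init() return with the value
 * passed by the partition.
 */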
/*******************************************************************************
* Secure Partition Manager setup. The SPM finds out the SP entrypoint if not
* already known and initialises the context for entry into the SP for its
* initialisation.
* Initialize contexts of all Secure Partitions.
******************************************************************************/
int32_t spm_setup(void)
{
entry_point_info_t *secure_partition_ep_info;
sp_context_t *ctx;
VERBOSE("%s entry\n", __func__);
/* Disable MMU at EL1 (initialized by BL2) */
disable_mmu_icache_el1();
/*
* Get information about the Secure Partition (BL32) image. Its
* absence is a critical failure.
*/
secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
if (secure_partition_ep_info == NULL) {
WARN("No SPM provided by BL2 boot loader, Booting device"
" without SPM initialization. SMCs destined for SPM"
" will return SMC_UNK\n");
return 1;
}
/* Initialize context of the SP */
INFO("Secure Partition context setup start...\n");
/*
* If there's no valid entry point for SP, we return a non-zero value
* signalling failure initializing the service. We bail out without
* registering any handlers
*/
if (secure_partition_ep_info->pc == 0U) {
return 1;
}
ctx = &sp_ctx;
spm_init_sp_ep_state(secure_partition_ep_info,
secure_partition_ep_info->pc,
&sp_ctx);
/* Assign translation tables context. */
ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
/*
* All SPM initialization done. Now register our init function with
* BL31 for deferred invocation
*/
spm_sp_setup(ctx);
/* Register init function for deferred init. */
bl31_register_bl32_init(&spm_init);
VERBOSE("%s exit\n", __func__);
INFO("Secure Partition setup done.\n");
return 0;
}
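/*
 * Ordering note (illustrative): spm_setup() runs when the runtime service is
 * initialised, but the first entry into the Secure Partition is deferred; the
 * call to bl31_register_bl32_init(&spm_init) above means spm_init() is only
 * invoked later by BL31, shortly before it hands control to the next image.
 */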
/*
* Attributes are encoded using a different format in the SMC interface than in
* the Trusted Firmware, where the mmap_attr_t enum type is used. This function
* converts an attributes value from the SMC format to the mmap_attr_t format by
* setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
* The other fields are left as 0 because they are ignored by the function
* change_mem_attributes().
*/
static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
/*******************************************************************************
* MM_COMMUNICATE handler
******************************************************************************/
static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
uint64_t comm_buffer_address,
uint64_t comm_size_address, void *handle)
{
unsigned int tf_attr = 0U;
sp_context_t *ctx = &sp_ctx;
unsigned int access = (attributes & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
>> SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
tf_attr |= MT_RW | MT_USER;
} else if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
tf_attr |= MT_RO | MT_USER;
} else {
/* Other values are reserved. */
assert(access == SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
/* The only requirement is that there's no access from EL0 */
tf_attr |= MT_RO | MT_PRIVILEGED;
/* Cookie. Reserved for future use. It must be zero. */
if (mm_cookie != 0U) {
ERROR("MM_COMMUNICATE: cookie is not zero\n");
SMC_RET1(handle, SPM_INVALID_PARAMETER);
}
if ((attributes & SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
tf_attr |= MT_EXECUTE;
} else {
tf_attr |= MT_EXECUTE_NEVER;
if (comm_buffer_address == 0U) {
ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
SMC_RET1(handle, SPM_INVALID_PARAMETER);
}
return tf_attr;
}
/*
* This function converts attributes from the Trusted Firmware format into the
* SMC interface format.
*/
static unsigned int smc_mmap_to_smc_attr(unsigned int attr)
{
unsigned int smc_attr = 0U;
unsigned int data_access;
if ((attr & MT_USER) == 0) {
/* No access from EL0. */
data_access = SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
} else {
if ((attr & MT_RW) != 0) {
assert(MT_TYPE(attr) != MT_DEVICE);
data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RW;
} else {
data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RO;
}
if (comm_size_address != 0U) {
VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
}
smc_attr |= (data_access & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
<< SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;
/* Save the Normal world context */
cm_el1_sysregs_context_save(NON_SECURE);
if ((attr & MT_EXECUTE_NEVER) != 0U) {
smc_attr |= SP_MEMORY_ATTRIBUTES_NON_EXEC;
}
/* Wait until the Secure Partition is IDLE and set it to BUSY. */
sp_state_wait_switch(ctx, SP_STATE_IDLE, SP_STATE_BUSY);
/* Jump to the Secure Partition. */
spm_sp_prepare_enter(ctx);
return smc_attr;
SMC_RET4(&(ctx->cpu_ctx), smc_fid, comm_buffer_address,
comm_size_address, plat_my_core_pos());
}
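/*
 * Calling convention recap (derived from the checks above, not new
 * behaviour): the Normal world invokes MM_COMMUNICATE with x1 (the cookie)
 * equal to 0, x2 holding the communication buffer address and x3
 * (comm_size_address) recommended to be 0. The request is forwarded to the
 * Secure Partition via SMC_RET4(), and the value the partition later returns
 * through SP_EVENT_COMPLETE is handed back to the Normal world caller in x0.
 */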
static int32_t spm_memory_attributes_get_smc_handler(uintptr_t base_va)
/*******************************************************************************
* SP_EVENT_COMPLETE_AARCH64 handler
******************************************************************************/
static uint64_t sp_event_complete(uint64_t x1)
{
uint32_t attributes;
spin_lock(&mem_attr_smc_lock);
int rc = get_mem_attributes(secure_partition_xlat_ctx_handle,
base_va, &attributes);
sp_context_t *ctx = &sp_ctx;
spin_unlock(&mem_attr_smc_lock);
/* Save secure state */
cm_el1_sysregs_context_save(SECURE);
/* Convert error codes of get_mem_attributes() into SPM ones. */
assert((rc == 0) || (rc == -EINVAL));
if (ctx->state == SP_STATE_RESET) {
/*
* SPM reports completion. The SPM must have initiated the
* original request through a synchronous entry into the secure
* partition. Jump back to the original C runtime context.
*/
spm_secure_partition_exit(ctx->c_rt_ctx, x1);
if (rc == 0) {
return (int32_t) smc_mmap_to_smc_attr(attributes);
} else {
return SPM_INVALID_PARAMETER;
/* spm_secure_partition_exit doesn't return */
}
}
static int spm_memory_attributes_set_smc_handler(u_register_t page_address,
u_register_t pages_count,
u_register_t smc_attributes)
{
uintptr_t base_va = (uintptr_t) page_address;
size_t size = (size_t) (pages_count * PAGE_SIZE);
uint32_t attributes = (uint32_t) smc_attributes;
/*
* This is the result from the Secure partition of an earlier request.
* Copy the result into the non-secure context and return to the
* non-secure state.
*/
INFO(" Start address : 0x%lx\n", base_va);
INFO(" Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
INFO(" Attributes : 0x%x\n", attributes);
/* Mark Secure Partition as idle */
assert(ctx->state == SP_STATE_BUSY);
spin_lock(&mem_attr_smc_lock);
sp_state_set(ctx, SP_STATE_IDLE);
int ret = change_mem_attributes(secure_partition_xlat_ctx_handle,
base_va, size, smc_attr_to_mmap_attr(attributes));
/* Get a reference to the non-secure context */
cpu_context_t *ns_cpu_context = cm_get_context(NON_SECURE);
spin_unlock(&mem_attr_smc_lock);
assert(ns_cpu_context != NULL);
/* Convert error codes of change_mem_attributes() into SPM ones. */
assert((ret == 0) || (ret == -EINVAL));
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
/* Return to non-secure world */
SMC_RET1(ns_cpu_context, x1);
}
/*******************************************************************************
* Secure Partition Manager SMC handler.
******************************************************************************/
uint64_t spm_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
......@@ -346,7 +255,6 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
cpu_context_t *ns_cpu_context;
unsigned int ns;
/* Determine which security state this SMC originated from */
......@@ -356,66 +264,40 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
/* Handle SMCs from Secure world. */
assert(handle == cm_get_context(SECURE));
/* Make next ERET jump to S-EL0 instead of S-EL1. */
cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
switch (smc_fid) {
case SPM_VERSION_AARCH32:
SMC_RET1(handle, SPM_VERSION_COMPILED);
case SP_EVENT_COMPLETE_AARCH64:
assert(handle == cm_get_context(SECURE));
cm_el1_sysregs_context_save(SECURE);
spm_setup_next_eret_into_sel0(handle);
if (sp_ctx.sp_init_in_progress) {
/*
* SPM reports completion. The SPM must have
* initiated the original request through a
* synchronous entry into the secure
* partition. Jump back to the original C
* runtime context.
*/
spm_synchronous_sp_exit(&sp_ctx, x1);
assert(0);
}
/* Release the Secure Partition context */
spin_unlock(&sp_ctx.lock);
/*
* This is the result from the Secure partition of an
* earlier request. Copy the result into the non-secure
* context, save the secure state and return to the
* non-secure state.
*/
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(NON_SECURE);
assert(ns_cpu_context != NULL);
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
/* Return to normal world */
SMC_RET1(ns_cpu_context, x1);
return sp_event_complete(x1);
case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
if (!sp_ctx.sp_init_in_progress) {
if (sp_ctx.state != SP_STATE_RESET) {
WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
SMC_RET1(handle, SPM_NOT_SUPPORTED);
}
SMC_RET1(handle, spm_memory_attributes_get_smc_handler(x1));
SMC_RET1(handle,
spm_memory_attributes_get_smc_handler(
&sp_ctx, x1));
case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
if (!sp_ctx.sp_init_in_progress) {
if (sp_ctx.state != SP_STATE_RESET) {
WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
SMC_RET1(handle, SPM_NOT_SUPPORTED);
}
SMC_RET1(handle, spm_memory_attributes_set_smc_handler(x1, x2, x3));
SMC_RET1(handle,
spm_memory_attributes_set_smc_handler(
&sp_ctx, x1, x2, x3));
default:
break;
}
......@@ -430,43 +312,7 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
case MM_COMMUNICATE_AARCH32:
case MM_COMMUNICATE_AARCH64:
{
uint64_t mm_cookie = x1;
uint64_t comm_buffer_address = x2;
uint64_t comm_size_address = x3;
/* Cookie. Reserved for future use. It must be zero. */
if (mm_cookie != 0U) {
ERROR("MM_COMMUNICATE: cookie is not zero\n");
SMC_RET1(handle, SPM_INVALID_PARAMETER);
}
if (comm_buffer_address == 0U) {
ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
SMC_RET1(handle, SPM_INVALID_PARAMETER);
}
if (comm_size_address != 0U) {
VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
}
/* Save the Normal world context */
cm_el1_sysregs_context_save(NON_SECURE);
/* Lock the Secure Partition context. */
spin_lock(&sp_ctx.lock);
/*
* Restore the secure world context and prepare for
* entry in S-EL0
*/
assert(&sp_ctx.cpu_ctx == cm_get_context(SECURE));
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
SMC_RET4(&sp_ctx.cpu_ctx, smc_fid, comm_buffer_address,
comm_size_address, plat_my_core_pos());
}
return mm_communicate(smc_fid, x1, x2, x3, handle);
case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
......@@ -29,30 +29,42 @@
#define SP_C_RT_CTX_SIZE 0x60
#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
#ifndef __ASSEMBLY__
#include <spinlock.h>
#include <stdint.h>
#include <xlat_tables_v2.h>
/* Handle on the Secure partition translation context */
extern xlat_ctx_t *secure_partition_xlat_ctx_handle;
struct entry_point_info;
typedef enum secure_partition_state {
SP_STATE_RESET = 0,
SP_STATE_IDLE,
SP_STATE_BUSY
} sp_state_t;
typedef struct secure_partition_context {
typedef struct sp_context {
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
unsigned int sp_init_in_progress;
spinlock_t lock;
} secure_partition_context_t;
xlat_ctx_t *xlat_ctx_handle;
sp_state_t state;
spinlock_t state_lock;
} sp_context_t;
/* Assembly helpers */
uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
void spm_init_sp_ep_state(struct entry_point_info *sp_ep_info,
uint64_t pc,
secure_partition_context_t *sp_ctx_ptr);
void spm_sp_setup(sp_context_t *sp_ctx);
xlat_ctx_t *spm_get_sp_xlat_context(void);
int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
uintptr_t base_va);
int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
u_register_t page_address,
u_register_t pages_count,
u_register_t smc_attributes);
#endif /* __ASSEMBLY__ */
#endif /* __SPM_PRIVATE_H__ */