Commit ab5a53ef authored by danh-arm

Merge pull request #453 from yatharth-arm/yk/fwu-6

Firmware Update patch stack
parents 0e288c92 0191262d
@@ -76,6 +76,8 @@ USE_COHERENT_MEM := 1
PSCI_EXTENDED_STATE_ID := 0
# Default FIP file name
FIP_NAME := fip.bin
# Default FWU_FIP file name
FWU_FIP_NAME := fwu_fip.bin
# By default, use the -pedantic option in the gcc command line
DISABLE_PEDANTIC := 0
# Flags to generate the Chain of Trust
@@ -150,6 +152,7 @@ VERSION_STRING := v${VERSION_MAJOR}.${VERSION_MINOR}(${BUILD_TYPE}):${BUILD_STR
# target 'certificates' to create them all
ifneq (${GENERATE_COT},0)
FIP_DEPS += certificates
FWU_FIP_DEPS += fwu_certificates
endif
@@ -195,7 +198,8 @@ BL_COMMON_SOURCES += common/bl_common.c \
lib/stdlib/std.c \
plat/common/aarch64/platform_helpers.S
INCLUDES += -Iinclude/bl31 \
INCLUDES += -Iinclude/bl1 \
-Iinclude/bl31 \
-Iinclude/bl31/services \
-Iinclude/common \
-Iinclude/drivers \
@@ -320,8 +324,10 @@ ifneq (${GENERATE_COT},0)
# Common cert_create options
ifneq (${CREATE_KEYS},0)
$(eval CRT_ARGS += -n)
$(eval FWU_CRT_ARGS += -n)
ifneq (${SAVE_KEYS},0)
$(eval CRT_ARGS += -k)
$(eval FWU_CRT_ARGS += -k)
endif
endif
# Include TBBR makefile (unless the platform indicates otherwise)
@@ -409,6 +415,11 @@ NEED_BL2 := yes
include bl2/bl2.mk
endif
ifdef BL2U_SOURCES
NEED_BL2U := yes
include bl2u/bl2u.mk
endif
ifdef BL31_SOURCES
# When booting an EL3 payload, there is no need to compile the BL31 image nor
# put it in the FIP.
@@ -423,7 +434,7 @@ endif
# Build targets
################################################################################
.PHONY: all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool fip certtool
.PHONY: all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool fip fwu_fip certtool
.SUFFIXES:
all: msg_start
@@ -466,6 +477,12 @@ ifeq (${NEED_BL33},yes)
$(eval $(call FIP_ADD_IMG,BL33,--bl33))
endif
ifeq (${NEED_BL2U},yes)
BL2U_PATH := $(if ${BL2U},${BL2U},$(call IMG_BIN,2u))
$(if ${BL2U}, ,$(eval $(call MAKE_BL,2u)))
$(eval $(call FWU_FIP_ADD_PAYLOAD,${BL2U_PATH},--bl2u))
endif
locate-checkpatch:
ifndef CHECKPATCH
$(error "Please set CHECKPATCH to point to the Linux checkpatch.pl file, eg: CHECKPATCH=../linux/script/checkpatch.pl")
@@ -524,8 +541,24 @@ ${BUILD_PLAT}/${FIP_NAME}: ${FIP_DEPS} ${FIPTOOL}
@echo "Built $@ successfully"
@echo
ifneq (${GENERATE_COT},0)
fwu_certificates: ${FWU_CRT_DEPS} ${CRTTOOL}
${Q}${CRTTOOL} ${FWU_CRT_ARGS}
@echo
@echo "Built $@ successfully"
@echo "FWU certificates can be found in ${BUILD_PLAT}"
@echo
endif
${BUILD_PLAT}/${FWU_FIP_NAME}: ${FWU_FIP_DEPS} ${FIPTOOL}
${Q}${FIPTOOL} --dump ${FWU_FIP_ARGS} $@
@echo
@echo "Built $@ successfully"
@echo
fiptool: ${FIPTOOL}
fip: ${BUILD_PLAT}/${FIP_NAME}
fwu_fip: ${BUILD_PLAT}/${FWU_FIP_NAME}
.PHONY: ${FIPTOOL}
${FIPTOOL}:
@@ -551,10 +584,12 @@ help:
@echo " all Build all individual bootloader binaries"
@echo " bl1 Build the BL1 binary"
@echo " bl2 Build the BL2 binary"
@echo " bl2u Build the BL2U binary"
@echo " bl31 Build the BL3-1 binary"
@echo " bl32 Build the BL3-2 binary"
@echo " certificates Build the certificates (requires 'GENERATE_COT=1')"
@echo " fip Build the Firmware Image Package (FIP)"
@echo " fwu_fip Build the FWU Firmware Image Package (FIP)"
@echo " checkcodebase Check the coding style of the entire source tree"
@echo " checkpatch Check the coding style on changes in the current"
@echo " branch against BASE_COMMIT (default origin/master)"
......
@@ -69,10 +69,14 @@ func bl1_entrypoint
/* --------------------------------------------------
* Initialize platform and jump to our c-entry point
* for this type of reset. Panic if it returns
* for this type of reset.
* --------------------------------------------------
*/
bl bl1_main
panic:
b panic
/* --------------------------------------------------
* Do the transition to next boot image.
* --------------------------------------------------
*/
b el3_exit
endfunc bl1_entrypoint
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,8 @@
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <runtime_svc.h>
#include <bl1.h>
#include <context.h>
.globl bl1_exceptions
@@ -115,10 +116,12 @@ SynchronousExceptionA64:
/* Enable the SError interrupt */
msr daifclr, #DAIF_ABT_BIT
str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
/* Expect only SMC exceptions */
mrs x19, esr_el3
ubfx x20, x19, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x20, #EC_AARCH64_SMC
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x30, #EC_AARCH64_SMC
b.ne unexpected_sync_exception
b smc_handler64
@@ -179,21 +182,40 @@ SErrorA32:
func smc_handler64
/* ----------------------------------------------
* Detect if this is a RUN_IMAGE or other SMC.
* ----------------------------------------------
*/
mov x30, #BL1_SMC_RUN_IMAGE
cmp x30, x0
b.ne smc_handler
/* ------------------------------------------------
* Make sure only Secure world reaches here.
* ------------------------------------------------
*/
mrs x30, scr_el3
tst x30, #SCR_NS_BIT
b.ne unexpected_sync_exception
/* ----------------------------------------------
* Handling RUN_IMAGE SMC. First switch back to
* SP_EL0 for the C runtime stack.
* ----------------------------------------------
*/
ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
msr spsel, #0
mov sp, x30
/* ---------------------------------------------------------------------
* Only a single SMC exception from BL2 to ask BL1 to pass EL3 control
* to BL31 is expected here. It expects:
* - X0 with RUN_IMAGE SMC function ID;
* - X1 with the address of an entry_point_info_t structure describing
* the BL31 entrypoint.
* Pass EL3 control to BL31.
* Here it expects X1 with the address of an entry_point_info_t
* structure describing the BL31 entrypoint.
* ---------------------------------------------------------------------
*/
mov x19, x0
mov x20, x1
mov x0, #RUN_IMAGE
cmp x19, x0
b.ne unexpected_sync_exception
mov x0, x20
bl bl1_print_bl31_ep_info
@@ -228,3 +250,69 @@ unexpected_sync_exception:
bl plat_report_exception
wfi
b unexpected_sync_exception
/* -----------------------------------------------------
* Save Secure/Normal world context and jump to
* BL1 SMC handler.
* -----------------------------------------------------
*/
smc_handler:
/* -----------------------------------------------------
* Save the GP registers x0-x29.
* TODO: Revisit to store only SMCC specified registers.
* -----------------------------------------------------
*/
bl save_gp_registers
/* -----------------------------------------------------
* Populate the parameters for the SMC handler. We
* already have x0-x4 in place. x5 will point to a
* cookie (not used now). x6 will point to the context
* structure (SP_EL3) and x7 will contain flags we need
* to pass to the handler.
* -----------------------------------------------------
*/
mov x5, xzr
mov x6, sp
/* -----------------------------------------------------
* Restore the saved C runtime stack value which will
* become the new SP_EL0 i.e. EL3 runtime stack. It was
* saved in the 'cpu_context' structure prior to the last
* ERET from EL3.
* -----------------------------------------------------
*/
ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
/* ---------------------------------------------
* Switch back to SP_EL0 for the C runtime stack.
* ---------------------------------------------
*/
msr spsel, #0
mov sp, x12
/* -----------------------------------------------------
* Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there
* is a world switch during SMC handling.
* -----------------------------------------------------
*/
mrs x16, spsr_el3
mrs x17, elr_el3
mrs x18, scr_el3
stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
str x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
bfi x7, x18, #0, #1
/* -----------------------------------------------------
* Go to BL1 SMC handler.
* -----------------------------------------------------
*/
bl bl1_smc_handler
/* -----------------------------------------------------
* Do the transition to next BL image.
* -----------------------------------------------------
*/
b el3_exit
#
# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,14 @@ BL1_SOURCES += bl1/bl1_main.c \
bl1/aarch64/bl1_arch_setup.c \
bl1/aarch64/bl1_entrypoint.S \
bl1/aarch64/bl1_exceptions.S \
lib/cpus/aarch64/cpu_helpers.S
bl1/bl1_context_mgmt.c \
common/aarch64/context.S \
common/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
plat/common/plat_bl1_common.c
ifeq (${TRUSTED_BOARD_BOOT},1)
BL1_SOURCES += bl1/bl1_fwu.c
endif
BL1_LINKERFILE := bl1/bl1.ld.S
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
/*
* The following array is used for context management.
* There are two instances, one each for the Secure and Non-Secure contexts.
*/
static cpu_context_t bl1_cpu_context[2];
/* The following holds the cpu context pointers. */
static void *bl1_cpu_context_ptr[2];
void *cm_get_context(uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
return bl1_cpu_context_ptr[security_state];
}
void cm_set_context(void *context, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
bl1_cpu_context_ptr[security_state] = context;
}
/*******************************************************************************
* This function prepares the context for Secure/Normal world images.
* Normal world images are transitioned to EL2 (if supported), otherwise EL1.
******************************************************************************/
void bl1_prepare_next_image(unsigned int image_id)
{
unsigned int security_state;
image_desc_t *image_desc;
entry_point_info_t *next_bl_ep;
/* Get the image descriptor. */
image_desc = bl1_plat_get_image_desc(image_id);
assert(image_desc);
/* Get the entry point info. */
next_bl_ep = &image_desc->ep_info;
/* Get the image security state. */
security_state = GET_SEC_STATE(next_bl_ep->h.attr);
/* Setup the Secure/Non-Secure context if not done already. */
if (!cm_get_context(security_state))
cm_set_context(&bl1_cpu_context[security_state], security_state);
/* Prepare the SPSR for the next BL image. */
if (security_state == SECURE) {
next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
} else {
/* Use EL2 if supported else use EL1. */
if (read_id_aa64pfr0_el1() &
(ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
next_bl_ep->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
} else {
next_bl_ep->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS);
}
}
/* Allow the platform to make changes. */
bl1_plat_set_ep_info(image_id, next_bl_ep);
/* Prepare the context for the next BL image. */
cm_init_my_context(next_bl_ep);
cm_prepare_el3_exit(security_state);
/* Indicate that image is in execution state. */
image_desc->state = IMAGE_STATE_EXECUTED;
print_entry_point_info(next_bl_ep);
}
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <arch_helpers.h>
#include <auth_mod.h>
#include <bl1.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include <string.h>
#include "bl1_private.h"
/*
* Function declarations.
*/
static int bl1_fwu_image_copy(unsigned int image_id,
uintptr_t image_addr,
unsigned int block_size,
unsigned int image_size,
unsigned int flags);
static int bl1_fwu_image_auth(unsigned int image_id,
uintptr_t image_addr,
unsigned int image_size,
unsigned int flags);
static int bl1_fwu_image_execute(unsigned int image_id,
void **handle,
unsigned int flags);
static register_t bl1_fwu_image_resume(unsigned int image_id,
register_t image_param,
void **handle,
unsigned int flags);
static int bl1_fwu_sec_image_done(void **handle,
unsigned int flags);
__dead2 static void bl1_fwu_done(void *cookie, void *reserved);
/*
* This keeps track of the last executed secure image id.
*/
static unsigned int sec_exec_image_id = INVALID_IMAGE_ID;
/*******************************************************************************
* Top level handler for servicing FWU SMCs.
******************************************************************************/
register_t bl1_fwu_smc_handler(unsigned int smc_fid,
register_t x1,
register_t x2,
register_t x3,
register_t x4,
void *cookie,
void *handle,
unsigned int flags)
{
switch (smc_fid) {
case FWU_SMC_IMAGE_COPY:
SMC_RET1(handle, bl1_fwu_image_copy(x1, x2, x3, x4, flags));
case FWU_SMC_IMAGE_AUTH:
SMC_RET1(handle, bl1_fwu_image_auth(x1, x2, x3, flags));
case FWU_SMC_IMAGE_EXECUTE:
SMC_RET1(handle, bl1_fwu_image_execute(x1, &handle, flags));
case FWU_SMC_IMAGE_RESUME:
SMC_RET1(handle, bl1_fwu_image_resume(x1, x2, &handle, flags));
case FWU_SMC_SEC_IMAGE_DONE:
SMC_RET1(handle, bl1_fwu_sec_image_done(&handle, flags));
case FWU_SMC_UPDATE_DONE:
bl1_fwu_done(cookie, NULL);
/* We should never return from bl1_fwu_done() */
default:
assert(0);
break;
}
SMC_RET0(handle);
}
/*******************************************************************************
* This function is responsible for copying secure images into AP Secure RAM.
******************************************************************************/
static int bl1_fwu_image_copy(unsigned int image_id,
uintptr_t image_src,
unsigned int block_size,
unsigned int image_size,
unsigned int flags)
{
uintptr_t base_addr;
meminfo_t *mem_layout;
/* Get the image descriptor. */
image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
/* Check if we are in correct state. */
if ((!image_desc) ||
((image_desc->state != IMAGE_STATE_RESET) &&
(image_desc->state != IMAGE_STATE_COPYING))) {
WARN("BL1-FWU: Copy not allowed due to invalid state\n");
return -EPERM;
}
/* Only Normal world is allowed to copy a Secure image. */
if ((GET_SEC_STATE(flags) == SECURE) ||
(GET_SEC_STATE(image_desc->ep_info.h.attr) == NON_SECURE)) {
WARN("BL1-FWU: Copy not allowed for Non-Secure "
"image from Secure-world\n");
return -EPERM;
}
if ((!image_src) || (!block_size)) {
WARN("BL1-FWU: Copy not allowed due to invalid image source"
" or block size\n");
return -ENOMEM;
}
/* Get the image base address. */
base_addr = image_desc->image_info.image_base;
if (image_desc->state == IMAGE_STATE_COPYING) {
/*
* If the last block is larger than expected,
* clip it to the remaining image size.
*/
if (image_desc->image_info.copied_size + block_size >
image_desc->image_info.image_size) {
block_size = image_desc->image_info.image_size -
image_desc->image_info.copied_size;
WARN("BL1-FWU: Copy argument block_size > remaining image size."
" Clipping block_size\n");
}
/* Make sure the image src/size is mapped. */
if (bl1_plat_mem_check(image_src, block_size, flags)) {
WARN("BL1-FWU: Copy arguments source/size not mapped\n");
return -ENOMEM;
}
INFO("BL1-FWU: Continuing image copy in blocks\n");
/* Copy image for given block size. */
base_addr += image_desc->image_info.copied_size;
image_desc->image_info.copied_size += block_size;
memcpy((void *)base_addr, (const void *)image_src, block_size);
flush_dcache_range(base_addr, block_size);
/* Update the state if last block. */
if (image_desc->image_info.copied_size ==
image_desc->image_info.image_size) {
image_desc->state = IMAGE_STATE_COPIED;
INFO("BL1-FWU: Image copy in blocks completed\n");
}
} else {
/* This means image is in RESET state and ready to be copied. */
INFO("BL1-FWU: Fresh call to copy an image\n");
if (!image_size) {
WARN("BL1-FWU: Copy not allowed due to invalid image size\n");
return -ENOMEM;
}
/*
* If the block size is larger than the total
* image size, clip it to the total image size.
*/
if (block_size > image_size) {
block_size = image_size;
WARN("BL1-FWU: Copy argument block_size > image size."
" Clipping block_size\n");
}
/* Make sure the image src/size is mapped. */
if (bl1_plat_mem_check(image_src, block_size, flags)) {
WARN("BL1-FWU: Copy arguments source/size not mapped\n");
return -ENOMEM;
}
/* Find out how much free trusted ram remains after BL1 load */
mem_layout = bl1_plat_sec_mem_layout();
if ((image_desc->image_info.image_base < mem_layout->free_base) ||
(image_desc->image_info.image_base + image_size >
mem_layout->free_base + mem_layout->free_size)) {
WARN("BL1-FWU: Memory not available to copy\n");
return -ENOMEM;
}
/* Update the image size. */
image_desc->image_info.image_size = image_size;
/* Copy image for given size. */
memcpy((void *)base_addr, (const void *)image_src, block_size);
flush_dcache_range(base_addr, block_size);
/* Update the state. */
if (block_size == image_size) {
image_desc->state = IMAGE_STATE_COPIED;
INFO("BL1-FWU: Image is copied successfully\n");
} else {
image_desc->state = IMAGE_STATE_COPYING;
INFO("BL1-FWU: Started image copy in blocks\n");
}
image_desc->image_info.copied_size = block_size;
}
return 0;
}
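/*
 * Illustrative sketch, not part of this patch: a normal-world FWU client
 * could drive the copy-in-blocks protocol above by issuing
 * FWU_SMC_IMAGE_COPY once per block until the whole image is transferred.
 * The fwu_smc() wrapper (assumed to return X0 of the SMC response), MIN(),
 * BLOCK_SIZE, image_id and src are assumptions for this example only.
 *
 *     size_t copied = 0;
 *     while (copied < image_size) {
 *         size_t chunk = MIN(BLOCK_SIZE, image_size - copied);
 *         // Each call passes the total image_size; BL1 moves the image
 *         // from RESET to COPYING on the first block and to COPIED once
 *         // copied_size == image_size.
 *         if (fwu_smc(FWU_SMC_IMAGE_COPY, image_id,
 *                     src + copied, chunk, image_size) != 0)
 *             break;
 *         copied += chunk;
 *     }
 */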
/*******************************************************************************
* This function is responsible for authenticating Normal/Secure images.
******************************************************************************/
static int bl1_fwu_image_auth(unsigned int image_id,
uintptr_t image_src,
unsigned int image_size,
unsigned int flags)
{
int result;
uintptr_t base_addr;
unsigned int total_size;
/* Get the image descriptor. */
image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
if (!image_desc)
return -EPERM;
if (GET_SEC_STATE(flags) == SECURE) {
if (image_desc->state != IMAGE_STATE_RESET) {
WARN("BL1-FWU: Authentication from secure world "
"while in invalid state\n");
return -EPERM;
}
} else {
if (GET_SEC_STATE(image_desc->ep_info.h.attr) == SECURE) {
if (image_desc->state != IMAGE_STATE_COPIED) {
WARN("BL1-FWU: Authentication of secure image "
"from non-secure world while not in copied state\n");
return -EPERM;
}
} else {
if (image_desc->state != IMAGE_STATE_RESET) {
WARN("BL1-FWU: Authentication of non-secure image "
"from non-secure world while in invalid state\n");
return -EPERM;
}
}
}
if (image_desc->state == IMAGE_STATE_COPIED) {
/*
* Image is in COPIED state.
* Use the stored address and size.
*/
base_addr = image_desc->image_info.image_base;
total_size = image_desc->image_info.image_size;
} else {
if ((!image_src) || (!image_size)) {
WARN("BL1-FWU: Auth not allowed due to invalid"
" image source/size\n");
return -ENOMEM;
}
/*
* Image is in RESET state.
* Check the parameters and authenticate the source image in place.
*/
if (bl1_plat_mem_check(image_src, image_size, flags)) {
WARN("BL1-FWU: Authentication arguments source/size not mapped\n");
return -ENOMEM;
}
base_addr = image_src;
total_size = image_size;
/* Update the image size in the descriptor. */
image_desc->image_info.image_size = total_size;
}
/*
* Authenticate the image.
*/
INFO("BL1-FWU: Authenticating image_id:%d\n", image_id);
result = auth_mod_verify_img(image_id, (void *)base_addr, total_size);
if (result != 0) {
WARN("BL1-FWU: Authentication Failed err=%d\n", result);
/*
* Authentication has failed.
* Clear the memory if the image was copied.
* This prevents an attack where the copied memory
* still holds malicious code that could be executed later.
*/
if (image_desc->state == IMAGE_STATE_COPIED) {
/* Clear the memory.*/
memset((void *)base_addr, 0, total_size);
flush_dcache_range(base_addr, total_size);
/* Indicate that the image can be copied again. */
image_desc->state = IMAGE_STATE_RESET;
}
return -EAUTH;
}
/* Indicate that image is in authenticated state. */
image_desc->state = IMAGE_STATE_AUTHENTICATED;
/*
* Flush image_info to memory so that other
* secure world images can see changes.
*/
flush_dcache_range((unsigned long)&image_desc->image_info,
sizeof(image_info_t));
INFO("BL1-FWU: Authentication was successful\n");
return 0;
}
/*******************************************************************************
* This function is responsible for executing Secure images.
******************************************************************************/
static int bl1_fwu_image_execute(unsigned int image_id,
void **handle,
unsigned int flags)
{
/* Get the image descriptor. */
image_desc_t *image_desc = bl1_plat_get_image_desc(image_id);
/*
* Execution is NOT allowed if:
* Caller is from Secure world OR
* Image is Non-Secure OR
* Image is Non-Executable OR
* Image is NOT in AUTHENTICATED state.
*/
if ((!image_desc) ||
(GET_SEC_STATE(flags) == SECURE) ||
(GET_SEC_STATE(image_desc->ep_info.h.attr) == NON_SECURE) ||
(GET_EXEC_STATE(image_desc->image_info.h.attr) == NON_EXECUTABLE) ||
(image_desc->state != IMAGE_STATE_AUTHENTICATED)) {
WARN("BL1-FWU: Execution not allowed due to invalid state/args\n");
return -EPERM;
}
INFO("BL1-FWU: Executing Secure image\n");
/* Save NS-EL1 system registers. */
cm_el1_sysregs_context_save(NON_SECURE);
/* Prepare the image for execution. */
bl1_prepare_next_image(image_id);
/* Update the secure image id. */
sec_exec_image_id = image_id;
*handle = cm_get_context(SECURE);
return 0;
}
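/*
 * Illustrative continuation of the sketch above, not part of this patch:
 * once the image reaches the COPIED state, the same hypothetical client
 * authenticates and then executes it, walking the descriptor through
 * COPIED -> AUTHENTICATED -> EXECUTED:
 *
 *     // Address/size may be zero here: for a COPIED image the handler
 *     // authenticates the stored base address and size in place.
 *     fwu_smc(FWU_SMC_IMAGE_AUTH, image_id, 0, 0, 0);
 *     // Control returns to the normal world only when the secure image
 *     // yields via FWU_SMC_IMAGE_RESUME or FWU_SMC_SEC_IMAGE_DONE.
 *     fwu_smc(FWU_SMC_IMAGE_EXECUTE, image_id, 0, 0, 0);
 */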
/*******************************************************************************
* This function is responsible for resuming Secure/Non-Secure images.
******************************************************************************/
static register_t bl1_fwu_image_resume(unsigned int image_id,
register_t image_param,
void **handle,
unsigned int flags)
{
image_desc_t *image_desc;
unsigned int resume_sec_state;
if (GET_SEC_STATE(flags) == SECURE) {
/* Get the image descriptor for last executed secure image id. */
image_desc = bl1_plat_get_image_desc(sec_exec_image_id);
if ((!image_desc) || (image_desc->state != IMAGE_STATE_EXECUTED)) {
WARN("BL1-FWU: Resume not allowed for secure image "
"due to invalid state\n");
return -EPERM;
}
/* Update the flags. */
image_desc->state = IMAGE_STATE_INTERRUPTED;
resume_sec_state = NON_SECURE;
} else {
/* Get the image descriptor for image id to be resumed. */
image_desc = bl1_plat_get_image_desc(image_id);
/* Make sure image is secure and was interrupted. */
if ((!image_desc) ||
(GET_SEC_STATE(image_desc->ep_info.h.attr) == NON_SECURE) ||
(image_desc->state != IMAGE_STATE_INTERRUPTED)) {
WARN("BL1-FWU: Resume not allowed for NS image/ invalid state\n");
return -EPERM;
}
/* Update the flags. */
image_desc->state = IMAGE_STATE_EXECUTED;
resume_sec_state = SECURE;
}
/* Save the EL1 system registers of calling world. */
cm_el1_sysregs_context_save(GET_SEC_STATE(flags));
/* Restore the EL1 system registers of resuming world. */
cm_el1_sysregs_context_restore(resume_sec_state);
/* Update the next context. */
cm_set_next_eret_context(resume_sec_state);
INFO("BL1-FWU: Resuming %s world context\n",
(resume_sec_state == SECURE) ? "Secure" : "Normal");
*handle = cm_get_context(resume_sec_state);
return image_param;
}
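/*
 * Illustrative sketch, not part of this patch (fwu_smc() assumed as above):
 * FWU_SMC_IMAGE_RESUME lets the two worlds ping-pong. The executing secure
 * image yields to the normal world (its state becomes INTERRUPTED) and the
 * normal world later re-enters it by image id (state back to EXECUTED).
 *
 *     // From the secure image: image_id is ignored, image_param is
 *     // handed to the normal world as the SMC return value.
 *     fwu_smc(FWU_SMC_IMAGE_RESUME, 0, param_for_ns, 0, 0);
 *     // From the normal world: re-enter the interrupted secure image.
 *     fwu_smc(FWU_SMC_IMAGE_RESUME, image_id, param_for_sec, 0, 0);
 */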
/*******************************************************************************
* This function is responsible for resuming normal world context.
******************************************************************************/
static int bl1_fwu_sec_image_done(void **handle, unsigned int flags)
{
/* Get the image descriptor for last executed secure image id. */
image_desc_t *image_desc = bl1_plat_get_image_desc(sec_exec_image_id);
/*
* Make sure caller is from secure world
* and the image is in EXECUTED state.
*/
if ((!image_desc) ||
(GET_SEC_STATE(flags) == NON_SECURE) ||
(image_desc->state != IMAGE_STATE_EXECUTED)) {
WARN("BL1-FWU: Done not allowed for NS caller/ invalid state\n");
return -EPERM;
}
/* Update the flags. */
image_desc->state = IMAGE_STATE_RESET;
sec_exec_image_id = INVALID_IMAGE_ID;
/*
* Secure world is done so no need to save the context.
* Just restore the Non-Secure context.
*/
cm_el1_sysregs_context_restore(NON_SECURE);
/* Update the next context. */
cm_set_next_eret_context(NON_SECURE);
INFO("BL1-FWU: Resuming Normal world context\n");
*handle = cm_get_context(NON_SECURE);
return 0;
}
/*******************************************************************************
* This function provides the opportunity for users to perform any
* platform-specific handling after the firmware update is done.
******************************************************************************/
__dead2 static void bl1_fwu_done(void *cookie, void *reserved)
{
NOTICE("BL1-FWU: *******FWU Process Completed*******\n");
/*
* Call platform done function.
*/
bl1_plat_fwu_done(cookie, reserved);
assert(0);
}
@@ -32,43 +32,22 @@
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl1.h>
#include <bl_common.h>
#include <debug.h>
#include <platform.h>
#include <platform_def.h>
#include <smcc_helpers.h>
#include "bl1_private.h"
#include <uuid.h>
/*******************************************************************************
* Runs BL2 from the given entry point. It results in dropping the
* exception level
******************************************************************************/
static void __dead2 bl1_run_bl2(entry_point_info_t *bl2_ep)
{
/* Check bl2 security state is expected as secure */
assert(GET_SECURITY_STATE(bl2_ep->h.attr) == SECURE);
/* Check NS Bit is also set as secure */
assert(!(read_scr_el3() & SCR_NS_BIT));
bl1_arch_next_el_setup();
/* BL1 Service UUID */
DEFINE_SVC_UUID(bl1_svc_uid,
0xfd3967d4, 0x72cb, 0x4d9a, 0xb5, 0x75,
0x67, 0x15, 0xd6, 0xf4, 0xbb, 0x4a);
/* Tell next EL what we want done */
bl2_ep->args.arg0 = RUN_IMAGE;
write_spsr_el3(bl2_ep->spsr);
write_elr_el3(bl2_ep->pc);
NOTICE("BL1: Booting BL2\n");
print_entry_point_info(bl2_ep);
eret(bl2_ep->args.arg0,
bl2_ep->args.arg1,
bl2_ep->args.arg2,
bl2_ep->args.arg3,
bl2_ep->args.arg4,
bl2_ep->args.arg5,
bl2_ep->args.arg6,
bl2_ep->args.arg7);
}
static void bl1_load_bl2(void);
/*******************************************************************************
* The next function has a weak definition. Platform specific code can override
@@ -91,7 +70,8 @@ void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
/* Check that BL1's memory is lying outside of the free memory */
assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
(BL1_RAM_BASE >= bl1_mem_layout->free_base + bl1_mem_layout->free_size));
(BL1_RAM_BASE >= bl1_mem_layout->free_base +
bl1_mem_layout->free_size));
/* Remove BL1 RW data from the scope of memory visible to BL2 */
*bl2_mem_layout = *bl1_mem_layout;
@@ -105,13 +85,13 @@
/*******************************************************************************
* Function to perform late architectural and platform specific initialization.
* It also locates and loads the BL2 raw binary image in the trusted DRAM. Only
* called by the primary cpu after a cold boot.
* TODO: Add support for alternative image load mechanism e.g using virtio/elf
* loader etc.
******************************************************************************/
* It also queries the platform to load and run next BL image. Only called
* by the primary cpu after a cold boot.
******************************************************************************/
void bl1_main(void)
{
unsigned int image_id;
/* Announce our arrival */
NOTICE(FIRMWARE_WELCOME_STR);
NOTICE("BL1: %s\n", version_string);
@@ -119,11 +99,6 @@ void bl1_main(void)
INFO("BL1: RAM 0x%lx - 0x%lx\n", BL1_RAM_BASE, BL1_RAM_LIMIT);
image_info_t bl2_image_info = { {0} };
entry_point_info_t bl2_ep = { {0} };
meminfo_t *bl1_tzram_layout;
meminfo_t *bl2_tzram_layout = 0x0;
int err;
#if DEBUG
unsigned long val;
@@ -153,28 +128,65 @@ void bl1_main(void)
/* Perform remaining generic architectural setup from EL3 */
bl1_arch_setup();
#if TRUSTED_BOARD_BOOT
/* Initialize authentication module */
auth_mod_init();
#endif /* TRUSTED_BOARD_BOOT */
/* Perform platform setup in BL1. */
bl1_platform_setup();
SET_PARAM_HEAD(&bl2_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
SET_PARAM_HEAD(&bl2_ep, PARAM_EP, VERSION_1, 0);
/* Get the image id of next image to load and run. */
image_id = bl1_plat_get_next_image_id();
/*
* We currently interpret any image id other than
* BL2_IMAGE_ID as the start of firmware update.
*/
if (image_id == BL2_IMAGE_ID)
bl1_load_bl2();
else
NOTICE("BL1-FWU: *******FWU Process Started*******\n");
bl1_prepare_next_image(image_id);
}
/*******************************************************************************
* This function locates and loads the BL2 raw binary image in the trusted SRAM.
* Called by the primary cpu after a cold boot.
* TODO: Add support for alternative image load mechanism e.g using virtio/elf
* loader etc.
******************************************************************************/
void bl1_load_bl2(void)
{
image_desc_t *image_desc;
image_info_t *image_info;
entry_point_info_t *ep_info;
meminfo_t *bl1_tzram_layout;
meminfo_t *bl2_tzram_layout;
int err;
/* Get the image descriptor */
image_desc = bl1_plat_get_image_desc(BL2_IMAGE_ID);
assert(image_desc);
/* Get the image info */
image_info = &image_desc->image_info;
/* Get the entry point info */
ep_info = &image_desc->ep_info;
/* Find out how much free trusted ram remains after BL1 load */
bl1_tzram_layout = bl1_plat_sec_mem_layout();
INFO("BL1: Loading BL2\n");
#if TRUSTED_BOARD_BOOT
/* Initialize authentication module */
auth_mod_init();
#endif /* TRUSTED_BOARD_BOOT */
/* Load the BL2 image */
err = load_auth_image(bl1_tzram_layout,
BL2_IMAGE_ID,
BL2_BASE,
&bl2_image_info,
&bl2_ep);
image_info->image_base,
image_info,
ep_info);
if (err) {
ERROR("Failed to load BL2 firmware.\n");
@@ -191,11 +203,10 @@ void bl1_main(void)
bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->free_base;
bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout);
bl1_plat_set_bl2_ep_info(&bl2_image_info, &bl2_ep);
bl2_ep.args.arg1 = (unsigned long)bl2_tzram_layout;
bl1_run_bl2(&bl2_ep);
return;
ep_info->args.arg1 = (unsigned long)bl2_tzram_layout;
NOTICE("BL1: Booting BL2\n");
VERBOSE("BL1: BL2 memory layout address = 0x%llx\n",
(unsigned long long) bl2_tzram_layout);
}
/*******************************************************************************
@@ -216,3 +227,45 @@ void print_debug_loop_message(void)
NOTICE("BL1: Please connect the debugger to continue\n");
}
#endif
/*******************************************************************************
* Top level handler for servicing BL1 SMCs.
******************************************************************************/
register_t bl1_smc_handler(unsigned int smc_fid,
register_t x1,
register_t x2,
register_t x3,
register_t x4,
void *cookie,
void *handle,
unsigned int flags)
{
#if TRUSTED_BOARD_BOOT
/*
* Dispatch FWU calls to the FWU SMC handler and return its
* return value.
*/
if (is_fwu_fid(smc_fid)) {
return bl1_fwu_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
handle, flags);
}
#endif
switch (smc_fid) {
case BL1_SMC_CALL_COUNT:
SMC_RET1(handle, BL1_NUM_SMC_CALLS);
case BL1_SMC_UID:
SMC_UUID_RET(handle, bl1_svc_uid);
case BL1_SMC_VERSION:
SMC_RET1(handle, BL1_SMC_MAJOR_VER | BL1_SMC_MINOR_VER);
default:
break;
}
WARN("Unimplemented BL1 SMC Call: 0x%x \n", smc_fid);
SMC_RET1(handle, SMC_UNK);
}
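/*
 * Illustrative sketch, not part of this patch: a lower-EL caller could
 * probe this interface before relying on it (fwu_smc() assumed as above):
 *
 *     // Returns BL1_SMC_MAJOR_VER | BL1_SMC_MINOR_VER if implemented,
 *     // SMC_UNK otherwise.
 *     if (fwu_smc(BL1_SMC_VERSION, 0, 0, 0, 0) != SMC_UNK) {
 *         // Safe to use the BL1/FWU SMC interface.
 *     }
 */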
@@ -31,6 +31,8 @@
#ifndef __BL1_PRIVATE_H__
#define __BL1_PRIVATE_H__
#include <types.h>
/*******************************************************************************
* Declarations of linker defined symbols which will tell us where BL1 lives
* in Trusted RAM
@@ -46,4 +48,14 @@ extern uint64_t __BL1_RAM_END__;
void bl1_arch_setup(void);
void bl1_arch_next_el_setup(void);
void bl1_prepare_next_image(unsigned int image_id);
register_t bl1_fwu_smc_handler(unsigned int smc_fid,
register_t x1,
register_t x2,
register_t x3,
register_t x4,
void *cookie,
void *handle,
unsigned int flags);
#endif /* __BL1_PRIVATE_H__ */
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <bl1.h>
#include <bl_common.h>
#include <platform_def.h>
image_desc_t bl1_tbbr_image_descs[] = {
{
.image_id = FWU_CERT_ID,
.image_info.h.attr = SET_EXEC_STATE(NON_EXECUTABLE),
.image_info.image_base = BL2_BASE,
.ep_info.h.attr = SET_SEC_STATE(SECURE),
},
#if NS_BL1U_BASE
{
.image_id = NS_BL1U_IMAGE_ID,
.image_info.h.attr = SET_EXEC_STATE(EXECUTABLE),
.image_info.image_base = NS_BL1U_BASE,
.ep_info.h.attr = SET_SEC_STATE(NON_SECURE),
.ep_info.pc = NS_BL1U_BASE,
},
#endif
#if SCP_BL2U_BASE
{
.image_id = SCP_BL2U_IMAGE_ID,
.image_info.h.attr = SET_EXEC_STATE(NON_EXECUTABLE),
.image_info.image_base = SCP_BL2U_BASE,
.ep_info.h.attr = SET_SEC_STATE(SECURE),
},
#endif
#if BL2U_BASE
{
.image_id = BL2U_IMAGE_ID,
.image_info.h.attr = SET_EXEC_STATE(EXECUTABLE),
.image_info.image_base = BL2U_BASE,
.ep_info.h.attr = SET_SEC_STATE(SECURE),
.ep_info.pc = BL2U_BASE,
},
#endif
#if NS_BL2U_BASE
{
.image_id = NS_BL2U_IMAGE_ID,
.image_info.h.attr = SET_EXEC_STATE(NON_EXECUTABLE),
.image_info.image_base = NS_BL2U_BASE,
.ep_info.h.attr = SET_SEC_STATE(NON_SECURE),
},
#endif
BL2_IMAGE_DESC,
{
.image_id = INVALID_IMAGE_ID,
}
};
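/*
 * Illustrative sketch, not part of this patch: the platform hook
 * bl1_plat_get_image_desc() used throughout bl1_fwu.c can be pictured as a
 * linear scan of this table, terminated by the INVALID_IMAGE_ID sentinel:
 *
 *     image_desc_t *bl1_plat_get_image_desc(unsigned int image_id)
 *     {
 *         unsigned int index = 0;
 *
 *         while (bl1_tbbr_image_descs[index].image_id != INVALID_IMAGE_ID) {
 *             if (bl1_tbbr_image_descs[index].image_id == image_id)
 *                 return &bl1_tbbr_image_descs[index];
 *             index++;
 *         }
 *         return NULL;
 *     }
 */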
@@ -39,13 +39,12 @@
func bl2_entrypoint
/*---------------------------------------------
* Store the extents of the tzram available to
* BL2 for future use. Use the opcode param to
* allow implementing other functions if needed.
* Save the extents of the tzram available to
* BL2 (passed in x1) for future use.
* x0 is not currently used.
* ---------------------------------------------
*/
mov x20, x0
mov x21, x1
*/
mov x20, x1
/* ---------------------------------------------
* Set the exception vector to something sane.
@@ -73,14 +72,6 @@ func bl2_entrypoint
msr sctlr_el1, x0
isb
/* ---------------------------------------------
* Check the opcodes out of paranoia.
* ---------------------------------------------
*/
mov x0, #RUN_IMAGE
cmp x0, x20
b.ne _panic
/* ---------------------------------------------
* Invalidate the RW memory used by the BL2
* image. This includes the data and NOBITS
@@ -126,7 +117,7 @@
* specific early arch. setup e.g. mmu setup
* ---------------------------------------------
*/
mov x0, x21
mov x0, x20
bl bl2_early_platform_setup
bl bl2_plat_arch_setup
......
@@ -32,6 +32,7 @@
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl1.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
@@ -281,5 +282,5 @@ void bl2_main(void)
* the BL3-2 (if present) and BL3-3 software images will be passed to
* BL3-1 as an argument.
*/
smc(RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0);
smc(BL1_SMC_RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0);
}
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
.globl bl2u_entrypoint
func bl2u_entrypoint
/*---------------------------------------------
* Store the extents of the tzram available to
* BL2U and other platform-specific information
* for future use. x0 is currently not used.
* ---------------------------------------------
*/
mov x20, x1
mov x21, x2
/* ---------------------------------------------
* Set the exception vector to something sane.
* ---------------------------------------------
*/
adr x0, early_exceptions
msr vbar_el1, x0
isb
/* ---------------------------------------------
* Enable the SError interrupt now that the
* exception vectors have been setup.
* ---------------------------------------------
*/
msr daifclr, #DAIF_ABT_BIT
/* ---------------------------------------------
* Enable the instruction cache, stack pointer
* and data access alignment checks
* ---------------------------------------------
*/
mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
orr x0, x0, x1
msr sctlr_el1, x0
isb
/* ---------------------------------------------
* Invalidate the RW memory used by the BL2U
* image. This includes the data and NOBITS
* sections. This is done to safeguard against
* possible corruption of this memory by dirty
* cache lines in a system cache as a result of
* use by an earlier boot loader stage.
* ---------------------------------------------
*/
adr x0, __RW_START__
adr x1, __RW_END__
sub x1, x1, x0
bl inv_dcache_range
/* ---------------------------------------------
* Zero out NOBITS sections. There are 2 of them:
* - the .bss section;
* - the coherent memory section.
* ---------------------------------------------
*/
ldr x0, =__BSS_START__
ldr x1, =__BSS_SIZE__
bl zeromem16
/* --------------------------------------------
* Allocate a stack whose memory will be marked
* as Normal-IS-WBWA when the MMU is enabled.
* There is no risk of reading stale stack
* memory after enabling the MMU as only the
* primary cpu is running at the moment.
* --------------------------------------------
*/
bl plat_set_my_stack
/* ---------------------------------------------
* Perform early platform setup & platform
* specific early arch. setup e.g. mmu setup
* ---------------------------------------------
*/
mov x0, x20
mov x1, x21
bl bl2u_early_platform_setup
bl bl2u_plat_arch_setup
/* ---------------------------------------------
* Jump to bl2u_main function.
* ---------------------------------------------
*/
bl bl2u_main
_panic:
b _panic
endfunc bl2u_entrypoint
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <platform_def.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2u_entrypoint)
MEMORY {
RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
}
SECTIONS
{
. = BL2U_BASE;
ASSERT(. == ALIGN(4096),
"BL2U_BASE address is not aligned on a page boundary.")
ro . : {
__RO_START__ = .;
*bl2u_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
*/
. = NEXT(4096);
__RO_END__ = .;
} >RAM
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
.data . : {
__DATA_START__ = .;
*(.data*)
__DATA_END__ = .;
} >RAM
stacks (NOLOAD) : {
__STACKS_START__ = .;
*(tzfw_normal_stacks)
__STACKS_END__ = .;
} >RAM
/*
* The .bss section gets initialised to 0 at runtime.
* Its base address must be 16-byte aligned.
*/
.bss : ALIGN(16) {
__BSS_START__ = .;
*(SORT_BY_ALIGNMENT(.bss*))
*(COMMON)
__BSS_END__ = .;
} >RAM
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
* the .bss section and eliminates the unnecessary zero init.
*/
xlat_table (NOLOAD) : {
*(xlat_table)
} >RAM
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(4096) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
*/
. = NEXT(4096);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL2U_END__ = .;
__BSS_SIZE__ = SIZEOF(.bss);
ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.")
}
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
BL2U_SOURCES += bl2u/bl2u_main.c \
bl2u/aarch64/bl2u_entrypoint.S \
common/aarch64/early_exceptions.S
BL2U_LINKERFILE := bl2u/bl2u.ld.S
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl_common.h>
#include <bl1.h>
#include <debug.h>
#include <platform.h>
#include <platform_def.h>
#include <stdint.h>
/*******************************************************************************
* This function is responsible for:
* loading SCP_BL2U if the platform has defined SCP_BL2U_BASE;
* performing platform setup;
* returning to EL3 via an SMC to BL1.
******************************************************************************/
void bl2u_main(void)
{
NOTICE("BL2U: %s\n", version_string);
NOTICE("BL2U: %s\n", build_message);
#if SCP_BL2U_BASE
int rc;
/* Load the subsequent bootloader images */
rc = bl2u_plat_handle_scp_bl2u();
if (rc) {
ERROR("Failed to load SCP_BL2U (%i)\n", rc);
panic();
}
#endif
/* Perform platform setup in BL2U after loading SCP_BL2U */
bl2u_platform_setup();
/*
* Indicate that BL2U is done and return to the
* normal world via an SMC to BL1.
* x1 could be passed to Normal world,
* so DO NOT pass any secret information.
*/
smc(FWU_SMC_SEC_IMAGE_DONE, 0, 0, 0, 0, 0, 0, 0);
wfi();
}
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,6 @@
#include <runtime_svc.h>
.globl runtime_exceptions
.globl el3_exit
/* -----------------------------------------------------
* Handle SMC exceptions separately from other sync.
@@ -426,38 +425,7 @@ smc_handler64:
#endif
blr x15
/* -----------------------------------------------------
* This routine assumes that the SP_EL3 is pointing to
* a valid context structure from where the gp regs and
* other special registers can be retrieved.
*
* Keep it in the same section as smc_handler as this
* function uses a fall-through to el3_exit
* -----------------------------------------------------
*/
el3_exit: ; .type el3_exit, %function
/* -----------------------------------------------------
* Save the current SP_EL0 i.e. the EL3 runtime stack
* which will be used for handling the next SMC. Then
* switch to SP_EL3
* -----------------------------------------------------
*/
mov x17, sp
msr spsel, #1
str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
/* -----------------------------------------------------
* Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
* -----------------------------------------------------
*/
ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
msr scr_el3, x18
msr spsr_el3, x16
msr elr_el3, x17
/* Restore saved general purpose registers and return */
b restore_gp_registers_eret
b el3_exit
smc_unknown:
/*
@@ -479,51 +447,3 @@ rt_svc_fw_critical_error:
msr spsel, #1 /* Switch to SP_ELx */
bl report_unhandled_exception
endfunc smc_handler
/* -----------------------------------------------------
* The following functions are used to save and restore
* all the general purpose registers. Ideally we would
* only save and restore the callee saved registers when
* a world switch occurs but that type of implementation
* is more complex. So currently we will always save and
* restore these registers on entry and exit of EL3.
* These are not macros to ensure their invocation fits
* within the 32 instructions per exception vector.
* -----------------------------------------------------
*/
func save_gp_registers
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
save_x18_to_x29_sp_el0
ret
endfunc save_gp_registers
func restore_gp_registers_eret
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
restore_gp_registers_callee_eret:
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
eret
endfunc restore_gp_registers_eret
@@ -29,16 +29,17 @@
#
BL31_SOURCES += bl31/bl31_main.c \
bl31/context_mgmt.c \
bl31/cpu_data_array.c \
bl31/runtime_svc.c \
bl31/interrupt_mgmt.c \
bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
bl31/aarch64/cpu_data.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
bl31/bl31_context_mgmt.c \
common/aarch64/context.S \
common/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/locks/exclusive/spinlock.S \
services/std_svc/std_svc_setup.c \
......
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <platform.h>
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the calling CPU that was set as the context for the specified security
* state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context(uint32_t security_state)
{
assert(security_state <= NON_SECURE);
return get_cpu_data(cpu_context[security_state]);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the calling CPU.
******************************************************************************/
void cm_set_context(void *context, uint32_t security_state)
{
assert(security_state <= NON_SECURE);
set_cpu_data(cpu_context[security_state], context);
}
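/*
 * Illustrative sketch only, not part of this patch: a typical pairing of
 * the two calling-CPU accessors above. 'spd_get_or_publish_my_context' and
 * its 'spd_ctx' argument are hypothetical SPD-owned names.
 */
static void *spd_get_or_publish_my_context(cpu_context_t *spd_ctx)
{
void *ctx = cm_get_context(SECURE);
if (ctx == NULL) {
/* First use on this CPU: publish the SPD's secure context */
cm_set_context(spd_ctx, SECURE);
ctx = spd_ctx;
}
return ctx;
}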
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by `cpu_idx` that was set as the context for the
* specified security state. NULL is returned if no such structure has been
* specified.
******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by CPU index.
******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
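/*
 * Illustrative sketch only, not part of this patch: a power-management
 * style hook initialising another CPU's non-secure context by linear index
 * before powering it on. 'spd_set_ns_context_for', 'target_idx' and
 * 'ns_ctx' are hypothetical names.
 */
static void spd_set_ns_context_for(unsigned int target_idx, cpu_context_t *ns_ctx)
{
cm_set_context_by_index(target_idx, ns_ctx, NON_SECURE);
assert(cm_get_context_by_index(target_idx, NON_SECURE) == ns_ctx);
}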
#if !ERROR_DEPRECATED
/*
* These context management helpers are deprecated but are maintained for use
* by SPDs which have not migrated to the new API. If ERROR_DEPRECATED is
* enabled, they are excluded from the build so as to force such users to
* migrate.
*/
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
* security state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by MPIDR.
******************************************************************************/
void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
}
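/*
 * Migration sketch, illustrative only: a call site using the deprecated
 * MPIDR-based helpers above can move to the index-based API directly.
 * 'ctx' and 'target_mpidr' are hypothetical:
 *
 *	ctx = cm_get_context_by_mpidr(target_mpidr, SECURE);
 *
 * becomes
 *
 *	ctx = cm_get_context_by_index(platform_get_core_pos(target_mpidr),
 *					SECURE);
 */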
/*******************************************************************************
* The following function provides backward compatibility for SPDs that still
* use the existing cm library routines. It is expected to be invoked to
* initialize the cpu_context of the CPU specified by MPIDR on first use.
******************************************************************************/
void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
{
if ((mpidr & MPIDR_AFFINITY_MASK) ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
cm_init_my_context(ep);
else
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
}
#endif
\ No newline at end of file
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -32,6 +32,17 @@
#include <asm_macros.S>
#include <context.h>
.global el1_sysregs_context_save
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
.global fpregs_context_restore
#endif
.global save_gp_registers
.global restore_gp_registers_eret
.global restore_gp_registers_callee_eret
.global el3_exit
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
......@@ -40,7 +51,6 @@
* the register context will be saved.
* -----------------------------------------------------
*/
.global el1_sysregs_context_save
func el1_sysregs_context_save
mrs x9, spsr_el1
......@@ -127,7 +137,6 @@ endfunc el1_sysregs_context_save
* from where the register context will be restored
* -----------------------------------------------------
*/
.global el1_sysregs_context_restore
func el1_sysregs_context_restore
ldp x9, x10, [x0, #CTX_SPSR_EL1]
......@@ -225,7 +234,6 @@ endfunc el1_sysregs_context_restore
* -----------------------------------------------------
*/
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
func fpregs_context_save
stp q0, q1, [x0, #CTX_FP_Q0]
stp q2, q3, [x0, #CTX_FP_Q2]
......@@ -269,7 +277,6 @@ endfunc fpregs_context_save
* TODO: Revisit when VFP is used in secure world
* -----------------------------------------------------
*/
.global fpregs_context_restore
func fpregs_context_restore
ldp q0, q1, [x0, #CTX_FP_Q0]
ldp q2, q3, [x0, #CTX_FP_Q2]
......@@ -303,3 +310,92 @@ func fpregs_context_restore
ret
endfunc fpregs_context_restore
#endif /* CTX_INCLUDE_FPREGS */
/* -----------------------------------------------------
* The following functions are used to save and restore
* all the general purpose registers. Ideally we would
* only save and restore the callee saved registers when
* a world switch occurs but that type of implementation
* is more complex. So currently we will always save and
* restore these registers on entry and exit of EL3.
* These are not macros to ensure their invocation fits
* within the 32 instructions per exception vector.
* clobbers: x18
* -----------------------------------------------------
*/
func save_gp_registers
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
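/*
 * x18 has already been captured in the context image above, so it is
 * free to use as scratch for saving SP_EL0; its live value is lost,
 * hence the 'clobbers: x18' note in the function header.
 */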
mrs x18, sp_el0
str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
ret
endfunc save_gp_registers
func restore_gp_registers_eret
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
b restore_gp_registers_callee_eret
endfunc restore_gp_registers_eret
func restore_gp_registers_callee_eret
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
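/*
 * CTX_GPREG_SP_EL0 immediately follows CTX_GPREG_LR in the context, so
 * this single ldp pulls LR into x30 and the saved SP_EL0 into x17;
 * x16/x17 are then restored last, just before the eret.
 */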
ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
eret
endfunc restore_gp_registers_callee_eret
/* -----------------------------------------------------
* This routine assumes that the SP_EL3 is pointing to
* a valid context structure from where the gp regs and
* other special registers can be retrieved.
* -----------------------------------------------------
*/
func el3_exit
/* -----------------------------------------------------
* Save the current SP_EL0 i.e. the EL3 runtime stack
* which will be used for handling the next SMC. Then
* switch to SP_EL3
* -----------------------------------------------------
*/
mov x17, sp
msr spsel, #1
str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
/* -----------------------------------------------------
* Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
* -----------------------------------------------------
*/
ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
msr scr_el3, x18
msr spsr_el3, x16
msr elr_el3, x17
/* Restore saved general purpose registers and return */
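/*
 * A plain 'b' suffices here: restore_gp_registers_eret never returns,
 * it ends in an eret using the SPSR_EL3/ELR_EL3 programmed above.
 */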
b restore_gp_registers_eret
endfunc el3_exit
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
......@@ -29,7 +29,7 @@
*/
#include <asm_macros.S>
#include <runtime_svc.h>
#include <bl_common.h>
.globl early_exceptions
......