Commit 44abeaa6 authored by danh-arm's avatar danh-arm Committed by GitHub
Browse files

Merge pull request #713 from yatharth-arm/yk/AArch32_porting

Add basic AArch32 support for BL1 & BL2
parents 131f7cd4 03a3042b
...@@ -115,7 +115,8 @@ ENABLE_PSCI_STAT := 0 ...@@ -115,7 +115,8 @@ ENABLE_PSCI_STAT := 0
# Whether code and read-only data should be put on separate memory pages. # Whether code and read-only data should be put on separate memory pages.
# The platform Makefile is free to override this value. # The platform Makefile is free to override this value.
SEPARATE_CODE_AND_RODATA := 0 SEPARATE_CODE_AND_RODATA := 0
# Flag to enable new version of image loading
LOAD_IMAGE_V2 := 0
################################################################################ ################################################################################
# Checkpatch script options # Checkpatch script options
...@@ -187,6 +188,10 @@ ifneq (${GENERATE_COT},0) ...@@ -187,6 +188,10 @@ ifneq (${GENERATE_COT},0)
FWU_FIP_DEPS += fwu_certificates FWU_FIP_DEPS += fwu_certificates
endif endif
# For AArch32, enable new version of image loading.
ifeq (${ARCH},aarch32)
LOAD_IMAGE_V2 := 1
endif
################################################################################ ################################################################################
# Toolchain # Toolchain
...@@ -355,6 +360,21 @@ ifeq (${NEED_BL33},yes) ...@@ -355,6 +360,21 @@ ifeq (${NEED_BL33},yes)
endif endif
endif endif
# TRUSTED_BOARD_BOOT is currently not supported when LOAD_IMAGE_V2 is enabled.
ifeq (${LOAD_IMAGE_V2},1)
ifeq (${TRUSTED_BOARD_BOOT},1)
$(error "TRUSTED_BOARD_BOOT is currently not supported \
for LOAD_IMAGE_V2=1")
endif
endif
# For AArch32, LOAD_IMAGE_V2 must be enabled.
ifeq (${ARCH},aarch32)
ifeq (${LOAD_IMAGE_V2}, 0)
$(error "For AArch32, LOAD_IMAGE_V2 must be enabled.")
endif
endif
################################################################################ ################################################################################
# Process platform overrideable behaviour # Process platform overrideable behaviour
...@@ -445,6 +465,7 @@ $(eval $(call assert_boolean,PL011_GENERIC_UART)) ...@@ -445,6 +465,7 @@ $(eval $(call assert_boolean,PL011_GENERIC_UART))
$(eval $(call assert_boolean,ENABLE_PMF)) $(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT)) $(eval $(call assert_boolean,ENABLE_PSCI_STAT))
$(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA)) $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
$(eval $(call assert_boolean,LOAD_IMAGE_V2))
################################################################################ ################################################################################
...@@ -475,6 +496,7 @@ $(eval $(call add_define,PL011_GENERIC_UART)) ...@@ -475,6 +496,7 @@ $(eval $(call add_define,PL011_GENERIC_UART))
$(eval $(call add_define,ENABLE_PMF)) $(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT)) $(eval $(call add_define,ENABLE_PSCI_STAT))
$(eval $(call add_define,SEPARATE_CODE_AND_RODATA)) $(eval $(call add_define,SEPARATE_CODE_AND_RODATA))
$(eval $(call add_define,LOAD_IMAGE_V2))
# Define the EL3_PAYLOAD_BASE flag only if it is provided. # Define the EL3_PAYLOAD_BASE flag only if it is provided.
ifdef EL3_PAYLOAD_BASE ifdef EL3_PAYLOAD_BASE
$(eval $(call add_define,EL3_PAYLOAD_BASE)) $(eval $(call add_define,EL3_PAYLOAD_BASE))
...@@ -495,8 +517,6 @@ endif ...@@ -495,8 +517,6 @@ endif
################################################################################ ################################################################################
# Include BL specific makefiles # Include BL specific makefiles
################################################################################ ################################################################################
# BL31 is not needed and BL1, BL2 & BL2U are not currently supported in AArch32
ifneq (${ARCH},aarch32)
ifdef BL1_SOURCES ifdef BL1_SOURCES
NEED_BL1 := yes NEED_BL1 := yes
include bl1/bl1.mk include bl1/bl1.mk
...@@ -507,6 +527,8 @@ NEED_BL2 := yes ...@@ -507,6 +527,8 @@ NEED_BL2 := yes
include bl2/bl2.mk include bl2/bl2.mk
endif endif
# For AArch32, BL31 is not applicable, and BL2U is not supported at present.
ifneq (${ARCH},aarch32)
ifdef BL2U_SOURCES ifdef BL2U_SOURCES
NEED_BL2U := yes NEED_BL2U := yes
include bl2u/bl2u.mk include bl2u/bl2u.mk
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************************
 * Function that does the first bit of architectural setup for BL1.
 * Currently a no-op on AArch32: the required setup (endianness, exception
 * vectors, C runtime) is done in the entrypoint code before this is called.
 * TODO: add any remaining architectural setup here as support grows.
 ******************************************************************************/
void bl1_arch_setup(void)
{
}
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <smcc_helpers.h>
/*
 * Following arrays will be used for context management.
 * There are 2 instances, for the Secure and Non-Secure contexts,
 * indexed by security state.
 */
static cpu_context_t bl1_cpu_context[2];
static smc_ctx_t bl1_smc_context[2];

/* Pointer to the CPU context selected for the next BL image. */
static void *bl1_next_cpu_context_ptr;

/* Pointer to the SMC context selected for the next BL image. */
static void *bl1_next_smc_context_ptr;
/* Following functions are used for SMC context handling */
/* Look up the SMC context corresponding to the given security state. */
void *smc_get_ctx(int security_state)
{
	void *ctx;

	assert(sec_state_is_valid(security_state));

	ctx = &bl1_smc_context[security_state];
	return ctx;
}
/* Record which SMC context should be restored on the next world switch. */
void smc_set_next_ctx(int security_state)
{
	void *next;

	assert(sec_state_is_valid(security_state));

	next = &bl1_smc_context[security_state];
	bl1_next_smc_context_ptr = next;
}
/* Return the SMC context selected by the last call to smc_set_next_ctx(). */
void *smc_get_next_ctx(void)
{
	return bl1_next_smc_context_ptr;
}
/* Following functions are used for CPU context handling */
/* Look up the CPU context corresponding to the given security state. */
void *cm_get_context(uint32_t security_state)
{
	void *ctx;

	assert(sec_state_is_valid(security_state));

	ctx = &bl1_cpu_context[security_state];
	return ctx;
}
/* Record which CPU context should be restored on the next world switch. */
void cm_set_next_context(void *cpu_context)
{
	assert(cpu_context != NULL);

	bl1_next_cpu_context_ptr = cpu_context;
}
/* Return the CPU context selected by the last call to cm_set_next_context(). */
void *cm_get_next_context(void)
{
	return bl1_next_cpu_context_ptr;
}
/*******************************************************************************
 * Following function copies GP regs r0-r3, lr and spsr,
 * from the CPU context to the SMC context structures.
 * (Note: only r0-r3 are copied, not r0-r4.)
 ******************************************************************************/
static void copy_cpu_ctx_to_smc_ctx(const regs_t *cpu_reg_ctx,
		smc_ctx_t *next_smc_ctx)
{
	/* General purpose registers r0-r3. */
	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
	next_smc_ctx->r1 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R1);
	next_smc_ctx->r2 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R2);
	next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3);
	/* Monitor-mode return address and saved program status. */
	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
}
/*******************************************************************************
 * Following function flushes the SMC & CPU context pointer and its data.
 * This allows the exit path to read the contexts after caches have been
 * disabled.
 ******************************************************************************/
static void flush_smc_and_cpu_ctx(void)
{
	/* Flush the next-SMC-context pointer itself... */
	flush_dcache_range((uintptr_t)&bl1_next_smc_context_ptr,
		sizeof(bl1_next_smc_context_ptr));
	/* ...and the smc_ctx_t it points to. */
	flush_dcache_range((uintptr_t)bl1_next_smc_context_ptr,
		sizeof(smc_ctx_t));

	/* Flush the next-CPU-context pointer itself... */
	flush_dcache_range((uintptr_t)&bl1_next_cpu_context_ptr,
		sizeof(bl1_next_cpu_context_ptr));
	/* ...and the cpu_context_t it points to. */
	flush_dcache_range((uintptr_t)bl1_next_cpu_context_ptr,
		sizeof(cpu_context_t));
}
/*******************************************************************************
 * This function prepares the context for Secure/Normal world images.
 * Normal world images are transitioned to HYP(if supported) else SVC.
 * Secure world images always enter in SVC mode.
 ******************************************************************************/
void bl1_prepare_next_image(unsigned int image_id)
{
	unsigned int security_state;
	image_desc_t *image_desc;
	entry_point_info_t *next_bl_ep;

	/* Get the image descriptor. */
	image_desc = bl1_plat_get_image_desc(image_id);
	assert(image_desc);

	/* Get the entry point info. */
	next_bl_ep = &image_desc->ep_info;

	/* Get the image security state. */
	security_state = GET_SECURITY_STATE(next_bl_ep->h.attr);

	/* Prepare the SPSR for the next BL image. */
	if (security_state == SECURE) {
		next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
			SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	} else {
		/* Use HYP mode if supported else use SVC. */
		if (GET_VIRT_EXT(read_id_pfr1()) == MODE32_hyp) {
			next_bl_ep->spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM,
				SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
		} else {
			next_bl_ep->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
				SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
		}
	}

	/* Allow platform to make change */
	bl1_plat_set_ep_info(image_id, next_bl_ep);

	/* Prepare the cpu context for the next BL image. */
	cm_init_my_context(next_bl_ep);
	cm_prepare_el3_exit(security_state);
	cm_set_next_context(cm_get_context(security_state));

	/* Prepare the smc context for the next BL image. */
	smc_set_next_ctx(security_state);
	/* Mirror the prepared CPU context into the SMC context. */
	copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()),
		smc_get_next_ctx());

	/*
	 * Flush the SMC & CPU context and the (next)pointers,
	 * to access them after caches are disabled.
	 */
	flush_smc_and_cpu_ctx();

	/* Indicate that image is in execution state. */
	image_desc->state = IMAGE_STATE_EXECUTED;

	print_entry_point_info(next_bl_ep);
}
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <smcc_helpers.h>
#include <smcc_macros.S>
.globl bl1_vector_table
.globl bl1_entrypoint
/* -----------------------------------------------------
 * Setup the vector table to support SVC & MON mode.
 * Only Reset and SMC are handled; every other vector
 * branches to report_exception.
 * -----------------------------------------------------
 */
vector_base bl1_vector_table
	b	bl1_entrypoint			/* Reset */
	b	report_exception		/* Undef */
	b	bl1_aarch32_smc_handler		/* SMC call */
	b	report_exception		/* Prefetch abort */
	b	report_exception		/* Data abort */
	b	report_exception		/* Reserved */
	b	report_exception		/* IRQ */
	b	report_exception		/* FIQ */
/* -----------------------------------------------------
 * bl1_entrypoint() is the entry point into the trusted
 * firmware code when a cpu is released from warm or
 * cold reset.
 * -----------------------------------------------------
 */
func bl1_entrypoint
	/* ---------------------------------------------------------------------
	 * If the reset address is programmable then bl1_entrypoint() is
	 * executed only on the cold boot path. Therefore, we can skip the warm
	 * boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=bl1_vector_table

	/* -----------------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * -----------------------------------------------------
	 */
	bl	bl1_early_platform_setup
	bl	bl1_plat_arch_setup

	/* -----------------------------------------------------
	 * Jump to main function.
	 * -----------------------------------------------------
	 */
	bl	bl1_main

	/* -----------------------------------------------------
	 * Jump to next image.
	 * -----------------------------------------------------
	 */

	/*
	 * MMU needs to be disabled because both BL1 and BL2 execute
	 * in PL1, and therefore share the same address space.
	 * BL2 will initialize the address space according to its
	 * own requirement.
	 */
	bl	disable_mmu_icache_secure
	/* Invalidate the TLB now the MMU is off. */
	stcopr	r0, TLBIALL
	dsb	sy
	isb

	/* Get the cpu_context for next BL image; pointer is returned in r0. */
	bl	cm_get_next_context

	/* Restore the SCR saved in the CPU context. */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/*
	 * Get the smc_context for next BL image,
	 * program the gp/system registers and exit
	 * secure monitor mode
	 */
	bl	smc_get_next_ctx
	smcc_restore_gp_mode_regs
	eret
endfunc bl1_entrypoint
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl1.h>
#include <bl_common.h>
.globl bl1_aarch32_smc_handler
func bl1_aarch32_smc_handler
	/* ------------------------------------------------
	 * SMC in BL1 is handled assuming that the MMU is
	 * turned off by BL2.
	 * ------------------------------------------------
	 */

	/* ----------------------------------------------
	 * Only RUN_IMAGE SMC is supported; any other
	 * function id is reported as an exception.
	 * ----------------------------------------------
	 */
	mov	r8, #BL1_SMC_RUN_IMAGE
	cmp	r8, r0
	blne	report_exception

	/* ------------------------------------------------
	 * Make sure only Secure world reaches here.
	 * (SCR.NS must be clear.)
	 * ------------------------------------------------
	 */
	ldcopr	r8, SCR
	tst	r8, #SCR_NS_BIT
	blne	report_exception

	/* ---------------------------------------------------------------------
	 * Pass control to next secure image.
	 * Here it expects r1 to contain the address of a entry_point_info_t
	 * structure describing the BL entrypoint.
	 * ---------------------------------------------------------------------
	 */
	mov	r8, r1
	mov	r0, r1
	bl	bl1_print_next_bl_ep_info

#if SPIN_ON_BL1_EXIT
	bl	print_debug_loop_message
debug_loop:
	b	debug_loop
#endif

	/* Let the platform perform any final actions before exit. */
	mov	r0, r8
	bl	bl1_plat_prepare_exit

	/* Invalidate the TLB before jumping to the next image. */
	stcopr	r0, TLBIALL
	dsb	sy
	isb

	/*
	 * Extract PC and SPSR based on struct `entry_point_info_t`
	 * and load it in LR and SPSR registers respectively.
	 */
	ldr	lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
	ldr	r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
	msr	spsr, r1

	/* Load the entrypoint arguments into r0-r3 and exit monitor mode. */
	add	r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
	ldm	r8, {r0, r1, r2, r3}
	eret
endfunc bl1_aarch32_smc_handler
...@@ -192,15 +192,15 @@ func smc_handler64 ...@@ -192,15 +192,15 @@ func smc_handler64
mov sp, x30 mov sp, x30
/* --------------------------------------------------------------------- /* ---------------------------------------------------------------------
* Pass EL3 control to BL31. * Pass EL3 control to next BL image.
* Here it expects X1 with the address of a entry_point_info_t * Here it expects X1 with the address of a entry_point_info_t
* structure describing the BL31 entrypoint. * structure describing the next BL image entrypoint.
* --------------------------------------------------------------------- * ---------------------------------------------------------------------
*/ */
mov x20, x1 mov x20, x1
mov x0, x20 mov x0, x20
bl bl1_print_bl31_ep_info bl bl1_print_next_bl_ep_info
ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET] ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
msr elr_el3, x0 msr elr_el3, x0
......
...@@ -29,15 +29,19 @@ ...@@ -29,15 +29,19 @@
# #
BL1_SOURCES += bl1/bl1_main.c \ BL1_SOURCES += bl1/bl1_main.c \
bl1/aarch64/bl1_arch_setup.c \ bl1/${ARCH}/bl1_arch_setup.c \
bl1/aarch64/bl1_entrypoint.S \ bl1/${ARCH}/bl1_context_mgmt.c \
bl1/aarch64/bl1_exceptions.S \ bl1/${ARCH}/bl1_entrypoint.S \
bl1/bl1_context_mgmt.c \ bl1/${ARCH}/bl1_exceptions.S \
lib/cpus/aarch64/cpu_helpers.S \ lib/cpus/${ARCH}/cpu_helpers.S \
lib/el3_runtime/aarch64/context.S \ lib/el3_runtime/${ARCH}/context_mgmt.c \
lib/el3_runtime/aarch64/context_mgmt.c \
plat/common/plat_bl1_common.c plat/common/plat_bl1_common.c
ifeq (${ARCH},aarch64)
BL1_SOURCES += lib/el3_runtime/aarch64/context.S
endif
ifeq (${TRUSTED_BOARD_BOOT},1) ifeq (${TRUSTED_BOARD_BOOT},1)
BL1_SOURCES += bl1/bl1_fwu.c BL1_SOURCES += bl1/bl1_fwu.c
endif endif
......
/* /*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -64,11 +64,19 @@ static void bl1_load_bl2(void); ...@@ -64,11 +64,19 @@ static void bl1_load_bl2(void);
void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout, void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
meminfo_t *bl2_mem_layout) meminfo_t *bl2_mem_layout)
{ {
const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;
assert(bl1_mem_layout != NULL); assert(bl1_mem_layout != NULL);
assert(bl2_mem_layout != NULL); assert(bl2_mem_layout != NULL);
#if LOAD_IMAGE_V2
/*
* Remove BL1 RW data from the scope of memory visible to BL2.
* This is assuming BL1 RW data is at the top of bl1_mem_layout.
*/
assert(BL1_RW_BASE > bl1_mem_layout->total_base);
bl2_mem_layout->total_base = bl1_mem_layout->total_base;
bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
#else
/* Check that BL1's memory is lying outside of the free memory */ /* Check that BL1's memory is lying outside of the free memory */
assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) || assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
(BL1_RAM_BASE >= bl1_mem_layout->free_base + (BL1_RAM_BASE >= bl1_mem_layout->free_base +
...@@ -79,7 +87,8 @@ void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout, ...@@ -79,7 +87,8 @@ void bl1_init_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
reserve_mem(&bl2_mem_layout->total_base, reserve_mem(&bl2_mem_layout->total_base,
&bl2_mem_layout->total_size, &bl2_mem_layout->total_size,
BL1_RAM_BASE, BL1_RAM_BASE,
bl1_size); BL1_RAM_LIMIT - BL1_RAM_BASE);
#endif /* LOAD_IMAGE_V2 */
flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t)); flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
} }
...@@ -98,15 +107,20 @@ void bl1_main(void) ...@@ -98,15 +107,20 @@ void bl1_main(void)
NOTICE("BL1: %s\n", version_string); NOTICE("BL1: %s\n", version_string);
NOTICE("BL1: %s\n", build_message); NOTICE("BL1: %s\n", build_message);
INFO("BL1: RAM 0x%lx - 0x%lx\n", BL1_RAM_BASE, BL1_RAM_LIMIT); INFO("BL1: RAM %p - %p\n", (void *)BL1_RAM_BASE,
(void *)BL1_RAM_LIMIT);
#if DEBUG #if DEBUG
unsigned long val; u_register_t val;
/* /*
* Ensure that MMU/Caches and coherency are turned on * Ensure that MMU/Caches and coherency are turned on
*/ */
#ifdef AARCH32
val = read_sctlr();
#else
val = read_sctlr_el3(); val = read_sctlr_el3();
#endif
assert(val & SCTLR_M_BIT); assert(val & SCTLR_M_BIT);
assert(val & SCTLR_C_BIT); assert(val & SCTLR_C_BIT);
assert(val & SCTLR_I_BIT); assert(val & SCTLR_I_BIT);
...@@ -182,6 +196,9 @@ void bl1_load_bl2(void) ...@@ -182,6 +196,9 @@ void bl1_load_bl2(void)
INFO("BL1: Loading BL2\n"); INFO("BL1: Loading BL2\n");
#if LOAD_IMAGE_V2
err = load_auth_image(BL2_IMAGE_ID, image_info);
#else
/* Load the BL2 image */ /* Load the BL2 image */
err = load_auth_image(bl1_tzram_layout, err = load_auth_image(bl1_tzram_layout,
BL2_IMAGE_ID, BL2_IMAGE_ID,
...@@ -189,6 +206,8 @@ void bl1_load_bl2(void) ...@@ -189,6 +206,8 @@ void bl1_load_bl2(void)
image_info, image_info,
ep_info); ep_info);
#endif /* LOAD_IMAGE_V2 */
if (err) { if (err) {
ERROR("Failed to load BL2 firmware.\n"); ERROR("Failed to load BL2 firmware.\n");
plat_error_handler(err); plat_error_handler(err);
...@@ -201,24 +220,33 @@ void bl1_load_bl2(void) ...@@ -201,24 +220,33 @@ void bl1_load_bl2(void)
* to BL2. BL2 will read the memory layout before using its * to BL2. BL2 will read the memory layout before using its
* memory for other purposes. * memory for other purposes.
*/ */
#if LOAD_IMAGE_V2
bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->total_base;
#else
bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->free_base; bl2_tzram_layout = (meminfo_t *) bl1_tzram_layout->free_base;
#endif /* LOAD_IMAGE_V2 */
bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout); bl1_init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout);
ep_info->args.arg1 = (unsigned long)bl2_tzram_layout; ep_info->args.arg1 = (uintptr_t)bl2_tzram_layout;
NOTICE("BL1: Booting BL2\n"); NOTICE("BL1: Booting BL2\n");
VERBOSE("BL1: BL2 memory layout address = 0x%llx\n", VERBOSE("BL1: BL2 memory layout address = %p\n",
(unsigned long long) bl2_tzram_layout); (void *) bl2_tzram_layout);
} }
/******************************************************************************* /*******************************************************************************
* Function called just before handing over to BL31 to inform the user about * Function called just before handing over to the next BL to inform the user
* the boot progress. In debug mode, also print details about the BL31 image's * about the boot progress. In debug mode, also print details about the BL
* execution context. * image's execution context.
******************************************************************************/ ******************************************************************************/
void bl1_print_bl31_ep_info(const entry_point_info_t *bl31_ep_info) void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
{ {
#ifdef AARCH32
NOTICE("BL1: Booting BL32\n");
#else
NOTICE("BL1: Booting BL31\n"); NOTICE("BL1: Booting BL31\n");
print_entry_point_info(bl31_ep_info); #endif /* AARCH32 */
print_entry_point_info(bl_ep_info);
} }
#if SPIN_ON_BL1_EXIT #if SPIN_ON_BL1_EXIT
......
...@@ -37,13 +37,13 @@ ...@@ -37,13 +37,13 @@
* Declarations of linker defined symbols which will tell us where BL1 lives * Declarations of linker defined symbols which will tell us where BL1 lives
* in Trusted ROM and RAM * in Trusted ROM and RAM
******************************************************************************/ ******************************************************************************/
extern uint64_t __BL1_ROM_END__; extern uintptr_t __BL1_ROM_END__;
#define BL1_ROM_END (uint64_t)(&__BL1_ROM_END__) #define BL1_ROM_END (uintptr_t)(&__BL1_ROM_END__)
extern uint64_t __BL1_RAM_START__; extern uintptr_t __BL1_RAM_START__;
extern uint64_t __BL1_RAM_END__; extern uintptr_t __BL1_RAM_END__;
#define BL1_RAM_BASE (uint64_t)(&__BL1_RAM_START__) #define BL1_RAM_BASE (uintptr_t)(&__BL1_RAM_START__)
#define BL1_RAM_LIMIT (uint64_t)(&__BL1_RAM_END__) #define BL1_RAM_LIMIT (uintptr_t)(&__BL1_RAM_END__)
/****************************************** /******************************************
* Function prototypes * Function prototypes
......
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************************
 * Place holder function to perform any Secure SVC specific architectural
 * setup. At the moment there is nothing to do; retained as an empty hook
 * so generic BL2 code can call it unconditionally.
 ******************************************************************************/
void bl2_arch_setup(void)
{
}
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
.globl bl2_vector_table
.globl bl2_entrypoint

/* -----------------------------------------------------
 * Exception vector table for BL2. Only the reset entry
 * leads into BL2 proper; all other vectors branch to
 * report_exception.
 * -----------------------------------------------------
 */
vector_base bl2_vector_table
	b	bl2_entrypoint		/* Reset */
	b	report_exception	/* Undef */
	b	report_exception	/* SVC call */
	b	report_exception	/* Prefetch abort */
	b	report_exception	/* Data abort */
	b	report_exception	/* Reserved */
	b	report_exception	/* IRQ */
	b	report_exception	/* FIQ */
func bl2_entrypoint
	/* ---------------------------------------------
	 * Save from r1 the extents of the trusted ram
	 * available to BL2 for future use.
	 * r0 is not currently used.
	 * (r11 is preserved until it is handed to
	 * bl2_early_platform_setup below.)
	 * ---------------------------------------------
	 */
	mov	r11, r1

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	ldr	r0, =bl2_vector_table
	stcopr	r0, VBAR
	isb

	/* -----------------------------------------------------
	 * Enable the instruction cache
	 * -----------------------------------------------------
	 */
	ldcopr	r0, SCTLR
	orr	r0, r0, #SCTLR_I_BIT
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------
	 * Since BL2 executes after BL1, it is assumed
	 * here that BL1 has already done the
	 * necessary register initializations.
	 * ---------------------------------------------
	 */

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL2
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	ldr	r0, =__RW_START__
	ldr	r1, =__RW_END__
	sub	r1, r1, r0	/* r1 = size; inv_dcache_range(base, size) */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	r0, =__COHERENT_RAM_START__
	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	mov	r0, r11		/* restore the argument saved from r1 at entry */
	bl	bl2_early_platform_setup
	bl	bl2_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl2_main

	/* ---------------------------------------------
	 * Should never reach this point.
	 * ---------------------------------------------
	 */
	bl	plat_panic_handler
endfunc bl2_entrypoint
# #
# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. # Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
# #
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
...@@ -29,9 +29,18 @@ ...@@ -29,9 +29,18 @@
# #
BL2_SOURCES += bl2/bl2_main.c \ BL2_SOURCES += bl2/bl2_main.c \
bl2/aarch64/bl2_entrypoint.S \ bl2/${ARCH}/bl2_entrypoint.S \
bl2/aarch64/bl2_arch_setup.c \ bl2/${ARCH}/bl2_arch_setup.c \
common/aarch64/early_exceptions.S \ lib/locks/exclusive/${ARCH}/spinlock.S
lib/locks/exclusive/aarch64/spinlock.S
ifeq (${ARCH},aarch64)
BL2_SOURCES += common/aarch64/early_exceptions.S
endif
ifeq (${LOAD_IMAGE_V2},1)
BL2_SOURCES += bl2/bl2_image_load_v2.c
else
BL2_SOURCES += bl2/bl2_image_load.c
endif
BL2_LINKERFILE := bl2/bl2.ld.S BL2_LINKERFILE := bl2/bl2.ld.S
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <platform_def.h>
#include <stdint.h>
/*
* Check for platforms that use obsolete image terminology
*/
#ifdef BL30_BASE
# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
#endif
/*******************************************************************************
* Load the SCP_BL2 image if there's one.
* If a platform does not want to attempt to load SCP_BL2 image it must leave
* SCP_BL2_BASE undefined.
* Return 0 on success or if there's no SCP_BL2 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_scp_bl2(void)
{
	/* Defaults to success so platforms without SCP_BL2 fall through. */
	int e = 0;
#ifdef SCP_BL2_BASE
	meminfo_t scp_bl2_mem_info;
	image_info_t scp_bl2_image_info;

	/*
	 * It is up to the platform to specify where SCP_BL2 should be loaded if
	 * it exists. It could create space in the secure sram or point to a
	 * completely different memory.
	 *
	 * The entry point information is not relevant in this case as the AP
	 * won't execute the SCP_BL2 image.
	 */
	INFO("BL2: Loading SCP_BL2\n");
	bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
	/* Only the header version is set up-front; the rest is filled in
	 * by the load below. */
	scp_bl2_image_info.h.version = VERSION_1;
	e = load_auth_image(&scp_bl2_mem_info,
			    SCP_BL2_IMAGE_ID,
			    SCP_BL2_BASE,
			    &scp_bl2_image_info,
			    NULL);

	if (e == 0) {
		/* The subsequent handling of SCP_BL2 is platform specific */
		e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
		if (e) {
			ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
	}
#endif /* SCP_BL2_BASE */

	return e;
}
#ifndef EL3_PAYLOAD_BASE
/*******************************************************************************
* Load the BL31 image.
* The bl2_to_bl31_params and bl31_ep_info params will be updated with the
* relevant BL31 information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl31(bl31_params_t *bl2_to_bl31_params,
		     entry_point_info_t *bl31_ep_info)
{
	meminfo_t *tzram_layout;
	int err;

	INFO("BL2: Loading BL31\n");
	assert(bl2_to_bl31_params != NULL);
	assert(bl31_ep_info != NULL);

	/* Query the platform for the trusted RAM still free after BL2 load */
	tzram_layout = bl2_plat_sec_mem_layout();

	/* BL31 receives a pointer to the parameter block in its first arg */
	bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;

	/* Load the BL31 image */
	err = load_auth_image(tzram_layout, BL31_IMAGE_ID, BL31_BASE,
			      bl2_to_bl31_params->bl31_image_info,
			      bl31_ep_info);
	if (err != 0)
		return err;

	/* Let the platform finalise the BL31 entry point information */
	bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
				  bl31_ep_info);

	return 0;
}
/*******************************************************************************
* Load the BL32 image if there's one.
* The bl2_to_bl31_params param will be updated with the relevant BL32
* information.
* If a platform does not want to attempt to load BL32 image it must leave
* BL32_BASE undefined.
* Return 0 on success or if there's no BL32 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_bl32(bl31_params_t *bl2_to_bl31_params)
{
	/* Defaults to success when no BL32 image is configured. */
	int err = 0;
#ifdef BL32_BASE
	meminfo_t mem_info;

	INFO("BL2: Loading BL32\n");
	assert(bl2_to_bl31_params != NULL);

	/*
	 * It is up to the platform to specify where BL32 should be loaded if
	 * it exists. It could create space in the secure sram or point to a
	 * completely different memory.
	 */
	bl2_plat_get_bl32_meminfo(&mem_info);

	err = load_auth_image(&mem_info, BL32_IMAGE_ID, BL32_BASE,
			      bl2_to_bl31_params->bl32_image_info,
			      bl2_to_bl31_params->bl32_ep_info);
	if (err == 0)
		/* Let the platform finalise the BL32 entry point information */
		bl2_plat_set_bl32_ep_info(bl2_to_bl31_params->bl32_image_info,
					  bl2_to_bl31_params->bl32_ep_info);
#endif /* BL32_BASE */

	return err;
}
#ifndef PRELOADED_BL33_BASE
/*******************************************************************************
* Load the BL33 image.
* The bl2_to_bl31_params param will be updated with the relevant BL33
* information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl33(bl31_params_t *bl2_to_bl31_params)
{
	meminfo_t mem_info;
	int err;

	INFO("BL2: Loading BL33\n");
	assert(bl2_to_bl31_params != NULL);

	/* The platform describes the memory available for BL33 */
	bl2_plat_get_bl33_meminfo(&mem_info);

	/* Load the BL33 image in non-secure memory provided by the platform */
	err = load_auth_image(&mem_info, BL33_IMAGE_ID,
			      plat_get_ns_image_entrypoint(),
			      bl2_to_bl31_params->bl33_image_info,
			      bl2_to_bl31_params->bl33_ep_info);
	if (err != 0)
		return err;

	/* Let the platform finalise the BL33 entry point information */
	bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
				  bl2_to_bl31_params->bl33_ep_info);

	return 0;
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/*******************************************************************************
* This function loads SCP_BL2/BL3x images and returns the ep_info for
* the next executable image.
******************************************************************************/
entry_point_info_t *bl2_load_images(void)
{
	bl31_params_t *bl2_to_bl31_params;
	entry_point_info_t *bl31_ep_info;
	int e;

	/* A failure to load/handle SCP_BL2 (when configured) is fatal. */
	e = load_scp_bl2();
	if (e) {
		ERROR("Failed to load SCP_BL2 (%i)\n", e);
		plat_error_handler(e);
	}

	/* Perform platform setup in BL2 after loading SCP_BL2 */
	bl2_platform_setup();

	/*
	 * Get a pointer to the memory the platform has set aside to pass
	 * information to BL31.
	 */
	bl2_to_bl31_params = bl2_plat_get_bl31_params();
	bl31_ep_info = bl2_plat_get_bl31_ep_info();

#ifdef EL3_PAYLOAD_BASE
	/*
	 * In the case of an EL3 payload, we don't need to load any further
	 * images. Just update the BL31 entrypoint info structure to make BL1
	 * jump to the EL3 payload.
	 * The pointer to the memory the platform has set aside to pass
	 * information to BL31 in the normal boot flow is reused here, even
	 * though only a fraction of the information contained in the
	 * bl31_params_t structure makes sense in the context of EL3 payloads.
	 * This will be refined in the future.
	 */
	INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
	bl31_ep_info->pc = EL3_PAYLOAD_BASE;
	bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
	bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
#else
	/* BL31 is mandatory: any load failure is fatal. */
	e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
	if (e) {
		ERROR("Failed to load BL31 (%i)\n", e);
		plat_error_handler(e);
	}

	/*
	 * BL32 is optional: only an authentication failure is fatal;
	 * any other load failure merely produces a warning.
	 */
	e = load_bl32(bl2_to_bl31_params);
	if (e) {
		if (e == -EAUTH) {
			ERROR("Failed to authenticate BL32\n");
			plat_error_handler(e);
		} else {
			WARN("Failed to load BL32 (%i)\n", e);
		}
	}

#ifdef PRELOADED_BL33_BASE
	/*
	 * In this case, don't load the BL33 image as it's already loaded in
	 * memory. Update BL33 entrypoint information.
	 */
	INFO("BL2: Populating the entrypoint info for the preloaded BL33\n");
	bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE;
	bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
#else
	/* BL33 is mandatory: any load failure is fatal. */
	e = load_bl33(bl2_to_bl31_params);
	if (e) {
		ERROR("Failed to load BL33 (%i)\n", e);
		plat_error_handler(e);
	}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */

	/* Flush the params to be passed to memory */
	bl2_plat_flush_bl31_params();

	return bl31_ep_info;
}
/*
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl_common.h>
#include <debug.h>
#include <desc_image_load.h>
#include <platform.h>
#include <platform_def.h>
#include <stdint.h>
/*******************************************************************************
* This function loads SCP_BL2/BL3x images and returns the ep_info for
* the next executable image.
******************************************************************************/
entry_point_info_t *bl2_load_images(void)
{
	bl_params_t *next_bl_params;
	bl_load_info_t *load_info;
	const bl_load_info_node_t *node;
	int setup_done = 0;
	int err;

	/* Query the platform for the list of images BL2 has to load. */
	load_info = plat_get_bl_image_load_info();
	assert(load_info);
	assert(load_info->head);
	assert(load_info->h.type == PARAM_BL_LOAD_INFO);
	assert(load_info->h.version >= VERSION_2);

	for (node = load_info->head; node; node = node->next_load_info) {
		/*
		 * Run platform setup once, when the first image that
		 * requests it is reached; warn on repeated requests.
		 */
		if (node->image_info->h.attr & IMAGE_ATTRIB_PLAT_SETUP) {
			if (setup_done) {
				WARN("BL2: Platform setup already done!!\n");
			} else {
				INFO("BL2: Doing platform setup\n");
				bl2_platform_setup();
				setup_done = 1;
			}
		}

		if (node->image_info->h.attr & IMAGE_ATTRIB_SKIP_LOADING) {
			INFO("BL2: Skip loading image id %d\n", node->image_id);
		} else {
			INFO("BL2: Loading image id %d\n", node->image_id);
			err = load_auth_image(node->image_id,
					      node->image_info);
			if (err) {
				ERROR("BL2: Failed to load image (%i)\n", err);
				plat_error_handler(err);
			}
		}

		/* Allow platform to handle image information. */
		err = bl2_plat_handle_post_image_load(node->image_id);
		if (err) {
			ERROR("BL2: Failure in post image load handling (%i)\n", err);
			plat_error_handler(err);
		}
	}

	/* Retrieve the parameters to hand over to the next image. */
	next_bl_params = plat_get_next_bl_params();
	assert(next_bl_params);
	assert(next_bl_params->head);
	assert(next_bl_params->h.type == PARAM_BL_PARAMS);
	assert(next_bl_params->h.version >= VERSION_2);

	/* Flush the parameters to be passed to next image */
	plat_flush_next_bl_params();

	return next_bl_params->head->ep_info;
}
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -28,192 +28,23 @@ ...@@ -28,192 +28,23 @@
* POSSIBILITY OF SUCH DAMAGE. * POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <arch.h>
#include <arch_helpers.h> #include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h> #include <auth_mod.h>
#include <bl1.h> #include <bl1.h>
#include <bl_common.h> #include <bl_common.h>
#include <debug.h> #include <debug.h>
#include <errno.h>
#include <platform.h> #include <platform.h>
#include <platform_def.h>
#include <stdint.h>
#include "bl2_private.h" #include "bl2_private.h"
/*
* Check for platforms that use obsolete image terminology
*/
#ifdef BL30_BASE
# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
#endif
/*******************************************************************************
* Load the SCP_BL2 image if there's one.
* If a platform does not want to attempt to load SCP_BL2 image it must leave
* SCP_BL2_BASE undefined.
* Return 0 on success or if there's no SCP_BL2 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_scp_bl2(void)
{
int e = 0;
#ifdef SCP_BL2_BASE
meminfo_t scp_bl2_mem_info;
image_info_t scp_bl2_image_info;
/*
* It is up to the platform to specify where SCP_BL2 should be loaded if
* it exists. It could create space in the secure sram or point to a
* completely different memory.
*
* The entry point information is not relevant in this case as the AP
* won't execute the SCP_BL2 image.
*/
INFO("BL2: Loading SCP_BL2\n");
bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
scp_bl2_image_info.h.version = VERSION_1;
e = load_auth_image(&scp_bl2_mem_info,
SCP_BL2_IMAGE_ID,
SCP_BL2_BASE,
&scp_bl2_image_info,
NULL);
if (e == 0) {
/* The subsequent handling of SCP_BL2 is platform specific */
e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
if (e) {
ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
}
}
#endif /* SCP_BL2_BASE */
return e;
}
#ifndef EL3_PAYLOAD_BASE
/*******************************************************************************
* Load the BL31 image.
* The bl2_to_bl31_params and bl31_ep_info params will be updated with the
* relevant BL31 information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl31(bl31_params_t *bl2_to_bl31_params,
entry_point_info_t *bl31_ep_info)
{
meminfo_t *bl2_tzram_layout;
int e;
INFO("BL2: Loading BL31\n");
assert(bl2_to_bl31_params != NULL);
assert(bl31_ep_info != NULL);
/* Find out how much free trusted ram remains after BL2 load */
bl2_tzram_layout = bl2_plat_sec_mem_layout();
/* Set the X0 parameter to BL31 */
bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
/* Load the BL31 image */
e = load_auth_image(bl2_tzram_layout,
BL31_IMAGE_ID,
BL31_BASE,
bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
if (e == 0) {
bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
}
return e;
}
/*******************************************************************************
* Load the BL32 image if there's one.
* The bl2_to_bl31_params param will be updated with the relevant BL32
* information.
* If a platform does not want to attempt to load BL32 image it must leave
* BL32_BASE undefined.
* Return 0 on success or if there's no BL32 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_bl32(bl31_params_t *bl2_to_bl31_params)
{
int e = 0;
#ifdef BL32_BASE
meminfo_t bl32_mem_info;
INFO("BL2: Loading BL32\n");
assert(bl2_to_bl31_params != NULL);
/*
* It is up to the platform to specify where BL32 should be loaded if
* it exists. It could create space in the secure sram or point to a
* completely different memory.
*/
bl2_plat_get_bl32_meminfo(&bl32_mem_info);
e = load_auth_image(&bl32_mem_info,
BL32_IMAGE_ID,
BL32_BASE,
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
if (e == 0) {
bl2_plat_set_bl32_ep_info(
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
}
#endif /* BL32_BASE */
return e;
}
#ifndef PRELOADED_BL33_BASE
/*******************************************************************************
* Load the BL33 image.
* The bl2_to_bl31_params param will be updated with the relevant BL33
* information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl33(bl31_params_t *bl2_to_bl31_params)
{
meminfo_t bl33_mem_info;
int e;
INFO("BL2: Loading BL33\n");
assert(bl2_to_bl31_params != NULL);
bl2_plat_get_bl33_meminfo(&bl33_mem_info);
/* Load the BL33 image in non-secure memory provided by the platform */
e = load_auth_image(&bl33_mem_info,
BL33_IMAGE_ID,
plat_get_ns_image_entrypoint(),
bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
if (e == 0) {
bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
}
return e;
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/******************************************************************************* /*******************************************************************************
* The only thing to do in BL2 is to load further images and pass control to * The only thing to do in BL2 is to load further images and pass control to
* BL31. The memory occupied by BL2 will be reclaimed by BL3x stages. BL2 runs * next BL. The memory occupied by BL2 will be reclaimed by BL3x stages. BL2
* entirely in S-EL1. * runs entirely in S-EL1.
******************************************************************************/ ******************************************************************************/
void bl2_main(void) void bl2_main(void)
{ {
bl31_params_t *bl2_to_bl31_params; entry_point_info_t *next_bl_ep_info;
entry_point_info_t *bl31_ep_info;
int e;
NOTICE("BL2: %s\n", version_string); NOTICE("BL2: %s\n", version_string);
NOTICE("BL2: %s\n", build_message); NOTICE("BL2: %s\n", build_message);
...@@ -226,82 +57,22 @@ void bl2_main(void) ...@@ -226,82 +57,22 @@ void bl2_main(void)
auth_mod_init(); auth_mod_init();
#endif /* TRUSTED_BOARD_BOOT */ #endif /* TRUSTED_BOARD_BOOT */
/* /* Load the subsequent bootloader images. */
* Load the subsequent bootloader images next_bl_ep_info = bl2_load_images();
*/
e = load_scp_bl2();
if (e) {
ERROR("Failed to load SCP_BL2 (%i)\n", e);
plat_error_handler(e);
}
/* Perform platform setup in BL2 after loading SCP_BL2 */
bl2_platform_setup();
/*
* Get a pointer to the memory the platform has set aside to pass
* information to BL31.
*/
bl2_to_bl31_params = bl2_plat_get_bl31_params();
bl31_ep_info = bl2_plat_get_bl31_ep_info();
#ifdef EL3_PAYLOAD_BASE
/*
* In the case of an EL3 payload, we don't need to load any further
* images. Just update the BL31 entrypoint info structure to make BL1
* jump to the EL3 payload.
* The pointer to the memory the platform has set aside to pass
* information to BL31 in the normal boot flow is reused here, even
* though only a fraction of the information contained in the
* bl31_params_t structure makes sense in the context of EL3 payloads.
* This will be refined in the future.
*/
INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
bl31_ep_info->pc = EL3_PAYLOAD_BASE;
bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
#else
e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
if (e) {
ERROR("Failed to load BL31 (%i)\n", e);
plat_error_handler(e);
}
e = load_bl32(bl2_to_bl31_params); #ifdef AARCH32
if (e) {
if (e == -EAUTH) {
ERROR("Failed to authenticate BL32\n");
plat_error_handler(e);
} else {
WARN("Failed to load BL32 (%i)\n", e);
}
}
#ifdef PRELOADED_BL33_BASE
/* /*
* In this case, don't load the BL33 image as it's already loaded in * For AArch32 state BL1 and BL2 share the MMU setup.
* memory. Update BL33 entrypoint information. * Given that BL2 does not map BL1 regions, MMU needs
* to be disabled in order to go back to BL1.
*/ */
INFO("BL2: Populating the entrypoint info for the preloaded BL33\n"); disable_mmu_icache_secure();
bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE; #endif /* AARCH32 */
bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
#else
e = load_bl33(bl2_to_bl31_params);
if (e) {
ERROR("Failed to load BL33 (%i)\n", e);
plat_error_handler(e);
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/* Flush the params to be passed to memory */
bl2_plat_flush_bl31_params();
/* /*
* Run BL31 via an SMC to BL1. Information on how to pass control to * Run next BL image via an SMC to BL1. Information on how to pass
* the BL32 (if present) and BL33 software images will be passed to * control to the BL32 (if present) and BL33 software images will
* BL31 as an argument. * be passed to next BL image as an argument.
*/ */
smc(BL1_SMC_RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0); smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0);
} }
/* /*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met: * modification, are permitted provided that the following conditions are met:
...@@ -31,9 +31,15 @@ ...@@ -31,9 +31,15 @@
#ifndef __BL2_PRIVATE_H__ #ifndef __BL2_PRIVATE_H__
#define __BL2_PRIVATE_H__ #define __BL2_PRIVATE_H__
/******************************************
* Forward declarations
*****************************************/
struct entry_point_info;
/****************************************** /******************************************
* Function prototypes * Function prototypes
*****************************************/ *****************************************/
void bl2_arch_setup(void); void bl2_arch_setup(void);
struct entry_point_info *bl2_load_images(void);
#endif /* __BL2_PRIVATE_H__ */ #endif /* __BL2_PRIVATE_H__ */
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm_macros.S> #include <asm_macros.S>
#include <bl_common.h> #include <bl_common.h>
#include <context.h> #include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h> #include <runtime_svc.h>
#include <smcc_helpers.h> #include <smcc_helpers.h>
#include <smcc_macros.S> #include <smcc_macros.S>
...@@ -41,7 +42,8 @@ ...@@ -41,7 +42,8 @@
.globl sp_min_entrypoint .globl sp_min_entrypoint
.globl sp_min_warm_entrypoint .globl sp_min_warm_entrypoint
func sp_min_vector_table
vector_base sp_min_vector_table
b sp_min_entrypoint b sp_min_entrypoint
b plat_panic_handler /* Undef */ b plat_panic_handler /* Undef */
b handle_smc /* Syscall */ b handle_smc /* Syscall */
...@@ -50,185 +52,70 @@ func sp_min_vector_table ...@@ -50,185 +52,70 @@ func sp_min_vector_table
b plat_panic_handler /* Reserved */ b plat_panic_handler /* Reserved */
b plat_panic_handler /* IRQ */ b plat_panic_handler /* IRQ */
b plat_panic_handler /* FIQ */ b plat_panic_handler /* FIQ */
endfunc sp_min_vector_table
func handle_smc
smcc_save_gp_mode_regs
/* r0 points to smc_context */
mov r2, r0 /* handle */
ldcopr r0, SCR
/* Save SCR in stack */
push {r0}
and r3, r0, #SCR_NS_BIT /* flags */
/* Switch to Secure Mode*/
bic r0, #SCR_NS_BIT
stcopr r0, SCR
isb
ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
/* Check whether an SMC64 is issued */
tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
beq 1f /* SMC32 is detected */
mov r0, #SMC_UNK
str r0, [r2, #SMC_CTX_GPREG_R0]
mov r0, r2
b 2f /* Skip handling the SMC */
1:
mov r1, #0 /* cookie */
bl handle_runtime_svc
2:
/* r0 points to smc context */
/* Restore SCR from stack */
pop {r1}
stcopr r1, SCR
isb
b sp_min_exit
endfunc handle_smc
/* /*
* The Cold boot/Reset entrypoint for SP_MIN * The Cold boot/Reset entrypoint for SP_MIN
*/ */
func sp_min_entrypoint func sp_min_entrypoint
#if !RESET_TO_SP_MIN
/* /* ---------------------------------------------------------------
* The caches and TLBs are disabled at reset. If any implementation * Preceding bootloader has populated r0 with a pointer to a
* allows the caches/TLB to be hit while they are disabled, ensure * 'bl_params_t' structure & r1 with a pointer to platform
* that they are invalidated here * specific structure
*/ * ---------------------------------------------------------------
/* Make sure we are in Secure Mode*/
ldcopr r0, SCR
bic r0, #SCR_NS_BIT
stcopr r0, SCR
isb
/* Switch to monitor mode */
cps #MODE32_mon
isb
/*
* Set sane values for NS SCTLR as well.
* Switch to non secure mode for this.
*/
ldr r0, =(SCTLR_RES1)
ldcopr r1, SCR
orr r2, r1, #SCR_NS_BIT
stcopr r2, SCR
isb
ldcopr r2, SCTLR
orr r0, r0, r2
stcopr r0, SCTLR
isb
stcopr r1, SCR
isb
/*
* Set the CPU endianness before doing anything that might involve
* memory reads or writes.
*/ */
ldcopr r0, SCTLR mov r11, r0
bic r0, r0, #SCTLR_EE_BIT mov r12, r1
stcopr r0, SCTLR
isb
/* Run the CPU Specific Reset handler */
bl reset_handler
/* /* ---------------------------------------------------------------------
* Enable the instruction cache and data access * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
* alignment checks * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
*/ * and primary/secondary CPU logic should not be executed in this case.
ldcopr r0, SCTLR *
ldr r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT) * Also, assume that the previous bootloader has already set up the CPU
orr r0, r0, r1 * endianness and has initialised the memory.
stcopr r0, SCTLR * ---------------------------------------------------------------------
isb
/* Set the vector tables */
ldr r0, =sp_min_vector_table
stcopr r0, VBAR
stcopr r0, MVBAR
isb
/*
* Enable the SIF bit to disable instruction fetches
* from Non-secure memory.
*/ */
ldcopr r0, SCR el3_entrypoint_common \
orr r0, r0, #SCR_SIF_BIT _set_endian=0 \
stcopr r0, SCR _warm_boot_mailbox=0 \
_secondary_cold_boot=0 \
/* _init_memory=0 \
* Enable the SError interrupt now that the exception vectors have been _init_c_runtime=1 \
* setup. _exception_vectors=sp_min_vector_table
/* ---------------------------------------------------------------------
* Relay the previous bootloader's arguments to the platform layer
* ---------------------------------------------------------------------
*/ */
cpsie a mov r0, r11
isb mov r1, r12
#else
/* Enable access to Advanced SIMD registers */ /* ---------------------------------------------------------------------
ldcopr r0, NSACR * For RESET_TO_SP_MIN systems which have a programmable reset address,
bic r0, r0, #NSASEDIS_BIT * sp_min_entrypoint() is executed only on the cold boot path so we can
orr r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT) * skip the warm boot mailbox mechanism.
stcopr r0, NSACR * ---------------------------------------------------------------------
isb
/*
* Enable access to Advanced SIMD, Floating point and to the Trace
* functionality as well.
*/ */
ldcopr r0, CPACR el3_entrypoint_common \
bic r0, r0, #ASEDIS_BIT _set_endian=1 \
bic r0, r0, #TRCDIS_BIT _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
orr r0, r0, #CPACR_ENABLE_FP_ACCESS _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
stcopr r0, CPACR _init_memory=1 \
isb _init_c_runtime=1 \
_exception_vectors=sp_min_vector_table
vmrs r0, FPEXC
orr r0, r0, #FPEXC_EN_BIT /* ---------------------------------------------------------------------
vmsr FPEXC, r0 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
* to run so there's no argument to relay from a previous bootloader.
/* Detect whether Warm or Cold boot */ * Zero the arguments passed to the platform layer to reflect that.
bl plat_get_my_entrypoint * ---------------------------------------------------------------------
cmp r0, #0
/* If warm boot detected, jump to warm boot entry */
bxne r0
/* Setup C runtime stack */
bl plat_set_my_stack
/* Perform platform specific memory initialization */
bl platform_mem_init
/* Initialize the C Runtime Environment */
/*
* Invalidate the RW memory used by SP_MIN image. This includes
* the data and NOBITS sections. This is done to safeguard against
* possible corruption of this memory by dirty cache lines in a system
* cache as a result of use by an earlier boot loader stage.
*/ */
ldr r0, =__RW_START__ mov r0, #0
ldr r1, =__RW_END__ mov r1, #0
sub r1, r1, r0 #endif /* RESET_TO_SP_MIN */
bl inv_dcache_range
ldr r0, =__BSS_START__
ldr r1, =__BSS_SIZE__
bl zeromem
#if USE_COHERENT_MEM
ldr r0, =__COHERENT_RAM_START__
ldr r1, =__COHERENT_RAM_UNALIGNED_SIZE__
bl zeromem
#endif
/* Perform platform specific early arch. setup */
bl sp_min_early_platform_setup bl sp_min_early_platform_setup
bl sp_min_plat_arch_setup bl sp_min_plat_arch_setup
...@@ -270,13 +157,76 @@ func sp_min_entrypoint ...@@ -270,13 +157,76 @@ func sp_min_entrypoint
b sp_min_exit b sp_min_exit
endfunc sp_min_entrypoint endfunc sp_min_entrypoint
/*
 * SMC handling function for SP_MIN.
 *
 * Saves the caller's general purpose registers into an smc_context
 * structure (smcc_save_gp_mode_regs leaves r0 pointing at it), switches
 * to the Secure world and dispatches 32-bit SMC calls to the C runtime
 * service layer via handle_runtime_svc. SMC64 calls are rejected with
 * SMC_UNK since SP_MIN is AArch32-only. The caller's SCR is restored
 * before leaving through sp_min_exit.
 * NOTE(review): presumably reached from an entry in sp_min_vector_table
 * on an SMC exception — confirm against the vector table definition.
 */
func handle_smc
/* Capture the caller's GP registers into an smc_context */
smcc_save_gp_mode_regs
/* r0 points to smc_context */
mov r2, r0 /* handle */
ldcopr r0, SCR
/*
 * Preserve the current SCR on the stack so the caller's security
 * state can be restored once the call has been handled.
 */
/* Save SCR in stack */
push {r0}
/* Extract SCR.NS as the 'flags' argument for handle_runtime_svc */
and r3, r0, #SCR_NS_BIT /* flags */
/* Switch to Secure Mode*/
bic r0, #SCR_NS_BIT
stcopr r0, SCR
isb
/* Load the function identifier from the saved r0 of the caller */
ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
/* Check whether an SMC64 is issued */
tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
beq 1f /* SMC32 is detected */
/* SMC64 is not supported: report SMC_UNK in the context's r0 slot */
mov r0, #SMC_UNK
str r0, [r2, #SMC_CTX_GPREG_R0]
mov r0, r2
b 2f /* Skip handling the SMC */
1:
/* SMC32: dispatch as handle_runtime_svc(smc_fid, cookie, handle, flags) */
mov r1, #0 /* cookie */
bl handle_runtime_svc
2:
/* r0 points to smc context */
/* Restore SCR from stack */
pop {r1}
stcopr r1, SCR
isb
/* Return to the SMC caller, restoring its saved context */
b sp_min_exit
endfunc handle_smc
/* /*
* The Warm boot entrypoint for SP_MIN. * The Warm boot entrypoint for SP_MIN.
*/ */
func sp_min_warm_entrypoint func sp_min_warm_entrypoint
/*
/* Setup C runtime stack */ * On the warm boot path, most of the EL3 initialisations performed by
bl plat_set_my_stack * 'el3_entrypoint_common' must be skipped:
*
* - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
* programming the reset address do we need to set the CPU endianness.
* In other cases, we assume this has been taken care by the
* entrypoint code.
*
* - No need to determine the type of boot, we know it is a warm boot.
*
* - Do not try to distinguish between primary and secondary CPUs, this
* notion only exists for a cold boot.
*
* - No need to initialise the memory or the C runtime environment,
* it has been done once and for all on the cold boot path.
*/
el3_entrypoint_common \
_set_endian=PROGRAMMABLE_RESET_ADDRESS \
_warm_boot_mailbox=0 \
_secondary_cold_boot=0 \
_init_memory=0 \
_init_c_runtime=0 \
_exception_vectors=sp_min_vector_table
/* -------------------------------------------- /* --------------------------------------------
* Enable the MMU with the DCache disabled. It * Enable the MMU with the DCache disabled. It
......
...@@ -50,6 +50,7 @@ SECTIONS ...@@ -50,6 +50,7 @@ SECTIONS
__TEXT_START__ = .; __TEXT_START__ = .;
*entrypoint.o(.text*) *entrypoint.o(.text*)
*(.text*) *(.text*)
*(.vectors)
. = NEXT(4096); . = NEXT(4096);
__TEXT_END__ = .; __TEXT_END__ = .;
} >RAM } >RAM
...@@ -98,6 +99,7 @@ SECTIONS ...@@ -98,6 +99,7 @@ SECTIONS
KEEP(*(cpu_ops)) KEEP(*(cpu_ops))
__CPU_OPS_END__ = .; __CPU_OPS_END__ = .;
*(.vectors)
__RO_END_UNALIGNED__ = .; __RO_END_UNALIGNED__ = .;
/* /*
......
...@@ -58,6 +58,6 @@ else ...@@ -58,6 +58,6 @@ else
include ${SP_MIN_PLAT_MAKEFILE} include ${SP_MIN_PLAT_MAKEFILE}
endif endif
RESET_TO_SP_MIN := 1 RESET_TO_SP_MIN := 0
$(eval $(call add_define,RESET_TO_SP_MIN)) $(eval $(call add_define,RESET_TO_SP_MIN))
$(eval $(call assert_boolean,RESET_TO_SP_MIN)) $(eval $(call assert_boolean,RESET_TO_SP_MIN))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment