Unverified Commit 0d3a27e7 authored by davidcunado-arm, committed by GitHub

Merge pull request #1200 from robertovargas-arm/bl2-el3

Add BL2_AT_EL3 build option
parents 0caaa03b 76d26733
...@@ -500,6 +500,7 @@ $(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
$(eval $(call assert_boolean,USE_COHERENT_MEM))
$(eval $(call assert_boolean,USE_TBBR_DEFS))
$(eval $(call assert_boolean,WARMBOOT_ENABLE_DCACHE_EARLY))
$(eval $(call assert_boolean,BL2_AT_EL3))
$(eval $(call assert_numeric,ARM_ARCH_MAJOR))
$(eval $(call assert_numeric,ARM_ARCH_MINOR))
...@@ -543,6 +544,7 @@ $(eval $(call add_define,TRUSTED_BOARD_BOOT))
$(eval $(call add_define,USE_COHERENT_MEM))
$(eval $(call add_define,USE_TBBR_DEFS))
$(eval $(call add_define,WARMBOOT_ENABLE_DCACHE_EARLY))
$(eval $(call add_define,BL2_AT_EL3))
# Define the EL3_PAYLOAD_BASE flag only if it is provided.
ifdef EL3_PAYLOAD_BASE
...@@ -584,8 +586,12 @@ $(eval $(call MAKE_BL,1))
endif
ifeq (${NEED_BL2},yes)
ifeq (${BL2_AT_EL3}, 0)
FIP_BL2_ARGS := tb-fw
endif

$(if ${BL2}, $(eval $(call MAKE_TOOL_ARGS,2,${BL2},${FIP_BL2_ARGS})),\
$(eval $(call MAKE_BL,2,${FIP_BL2_ARGS})))
endif
ifeq (${NEED_SCP_BL2},yes)
......
...@@ -13,7 +13,10 @@ BL1_SOURCES += bl1/bl1_main.c \
lib/cpus/errata_report.c \
lib/el3_runtime/${ARCH}/context_mgmt.c \
plat/common/plat_bl1_common.c \
plat/common/${ARCH}/platform_up_stack.S \
${MBEDTLS_COMMON_SOURCES} \
${MBEDTLS_CRYPTO_SOURCES} \
${MBEDTLS_X509_SOURCES}
ifeq (${ARCH},aarch64)
BL1_SOURCES += lib/el3_runtime/aarch64/context.S
......
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <el3_common_macros.S>
.globl bl2_entrypoint
.globl bl2_run_next_image
func bl2_entrypoint
/* Save arguments r0-r3 from the previous boot loader */
mov r9, r0
mov r10, r1
mov r11, r2
mov r12, r3
el3_entrypoint_common \
_init_sctlr=1 \
_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
_init_memory=1 \
_init_c_runtime=1 \
_exception_vectors=bl2_vector_table
/*
* Restore the parameters passed by the Boot ROM
*/
mov r0, r9
mov r1, r10
mov r2, r11
mov r3, r12
bl bl2_el3_early_platform_setup
bl bl2_el3_plat_arch_setup
/* ---------------------------------------------
* Jump to main function.
* ---------------------------------------------
*/
bl bl2_main
/* ---------------------------------------------
* Should never reach this point.
* ---------------------------------------------
*/
no_ret plat_panic_handler
endfunc bl2_entrypoint
func bl2_run_next_image
mov r8,r0
/*
* MMU needs to be disabled because both BL2 and BL32 execute
* in PL1, and therefore share the same address space.
* BL32 will initialize the address space according to its
* own requirement.
*/
bl disable_mmu_icache_secure
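/*
 * The MMU is now off; invalidate the entire unified TLB so that no
 * stale translations are left behind for the next image.
 */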
stcopr r0, TLBIALL
dsb sy
isb
mov r0, r8
bl bl2_el3_plat_prepare_exit
/*
* Extract PC and SPSR based on struct `entry_point_info_t`
* and load it in LR and SPSR registers respectively.
*/
ldr lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET]
ldr r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)]
msr spsr, r1
add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET
ldm r8, {r0, r1, r2, r3}
eret
endfunc bl2_run_next_image
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
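/* -----------------------------------------------------------------------------
 * Very simple exception vectors for BL2. Apart from the reset vector, which
 * branches to the BL2 entry point, every exception is routed to
 * report_exception.
 * -----------------------------------------------------------------------------
 */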
.globl bl2_vector_table
vector_base bl2_vector_table
b bl2_entrypoint
b report_exception /* Undef */
b report_exception /* SVC call */
b report_exception /* Prefetch abort */
b report_exception /* Data abort */
b report_exception /* Reserved */
b report_exception /* IRQ */
b report_exception /* FIQ */
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <el3_common_macros.S>
.globl bl2_entrypoint
.globl bl2_vector_table
.globl bl2_el3_run_image
.globl bl2_run_next_image
func bl2_entrypoint
/* Save arguments x0-x3 from previous Boot loader */
mov x20, x0
mov x21, x1
mov x22, x2
mov x23, x3
el3_entrypoint_common \
_init_sctlr=1 \
_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
_init_memory=1 \
_init_c_runtime=1 \
_exception_vectors=bl2_el3_exceptions
/*
* Restore the parameters passed by the Boot ROM
*/
mov x0, x20
mov x1, x21
mov x2, x22
mov x3, x23
bl bl2_el3_early_platform_setup
bl bl2_el3_plat_arch_setup
/* ---------------------------------------------
* Jump to main function.
* ---------------------------------------------
*/
bl bl2_main
/* ---------------------------------------------
* Should never reach this point.
* ---------------------------------------------
*/
no_ret plat_panic_handler
endfunc bl2_entrypoint
func bl2_run_next_image
mov x20,x0
/*
* MMU needs to be disabled because both BL2 and BL31 execute
* in EL3, and therefore share the same address space.
* BL31 will initialize the address space according to its
* own requirement.
*/
bl disable_mmu_icache_el3
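/*
 * The EL3 MMU is now off; invalidate all EL3 TLB entries so that no
 * stale translations are left behind for the next image.
 */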
tlbi alle3
bl bl2_el3_plat_prepare_exit
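/*
 * Extract PC and SPSR from the entry_point_info_t structure and load
 * them into ELR_EL3 and SPSR_EL3 respectively.
 */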
ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
msr elr_el3, x0
msr spsr_el3, x1
ldp x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
ldp x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
ldp x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
ldp x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
eret
endfunc bl2_run_next_image
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
#include <bl1.h>
#include <bl_common.h>
#include <context.h>
/* -----------------------------------------------------------------------------
* Very simple stackless exception handlers used by BL2.
* -----------------------------------------------------------------------------
*/
.globl bl2_el3_exceptions
vector_base bl2_el3_exceptions
/* -----------------------------------------------------
* Current EL with SP0 : 0x0 - 0x200
* -----------------------------------------------------
*/
vector_entry SynchronousExceptionSP0
mov x0, #SYNC_EXCEPTION_SP_EL0
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SynchronousExceptionSP0
vector_entry IrqSP0
mov x0, #IRQ_SP_EL0
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size IrqSP0
vector_entry FiqSP0
mov x0, #FIQ_SP_EL0
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size FiqSP0
vector_entry SErrorSP0
mov x0, #SERROR_SP_EL0
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SErrorSP0
/* -----------------------------------------------------
* Current EL with SPx: 0x200 - 0x400
* -----------------------------------------------------
*/
vector_entry SynchronousExceptionSPx
mov x0, #SYNC_EXCEPTION_SP_ELX
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SynchronousExceptionSPx
vector_entry IrqSPx
mov x0, #IRQ_SP_ELX
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size IrqSPx
vector_entry FiqSPx
mov x0, #FIQ_SP_ELX
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size FiqSPx
vector_entry SErrorSPx
mov x0, #SERROR_SP_ELX
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SErrorSPx
/* -----------------------------------------------------
* Lower EL using AArch64 : 0x400 - 0x600
* -----------------------------------------------------
*/
vector_entry SynchronousExceptionA64
mov x0, #SYNC_EXCEPTION_AARCH64
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SynchronousExceptionA64
vector_entry IrqA64
mov x0, #IRQ_AARCH64
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size IrqA64
vector_entry FiqA64
mov x0, #FIQ_AARCH64
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size FiqA64
vector_entry SErrorA64
mov x0, #SERROR_AARCH64
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SErrorA64
/* -----------------------------------------------------
* Lower EL using AArch32 : 0x600 - 0x800
* -----------------------------------------------------
*/
vector_entry SynchronousExceptionA32
mov x0, #SYNC_EXCEPTION_AARCH32
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SynchronousExceptionA32
vector_entry IrqA32
mov x0, #IRQ_AARCH32
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size IrqA32
vector_entry FiqA32
mov x0, #FIQ_AARCH32
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size FiqA32
vector_entry SErrorA32
mov x0, #SERROR_AARCH32
bl plat_report_exception
no_ret plat_panic_handler
check_vector_size SErrorA32
...@@ -5,10 +5,12 @@
#
BL2_SOURCES += bl2/bl2_main.c \
bl2/${ARCH}/bl2_arch_setup.c \
lib/locks/exclusive/${ARCH}/spinlock.S \
plat/common/${ARCH}/platform_up_stack.S \
${MBEDTLS_COMMON_SOURCES} \
${MBEDTLS_CRYPTO_SOURCES} \
${MBEDTLS_X509_SOURCES}
ifeq (${ARCH},aarch64)
BL2_SOURCES += common/aarch64/early_exceptions.S
...@@ -20,4 +22,15 @@ else
BL2_SOURCES += bl2/bl2_image_load.c
endif
ifeq (${BL2_AT_EL3},0)
BL2_SOURCES += bl2/${ARCH}/bl2_entrypoint.S
BL2_LINKERFILE := bl2/bl2.ld.S
else
BL2_SOURCES += bl2/${ARCH}/bl2_el3_entrypoint.S \
bl2/${ARCH}/bl2_el3_exceptions.S \
plat/common/plat_bl2_el3_common.c \
lib/cpus/${ARCH}/cpu_helpers.S \
lib/cpus/errata_report.c
BL2_LINKERFILE := bl2/bl2_el3.ld.S
endif
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <platform_def.h>
#include <xlat_tables_defs.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2_entrypoint)
MEMORY {
RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
}
SECTIONS
{
. = BL2_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL2_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
__TEXT_RESIDENT_START__ = .;
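/*
 * Keep the entry point code first: it is the resident part of BL2 that a
 * Boot ROM which always jumps to the same address re-enters on warm boot,
 * and its size is asserted below to fit in a single page.
 */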
*bl2_el3_entrypoint.o(.text*)
*(.text.asm.*)
__TEXT_RESIDENT_END__ = .;
*(.text*)
*(.vectors)
. = NEXT(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(.rodata*)
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__PARSER_LIB_DESCS_START__ = .;
KEEP(*(.img_parser_lib_descs))
__PARSER_LIB_DESCS_END__ = .;
/*
* Ensure 8-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
*/
. = ALIGN(8);
__CPU_OPS_START__ = .;
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
. = NEXT(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
ASSERT(__TEXT_RESIDENT_END__ - __TEXT_RESIDENT_START__ <= PAGE_SIZE,
"Resident part of BL2 has exceeded its limit.")
#else
ro . : {
__RO_START__ = .;
__TEXT_RESIDENT_START__ = .;
*bl2_el3_entrypoint.o(.text*)
*(.text.asm.*)
__TEXT_RESIDENT_END__ = .;
*(.text*)
*(.rodata*)
/*
* Ensure 8-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
*/
. = ALIGN(8);
__CPU_OPS_START__ = .;
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__PARSER_LIB_DESCS_START__ = .;
KEEP(*(.img_parser_lib_descs))
__PARSER_LIB_DESCS_END__ = .;
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
* creep in. Ensure the rest of the current memory page is unused.
*/
. = NEXT(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
/*
* .data must be placed at a lower address than the stacks if the stack
* protector is enabled. Alternatively, the .data.stack_protector_canary
* section can be placed independently of the main .data section.
*/
.data . : {
__DATA_START__ = .;
*(.data*)
__DATA_END__ = .;
} >RAM
stacks (NOLOAD) : {
__STACKS_START__ = .;
*(tzfw_normal_stacks)
__STACKS_END__ = .;
} >RAM
/*
* The .bss section gets initialised to 0 at runtime.
* Its base address should be 16-byte aligned for better performance of the
* zero-initialization code.
*/
.bss : ALIGN(16) {
__BSS_START__ = .;
*(SORT_BY_ALIGNMENT(.bss*))
*(COMMON)
__BSS_END__ = .;
} >RAM
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
* the .bss section and eliminates the unnecessary zero init
*/
xlat_table (NOLOAD) : {
*(xlat_table)
} >RAM
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
*/
. = NEXT(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
#endif
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL2_END__ = .;
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
}
...@@ -13,6 +13,11 @@
#include <platform.h>
#include "bl2_private.h"
#ifdef AARCH32
#define NEXT_IMAGE "BL32"
#else
#define NEXT_IMAGE "BL31"
#endif
/*******************************************************************************
* The only thing to do in BL2 is to load further images and pass control to
...@@ -49,6 +54,8 @@ void bl2_main(void)
disable_mmu_icache_secure();
#endif /* AARCH32 */
#if !BL2_AT_EL3
console_flush();
/*
...@@ -57,4 +64,11 @@ void bl2_main(void)
* be passed to next BL image as an argument.
*/
smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0);
#else
NOTICE("BL2: Booting " NEXT_IMAGE "\n");
print_entry_point_info(next_bl_ep_info);
console_flush();
bl2_run_next_image(next_bl_ep_info);
#endif
}
/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
...@@ -17,5 +17,6 @@ struct entry_point_info;
*****************************************/
void bl2_arch_setup(void);
struct entry_point_info *bl2_load_images(void);
void bl2_run_next_image(const entry_point_info_t *bl_ep_info);
#endif /* __BL2_PRIVATE_H__ */
...@@ -418,6 +418,63 @@ BL2 execution continues as follows:
#. BL1 passes control to BL31 at the specified entrypoint at EL3.
Running BL2 at EL3 execution level
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some platforms have a non-TF Boot ROM that expects the next boot stage
to execute at EL3. On these platforms, TF BL1 is a waste of memory, as
its only purpose is to ensure that TF BL2 is entered at S-EL1. To avoid
this waste, a special mode enables BL2 to execute at EL3, which allows
a non-TF Boot ROM to load and jump directly to BL2. This mode is selected
by enabling the BL2_AT_EL3 build flag. The main differences in this
mode are:
#. BL2 includes the reset code and the mailbox mechanism used to
differentiate cold boot from warm boot. It runs at EL3 and performs the
architectural initialization required at EL3.
#. BL2 no longer receives the meminfo information from BL1. This
information can be passed by the Boot ROM or be internal to the
BL2 image.
#. Since BL2 executes at EL3, it jumps directly to the next image
instead of invoking the RUN_IMAGE SMC call.
We assume three different types of Boot ROM support on the platform:
#. The Boot ROM always jumps to the same address, for both cold
and warm boot. In this case, we need to keep a resident part
of BL2 whose memory cannot be reclaimed by any other image. The
linker script defines the symbols __TEXT_RESIDENT_START__ and
__TEXT_RESIDENT_END__, which allow the platform to configure
the memory map correctly.
#. The platform has some mechanism to indicate the jump address to the
Boot ROM. Platform code can then program the jump address with
psci_warmboot_entrypoint during cold boot.
#. The platform has some mechanism to program the reset address using
the PROGRAMMABLE_RESET_ADDRESS feature. Platform code can then
program the reset address with psci_warmboot_entrypoint during
cold boot, bypassing the Boot ROM for warm boot.
In the last two cases, no part of BL2 needs to remain resident at
runtime. In the first two cases, we expect the Boot ROM to be able to
differentiate between warm and cold boot, to avoid loading BL2 again
during warm boot.
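For the second and third cases, the platform's cold boot code has to hand
the warm boot entry point over to the Boot ROM or to the reset-address
logic. A minimal sketch of such a hook is shown below; the register name
PLAT_BOOT_ADDR_REG and the function name are illustrative only, not part
of the TF interfaces:

::

    #include <mmio.h>
    #include <psci.h>
    #include <stdint.h>

    /* Hypothetical hook called by the platform during cold boot. */
    void plat_program_warmboot_entry(void)
    {
        /*
         * PLAT_BOOT_ADDR_REG stands for whatever platform-specific register
         * the Boot ROM (or the programmable reset logic) reads to find the
         * address to jump to on the next (warm) boot.
         */
        mmio_write_64(PLAT_BOOT_ADDR_REG,
                      (uint64_t)(uintptr_t)psci_warmboot_entrypoint);
    }
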
This functionality can be tested with FVP by loading the image directly
into memory and changing the address to which the system jumps at reset.
For example::

    -C cluster0.cpu0.RVBAR=0x4014000
    --data cluster0.cpu0=bl2.bin@0x4014000

With this configuration, FVP behaves like a platform of the first type,
where the Boot ROM always jumps to the same address. For simplicity,
BL32 is loaded in DRAM in this case, to avoid other images reclaiming
BL2 memory.
AArch64 BL31
~~~~~~~~~~~~
......
...@@ -1643,6 +1643,70 @@ element in the boot sequence. If there are no more boot sources then it
must return 0, otherwise it must return 1. The default implementation
of this always returns 0.
Boot Loader Stage 2 (BL2) at EL3
--------------------------------
When the platform has a non-TF Boot ROM, it is desirable to jump
directly to BL2 instead of TF BL1. In this case BL2 is expected to
execute at EL3 instead of executing at EL1. Refer to the `Firmware
Design`_ for more information.
All mandatory functions of BL2 must be implemented, except the functions
bl2\_early\_platform\_setup and bl2\_plat\_arch\_setup, because
their work is now done by bl2\_el3\_early\_platform\_setup and
bl2\_el3\_plat\_arch\_setup. These functions should generally implement
the bl1\_plat\_xxx() and bl2\_plat\_xxx() functionality combined.
Function : bl2\_el3\_early\_platform\_setup() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : u_register_t, u_register_t, u_register_t, u_register_t
Return : void
This function executes with the MMU and data caches disabled. It is only called
by the primary CPU. The four arguments can be used by the platform to pass any
needed information from the Boot ROM to BL2.
On ARM standard platforms, this function does the following:
- Initializes a UART (PL011 console), which enables access to the ``printf``
family of functions in BL2.
- Initializes the storage abstraction layer used to load further bootloader
images. It is necessary to do this early on platforms with an SCP\_BL2 image,
since the later ``bl2_platform_setup`` must be done after SCP\_BL2 is loaded.
- Initializes the private variables that define the memory layout used.
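A minimal sketch of a platform implementation is shown below. It only
illustrates the shape of the function; PLAT_BL2_UART_BASE,
PLAT_BL2_UART_CLK_IN_HZ, PLAT_TRUSTED_SRAM_BASE, PLAT_TRUSTED_SRAM_SIZE
and plat_io_setup() are illustrative names, not part of the TF interfaces:

::

    #include <bl_common.h>
    #include <console.h>
    #include <platform_def.h>

    /* Memory layout used by this BL2 image to load further images. */
    static meminfo_t bl2_tzram_layout;

    void bl2_el3_early_platform_setup(u_register_t arg0, u_register_t arg1,
                                      u_register_t arg2, u_register_t arg3)
    {
        /* Bring up a console so that the printf family works in BL2. */
        console_init(PLAT_BL2_UART_BASE, PLAT_BL2_UART_CLK_IN_HZ, 115200);

        /*
         * With BL2_AT_EL3 the memory layout is not handed over by BL1, so
         * it is either derived from arg0..arg3 or fixed by the platform.
         */
        bl2_tzram_layout.total_base = PLAT_TRUSTED_SRAM_BASE;
        bl2_tzram_layout.total_size = PLAT_TRUSTED_SRAM_SIZE;

        /* Initialise the storage abstraction layer used to load images. */
        plat_io_setup();
    }
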
Function : bl2\_el3\_plat\_arch\_setup() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : void
Return : void
This function executes with the MMU and data caches disabled. It is only called
by the primary CPU.
The purpose of this function is to perform any architectural initialization
that varies across platforms.
On ARM standard platforms, this function enables the MMU.
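A corresponding sketch, assuming the platform uses the translation table
library; the PLAT_BL2_RO_* and PLAT_BL2_RW_* macros are illustrative
stand-ins for values the port would derive from its linker script and
memory map:

::

    #include <platform_def.h>
    #include <xlat_tables.h>

    void bl2_el3_plat_arch_setup(void)
    {
        /* Code and read-only data: secure, executable, read-only. */
        mmap_add_region(PLAT_BL2_RO_BASE, PLAT_BL2_RO_BASE,
                        PLAT_BL2_RO_SIZE, MT_CODE | MT_SECURE);

        /* Read-write data, stacks and page tables: non-executable. */
        mmap_add_region(PLAT_BL2_RW_BASE, PLAT_BL2_RW_BASE,
                        PLAT_BL2_RW_SIZE, MT_MEMORY | MT_RW | MT_SECURE);

        init_xlat_tables();

    #ifdef AARCH32
        enable_mmu_secure(0);
    #else
        enable_mmu_el3(0);
    #endif
    }
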
Function : bl2\_el3\_plat\_prepare\_exit() [optional]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : void
Return : void
This function is called prior to exiting BL2 and running the next image.
It should be used to perform platform-specific clean up or bookkeeping
operations before transferring control to the next image. This function
runs with the MMU disabled.
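What goes here is entirely platform-specific; a minimal sketch is shown
below, where plat_stop_boot_watchdog() is a hypothetical helper:

::

    #include <console.h>

    void bl2_el3_plat_prepare_exit(void)
    {
        /*
         * Example bookkeeping: stop a watchdog that the Boot ROM may have
         * started (plat_stop_boot_watchdog() is illustrative only).
         */
        plat_stop_boot_watchdog();

        /* Make sure all diagnostics reach the console before handover. */
        console_flush();
    }
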
FWU Boot Loader Stage 2 (BL2U)
------------------------------
......
...@@ -245,6 +245,9 @@ Common build options
BL2U image. In this case, the BL2U in the ARM Trusted Firmware will not
be built.
- ``BL2_AT_EL3``: This is an optional build option that enables the use of
BL2 at the EL3 exception level.
- ``BL31``: This is an optional build option which specifies the path to
BL31 image for the ``fip`` target. In this case, the BL31 in the ARM
Trusted Firmware will not be built.
......
...@@ -29,7 +29,4 @@ MBEDTLS_COMMON_SOURCES := drivers/auth/mbedtls/mbedtls_common.c \
platform.c \
)
BL1_SOURCES += ${MBEDTLS_COMMON_SOURCES}
BL2_SOURCES += ${MBEDTLS_COMMON_SOURCES}
endif
...@@ -89,6 +89,3 @@ endif
# Needs to be set to drive mbed TLS configuration correctly
$(eval $(call add_define,TF_MBEDTLS_KEY_ALG_ID))
$(eval $(call add_define,TF_MBEDTLS_HASH_ALG_ID))
BL1_SOURCES += ${MBEDTLS_CRYPTO_SOURCES}
BL2_SOURCES += ${MBEDTLS_CRYPTO_SOURCES}
...@@ -11,6 +11,3 @@ MBEDTLS_X509_SOURCES := drivers/auth/mbedtls/mbedtls_x509_parser.c \
x509.c \
x509_crt.c \
)
BL1_SOURCES += ${MBEDTLS_X509_SOURCES}
BL2_SOURCES += ${MBEDTLS_X509_SOURCES}
...@@ -260,9 +260,9 @@
* ---------------------------------------------------------------------
*/
.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
/* -----------------------------------------------------------------
* Invalidate the RW memory used by the image. This
* includes the data and NOBITS sections. This is done to
* safeguard against possible corruption of this memory by
* dirty cache lines in a system cache as a result of use by
...@@ -273,7 +273,7 @@
ldr r1, =__RW_END__
sub r1, r1, r0
bl inv_dcache_range
#endif
ldr r0, =__BSS_START__
ldr r1, =__BSS_SIZE__
......
...@@ -269,7 +269,7 @@
* ---------------------------------------------------------------------
*/
.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
/* -------------------------------------------------------------
* Invalidate the RW memory used by the BL31 image. This
* includes the data and NOBITS sections. This is done to
...@@ -282,7 +282,7 @@
adr x1, __RW_END__
sub x1, x1, x0
bl inv_dcache_range
#endif
ldr x0, =__BSS_START__
ldr x1, =__BSS_SIZE__
......
...@@ -29,7 +29,7 @@
* debugging experience.
*/
.cfi_sections .debug_frame
.section .text.asm.\_name, "ax"
.type \_name, %function
.func \_name
/*
......
...@@ -9,6 +9,10 @@
#include <arch.h>
#include <errata_report.h>
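/*
 * IMAGE_AT_EL3 marks the images that include the CPU reset path and hence
 * need the reset function in their cpu_ops: BL1, BL32 (SP_MIN) and, when
 * BL2_AT_EL3 is enabled, BL2.
 */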
#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
#define IMAGE_AT_EL3
#endif
#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
(MIDR_PN_MASK << MIDR_PN_SHIFT)
...@@ -38,7 +42,7 @@
CPU_MIDR: /* cpu_ops midr */
.space 4
/* Reset fn is needed during reset */
#if defined(IMAGE_AT_EL3)
CPU_RESET_FUNC: /* cpu_ops reset_func */
.space 4
#endif
...@@ -54,7 +58,7 @@ CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
#if REPORT_ERRATA
CPU_ERRATA_FUNC: /* CPU errata status printing function */
.space 4
#if defined(IMAGE_BL32)
CPU_ERRATA_LOCK:
.space 4
CPU_ERRATA_PRINTED:
...@@ -120,7 +124,7 @@ CPU_OPS_SIZE = .
.align 2
.type cpu_ops_\_name, %object
.word \_midr
#if defined(IMAGE_AT_EL3)
.word \_resetfunc
#endif
#ifdef IMAGE_BL32
......