Commit db0de0eb authored by Andrew Thoelke

Merge pull request #99 from vikramkanigiri:vk/tf-issues-133_V3

parents 3ea8540d dbad1bac
@@ -47,6 +47,8 @@ SPD := none
BASE_COMMIT := origin/master
# NS timer register save and restore
NS_TIMER_SWITCH := 0
# By default, BL1 acts as the reset handler, not BL31
RESET_TO_BL31 := 0
# Checkpatch ignores
@@ -178,6 +180,10 @@ endif
$(eval $(call assert_boolean,NS_TIMER_SWITCH))
$(eval $(call add_define,NS_TIMER_SWITCH))
# Process RESET_TO_BL31 flag
$(eval $(call assert_boolean,RESET_TO_BL31))
$(eval $(call add_define,RESET_TO_BL31))
ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
-mgeneral-regs-only -D__ASSEMBLY__ \
${DEFINES} ${INCLUDES}
......
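A note on how the new flag is consumed: assert_boolean rejects anything other than a 0/1 value and add_define turns RESET_TO_BL31 into a preprocessor symbol carried through ${DEFINES}, so sources can guard reset-path code on it. A minimal illustrative sketch in C (the real guards added by this patch live in bl31_entrypoint.S):

#if RESET_TO_BL31
	/* BL3-1 owns the reset vector and must detect cold vs. warm boot itself */
#else
	/* BL1 is the reset handler; BL3-1 gets its bl31_params pointer in x0 */
#endif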
@@ -59,6 +59,15 @@ func bl1_entrypoint
*/
bl cpu_reset_handler
/* -------------------------------
* Enable the instruction cache.
* -------------------------------
*/
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_I_BIT
msr sctlr_el3, x0
isb
/* ---------------------------------------------
* Set the exception vector to something sane.
* ---------------------------------------------
@@ -89,16 +98,6 @@ func bl1_entrypoint
bic w0, w0, #TFP_BIT
msr cptr_el3, x0
/* ---------------------------------------------
* Enable the instruction cache.
* ---------------------------------------------
*/
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_I_BIT
msr sctlr_el3, x0
isb
_wait_for_entrypoint:
/* ---------------------------------------------
* Find the type of reset and jump to handler
* if present. If the handler is null then it is
@@ -107,22 +106,10 @@ _wait_for_entrypoint:
* their turn to be woken up
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_get_entrypoint
cbnz x0, _do_warm_boot
mrs x0, mpidr_el1
bl platform_is_primary_cpu
cbnz x0, _do_cold_boot
/* ---------------------------------------------
* Perform any platform specific secondary cpu
* actions
* ---------------------------------------------
*/
bl plat_secondary_cold_boot_setup
b _wait_for_entrypoint
_do_cold_boot:
wait_for_entrypoint
bl platform_mem_init
/* ---------------------------------------------
* Init C runtime environment.
* - Zero-initialise the NOBITS sections.
@@ -148,19 +135,38 @@ _do_cold_boot:
bl memcpy16
/* ---------------------------------------------
* Initialize platform and jump to our c-entry
* point for this type of reset
* ---------------------------------------------
*/
adr x0, bl1_main
bl platform_cold_boot_init
b _panic
_do_warm_boot:
/* ---------------------------------------------
* Jump to BL31 for all warm boot init.
* ---------------------------------------------
*/
blr x0
_panic:
b _panic
/* ---------------------------------------------
* Give ourselves a small coherent stack to
* ease the pain of initializing the MMU and
* CCI in assembler
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_coherent_stack
/* ---------------------------------------------
* Architectural init. can be generic e.g.
* enabling stack alignment and platform spec-
* ific e.g. MMU & page table setup as per the
* platform memory map. Perform the latter here
* and the former in bl1_main.
* ---------------------------------------------
*/
bl bl1_early_platform_setup
bl bl1_plat_arch_setup
/* ---------------------------------------------
* Give ourselves a stack allocated in Normal
* -IS-WBWA memory
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
/* --------------------------------------------------
* Initialize platform and jump to our c-entry point
* for this type of reset. Panic if it returns
* --------------------------------------------------
*/
bl bl1_main
panic:
b panic
@@ -112,13 +112,51 @@ SErrorSPx:
*/
.align 7
SynchronousExceptionA64:
/* ------------------------------------------------
* Only a single SMC exception from BL2 to ask
* BL1 to pass EL3 control to BL31 is expected
* here.
* It expects X0 with RUN_IMAGE SMC function id
* X1 with address of an entry_point_info_t structure
* describing the BL3-1 entrypoint
* ------------------------------------------------
*/
b process_exception
mov x19, x0
mov x20, x1
mrs x0, esr_el3
ubfx x1, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x1, #EC_AARCH64_SMC
b.ne panic
mov x0, #RUN_IMAGE
cmp x19, x0
b.ne panic
mov x0, x20
bl display_boot_progress
ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET]
msr elr_el3, x0
msr spsr_el3, x1
ubfx x0, x1, #MODE_EL_SHIFT, #2
cmp x0, #MODE_EL3
b.ne panic
bl disable_mmu_icache_el3
tlbi alle3
ldp x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)]
ldp x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)]
ldp x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)]
ldp x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)]
eret
panic:
mov x0, #SYNC_EXCEPTION_AARCH64
bl plat_report_exception
wfi
b panic
check_vector_size SynchronousExceptionA64
.align 7
@@ -173,56 +211,3 @@ SErrorA32:
bl plat_report_exception
b SErrorA32
check_vector_size SErrorA32
.align 7
func process_exception
sub sp, sp, #0x40
stp x0, x1, [sp, #0x0]
stp x2, x3, [sp, #0x10]
stp x4, x5, [sp, #0x20]
stp x6, x7, [sp, #0x30]
mov x19, x0
mov x20, x1
mov x21, x2
mov x0, #SYNC_EXCEPTION_AARCH64
bl plat_report_exception
mrs x0, esr_el3
ubfx x1, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x1, #EC_AARCH64_SMC
b.ne panic
mov x1, #RUN_IMAGE
cmp x19, x1
b.ne panic
mov x0, x20
mov x1, x21
mov x2, x3
mov x3, x4
bl display_boot_progress
msr elr_el3, x20
msr spsr_el3, x21
ubfx x0, x21, #MODE_EL_SHIFT, #2
cmp x0, #MODE_EL3
b.ne skip_mmu_teardown
/* ---------------------------------------------
* If BL31 is to be executed in EL3 as well
* then turn off the MMU so that it can perform
* its own setup.
* ---------------------------------------------
*/
bl disable_mmu_icache_el3
tlbi alle3
skip_mmu_teardown:
ldp x6, x7, [sp, #0x30]
ldp x4, x5, [sp, #0x20]
ldp x2, x3, [sp, #0x10]
ldp x0, x1, [sp, #0x0]
add sp, sp, #0x40
eret
panic:
wfi
b panic
@@ -33,10 +33,39 @@
#include <assert.h>
#include <bl_common.h>
#include <bl1.h>
#include <debug.h>
#include <platform.h>
#include <stdio.h>
#include "bl1_private.h"
/*******************************************************************************
* Runs BL2 from the given entry point. It results in dropping the
* exception level
******************************************************************************/
static void __dead2 bl1_run_bl2(entry_point_info_t *bl2_ep)
{
bl1_arch_next_el_setup();
/* Tell next EL what we want done */
bl2_ep->args.arg0 = RUN_IMAGE;
if (GET_SECURITY_STATE(bl2_ep->h.attr) == NON_SECURE)
change_security_state(GET_SECURITY_STATE(bl2_ep->h.attr));
write_spsr_el3(bl2_ep->spsr);
write_elr_el3(bl2_ep->pc);
eret(bl2_ep->args.arg0,
bl2_ep->args.arg1,
bl2_ep->args.arg2,
bl2_ep->args.arg3,
bl2_ep->args.arg4,
bl2_ep->args.arg5,
bl2_ep->args.arg6,
bl2_ep->args.arg7);
}
/*******************************************************************************
* Function to perform late architectural and platform specific initialization.
* It also locates and loads the BL2 raw binary image in the trusted DRAM. Only
@@ -49,10 +78,12 @@ void bl1_main(void)
#if DEBUG
unsigned long sctlr_el3 = read_sctlr_el3();
#endif
unsigned long bl2_base;
unsigned int load_type = TOP_LOAD, spsr;
unsigned int load_type = TOP_LOAD;
image_info_t bl2_image_info = { {0} };
entry_point_info_t bl2_ep = { {0} };
meminfo_t *bl1_tzram_layout;
meminfo_t *bl2_tzram_layout = 0x0;
int err;
/*
* Ensure that MMU/Caches and coherency are turned on
@@ -71,15 +102,28 @@ void bl1_main(void)
printf(FIRMWARE_WELCOME_STR);
printf("%s\n\r", build_message);
SET_PARAM_HEAD(&bl2_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
SET_PARAM_HEAD(&bl2_ep, PARAM_EP, VERSION_1, 0);
/*
* Find out how much free trusted ram remains after BL1 load
* & load the BL2 image at its top
*/
bl1_tzram_layout = bl1_plat_sec_mem_layout();
bl2_base = load_image(bl1_tzram_layout,
(const char *) BL2_IMAGE_NAME,
load_type, BL2_BASE);
err = load_image(bl1_tzram_layout,
(const char *) BL2_IMAGE_NAME,
load_type,
BL2_BASE,
&bl2_image_info,
&bl2_ep);
if (err) {
/*
* TODO: print failure to load BL2 but also add a tzwdog timer
* which will reset the system eventually.
*/
printf("Failed to load boot loader stage 2 (BL2) firmware.\n");
panic();
}
/*
* Create a new layout of memory for BL2 as seen by BL1 i.e.
* tell it the amount of total and free memory available.
@@ -91,30 +135,20 @@ void bl1_main(void)
init_bl2_mem_layout(bl1_tzram_layout,
bl2_tzram_layout,
load_type,
bl2_base);
if (bl2_base) {
bl1_arch_next_el_setup();
spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
printf("Booting trusted firmware boot loader stage 2\n\r");
#if DEBUG
printf("BL2 address = 0x%llx \n\r", (unsigned long long) bl2_base);
printf("BL2 cpsr = 0x%x \n\r", spsr);
printf("BL2 memory layout address = 0x%llx \n\r",
(unsigned long long) bl2_tzram_layout);
#endif
run_image(bl2_base,
spsr,
SECURE,
(void *) bl2_tzram_layout,
NULL);
}
/*
* TODO: print failure to load BL2 but also add a tzwdog timer
* which will reset the system eventually.
*/
printf("Failed to load boot loader stage 2 (BL2) firmware.\n\r");
bl2_image_info.image_base);
bl1_plat_set_bl2_ep_info(&bl2_image_info, &bl2_ep);
bl2_ep.args.arg1 = (unsigned long)bl2_tzram_layout;
printf("Booting trusted firmware boot loader stage 2\n");
#if DEBUG
printf("BL2 address = 0x%llx\n",
(unsigned long long) bl2_ep.pc);
printf("BL2 cpsr = 0x%x\n", bl2_ep.spsr);
printf("BL2 memory layout address = 0x%llx\n",
(unsigned long long) bl2_tzram_layout);
#endif
bl1_run_bl2(&bl2_ep);
return;
}
@@ -122,17 +156,16 @@ void bl1_main(void)
* Temporary function to print the fact that BL2 has done its job and BL31 is
* about to be loaded. This is needed as long as printfs cannot be used
******************************************************************************/
void display_boot_progress(unsigned long entrypoint,
unsigned long spsr,
unsigned long mem_layout,
unsigned long ns_image_info)
{
printf("Booting trusted firmware boot loader stage 3\n\r");
#if DEBUG
printf("BL31 address = 0x%llx \n\r", (unsigned long long) entrypoint);
printf("BL31 cpsr = 0x%llx \n\r", (unsigned long long)spsr);
printf("BL31 memory layout address = 0x%llx \n\r", (unsigned long long)mem_layout);
printf("BL31 non-trusted image info address = 0x%llx\n\r", (unsigned long long)ns_image_info);
#endif
return;
}
void display_boot_progress(entry_point_info_t *bl31_ep_info)
{
printf("Booting trusted firmware boot loader stage 3\n\r");
#if DEBUG
printf("BL31 address = 0x%llx\n", (unsigned long long)bl31_ep_info->pc);
printf("BL31 cpsr = 0x%llx\n", (unsigned long long)bl31_ep_info->spsr);
printf("BL31 params address = 0x%llx\n",
(unsigned long long)bl31_ep_info->args.arg0);
printf("BL31 plat params address = 0x%llx\n",
(unsigned long long)bl31_ep_info->args.arg1);
#endif
return;
}
@@ -46,7 +46,6 @@ func bl2_entrypoint
*/
mov x20, x0
mov x21, x1
mov x22, x2
/* ---------------------------------------------
* This is BL2 which is expected to be executed
@@ -110,7 +109,6 @@ func bl2_entrypoint
* ---------------------------------------------
*/
mov x0, x21
mov x1, x22
bl bl2_early_platform_setup
bl bl2_plat_arch_setup
......
@@ -38,6 +38,25 @@
#include <stdio.h>
#include "bl2_private.h"
/*******************************************************************************
* Runs BL31 from the given entry point. It jumps to a higher exception level
* through an SMC.
******************************************************************************/
static void __dead2 bl2_run_bl31(entry_point_info_t *bl31_ep_info,
unsigned long arg1,
unsigned long arg2)
{
/* Set the args pointer */
bl31_ep_info->args.arg0 = arg1;
bl31_ep_info->args.arg1 = arg2;
/* Flush the params to be passed to memory */
bl2_plat_flush_bl31_params();
smc(RUN_IMAGE, (unsigned long)bl31_ep_info, 0, 0, 0, 0, 0, 0);
}
/*******************************************************************************
* The only thing to do in BL2 is to load further images and pass control to
* BL31. The memory occupied by BL2 will be reclaimed by BL3_x stages. BL2 runs
@@ -47,9 +66,12 @@
void bl2_main(void)
{
meminfo_t *bl2_tzram_layout;
bl31_args_t *bl2_to_bl31_args;
unsigned long bl31_base, bl32_base = 0, bl33_base, el_status;
unsigned int bl2_load, bl31_load, mode;
bl31_params_t *bl2_to_bl31_params;
unsigned int bl2_load, bl31_load;
entry_point_info_t *bl31_ep_info;
meminfo_t bl32_mem_info;
meminfo_t bl33_mem_info;
int e;
/* Perform remaining generic architectural setup in S-El1 */
bl2_arch_setup();
@@ -62,6 +84,13 @@ void bl2_main(void)
/* Find out how much free trusted ram remains after BL2 load */
bl2_tzram_layout = bl2_plat_sec_mem_layout();
/*
* Get a pointer to the memory the platform has set aside to pass
* information to BL31.
*/
bl2_to_bl31_params = bl2_plat_get_bl31_params();
bl31_ep_info = bl2_plat_get_bl31_ep_info();
/*
* Load BL31. BL1 tells BL2 whether it has been TOP or BOTTOM loaded.
* To avoid fragmentation of trusted SRAM memory, BL31 is always
@@ -71,102 +100,71 @@ void bl2_main(void)
bl2_load = bl2_tzram_layout->attr & LOAD_MASK;
assert((bl2_load == TOP_LOAD) || (bl2_load == BOT_LOAD));
bl31_load = (bl2_load == TOP_LOAD) ? BOT_LOAD : TOP_LOAD;
bl31_base = load_image(bl2_tzram_layout,
bl31_load, BL31_BASE);
e = load_image(bl2_tzram_layout,
BL31_IMAGE_NAME,
bl31_load,
BL31_BASE,
bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
/* Assert if it has not been possible to load BL31 */
if (bl31_base == 0) {
if (e) {
ERROR("Failed to load BL3-1.\n");
panic();
}
/*
* Get a pointer to the memory the platform has set aside to pass
* information to BL31.
*/
bl2_to_bl31_args = bl2_get_bl31_args_ptr();
/*
* Load the BL32 image if there's one. It is upto to platform
* to specify where BL32 should be loaded if it exists. It
* could create space in the secure sram or point to a
* completely different memory. A zero size indicates that the
* platform does not want to load a BL32 image.
*/
if (bl2_to_bl31_args->bl32_meminfo.total_size)
bl32_base = load_image(&bl2_to_bl31_args->bl32_meminfo,
BL32_IMAGE_NAME,
bl2_to_bl31_args->bl32_meminfo.attr &
LOAD_MASK,
BL32_BASE);
/*
* Create a new layout of memory for BL31 as seen by BL2. This
* will gobble up all the BL2 memory.
*/
init_bl31_mem_layout(bl2_tzram_layout,
&bl2_to_bl31_args->bl31_meminfo,
bl31_load);
/* Load the BL33 image in non-secure memory provided by the platform */
bl33_base = load_image(&bl2_to_bl31_args->bl33_meminfo,
BL33_IMAGE_NAME,
BOT_LOAD,
plat_get_ns_image_entrypoint());
/* Halt if failed to load normal world firmware. */
if (bl33_base == 0) {
ERROR("Failed to load BL3-3.\n");
panic();
}
/*
* BL2 also needs to tell BL31 where the non-trusted software image
* is located.
*/
bl2_to_bl31_args->bl33_image_info.entrypoint = bl33_base;
/* Figure out what mode we enter the non-secure world in */
el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
el_status &= ID_AA64PFR0_ELX_MASK;
if (el_status)
mode = MODE_EL2;
else
mode = MODE_EL1;
/*
* TODO: Consider the possibility of specifying the SPSR in
* the FIP ToC and allowing the platform to have a say as
* well.
*/
bl2_to_bl31_args->bl33_image_info.spsr =
make_spsr(mode, MODE_SP_ELX, MODE_RW_64);
bl2_to_bl31_args->bl33_image_info.security_state = NON_SECURE;
if (bl32_base) {
/* Fill BL32 image info */
bl2_to_bl31_args->bl32_image_info.entrypoint = bl32_base;
bl2_to_bl31_args->bl32_image_info.security_state = SECURE;
/*
* The Secure Payload Dispatcher service is responsible for
* setting the SPSR prior to entry into the BL32 image.
*/
bl2_to_bl31_args->bl32_image_info.spsr = 0;
}
/* Flush the entire BL31 args buffer */
flush_dcache_range((unsigned long) bl2_to_bl31_args,
sizeof(*bl2_to_bl31_args));
/*
* Run BL31 via an SMC to BL1. Information on how to pass control to
* the BL32 (if present) and BL33 software images will be passed to
* BL31 as an argument.
*/
run_image(bl31_base,
make_spsr(MODE_EL3, MODE_SP_ELX, MODE_RW_64),
SECURE,
(void *) bl2_tzram_layout,
NULL);
}
bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
bl2_plat_get_bl33_meminfo(&bl33_mem_info);
/* Load the BL33 image in non-secure memory provided by the platform */
e = load_image(&bl33_mem_info,
BL33_IMAGE_NAME,
BOT_LOAD,
plat_get_ns_image_entrypoint(),
bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
/* Halt if failed to load normal world firmware. */
if (e) {
ERROR("Failed to load BL3-3.\n");
panic();
}
bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
/*
* Load the BL32 image if there's one. It is up to the platform
* to specify where BL32 should be loaded if it exists. It
* could create space in the secure sram or point to a
* completely different memory. A zero size indicates that the
* platform does not want to load a BL32 image.
*/
bl2_plat_get_bl32_meminfo(&bl32_mem_info);
if (bl32_mem_info.total_size) {
e = load_image(&bl32_mem_info,
BL32_IMAGE_NAME,
bl32_mem_info.attr &
LOAD_MASK,
BL32_BASE,
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
/* Warn if it has not been possible to load BL3-2 */
if (e) {
WARN("Failed to load BL3-2.\n");
} else {
bl2_plat_set_bl32_ep_info(
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
}
}
/*
* Run BL31 via an SMC to BL1. Information on how to pass control to
* the BL32 (if present) and BL33 software images will be passed to
* BL31 as an argument.
*/
bl2_run_bl31(bl31_ep_info, (unsigned long)bl2_to_bl31_params, 0);
}
@@ -44,13 +44,34 @@
*/
func bl31_entrypoint
/* ---------------------------------------------------------------
* Preceding bootloader has populated x0 with a pointer to a
* 'bl31_params' structure & x1 with a pointer to platform
* specific structure
* ---------------------------------------------------------------
*/
#if !RESET_TO_BL31
mov x20, x0
mov x21, x1
#else
/* -----------------------------------------------------
* Perform any processor specific actions upon reset
* e.g. cache, tlb invalidations etc. Override the
* Boot ROM(BL0) programming sequence
* -----------------------------------------------------
*/
bl cpu_reset_handler
#endif
/* ---------------------------------------------
* BL2 has populated x0 with the opcode
* indicating BL31 should be run, x3 with
* a pointer to a 'bl31_args' structure & x4
* with any other optional information
* ---------------------------------------------
*/
/* ---------------------------------------------
* Enable the instruction cache.
* ---------------------------------------------
*/
mrs x1, sctlr_el3
orr x1, x1, #SCTLR_I_BIT
msr sctlr_el3, x1
isb
/* ---------------------------------------------
* Set the exception vector to something sane.
@@ -82,25 +103,10 @@ func bl31_entrypoint
bic w1, w1, #TFP_BIT
msr cptr_el3, x1
/* ---------------------------------------------
* Enable the instruction cache.
* ---------------------------------------------
*/
mrs x1, sctlr_el3
orr x1, x1, #SCTLR_I_BIT
msr sctlr_el3, x1
isb
/* ---------------------------------------------
* Check the opcodes out of paranoia.
* ---------------------------------------------
*/
mov x19, #RUN_IMAGE
cmp x0, x19
b.ne _panic
mov x20, x3
mov x21, x4
#if RESET_TO_BL31
wait_for_entrypoint
bl platform_mem_init
#else
/* ---------------------------------------------
* This is BL31 which is expected to be executed
* only by the primary cpu (at least for now).
@@ -110,6 +116,7 @@ func bl31_entrypoint
mrs x0, mpidr_el1
bl platform_is_primary_cpu
cbz x0, _panic
#endif
/* ---------------------------------------------
* Zero out NOBITS sections. There are 2 of them:
@@ -143,8 +150,14 @@ func bl31_entrypoint
* Perform platform specific early arch. setup
* ---------------------------------------------
*/
#if RESET_TO_BL31
mov x0, 0
mov x1, 0
#else
mov x0, x20
mov x1, x21
#endif
bl bl31_early_platform_setup
bl bl31_plat_arch_setup
......
@@ -37,6 +37,7 @@ BL31_SOURCES += bl31/bl31_main.c \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
common/aarch64/early_exceptions.S \
lib/aarch64/cpu_helpers.S \
lib/locks/bakery/bakery_lock.c \
lib/locks/exclusive/spinlock.S \
services/std_svc/std_svc_setup.c \
......
@@ -43,7 +43,7 @@
* for SP execution. In cases where both SPD and SP are absent, or when SPD
* finds it impossible to execute SP, this pointer is left as NULL
******************************************************************************/
static int32_t (*bl32_init)(meminfo_t *);
static int32_t (*bl32_init)(void);
/*******************************************************************************
* Variable to indicate whether next image to execute after BL31 is BL33
@@ -114,11 +114,10 @@ void bl31_main(void)
*/
/*
* If SPD had registerd an init hook, invoke it. Pass it the information
* about memory extents
*/
if (bl32_init)
(*bl32_init)(bl31_plat_get_bl32_mem_layout());
/*
* If SPD had registered an init hook, invoke it.
*/
if (bl32_init)
(*bl32_init)();
/*
* We are ready to enter the next EL. Prepare entry into the image
@@ -152,7 +151,7 @@ uint32_t bl31_get_next_image_type(void)
******************************************************************************/
void bl31_prepare_next_image_entry()
{
el_change_info_t *next_image_info;
entry_point_info_t *next_image_info;
uint32_t scr, image_type;
/* Determine which image to execute next */
@@ -182,20 +181,20 @@ void bl31_prepare_next_image_entry()
* Tell the context mgmt. library to ensure that SP_EL3 points to
* the right context to exit from EL3 correctly.
*/
cm_set_el3_eret_context(next_image_info->security_state,
next_image_info->entrypoint,
next_image_info->spsr,
scr);
cm_set_el3_eret_context(GET_SECURITY_STATE(next_image_info->h.attr),
next_image_info->pc,
next_image_info->spsr,
scr);
/* Finally set the next context */
cm_set_next_eret_context(next_image_info->security_state);
cm_set_next_eret_context(GET_SECURITY_STATE(next_image_info->h.attr));
}
/*******************************************************************************
* This function initializes the pointer to BL32 init function. This is expected
* to be called by the SPD after it finishes all its initialization
******************************************************************************/
void bl31_register_bl32_init(int32_t (*func)(meminfo_t *))
void bl31_register_bl32_init(int32_t (*func)(void))
{
bl32_init = func;
}
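With the meminfo_t argument gone, an SPD now registers a self-contained initialisation hook. A hedged sketch of the SPD side (the names spd_setup and spd_bl32_init are illustrative, not taken from this patch):

static int32_t spd_bl32_init(void)
{
	/* Kick off BL3-2 initialisation; the return value is SPD-defined */
	return 1;
}

int32_t spd_setup(void)
{
	/* ... SPD specific initialisation ... */
	bl31_register_bl32_init(&spd_bl32_init);
	return 0;
}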
@@ -55,16 +55,6 @@
func tsp_entrypoint
/*---------------------------------------------
* Store the extents of the tzram available to
* BL32 for future use.
* TODO: We are assuming that x9-x10 will not be
* corrupted by any function before platform
* setup.
* ---------------------------------------------
*/
mov x9, x0
mov x10, x1
/* ---------------------------------------------
* The entrypoint is expected to be executed
@@ -119,8 +109,6 @@ func tsp_entrypoint
* specific early arch. setup e.g. mmu setup
* ---------------------------------------------
*/
mov x0, x9
mov x1, x10
bl bl32_early_platform_setup
bl bl32_plat_arch_setup
......
@@ -37,6 +37,13 @@
#include <stdio.h>
#include <tsp.h>
/*******************************************************************************
* Declarations of linker defined symbols which will help us find the layout
* of trusted SRAM
******************************************************************************/
extern unsigned long __RO_START__;
extern unsigned long __COHERENT_RAM_END__;
/*******************************************************************************
* Lock to control access to the console
******************************************************************************/
@@ -66,6 +73,15 @@ static const entry_info_t tsp_entry_info = {
tsp_cpu_suspend_entry,
};
/*******************************************************************************
* The BL32 memory footprint starts with an RO section and ends
* with a section for coherent RAM. Use it to find the memory size
******************************************************************************/
#define BL32_TOTAL_BASE (unsigned long)(&__RO_START__)
#define BL32_TOTAL_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
static tsp_args_t *set_smc_args(uint64_t arg0,
uint64_t arg1,
uint64_t arg2,
@@ -107,10 +123,6 @@ uint64_t tsp_main(void)
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
#if DEBUG
meminfo_t *mem_layout = bl32_plat_sec_mem_layout();
#endif
/* Initialize the platform */
bl32_platform_setup();
@@ -123,10 +135,9 @@ uint64_t tsp_main(void)
spin_lock(&console_lock);
printf("TSP %s\n\r", build_message);
INFO("Total memory base : 0x%x\n", mem_layout->total_base);
INFO("Total memory size : 0x%x bytes\n", mem_layout->total_size);
INFO("Free memory base : 0x%x\n", mem_layout->free_base);
INFO("Free memory size : 0x%x bytes\n", mem_layout->free_size);
INFO("Total memory base : 0x%x\n", (unsigned long)BL32_TOTAL_BASE);
INFO("Total memory size : 0x%x bytes\n",
(unsigned long)(BL32_TOTAL_LIMIT - BL32_TOTAL_BASE));
INFO("cpu 0x%x: %d smcs, %d erets %d cpu on requests\n", mpidr,
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
......
@@ -35,6 +35,7 @@
#include <debug.h>
#include <io_storage.h>
#include <platform.h>
#include <errno.h>
#include <stdio.h>
unsigned long page_align(unsigned long value, unsigned dir)
@@ -71,134 +72,12 @@ void change_security_state(unsigned int target_security_state)
write_scr(scr);
}
void __dead2 drop_el(aapcs64_params_t *args,
unsigned long spsr,
unsigned long entrypoint)
{
write_spsr_el3(spsr);
write_elr_el3(entrypoint);
eret(args->arg0,
args->arg1,
args->arg2,
args->arg3,
args->arg4,
args->arg5,
args->arg6,
args->arg7);
}
void __dead2 raise_el(aapcs64_params_t *args)
{
smc(args->arg0,
args->arg1,
args->arg2,
args->arg3,
args->arg4,
args->arg5,
args->arg6,
args->arg7);
}
/*
* TODO: If we are not EL3 then currently we only issue an SMC.
* Add support for dropping into EL0 etc. Consider adding support
* for switching from S-EL1 to S-EL0/1 etc.
*/
void __dead2 change_el(el_change_info_t *info)
{
if (IS_IN_EL3()) {
/*
* We can go anywhere from EL3. So find where.
* TODO: Lots to do if we are going non-secure.
* Flip the NS bit. Restore NS registers etc.
* Just doing the bare minimal for now.
*/
if (info->security_state == NON_SECURE)
change_security_state(info->security_state);
drop_el(&info->args, info->spsr, info->entrypoint);
} else
raise_el(&info->args);
}
/* TODO: add a parameter for DAIF. not needed right now */
unsigned long make_spsr(unsigned long target_el,
unsigned long target_sp,
unsigned long target_rw)
{
unsigned long spsr;
/* Disable all exceptions & setup the EL */
spsr = (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
<< PSR_DAIF_SHIFT;
spsr |= PSR_MODE(target_rw, target_el, target_sp);
return spsr;
}
/*******************************************************************************
* The next two functions are the weak definitions. Platform specific
* code can override them if it wishes to.
******************************************************************************/
/*******************************************************************************
* The next function is a weak definition. Platform specific
* code can override it if it wishes to.
******************************************************************************/
/*******************************************************************************
* Function that takes a memory layout into which BL31 has been either top or
* bottom loaded. Using this information, it populates bl31_mem_layout to tell
* BL31 how much memory it has access to and how much is available for use. It
* does not need the address where BL31 has been loaded as BL31 will reclaim
* all the memory used by BL2.
* TODO: Revisit if this and init_bl2_mem_layout can be replaced by a single
* routine.
******************************************************************************/
void init_bl31_mem_layout(const meminfo_t *bl2_mem_layout,
meminfo_t *bl31_mem_layout,
unsigned int load_type)
{
if (load_type == BOT_LOAD) {
/*
* ------------ ^
* | BL2 | |
* |----------| ^ | BL2
* | | | BL2 free | total
* | | | size | size
* |----------| BL2 free base v |
* | BL31 | |
* ------------ BL2 total base v
*/
unsigned long bl31_size;
bl31_mem_layout->free_base = bl2_mem_layout->free_base;
bl31_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
bl31_mem_layout->free_size = bl2_mem_layout->total_size - bl31_size;
} else {
/*
* ------------ ^
* | BL31 | |
* |----------| ^ | BL2
* | | | BL2 free | total
* | | | size | size
* |----------| BL2 free base v |
* | BL2 | |
* ------------ BL2 total base v
*/
unsigned long bl2_size;
bl31_mem_layout->free_base = bl2_mem_layout->total_base;
bl2_size = bl2_mem_layout->free_base - bl2_mem_layout->total_base;
bl31_mem_layout->free_size = bl2_mem_layout->free_size + bl2_size;
}
bl31_mem_layout->total_base = bl2_mem_layout->total_base;
bl31_mem_layout->total_size = bl2_mem_layout->total_size;
bl31_mem_layout->attr = load_type;
flush_dcache_range((unsigned long) bl31_mem_layout, sizeof(meminfo_t));
return;
}
/*******************************************************************************
* Function that takes a memory layout into which BL2 has been either top or
* bottom loaded along with the address where BL2 has been loaded in it. Using
@@ -294,12 +173,15 @@ unsigned long image_size(const char *image_name)
* Generic function to load an image into the trusted RAM,
* given a name, extents of free memory & whether the image should be loaded at
* the bottom or top of the free memory. It updates the memory layout if the
* load is successful. It also updates the image information and the entry point
* information in the params passed
******************************************************************************/
unsigned long load_image(meminfo_t *mem_layout,
const char *image_name,
unsigned int load_type,
unsigned long fixed_addr)
int load_image(meminfo_t *mem_layout,
const char *image_name,
unsigned int load_type,
unsigned long fixed_addr,
image_info_t *image_data,
entry_point_info_t *entry_point_info)
{
uintptr_t dev_handle;
uintptr_t image_handle;
@@ -313,13 +195,14 @@ unsigned long load_image(meminfo_t *mem_layout,
assert(mem_layout != NULL);
assert(image_name != NULL);
assert(image_data->h.version >= VERSION_1);
/* Obtain a reference to the image by querying the platform layer */
io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
if (io_result != IO_SUCCESS) {
WARN("Failed to obtain reference to image '%s' (%i)\n",
image_name, io_result);
return 0;
return io_result;
}
/* Attempt to access the image */
@@ -327,7 +210,7 @@ unsigned long load_image(meminfo_t *mem_layout,
if (io_result != IO_SUCCESS) {
WARN("Failed to access image '%s' (%i)\n",
image_name, io_result);
return 0;
return io_result;
}
/* Find the size of the image */
@@ -335,7 +218,7 @@ unsigned long load_image(meminfo_t *mem_layout,
if ((io_result != IO_SUCCESS) || (image_size == 0)) {
WARN("Failed to determine the size of the image '%s' file (%i)\n",
image_name, io_result);
goto fail;
goto exit;
}
/* See if we have enough space */
@@ -343,7 +226,7 @@ unsigned long load_image(meminfo_t *mem_layout,
WARN("Cannot load '%s' file: Not enough space.\n",
image_name);
dump_load_info(0, image_size, mem_layout);
goto fail;
goto exit;
}
switch (load_type) {
@@ -362,7 +245,8 @@ unsigned long load_image(meminfo_t *mem_layout,
WARN("Cannot load '%s' file: Not enough space.\n",
image_name);
dump_load_info(image_base, image_size, mem_layout);
goto fail;
io_result = -ENOMEM;
goto exit;
}
/* Calculate the amount of extra memory used due to alignment */
@@ -380,10 +264,11 @@ unsigned long load_image(meminfo_t *mem_layout,
/* Page align base address and check whether the image still fits */
if (image_base + image_size >
mem_layout->free_base + mem_layout->free_size) {
WARN("Cannot load '%s' file: Not enough space.\n",
image_name);
dump_load_info(image_base, image_size, mem_layout);
goto fail;
io_result = -ENOMEM;
goto exit;
}
/* Calculate the amount of extra memory used due to alignment */
@@ -448,14 +333,16 @@ unsigned long load_image(meminfo_t *mem_layout,
WARN("Cannot load '%s' file: Not enough space.\n",
image_name);
dump_load_info(image_base, image_size, mem_layout);
goto fail;
io_result = -ENOMEM;
goto exit;
}
/* Check whether the fixed load address is page-aligned. */
if (!is_page_aligned(image_base)) {
WARN("Cannot load '%s' file at unaligned address 0x%lx\n",
image_name, fixed_addr);
goto fail;
io_result = -ENOMEM;
goto exit;
}
/*
@@ -505,9 +392,14 @@ unsigned long load_image(meminfo_t *mem_layout,
io_result = io_read(image_handle, image_base, image_size, &bytes_read);
if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
goto fail;
goto exit;
}
image_data->image_base = image_base;
image_data->image_size = image_size;
entry_point_info->pc = image_base;
/*
* File has been successfully loaded. Update the free memory
* data structure & flush the contents of the TZRAM so that
@@ -523,54 +415,12 @@ unsigned long load_image(meminfo_t *mem_layout,
mem_layout->free_base += offset + image_size;
exit:
io_result = io_close(image_handle);
io_close(image_handle);
/* Ignore improbable/unrecoverable error in 'close' */
/* TODO: Consider maintaining open device connection from this bootloader stage */
io_result = io_dev_close(dev_handle);
io_dev_close(dev_handle);
/* Ignore improbable/unrecoverable error in 'dev_close' */
return image_base;
return io_result;
fail:
image_base = 0;
goto exit;
}
/*******************************************************************************
* Run a loaded image from the given entry point. This could result in either
* dropping into a lower exception level or jumping to a higher exception level.
* The only way of doing the latter is through an SMC. In either case, setup the
* parameters for the EL change request correctly.
******************************************************************************/
void __dead2 run_image(unsigned long entrypoint,
unsigned long spsr,
unsigned long target_security_state,
void *first_arg,
void *second_arg)
{
el_change_info_t run_image_info;
/* Tell next EL what we want done */
run_image_info.args.arg0 = RUN_IMAGE;
run_image_info.entrypoint = entrypoint;
run_image_info.spsr = spsr;
run_image_info.security_state = target_security_state;
/*
* If we are EL3 then only an eret can take us to the desired
* exception level. Else for the time being assume that we have
* to jump to a higher EL and issue an SMC. Contents of argY
* will go into the general purpose register xY e.g. arg0->x0
*/
if (IS_IN_EL3()) {
run_image_info.args.arg1 = (unsigned long) first_arg;
run_image_info.args.arg2 = (unsigned long) second_arg;
} else {
run_image_info.args.arg1 = entrypoint;
run_image_info.args.arg2 = spsr;
run_image_info.args.arg3 = (unsigned long) first_arg;
run_image_info.args.arg4 = (unsigned long) second_arg;
}
change_el(&run_image_info);
} }
@@ -47,6 +47,27 @@ struct bl31_args;
*****************************************/
extern void bl2_platform_setup(void);
extern struct meminfo *bl2_plat_sec_mem_layout(void);
extern struct bl31_args *bl2_get_bl31_args_ptr(void);
/*******************************************************************************
* This function returns a pointer to the shared memory that the platform has
* kept aside to pass trusted firmware related information that BL3-1
* could need
******************************************************************************/
extern struct bl31_params *bl2_plat_get_bl31_params(void);
/*******************************************************************************
* This function returns a pointer to the shared memory that the platform
* has set aside to hold the entry point information of BL3-1 that BL2 fills in
******************************************************************************/
extern struct entry_point_info *bl2_plat_get_bl31_ep_info(void);
/************************************************************************
* This function flushes to main memory all the params that are
* passed to BL3-1
**************************************************************************/
extern void bl2_plat_flush_bl31_params(void);
#endif /* __BL2_H__ */
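A minimal platform-side sketch of these hooks, assuming the port keeps all the hand-off structures in one static block (the include list, storage layout and single flush are illustrative, not part of this patch):

#include <arch_helpers.h>
#include <bl_common.h>
#include <bl2.h>

/* Hypothetical shared storage for everything BL2 hands over to BL3-1 */
static struct {
	bl31_params_t bl31_params;
	entry_point_info_t bl31_ep_info;
	image_info_t bl31_image_info;
	entry_point_info_t bl32_ep_info;
	image_info_t bl32_image_info;
	entry_point_info_t bl33_ep_info;
	image_info_t bl33_image_info;
} bl31_params_mem;

bl31_params_t *bl2_plat_get_bl31_params(void)
{
	bl31_params_t *p = &bl31_params_mem.bl31_params;

	SET_PARAM_HEAD(p, PARAM_BL31, VERSION_1, 0);

	/* Wire up the per-image structures and tag each one with its header */
	p->bl31_image_info = &bl31_params_mem.bl31_image_info;
	SET_PARAM_HEAD(p->bl31_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
	p->bl32_ep_info = &bl31_params_mem.bl32_ep_info;
	SET_PARAM_HEAD(p->bl32_ep_info, PARAM_EP, VERSION_1, 0);
	p->bl32_image_info = &bl31_params_mem.bl32_image_info;
	SET_PARAM_HEAD(p->bl32_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
	p->bl33_ep_info = &bl31_params_mem.bl33_ep_info;
	SET_PARAM_HEAD(p->bl33_ep_info, PARAM_EP, VERSION_1, 0);
	p->bl33_image_info = &bl31_params_mem.bl33_image_info;
	SET_PARAM_HEAD(p->bl33_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);

	return p;
}

entry_point_info_t *bl2_plat_get_bl31_ep_info(void)
{
	SET_PARAM_HEAD(&bl31_params_mem.bl31_ep_info, PARAM_EP, VERSION_1, 0);
	return &bl31_params_mem.bl31_ep_info;
}

void bl2_plat_flush_bl31_params(void)
{
	/* BL3-1 may read these while the MMU and caches are still off */
	flush_dcache_range((unsigned long)&bl31_params_mem,
			   sizeof(bl31_params_mem));
}

Keeping everything in one block makes the single flush in bl2_plat_flush_bl31_params sufficient; a port that scatters the structures would need to flush each of them individually.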
@@ -42,7 +42,8 @@ extern unsigned long bl31_entrypoint;
* Forward declarations
*****************************************/
struct meminfo;
struct el_change_info;
struct entry_point_info;
struct bl31_params;
/*******************************************************************************
* Function prototypes
@@ -52,10 +53,10 @@ extern void bl31_next_el_arch_setup(uint32_t security_state);
extern void bl31_set_next_image_type(uint32_t type);
extern uint32_t bl31_get_next_image_type(void);
extern void bl31_prepare_next_image_entry();
extern struct el_change_info *bl31_get_next_image_info(uint32_t type);
extern struct entry_point_info *bl31_get_next_image_info(uint32_t type);
extern void bl31_early_platform_setup(struct bl31_params *from_bl2,
void *plat_params_from_bl2);
extern void bl31_platform_setup(void);
extern struct meminfo *bl31_plat_get_bl32_mem_layout(void);
extern struct meminfo *bl31_plat_sec_mem_layout(void);
extern void bl31_register_bl32_init(int32_t (*)(struct meminfo *));
extern void bl31_register_bl32_init(int32_t (*)(void));
#endif /* __BL31_H__ */
@@ -88,6 +88,41 @@
\_name:
.endm
/* ---------------------------------------------
* Find the type of reset and jump to handler
* if present. If the handler is null then it is
* a cold boot. The primary cpu will set up the
* platform while the secondaries wait for
* their turn to be woken up
* ---------------------------------------------
*/
.macro wait_for_entrypoint
wait_for_entrypoint:
mrs x0, mpidr_el1
bl platform_get_entrypoint
cbnz x0, do_warm_boot
mrs x0, mpidr_el1
bl platform_is_primary_cpu
cbnz x0, do_cold_boot
/* ---------------------------------------------
* Perform any platform specific secondary cpu
* actions
* ---------------------------------------------
*/
bl plat_secondary_cold_boot_setup
b wait_for_entrypoint
do_warm_boot:
/* ---------------------------------------------
* Jump to BL31 for all warm boot init.
* ---------------------------------------------
*/
blr x0
do_cold_boot:
.endm
/*
* This macro declares an array of 1 or more stacks, properly
* aligned and in the requested section
......
@@ -31,8 +31,9 @@
#ifndef __BL_COMMON_H__
#define __BL_COMMON_H__
#define SECURE 0
#define NON_SECURE 1
#define SECURE 0x0
#define NON_SECURE 0x1
#define PARAM_EP_SECURITY_MASK 0x1
#define UP 1
#define DOWN 0
@@ -56,10 +57,34 @@
*****************************************************************************/
#define RUN_IMAGE 0xC0000000
/*******************************************************************************
* Constants that allow assembler code to access members of and the
* 'entry_point_info' structure at their correct offsets.
******************************************************************************/
#define ENTRY_POINT_INFO_PC_OFFSET 0x08
#define ENTRY_POINT_INFO_ARGS_OFFSET 0x18
#ifndef __ASSEMBLY__
#define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK)
#define SET_SECURITY_STATE(x, security) \
((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
#define PARAM_EP 0x01
#define PARAM_IMAGE_BINARY 0x02
#define PARAM_BL31 0x03
#define VERSION_1 0x01
#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
(_p)->h.type = (uint8_t)(_type); \
(_p)->h.version = (uint8_t)(_ver); \
(_p)->h.size = (uint16_t)sizeof(*_p); \
(_p)->h.attr = (uint32_t)(_attr) ; \
} while (0)
#ifndef __ASSEMBLY__
#include <cdefs.h> /* For __dead2 */
#include <cassert.h>
#include <stdint.h>
/*******************************************************************************
* Structure used for telling the next BL how much of a particular type of
@@ -85,40 +110,92 @@ typedef struct aapcs64_params {
unsigned long arg7;
} aapcs64_params_t;
/*******************************************************************************
* This structure represents the superset of information needed while switching
* exception levels. The only two mechanisms to do so are ERET & SMC. In case of
* SMC all members apart from 'aapcs64_params' will be ignored.
******************************************************************************/
typedef struct el_change_info {
unsigned long entrypoint;
unsigned long spsr;
unsigned long security_state;
aapcs64_params_t args;
} el_change_info_t;
/***************************************************************************
* This structure provides version information and the size of the
* structure, attributes for the structure it represents
***************************************************************************/
typedef struct param_header {
uint8_t type; /* type of the structure */
uint8_t version; /* version of this structure */
uint16_t size; /* size of this structure in bytes */
uint32_t attr; /* attributes: unused bits SBZ */
} param_header_t;
/*****************************************************************************
* This structure represents the superset of information needed while
* switching exception levels. The only two mechanisms to do so are
* ERET & SMC. Security state is indicated using bit zero of header
* attribute
* NOTE: BL1 expects entrypoint followed by spsr while processing
* SMC to jump to BL31 from the start of entry_point_info
*****************************************************************************/
typedef struct entry_point_info {
param_header_t h;
uintptr_t pc;
uint32_t spsr;
aapcs64_params_t args;
} entry_point_info_t;
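As a usage sketch, this is roughly what a hook filling in the BL3-3 entry point might do (hypothetical helper; in this patch the real work is split between load_image() and the bl2_plat_set_bl33_ep_info() hook, and the spsr value is a platform decision):

static void fill_bl33_ep(entry_point_info_t *bl33_ep)
{
	SET_PARAM_HEAD(bl33_ep, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(bl33_ep->h.attr, NON_SECURE);

	/* pc comes from wherever the image was loaded */
	bl33_ep->pc = plat_get_ns_image_entrypoint();

	/* Target mode/DAIF bits are platform policy; 0 is only a placeholder */
	bl33_ep->spsr = 0;

	/* Anything placed in args is handed to the image in x0..x7 */
	bl33_ep->args.arg0 = 0;
}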
/*****************************************************************************
* Image info binary provides information from the image loader that
* can be used by the firmware to manage available trusted RAM.
* More advanced firmware image formats can provide additional
* information that enables optimization or greater flexibility in the
* common firmware code
*****************************************************************************/
typedef struct image_info {
param_header_t h;
uintptr_t image_base; /* physical address of base of image */
uint32_t image_size; /* bytes read from image file */
} image_info_t;
/*******************************************************************************
* This structure represents the superset of information that can be passed to
* BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
* populated only if BL2 detects its presence.
******************************************************************************/
typedef struct bl31_args {
meminfo_t bl31_meminfo;
el_change_info_t bl32_image_info;
meminfo_t bl32_meminfo;
el_change_info_t bl33_image_info;
meminfo_t bl33_meminfo;
} bl31_args_t;
/*******************************************************************************
* This structure represents the superset of information that can be passed to
* BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
* populated only if BL2 detects its presence. A pointer to a structure of this
* type should be passed in X0 to BL31's cold boot entrypoint
*
* Use of this structure and the X0 parameter is not mandatory: the BL3-1
* platform code can use other mechanisms to provide the necessary information
* about BL3-2 and BL3-3 to the common and SPD code.
*
* BL3-1 image information is mandatory if this structure is used. If either of
* the optional BL3-2 and BL3-3 image information is not provided, this is
* indicated by the respective image_info pointers being zero.
******************************************************************************/
typedef struct bl31_params {
param_header_t h;
image_info_t *bl31_image_info;
entry_point_info_t *bl32_ep_info;
image_info_t *bl32_image_info;
entry_point_info_t *bl33_ep_info;
image_info_t *bl33_image_info;
} bl31_params_t;
/*
* Compile time assertions related to the 'entry_point_info' structure to
* ensure that the assembler and the compiler view of the offsets of
* the structure members is the same.
*/
CASSERT(ENTRY_POINT_INFO_PC_OFFSET ==
__builtin_offsetof(entry_point_info_t, pc), \
assert_BL31_pc_offset_mismatch);
CASSERT(ENTRY_POINT_INFO_ARGS_OFFSET == \
__builtin_offsetof(entry_point_info_t, args), \
assert_BL31_args_offset_mismatch);
CASSERT(sizeof(unsigned long) ==
__builtin_offsetof(entry_point_info_t, spsr) - \
__builtin_offsetof(entry_point_info_t, pc), \
assert_entrypoint_and_spsr_should_be_adjacent);
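
CASSERT itself is defined elsewhere in the tree; to make the checks above concrete, one common way of building such a compile-time assertion is a typedef whose array size goes negative when the condition fails, for example:

/* Illustrative only: not the project's actual CASSERT definition. */
#define EXAMPLE_CASSERT(cond, msg)	typedef char msg[(cond) ? 1 : -1]

/* Refuses to compile unless pc and spsr really are laid out back to back. */
EXAMPLE_CASSERT(sizeof(unsigned long) ==
		__builtin_offsetof(entry_point_info_t, spsr) -
		__builtin_offsetof(entry_point_info_t, pc),
		example_pc_spsr_adjacency);
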
/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/
extern unsigned long page_align(unsigned long, unsigned);
extern void change_security_state(unsigned int);
extern void __dead2 drop_el(aapcs64_params_t *, unsigned long, unsigned long);
extern void __dead2 raise_el(aapcs64_params_t *);
extern void __dead2 change_el(el_change_info_t *);
extern unsigned long make_spsr(unsigned long, unsigned long, unsigned long);
extern void init_bl2_mem_layout(meminfo_t *,
				meminfo_t *,
				unsigned int,
...@@ -127,15 +204,12 @@ extern void init_bl31_mem_layout(const meminfo_t *,
				meminfo_t *,
				unsigned int) __attribute__((weak));
extern unsigned long image_size(const char *);
extern int load_image(meminfo_t *,
		      const char *,
		      unsigned int,
		      unsigned long,
		      image_info_t *,
		      entry_point_info_t *);
extern unsigned long *get_el_change_mem_ptr(void);
extern const char build_message[];
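
A hedged sketch of how a caller might use the reworked load_image() prototype, assuming a zero return indicates success and that the two trailing pointers are out-parameters filled by the loader; the meanings attached to the third and fourth arguments and the image name are placeholders, not taken from the source.

/* Sketch only: the load-type flag and preferred-address interpretations of
 * the third and fourth arguments are assumptions; "bl33.bin" is a placeholder. */
static int try_load_bl33(meminfo_t *layout, image_info_t *img,
			 entry_point_info_t *ep)
{
	int err = load_image(layout, "bl33.bin", 0, 0, img, ep);

	if (err != 0)
		return err;		/* assumed: non-zero means the load failed */

	/* img->image_base and img->image_size now describe the loaded blob. */
	return 0;
}
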
......
...@@ -175,7 +175,25 @@
#define DAIF_IRQ_BIT (1 << 1)
#define DAIF_ABT_BIT (1 << 2)
#define DAIF_DBG_BIT (1 << 3)
#define SPSR_DAIF_SHIFT 6
#define SPSR_DAIF_MASK 0xf
#define SPSR_AIF_SHIFT 6
#define SPSR_AIF_MASK 0x7
#define SPSR_E_SHIFT 9
#define SPSR_E_MASK 0x1
#define SPSR_E_LITTLE 0x0
#define SPSR_E_BIG 0x1
#define SPSR_T_SHIFT 5
#define SPSR_T_MASK 0x1
#define SPSR_T_ARM 0x0
#define SPSR_T_THUMB 0x1
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
/*
 * TCR definitions
...@@ -198,29 +216,53 @@
#define TCR_SH_OUTER_SHAREABLE (0x2 << 12)
#define TCR_SH_INNER_SHAREABLE (0x3 << 12)
#define MODE_SP_SHIFT 0x0
#define MODE_SP_MASK 0x1
#define MODE_SP_EL0 0x0
#define MODE_SP_ELX 0x1
#define MODE_RW_SHIFT 0x4
#define MODE_RW_MASK 0x1
#define MODE_RW_64 0x0
#define MODE_RW_32 0x1
#define MODE_EL_SHIFT 0x2
#define MODE_EL_MASK 0x3
#define MODE_EL3 0x3
#define MODE_EL2 0x2
#define MODE_EL1 0x1
#define MODE_EL0 0x0
#define MODE32_SHIFT 0
#define MODE32_MASK 0xf
#define MODE32_usr 0x0
#define MODE32_fiq 0x1
#define MODE32_irq 0x2
#define MODE32_svc 0x3
#define MODE32_mon 0x6
#define MODE32_abt 0x7
#define MODE32_hyp 0xa
#define MODE32_und 0xb
#define MODE32_sys 0xf
#define GET_RW(mode) (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
#define GET_EL(mode) (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
#define GET_SP(mode) (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
#define GET_M32(mode) (((mode) >> MODE32_SHIFT) & MODE32_MASK)
#define SPSR_64(el, sp, daif) \
(MODE_RW_64 << MODE_RW_SHIFT | \
((el) & MODE_EL_MASK) << MODE_EL_SHIFT | \
((sp) & MODE_SP_MASK) << MODE_SP_SHIFT | \
((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT)
#define SPSR_MODE32(mode, isa, endian, aif) \
(MODE_RW_32 << MODE_RW_SHIFT | \
((mode) & MODE32_MASK) << MODE32_SHIFT | \
((isa) & SPSR_T_MASK) << SPSR_T_SHIFT | \
((endian) & SPSR_E_MASK) << SPSR_E_SHIFT | \
((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
#define AARCH32_MODE_SVC 0x13
#define AARCH32_MODE_HYP 0x1a
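
Plugging the constants above into SPSR_64() makes the encoding concrete; for example, an AArch64 EL1 payload entered on its own SP_ELx stack with all exceptions masked:

/* Worked example: 0xf << 6 = 0x3c0 (DAIF), EL1 << 2 = 0x4, SP_ELX = 0x1. */
#include <assert.h>

static void spsr_encoding_example(void)
{
	unsigned int spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	assert(spsr == 0x3c5);
	assert(GET_RW(spsr) == MODE_RW_64);
	assert(GET_EL(spsr) == MODE_EL1);
	assert(GET_SP(spsr) == MODE_SP_ELX);
}
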
/* Miscellaneous MMU related constants */
#define NUM_2MB_IN_GB (1 << 9)
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <gic_v2.h>
#include <platform.h>
#include "../drivers/pwrc/fvp_pwrc.h"
.globl platform_get_entrypoint
.globl platform_cold_boot_init
.globl plat_secondary_cold_boot_setup
.macro platform_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =VE_SYSREGS_BASE + V2M_SYS_ID
ldr \w_tmp, [\x_tmp]
ubfx \w_tmp, \w_tmp, #SYS_ID_BLD_SHIFT, #SYS_ID_BLD_LENGTH
cmp \w_tmp, #BLD_GIC_VE_MMAP
csel \res, \param1, \param2, eq
.endm
/* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
*
* This function performs any platform specific actions
* needed for a secondary cpu after a cold reset e.g
* mark the cpu's presence, mechanism to place it in a
* holding pen etc.
* TODO: Should we read the PSYS register to make sure
* that the request has gone through.
* -----------------------------------------------------
*/
func plat_secondary_cold_boot_setup
/* ---------------------------------------------
* Power down this cpu.
* TODO: Do we need to worry about powering the
* cluster down as well here. That will need
* locks which we won't have unless an elf-
* loader zeroes out the zi section.
* ---------------------------------------------
*/
mrs x0, mpidr_el1
ldr x1, =PWRC_BASE
str w0, [x1, #PPOFFR_OFF]
/* ---------------------------------------------
* Deactivate the gic cpu interface as well
* ---------------------------------------------
*/
ldr x0, =VE_GICC_BASE
ldr x1, =BASE_GICC_BASE
platform_choose_gicmmap x0, x1, x2, w2, x1
mov w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
orr w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
str w0, [x1, #GICC_CTLR]
/* ---------------------------------------------
* There is no sane reason to come out of this
* wfi so panic if we do. This cpu will be pow-
* ered on and reset by the cpu_on pm api
* ---------------------------------------------
*/
dsb sy
wfi
cb_panic:
b cb_panic
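
Read as C, the secondary-cpu path above amounts to the following; mmio_write_32(), read_mpidr() and wfi() are the accessors the project normally provides, and folding the VE/Base GIC selection into a pre-resolved gicc_base argument is a simplification of the platform_choose_gicmmap step.

/* Sketch only: C rendering of plat_secondary_cold_boot_setup above. */
static void secondary_cold_boot_sketch(unsigned long gicc_base)
{
	/* Ask the power controller to power this cpu off. */
	mmio_write_32(PWRC_BASE + PPOFFR_OFF, (uint32_t)read_mpidr());

	/* Disable legacy IRQ/FIQ bypass at the GIC cpu interface. */
	mmio_write_32(gicc_base + GICC_CTLR,
		      IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1 |
		      IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0);

	/* Wait to be powered down; waking up from here is treated as fatal. */
	for (;;)
		wfi();
}
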
/* -----------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid);
*
* Main job of this routine is to distinguish between
* a cold and warm boot.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
*
* TODO: Not a good idea to save lr in a temp reg
* TODO: PSYSR is a common register and should be
* accessed using locks. Since its not possible
* to use locks immediately after a cold reset
* we are relying on the fact that after a cold
* reset all cpus will read the same WK field
* -----------------------------------------------------
*/
func platform_get_entrypoint
mov x9, x30 // lr
mov x2, x0
ldr x1, =PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
ubfx w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_MASK
cbnz w2, warm_reset
mov x0, x2
b exit
warm_reset:
/* ---------------------------------------------
* A per-cpu mailbox is maintained in the tru-
* sted DRAM. Its flushed out of the caches
* after every update using normal memory so
* its safe to read it here with SO attributes
* ---------------------------------------------
*/
ldr x10, =TZDRAM_BASE + MBOX_OFF
bl platform_get_core_pos
lsl x0, x0, #CACHE_WRITEBACK_SHIFT
ldr x0, [x10, x0]
cbz x0, _panic
exit:
ret x9
_panic: b _panic
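
The same routine expressed as C, for reference; the mmio accessors and the platform_get_core_pos() prototype are assumed from the surrounding code, and PSYSR_WK_MASK is used as a field width here to mirror the ubfx instruction.

/* Sketch only: returns 0 on a cold boot, else the warm-boot mailbox value. */
static unsigned long get_entrypoint_sketch(unsigned long mpidr)
{
	unsigned int wk;

	mmio_write_32(PWRC_BASE + PSYSR_OFF, (uint32_t)mpidr);
	wk = (mmio_read_32(PWRC_BASE + PSYSR_OFF) >> PSYSR_WK_SHIFT) &
	     ((1U << PSYSR_WK_MASK) - 1U);
	if (wk == 0)
		return 0;	/* cold boot */

	/* Warm boot: a zero mailbox at this point is a fatal error. */
	return *(volatile unsigned long *)(TZDRAM_BASE + MBOX_OFF +
		((unsigned long)platform_get_core_pos(mpidr) <<
		 CACHE_WRITEBACK_SHIFT));
}
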
/* -----------------------------------------------------
* void platform_mem_init (void);
*
* Zero out the mailbox registers in the TZDRAM. The
* mmu is turned off right now and only the primary can
* ever execute this code. Secondaries will read the
* mailboxes using SO accesses. In short, BL31 will
* update the mailboxes after mapping the tzdram as
* normal memory. It will flush its copy after update.
* BL1 will always read the mailboxes with the MMU off
* -----------------------------------------------------
*/
func platform_mem_init
ldr x0, =TZDRAM_BASE + MBOX_OFF
stp xzr, xzr, [x0, #0]
stp xzr, xzr, [x0, #0x10]
stp xzr, xzr, [x0, #0x20]
stp xzr, xzr, [x0, #0x30]
ret
/* -----------------------------------------------------
* void platform_cold_boot_init (bl1_main function);
*
* Routine called only by the primary cpu after a cold
* boot to perform early platform initialization
* -----------------------------------------------------
*/
func platform_cold_boot_init
mov x20, x0
bl platform_mem_init
/* ---------------------------------------------
* Give ourselves a small coherent stack to
* ease the pain of initializing the MMU and
* CCI in assembler
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_coherent_stack
/* ---------------------------------------------
* Architectural init. can be generic e.g.
* enabling stack alignment and platform spec-
* ific e.g. MMU & page table setup as per the
* platform memory map. Perform the latter here
* and the former in bl1_main.
* ---------------------------------------------
*/
bl bl1_early_platform_setup
bl bl1_plat_arch_setup
/* ---------------------------------------------
* Give ourselves a stack allocated in Normal
* -IS-WBWA memory
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
/* ---------------------------------------------
* Jump to the main function. Returning from it
* is a terminal error.
* ---------------------------------------------
*/
blr x20
cb_init_panic:
b cb_init_panic
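
The primary-cpu flow above boils down to the call sequence below; the void prototypes and the function-pointer type are simplifications, since the assembly merely branches to whatever bl1_main address it was handed in x0.

/* Sketch only: C rendering of platform_cold_boot_init. */
typedef void (*cold_boot_main_t)(void);

static void cold_boot_init_sketch(cold_boot_main_t c_entry)
{
	platform_mem_init();				/* clear the warm-boot mailboxes */
	platform_set_coherent_stack(read_mpidr());	/* small stack for MMU/CCI bring-up */
	bl1_early_platform_setup();
	bl1_plat_arch_setup();				/* MMU & page tables for this platform */
	platform_set_stack(read_mpidr());		/* switch to the normal-memory stack */
	c_entry();					/* never returns */
}
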
...@@ -32,6 +32,7 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cci400.h>
#include <debug.h>
#include <mmio.h>
#include <platform.h>
...@@ -130,14 +131,15 @@ const mmap_region_t fvp_mmap[] = {
 * the platform memory map & initialize the mmu, for the given exception level
 ******************************************************************************/
#define DEFINE_CONFIGURE_MMU_EL(_el) \
void configure_mmu_el##_el(unsigned long total_base, \
unsigned long total_size, \
unsigned long ro_start, \
unsigned long ro_limit, \
unsigned long coh_start, \
unsigned long coh_limit) \
{ \
mmap_add_region(total_base, \
total_size, \
MT_MEMORY | MT_RW | MT_SECURE); \
mmap_add_region(ro_start, ro_limit - ro_start, \
MT_MEMORY | MT_RO | MT_SECURE); \
...@@ -251,3 +253,57 @@ uint64_t plat_get_syscnt_freq(void)
return counter_base_frequency;
}
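
The regenerated configure_mmu_elX helpers now take the memory extents directly rather than a meminfo_t; a hypothetical EL3 caller (the bound names are placeholders, and the macro is assumed to be instantiated for EL3 as before) would look like:

/* Sketch only: calling the regenerated helper with explicit extents. */
static void setup_el3_mmu_sketch(unsigned long ram_base, unsigned long ram_size,
				 unsigned long ro_start, unsigned long ro_limit,
				 unsigned long coh_start, unsigned long coh_limit)
{
	configure_mmu_el3(ram_base, ram_size,
			  ro_start, ro_limit,
			  coh_start, coh_limit);
}
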
void fvp_cci_setup(void)
{
unsigned long cci_setup;
/*
* Enable CCI-400 for this cluster. No need
* for locks as no other cpu is active at the
* moment
*/
cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI);
if (cci_setup)
cci_enable_coherency(read_mpidr());
}
/*******************************************************************************
* Set SPSR and secure state for BL32 image
******************************************************************************/
void fvp_set_bl32_ep_info(entry_point_info_t *bl32_ep_info)
{
SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
/*
* The Secure Payload Dispatcher service is responsible for
* setting the SPSR prior to entry into the BL32 image.
*/
bl32_ep_info->spsr = 0;
}
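
SET_SECURITY_STATE is defined in the project headers; a definition consistent with the "bit zero of the header attribute" convention described earlier would look like the following, shown only to make the call above concrete:

/* Illustrative only: not the project's actual macros. */
#define EXAMPLE_SET_SECURITY_STATE(attr, state) \
	((attr) = ((attr) & ~1U) | ((state) & 1U))
#define EXAMPLE_GET_SECURITY_STATE(attr)	((attr) & 1U)
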
/*******************************************************************************
* Set SPSR and secure state for BL33 image
******************************************************************************/
void fvp_set_bl33_ep_info(entry_point_info_t *bl33_ep_info)
{
unsigned long el_status;
unsigned int mode;
/* Figure out what mode we enter the non-secure world in */
el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
el_status &= ID_AA64PFR0_ELX_MASK;
if (el_status)
mode = MODE_EL2;
else
mode = MODE_EL1;
/*
* TODO: Consider the possibility of specifying the SPSR in
* the FIP ToC and allowing the platform to have a say as
* well.
*/
bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
}
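
On a model that implements EL2, the logic above therefore enters BL3-3 in AArch64 EL2 on SP_ELx with all exceptions masked; the concrete values below follow from the encodings defined earlier.

/* Worked example of the SPSR chosen above:
 *   EL2 present: SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) = 0x3c9
 *   EL2 absent:  SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) = 0x3c5 */
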
...@@ -31,10 +31,139 @@
#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <gic_v2.h>
#include <platform.h>
#include "../drivers/pwrc/fvp_pwrc.h"
.globl platform_get_entrypoint
.globl plat_secondary_cold_boot_setup
.globl platform_mem_init
.globl plat_report_exception
.macro platform_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =VE_SYSREGS_BASE + V2M_SYS_ID
ldr \w_tmp, [\x_tmp]
ubfx \w_tmp, \w_tmp, #SYS_ID_BLD_SHIFT, #SYS_ID_BLD_LENGTH
cmp \w_tmp, #BLD_GIC_VE_MMAP
csel \res, \param1, \param2, eq
.endm
/* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
*
* This function performs any platform specific actions
* needed for a secondary cpu after a cold reset e.g
* mark the cpu's presence, mechanism to place it in a
* holding pen etc.
* TODO: Should we read the PSYS register to make sure
* that the request has gone through.
* -----------------------------------------------------
*/
func plat_secondary_cold_boot_setup
/* ---------------------------------------------
* Power down this cpu.
* TODO: Do we need to worry about powering the
* cluster down as well here. That will need
* locks which we won't have unless an elf-
* loader zeroes out the zi section.
* ---------------------------------------------
*/
mrs x0, mpidr_el1
ldr x1, =PWRC_BASE
str w0, [x1, #PPOFFR_OFF]
/* ---------------------------------------------
* Deactivate the gic cpu interface as well
* ---------------------------------------------
*/
ldr x0, =VE_GICC_BASE
ldr x1, =BASE_GICC_BASE
platform_choose_gicmmap x0, x1, x2, w2, x1
mov w0, #(IRQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP1)
orr w0, w0, #(IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP0)
str w0, [x1, #GICC_CTLR]
/* ---------------------------------------------
* There is no sane reason to come out of this
* wfi so panic if we do. This cpu will be pow-
* ered on and reset by the cpu_on pm api
* ---------------------------------------------
*/
dsb sy
wfi
cb_panic:
b cb_panic
/* -----------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid);
*
* Main job of this routine is to distinguish between
* a cold and warm boot.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
*
* TODO: Not a good idea to save lr in a temp reg
* TODO: PSYSR is a common register and should be
* accessed using locks. Since its not possible
* to use locks immediately after a cold reset
* we are relying on the fact that after a cold
* reset all cpus will read the same WK field
* -----------------------------------------------------
*/
func platform_get_entrypoint
mov x9, x30 // lr
mov x2, x0
ldr x1, =PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
ubfx w2, w2, #PSYSR_WK_SHIFT, #PSYSR_WK_MASK
cbnz w2, warm_reset
mov x0, x2
b exit
warm_reset:
/* ---------------------------------------------
* A per-cpu mailbox is maintained in the tru-
* sted DRAM. Its flushed out of the caches
* after every update using normal memory so
* its safe to read it here with SO attributes
* ---------------------------------------------
*/
ldr x10, =TZDRAM_BASE + MBOX_OFF
bl platform_get_core_pos
lsl x0, x0, #CACHE_WRITEBACK_SHIFT
ldr x0, [x10, x0]
cbz x0, _panic
exit:
ret x9
_panic: b _panic
/* -----------------------------------------------------
* void platform_mem_init (void);
*
* Zero out the mailbox registers in the TZDRAM. The
* mmu is turned off right now and only the primary can
* ever execute this code. Secondaries will read the
* mailboxes using SO accesses. In short, BL31 will
* update the mailboxes after mapping the tzdram as
* normal memory. It will flush its copy after update.
* BL1 will always read the mailboxes with the MMU off
* -----------------------------------------------------
*/
func platform_mem_init
ldr x0, =TZDRAM_BASE + MBOX_OFF
mov w1, #PLATFORM_CORE_COUNT
loop:
str xzr, [x0], #CACHE_WRITEBACK_GRANULE
subs w1, w1, #1
b.gt loop
ret
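
The mailbox-clearing loop above, rendered as C: one cache-writeback-granule-sized slot per cpu, starting at TZDRAM_BASE + MBOX_OFF.

/* Sketch only: C rendering of the loop version of platform_mem_init. */
static void mem_init_sketch(void)
{
	volatile unsigned long *mbox =
		(volatile unsigned long *)(TZDRAM_BASE + MBOX_OFF);
	unsigned int cpu;

	for (cpu = 0U; cpu < PLATFORM_CORE_COUNT; cpu++) {
		*mbox = 0UL;
		mbox += CACHE_WRITEBACK_GRANULE / sizeof(*mbox);
	}
}
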
/* ---------------------------------------------
 * void plat_report_exception(unsigned int type)
 * Function to report an unhandled exception
......