Unverified Commit 3ccfcd6e authored by Soby Mathew, committed by GitHub

Merge pull request #1587 from antonio-nino-diaz-arm/an/deprecated

Remove deprecated interfaces for all platforms
parents 9a983cfe 991f1f4d
@@ -11,7 +11,6 @@ build/
# Ignore build products from tools
tools/**/*.o
tools/fip_create/
tools/fiptool/fiptool
tools/fiptool/fiptool.exe
tools/cert_create/src/*.o
@@ -327,19 +327,6 @@ ifeq (${ARM_ARCH_MAJOR},7)
include make_helpers/armv7-a-cpus.mk
endif
# Platform compatibility is not supported in AArch32
ifneq (${ARCH},aarch32)
# If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
ifndef ENABLE_PLAT_COMPAT
ENABLE_PLAT_COMPAT := 1
endif
# Include the platform compatibility helpers for PSCI
ifneq (${ENABLE_PLAT_COMPAT}, 0)
include plat/compat/plat_compat.mk
endif
endif
# Include the CPU specific operations makefile, which provides default
# values for all CPU errata workarounds and CPU specific optimisations.
# This can be overridden by the platform.
@@ -403,13 +390,6 @@ ifeq (${NEED_BL33},yes)
endif
endif
# For AArch32, LOAD_IMAGE_V2 must be enabled.
ifeq (${ARCH},aarch32)
ifeq (${LOAD_IMAGE_V2}, 0)
$(error "For AArch32, LOAD_IMAGE_V2 must be enabled.")
endif
endif
# When building for systems with hardware-assisted coherency, there's no need to
# use USE_COHERENT_MEM. Require that USE_COHERENT_MEM must be set to 0 too.
ifeq ($(HW_ASSISTED_COHERENCY)-$(USE_COHERENT_MEM),1-1)
@@ -451,14 +431,11 @@ ifeq ($(FAULT_INJECTION_SUPPORT),1)
endif
endif
# DYN_DISABLE_AUTH can be set only when TRUSTED_BOARD_BOOT=1
ifeq ($(DYN_DISABLE_AUTH), 1)
ifeq (${TRUSTED_BOARD_BOOT}, 0)
$(error "TRUSTED_BOARD_BOOT must be enabled for DYN_DISABLE_AUTH to be set.")
endif
ifeq (${LOAD_IMAGE_V2}, 0)
$(error "DYN_DISABLE_AUTH is only supported for LOAD_IMAGE_V2.")
endif
endif
################################################################################
@@ -586,7 +563,6 @@ $(eval $(call assert_boolean,ENABLE_AMU))
$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
$(eval $(call assert_boolean,ENABLE_BACKTRACE))
$(eval $(call assert_boolean,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call assert_boolean,ENABLE_PLAT_COMPAT))
$(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
$(eval $(call assert_boolean,ENABLE_RUNTIME_INSTRUMENTATION))
@@ -599,7 +575,6 @@ $(eval $(call assert_boolean,GENERATE_COT))
$(eval $(call assert_boolean,GICV2_G0_FOR_EL3))
$(eval $(call assert_boolean,HANDLE_EA_EL3_FIRST))
$(eval $(call assert_boolean,HW_ASSISTED_COHERENCY))
$(eval $(call assert_boolean,LOAD_IMAGE_V2))
$(eval $(call assert_boolean,MULTI_CONSOLE_API))
$(eval $(call assert_boolean,NS_TIMER_SWITCH))
$(eval $(call assert_boolean,PL011_GENERIC_UART))
@@ -630,7 +605,6 @@ $(eval $(call assert_numeric,SMCCC_MAJOR_VERSION))
$(eval $(call add_define,ARM_ARCH_MAJOR))
$(eval $(call add_define,ARM_ARCH_MINOR))
$(eval $(call add_define,ARM_GIC_ARCH))
$(eval $(call add_define,COLD_BOOT_SINGLE_CPU))
$(eval $(call add_define,CTX_INCLUDE_AARCH32_REGS))
$(eval $(call add_define,CTX_INCLUDE_FPREGS))
@@ -639,7 +613,6 @@ $(eval $(call add_define,ENABLE_AMU))
$(eval $(call add_define,ENABLE_ASSERTIONS))
$(eval $(call add_define,ENABLE_BACKTRACE))
$(eval $(call add_define,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call add_define,ENABLE_PLAT_COMPAT))
$(eval $(call add_define,ENABLE_PMF))
$(eval $(call add_define,ENABLE_PSCI_STAT))
$(eval $(call add_define,ENABLE_RUNTIME_INSTRUMENTATION))
@@ -651,7 +624,6 @@ $(eval $(call add_define,FAULT_INJECTION_SUPPORT))
$(eval $(call add_define,GICV2_G0_FOR_EL3))
$(eval $(call add_define,HANDLE_EA_EL3_FIRST))
$(eval $(call add_define,HW_ASSISTED_COHERENCY))
$(eval $(call add_define,LOAD_IMAGE_V2))
$(eval $(call add_define,LOG_LEVEL))
$(eval $(call add_define,MULTI_CONSOLE_API))
$(eval $(call add_define,NS_TIMER_SWITCH))
@@ -291,26 +291,11 @@ static int bl1_fwu_image_copy(unsigned int image_id,
return -ENOMEM;
}
#if LOAD_IMAGE_V2
/* Check that the image size to load is within limit */
if (image_size > image_desc->image_info.image_max_size) {
WARN("BL1-FWU: Image size out of bounds\n");
return -ENOMEM;
}
#else
/*
* Check the image will fit into the free trusted RAM after BL1
* load.
*/
const meminfo_t *mem_layout = bl1_plat_sec_mem_layout();
if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
image_desc->image_info.image_base,
image_size)) {
WARN("BL1-FWU: Copy not allowed due to insufficient"
" resources.\n");
return -ENOMEM;
}
#endif
/* Save the given image size. */
image_desc->image_info.image_size = image_size;
@@ -37,7 +37,6 @@ void bl1_calc_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
assert(bl1_mem_layout != NULL);
assert(bl2_mem_layout != NULL);
#if LOAD_IMAGE_V2
/*
* Remove BL1 RW data from the scope of memory visible to BL2.
* This is assuming BL1 RW data is at the top of bl1_mem_layout.
@@ -45,42 +44,10 @@ void bl1_calc_bl2_mem_layout(const meminfo_t *bl1_mem_layout,
assert(BL1_RW_BASE > bl1_mem_layout->total_base);
bl2_mem_layout->total_base = bl1_mem_layout->total_base;
bl2_mem_layout->total_size = BL1_RW_BASE - bl1_mem_layout->total_base;
#else
/* Check that BL1's memory is lying outside of the free memory */
assert((BL1_RAM_LIMIT <= bl1_mem_layout->free_base) ||
(BL1_RAM_BASE >= bl1_mem_layout->free_base +
bl1_mem_layout->free_size));
/* Remove BL1 RW data from the scope of memory visible to BL2 */
*bl2_mem_layout = *bl1_mem_layout;
reserve_mem(&bl2_mem_layout->total_base,
&bl2_mem_layout->total_size,
BL1_RAM_BASE,
BL1_RAM_LIMIT - BL1_RAM_BASE);
#endif /* LOAD_IMAGE_V2 */
flush_dcache_range((unsigned long)bl2_mem_layout, sizeof(meminfo_t));
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Compatibility default implementation for deprecated API. This has a weak
* definition. Platform specific code can override it if it wishes to.
******************************************************************************/
#pragma weak bl1_init_bl2_mem_layout
/*******************************************************************************
* Function that takes a memory layout into which BL2 has been loaded and
* populates a new memory layout for BL2 that ensures that BL1's data sections
* resident in secure RAM are not visible to BL2.
******************************************************************************/
void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout,
struct meminfo *bl2_mem_layout)
{
bl1_calc_bl2_mem_layout(bl1_mem_layout, bl2_mem_layout);
}
#endif
/*******************************************************************************
* Function to perform late architectural and platform specific initialization.
* It also queries the platform to load and run next BL image. Only called
@@ -183,27 +150,7 @@ static void bl1_load_bl2(void)
plat_error_handler(err);
}
#if LOAD_IMAGE_V2
err = load_auth_image(BL2_IMAGE_ID, image_info);
#else
entry_point_info_t *ep_info;
meminfo_t *bl1_tzram_layout;
/* Get the entry point info */
ep_info = &image_desc->ep_info;
/* Find out how much free trusted ram remains after BL1 load */
bl1_tzram_layout = bl1_plat_sec_mem_layout();
/* Load the BL2 image */
err = load_auth_image(bl1_tzram_layout,
BL2_IMAGE_ID,
image_info->image_base,
image_info,
ep_info);
#endif /* LOAD_IMAGE_V2 */
if (err) {
ERROR("Failed to load BL2 firmware.\n");
plat_error_handler(err);
@@ -15,9 +15,7 @@ image_desc_t bl1_tbbr_image_descs[] = {
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
VERSION_1, image_info_t, 0),
.image_info.image_base = BL2_BASE,
#if LOAD_IMAGE_V2
.image_info.image_max_size = BL2_LIMIT - BL2_BASE,
#endif
SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
VERSION_1, entry_point_info_t, SECURE),
},
@@ -35,9 +33,7 @@ image_desc_t bl1_tbbr_image_descs[] = {
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
VERSION_1, image_info_t, 0),
.image_info.image_base = SCP_BL2U_BASE,
#if LOAD_IMAGE_V2
.image_info.image_max_size = SCP_BL2U_LIMIT - SCP_BL2U_BASE,
#endif
SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
VERSION_1, entry_point_info_t, SECURE),
},
@@ -48,9 +44,7 @@ image_desc_t bl1_tbbr_image_descs[] = {
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_1, image_info_t, 0),
.image_info.image_base = BL2U_BASE,
#if LOAD_IMAGE_V2
.image_info.image_max_size = BL2U_LIMIT - BL2U_BASE,
#endif
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_1, entry_point_info_t, SECURE | EXECUTABLE),
.ep_info.pc = BL2U_BASE,
@@ -14,11 +14,7 @@ ifeq (${ARCH},aarch64)
BL2_SOURCES += common/aarch64/early_exceptions.S
endif
ifeq (${LOAD_IMAGE_V2},1)
BL2_SOURCES += bl2/bl2_image_load_v2.c
else
BL2_SOURCES += bl2/bl2_image_load.c
endif
ifeq (${BL2_AT_EL3},0)
BL2_SOURCES += bl2/${ARCH}/bl2_entrypoint.S
bl2/bl2_image_load.c deleted
/*
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
#include <platform.h>
#include <platform_def.h>
#include <stdint.h>
/*
* Check for platforms that use obsolete image terminology
*/
#ifdef BL30_BASE
# error "BL30_BASE platform define no longer used - please use SCP_BL2_BASE"
#endif
/*******************************************************************************
* Load the SCP_BL2 image if there's one.
* If a platform does not want to attempt to load SCP_BL2 image it must leave
* SCP_BL2_BASE undefined.
* Return 0 on success or if there's no SCP_BL2 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_scp_bl2(void)
{
int e = 0;
#ifdef SCP_BL2_BASE
meminfo_t scp_bl2_mem_info;
image_info_t scp_bl2_image_info;
/*
* It is up to the platform to specify where SCP_BL2 should be loaded if
* it exists. It could create space in the secure sram or point to a
* completely different memory.
*
* The entry point information is not relevant in this case as the AP
* won't execute the SCP_BL2 image.
*/
INFO("BL2: Loading SCP_BL2\n");
bl2_plat_get_scp_bl2_meminfo(&scp_bl2_mem_info);
scp_bl2_image_info.h.version = VERSION_1;
e = load_auth_image(&scp_bl2_mem_info,
SCP_BL2_IMAGE_ID,
SCP_BL2_BASE,
&scp_bl2_image_info,
NULL);
if (e == 0) {
/* The subsequent handling of SCP_BL2 is platform specific */
e = bl2_plat_handle_scp_bl2(&scp_bl2_image_info);
if (e) {
ERROR("Failure in platform-specific handling of SCP_BL2 image.\n");
}
}
#endif /* SCP_BL2_BASE */
return e;
}
#ifndef EL3_PAYLOAD_BASE
/*******************************************************************************
* Load the BL31 image.
* The bl2_to_bl31_params and bl31_ep_info params will be updated with the
* relevant BL31 information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl31(bl31_params_t *bl2_to_bl31_params,
entry_point_info_t *bl31_ep_info)
{
meminfo_t *bl2_tzram_layout;
int e;
INFO("BL2: Loading BL31\n");
assert(bl2_to_bl31_params != NULL);
assert(bl31_ep_info != NULL);
/* Find out how much free trusted ram remains after BL2 load */
bl2_tzram_layout = bl2_plat_sec_mem_layout();
/* Set the X0 parameter to BL31 */
bl31_ep_info->args.arg0 = (unsigned long)bl2_to_bl31_params;
/* Load the BL31 image */
e = load_auth_image(bl2_tzram_layout,
BL31_IMAGE_ID,
BL31_BASE,
bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
if (e == 0) {
bl2_plat_set_bl31_ep_info(bl2_to_bl31_params->bl31_image_info,
bl31_ep_info);
}
return e;
}
/*******************************************************************************
* Load the BL32 image if there's one.
* The bl2_to_bl31_params param will be updated with the relevant BL32
* information.
* If a platform does not want to attempt to load BL32 image it must leave
* BL32_BASE undefined.
* Return 0 on success or if there's no BL32 image to load, a negative error
* code otherwise.
******************************************************************************/
static int load_bl32(bl31_params_t *bl2_to_bl31_params)
{
int e = 0;
#ifdef BL32_BASE
meminfo_t bl32_mem_info;
INFO("BL2: Loading BL32\n");
assert(bl2_to_bl31_params != NULL);
/*
* It is up to the platform to specify where BL32 should be loaded if
* it exists. It could create space in the secure sram or point to a
* completely different memory.
*/
bl2_plat_get_bl32_meminfo(&bl32_mem_info);
e = load_auth_image(&bl32_mem_info,
BL32_IMAGE_ID,
BL32_BASE,
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
if (e == 0) {
bl2_plat_set_bl32_ep_info(
bl2_to_bl31_params->bl32_image_info,
bl2_to_bl31_params->bl32_ep_info);
}
#endif /* BL32_BASE */
return e;
}
#ifndef PRELOADED_BL33_BASE
/*******************************************************************************
* Load the BL33 image.
* The bl2_to_bl31_params param will be updated with the relevant BL33
* information.
* Return 0 on success, a negative error code otherwise.
******************************************************************************/
static int load_bl33(bl31_params_t *bl2_to_bl31_params)
{
meminfo_t bl33_mem_info;
int e;
INFO("BL2: Loading BL33\n");
assert(bl2_to_bl31_params != NULL);
bl2_plat_get_bl33_meminfo(&bl33_mem_info);
/* Load the BL33 image in non-secure memory provided by the platform */
e = load_auth_image(&bl33_mem_info,
BL33_IMAGE_ID,
plat_get_ns_image_entrypoint(),
bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
if (e == 0) {
bl2_plat_set_bl33_ep_info(bl2_to_bl31_params->bl33_image_info,
bl2_to_bl31_params->bl33_ep_info);
}
return e;
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/*******************************************************************************
* This function loads SCP_BL2/BL3x images and returns the ep_info for
* the next executable image.
******************************************************************************/
struct entry_point_info *bl2_load_images(void)
{
bl31_params_t *bl2_to_bl31_params;
entry_point_info_t *bl31_ep_info;
int e;
e = load_scp_bl2();
if (e) {
ERROR("Failed to load SCP_BL2 (%i)\n", e);
plat_error_handler(e);
}
/* Perform platform setup in BL2 after loading SCP_BL2 */
bl2_platform_setup();
/*
* Get a pointer to the memory the platform has set aside to pass
* information to BL31.
*/
bl2_to_bl31_params = bl2_plat_get_bl31_params();
bl31_ep_info = bl2_plat_get_bl31_ep_info();
#ifdef EL3_PAYLOAD_BASE
/*
* In the case of an EL3 payload, we don't need to load any further
* images. Just update the BL31 entrypoint info structure to make BL1
* jump to the EL3 payload.
* The pointer to the memory the platform has set aside to pass
* information to BL31 in the normal boot flow is reused here, even
* though only a fraction of the information contained in the
* bl31_params_t structure makes sense in the context of EL3 payloads.
* This will be refined in the future.
*/
INFO("BL2: Populating the entrypoint info for the EL3 payload\n");
bl31_ep_info->pc = EL3_PAYLOAD_BASE;
bl31_ep_info->args.arg0 = (unsigned long) bl2_to_bl31_params;
bl2_plat_set_bl31_ep_info(NULL, bl31_ep_info);
#else
e = load_bl31(bl2_to_bl31_params, bl31_ep_info);
if (e) {
ERROR("Failed to load BL31 (%i)\n", e);
plat_error_handler(e);
}
e = load_bl32(bl2_to_bl31_params);
if (e) {
if (e == -EAUTH) {
ERROR("Failed to authenticate BL32\n");
plat_error_handler(e);
} else {
WARN("Failed to load BL32 (%i)\n", e);
}
}
#ifdef PRELOADED_BL33_BASE
/*
* In this case, don't load the BL33 image as it's already loaded in
* memory. Update BL33 entrypoint information.
*/
INFO("BL2: Populating the entrypoint info for the preloaded BL33\n");
bl2_to_bl31_params->bl33_ep_info->pc = PRELOADED_BL33_BASE;
bl2_plat_set_bl33_ep_info(NULL, bl2_to_bl31_params->bl33_ep_info);
#else
e = load_bl33(bl2_to_bl31_params);
if (e) {
ERROR("Failed to load BL33 (%i)\n", e);
plat_error_handler(e);
}
#endif /* PRELOADED_BL33_BASE */
#endif /* EL3_PAYLOAD_BASE */
/* Flush the params to be passed to memory */
bl2_plat_flush_bl31_params();
return bl31_ep_info;
}
@@ -61,70 +61,3 @@ void cm_set_context_by_index(unsigned int cpu_idx, void *context,
set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
#if !ERROR_DEPRECATED
/*
* These context management helpers are deprecated but are maintained for use
* by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
* is enabled, these are excluded from the build so as to force users to
* migrate to the new API.
*/
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
* security state. NULL is returned if no such structure has been specified.
******************************************************************************/
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
/*
* Suppress deprecated declaration warning in compatibility function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
#pragma GCC diagnostic pop
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by MPIDR
******************************************************************************/
void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
/*
* Suppress deprecated declaration warning in compatibility function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
#pragma GCC diagnostic pop
}
/*******************************************************************************
* The following function provides a compatibility function for SPDs using the
* existing cm library routines. This function is expected to be invoked for
* initializing the cpu_context for the CPU specified by MPIDR for first use.
******************************************************************************/
void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
{
if ((mpidr & MPIDR_AFFINITY_MASK) ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
cm_init_my_context(ep);
else {
/*
* Suppress deprecated declaration warning in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
#pragma GCC diagnostic pop
}
}
#endif /* ERROR_DEPRECATED */
@@ -23,8 +23,7 @@ static int disable_auth;
/******************************************************************************
* API to dynamically disable authentication. Only meant for development
* systems. This is only invoked if DYN_DISABLE_AUTH is defined.
*****************************************************************************/
void dyn_disable_auth(void)
{
@@ -101,88 +100,6 @@ int is_mem_free(uintptr_t free_base, size_t free_size,
return (addr >= free_base) && (requested_end <= free_end);
}
#if !LOAD_IMAGE_V2
/******************************************************************************
* Inside a given memory region, determine whether a sub-region of memory is
* closer from the top or the bottom of the encompassing region. Return the
* size of the smallest chunk of free memory surrounding the sub-region in
* 'small_chunk_size'.
*****************************************************************************/
static unsigned int choose_mem_pos(uintptr_t mem_start, uintptr_t mem_end,
uintptr_t submem_start, uintptr_t submem_end,
size_t *small_chunk_size)
{
size_t top_chunk_size, bottom_chunk_size;
assert(mem_start <= submem_start);
assert(submem_start <= submem_end);
assert(submem_end <= mem_end);
assert(small_chunk_size != NULL);
top_chunk_size = mem_end - submem_end;
bottom_chunk_size = submem_start - mem_start;
if (top_chunk_size < bottom_chunk_size) {
*small_chunk_size = top_chunk_size;
return TOP;
} else {
*small_chunk_size = bottom_chunk_size;
return BOTTOM;
}
}
/******************************************************************************
* Reserve the memory region delimited by 'addr' and 'size'. The extents of free
* memory are passed in 'free_base' and 'free_size' and they will be updated to
* reflect the memory usage.
* The caller must ensure the memory to reserve is free and that the addresses
* and sizes passed in arguments are sane.
*****************************************************************************/
void reserve_mem(uintptr_t *free_base, size_t *free_size,
uintptr_t addr, size_t size)
{
size_t discard_size;
size_t reserved_size;
unsigned int pos;
assert(free_base != NULL);
assert(free_size != NULL);
assert(is_mem_free(*free_base, *free_size, addr, size));
if (size == 0) {
WARN("Nothing to allocate, requested size is zero\n");
return;
}
pos = choose_mem_pos(*free_base, *free_base + (*free_size - 1),
addr, addr + (size - 1),
&discard_size);
reserved_size = size + discard_size;
*free_size -= reserved_size;
if (pos == BOTTOM)
*free_base = addr + size;
VERBOSE("Reserved 0x%zx bytes (discarded 0x%zx bytes %s)\n",
reserved_size, discard_size,
pos == TOP ? "above" : "below");
}
static void dump_load_info(uintptr_t image_load_addr,
size_t image_size,
const meminfo_t *mem_layout)
{
INFO("Trying to load image at address %p, size = 0x%zx\n",
(void *)image_load_addr, image_size);
INFO("Current memory layout:\n");
INFO(" total region = [base = %p, size = 0x%zx]\n",
(void *) mem_layout->total_base, mem_layout->total_size);
INFO(" free region = [base = %p, size = 0x%zx]\n",
(void *) mem_layout->free_base, mem_layout->free_size);
}
#endif /* LOAD_IMAGE_V2 */
/* Generic function to return the size of an image */
size_t get_image_size(unsigned int image_id)
{
@@ -226,8 +143,6 @@ size_t get_image_size(unsigned int image_id)
return image_size;
}
#if LOAD_IMAGE_V2
/*******************************************************************************
* Internal function to load an image at a specific address given
* an image ID and extents of free memory.
@@ -386,214 +301,6 @@ int load_auth_image(unsigned int image_id, image_info_t *image_data)
return err;
}
#else /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Generic function to load an image at a specific address given an image ID and
* extents of free memory.
*
* If the load is successful then the image information is updated.
*
* If the entry_point_info argument is not NULL then this function also updates:
* - the memory layout to mark the memory as reserved;
* - the entry point information.
*
* The caller might pass a NULL pointer for the entry point if they are not
* interested in this information. This is typically the case for non-executable
* images (e.g. certificates) and executable images that won't ever be executed
* on the application processor (e.g. additional microcontroller firmware).
*
* Returns 0 on success, a negative error code otherwise.
******************************************************************************/
int load_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info)
{
uintptr_t dev_handle;
uintptr_t image_handle;
uintptr_t image_spec;
size_t image_size;
size_t bytes_read;
int io_result;
assert(mem_layout != NULL);
assert(image_data != NULL);
assert(image_data->h.version == VERSION_1);
/* Obtain a reference to the image by querying the platform layer */
io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
if (io_result != 0) {
WARN("Failed to obtain reference to image id=%u (%i)\n",
image_id, io_result);
return io_result;
}
/* Attempt to access the image */
io_result = io_open(dev_handle, image_spec, &image_handle);
if (io_result != 0) {
WARN("Failed to access image id=%u (%i)\n",
image_id, io_result);
return io_result;
}
INFO("Loading image id=%u at address %p\n", image_id,
(void *) image_base);
/* Find the size of the image */
io_result = io_size(image_handle, &image_size);
if ((io_result != 0) || (image_size == 0)) {
WARN("Failed to determine the size of the image id=%u (%i)\n",
image_id, io_result);
goto exit;
}
/* Check that the memory where the image will be loaded is free */
if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
image_base, image_size)) {
WARN("Failed to reserve region [base = %p, size = 0x%zx]\n",
(void *) image_base, image_size);
dump_load_info(image_base, image_size, mem_layout);
io_result = -ENOMEM;
goto exit;
}
/* We have enough space so load the image now */
/* TODO: Consider whether to try to recover/retry a partially successful read */
io_result = io_read(image_handle, image_base, image_size, &bytes_read);
if ((io_result != 0) || (bytes_read < image_size)) {
WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
goto exit;
}
image_data->image_base = image_base;
image_data->image_size = image_size;
/*
* Update the memory usage info.
* This is done after the actual loading so that it is not updated when
* the load is unsuccessful.
* If the caller does not provide an entry point, bypass the memory
* reservation.
*/
if (entry_point_info != NULL) {
reserve_mem(&mem_layout->free_base, &mem_layout->free_size,
image_base, image_size);
entry_point_info->pc = image_base;
} else {
INFO("Skip reserving region [base = %p, size = 0x%zx]\n",
(void *) image_base, image_size);
}
#if !TRUSTED_BOARD_BOOT
/*
* File has been successfully loaded.
* Flush the image to main memory so that it can be executed later by
* any CPU, regardless of cache and MMU state.
* When TBB is enabled the image is flushed later, after image
* authentication.
*/
flush_dcache_range(image_base, image_size);
#endif /* TRUSTED_BOARD_BOOT */
INFO("Image id=%u loaded at address %p, size = 0x%zx\n", image_id,
(void *) image_base, image_size);
exit:
io_close(image_handle);
/* Ignore improbable/unrecoverable error in 'close' */
/* TODO: Consider maintaining open device connection from this bootloader stage */
io_dev_close(dev_handle);
/* Ignore improbable/unrecoverable error in 'dev_close' */
return io_result;
}
static int load_auth_image_internal(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info,
int is_parent_image)
{
int rc;
#if TRUSTED_BOARD_BOOT
unsigned int parent_id;
/* Use recursion to authenticate parent images */
rc = auth_mod_get_parent_id(image_id, &parent_id);
if (rc == 0) {
rc = load_auth_image_internal(mem_layout, parent_id, image_base,
image_data, NULL, 1);
if (rc != 0) {
return rc;
}
}
#endif /* TRUSTED_BOARD_BOOT */
/* Load the image */
rc = load_image(mem_layout, image_id, image_base, image_data,
entry_point_info);
if (rc != 0) {
return rc;
}
#if TRUSTED_BOARD_BOOT
/* Authenticate it */
rc = auth_mod_verify_img(image_id,
(void *)image_data->image_base,
image_data->image_size);
if (rc != 0) {
/* Authentication error, zero memory and flush it right away. */
zero_normalmem((void *)image_data->image_base,
image_data->image_size);
flush_dcache_range(image_data->image_base,
image_data->image_size);
return -EAUTH;
}
/*
* File has been successfully loaded and authenticated.
* Flush the image to main memory so that it can be executed later by
* any CPU, regardless of cache and MMU state.
* Do it only for child images, not for the parents (certificates).
*/
if (!is_parent_image) {
flush_dcache_range(image_data->image_base,
image_data->image_size);
}
#endif /* TRUSTED_BOARD_BOOT */
return 0;
}
/*******************************************************************************
* Generic function to load and authenticate an image. The image is actually
* loaded by calling the 'load_image()' function. Therefore, it returns the
* same error codes if the loading operation failed, or -EAUTH if the
* authentication failed. In addition, this function uses recursion to
* authenticate the parent images up to the root of trust.
******************************************************************************/
int load_auth_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
image_info_t *image_data,
entry_point_info_t *entry_point_info)
{
int err;
do {
err = load_auth_image_internal(mem_layout, image_id, image_base,
image_data, entry_point_info, 0);
} while (err != 0 && plat_try_next_boot_source());
return err;
}
#endif /* LOAD_IMAGE_V2 */
/*******************************************************************************
* Print the content of an entry_point_info_t structure.
******************************************************************************/
@@ -394,13 +394,9 @@ On Arm platforms, BL2 performs the following platform initializations:
Image loading in BL2
^^^^^^^^^^^^^^^^^^^^
BL2 generic code loads the images based on the list of loadable images
provided by the platform. BL2 passes the list of executable images
provided by the platform to the next handover BL image.
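For illustration, the following is a minimal sketch of how a platform might
describe its loadable images to the BL2 generic code, assuming it uses the
common ``desc_image_load`` helpers; the single BL31 entry and its addresses are
placeholders rather than a definitive implementation:

.. code:: c

    #include <desc_image_load.h>

    static bl_mem_params_node_t bl2_mem_params_descs[] = {
        {
            .image_id = BL31_IMAGE_ID,
            SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
                    entry_point_info_t,
                    SECURE | EXECUTABLE | EP_FIRST_EXE),
            .ep_info.pc = BL31_BASE,
            SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
                    image_info_t, 0),
            .image_info.image_base = BL31_BASE,
            .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
            .next_handoff_image_id = BL33_IMAGE_ID,
        },
        /* Entries for BL32, BL33 and configuration images would follow. */
    };

    REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)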
The list of loadable images provided by the platform may also contain
dynamic configuration files. The files are loaded and can be parsed as
@@ -425,10 +421,7 @@ EL3 Runtime Software image load
BL2 loads the EL3 Runtime Software image from platform storage into a platform-
specific address in trusted SRAM. If there is not enough memory to load the
image or the image is missing, it leads to an assertion failure.
AArch64 BL32 (Secure-EL1 Payload) image load
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1281,48 +1274,23 @@ interrupts on the platform. To this end, the platform is expected to provide the
GIC driver (either GICv2 or GICv3, as selected by the platform) with the
interrupt configuration during the driver initialisation.
Secure interrupt configuration is specified in an array of secure interrupt
properties. In this scheme, in both GICv2 and GICv3 driver data structures, the
``interrupt_props`` member points to an array of interrupt properties. Each
element of the array specifies the interrupt number and its properties, viz.
priority, group and configuration. Each element of the array shall be populated
by the macro ``INTR_PROP_DESC()``. The macro takes the following arguments:

- 10-bit interrupt number,

- 8-bit interrupt priority,

- Interrupt type (one of ``INTR_TYPE_EL3``, ``INTR_TYPE_S_EL1``,
``INTR_TYPE_NS``),

- Interrupt configuration (either ``GIC_INTR_CFG_LEVEL`` or
``GIC_INTR_CFG_EDGE``).
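As an illustration, a platform might populate such an array and reference it
from the GICv3 driver data as in the sketch below; the interrupt numbers and
priorities are hypothetical placeholders:

.. code:: c

    static const interrupt_prop_t plat_interrupt_props[] = {
        /* Secure physical timer interrupt, routed to EL3 */
        INTR_PROP_DESC(29, GIC_HIGHEST_SEC_PRIORITY, INTR_TYPE_EL3,
                GIC_INTR_CFG_LEVEL),
        /* Secure SGI, handled in Secure-EL1 */
        INTR_PROP_DESC(8, GIC_HIGHEST_SEC_PRIORITY, INTR_TYPE_S_EL1,
                GIC_INTR_CFG_EDGE),
    };

    static const gicv3_driver_data_t plat_gic_data = {
        /* ... other members elided ... */
        .interrupt_props = plat_interrupt_props,
        .interrupt_props_num = ARRAY_SIZE(plat_interrupt_props),
    };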
There is also a second, deprecated scheme: an array of secure interrupts. In
this scheme, the GIC driver is provided an array of secure interrupt numbers.
The GIC driver, at the time of initialisation, iterates through the array and
assigns each interrupt the appropriate group.

- For the GICv2 driver, in the ``gicv2_driver_data`` structure, the
``g0_interrupt_array`` member should point to the array of interrupts to be
assigned to *Group 0*, and the ``g0_interrupt_num`` member should be set to
the number of interrupts in the array.

- For the GICv3 driver, in the ``gicv3_driver_data`` structure:

  - The ``g0_interrupt_array`` member should point to the array of interrupts
    to be assigned to *Group 0*, and the ``g0_interrupt_num`` member should be
    set to the number of interrupts in the array.

  - The ``g1s_interrupt_array`` member should point to the array of interrupts
    to be assigned to *Group 1 Secure*, and the ``g1s_interrupt_num`` member
    should be set to the number of interrupts in the array.

**Note that this scheme is deprecated.**
CPU specific operations framework
---------------------------------
@@ -14,13 +14,13 @@ To build:
.. code:: bash
make CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp bl31
To build bl32 TSP you have to rebuild bl31 too:
.. code:: bash
make CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp SPD=tspd bl31 bl32
ZynqMP platform specific build options
======================================
docs/platform-migration-guide.rst deleted
Guide to migrate to new Platform porting interface
==================================================
.. section-numbering::
:suffix: .
.. contents::
--------------
Introduction
------------
The PSCI implementation in TF-A has undergone a redesign because of three
requirements that the PSCI 1.0 specification introduced:
- Removing the framework assumption about the structure of the MPIDR, and
its relation to the power topology enables support for deeper and more
complex hierarchies.
- Reworking the power state coordination implementation in the framework
to support the more detailed PSCI 1.0 requirements and reduce platform
port complexity.

- Enabling the use of the extended power\_state parameter and the larger StateID
field.
The PSCI 1.0 implementation introduces new frameworks to fulfill the above
requirements. These framework changes mean that the platform porting API must
also be modified. This document is a guide to assist migration of the existing
platform ports to the new platform API.
This document describes the new platform API and compares it with the
deprecated API. It also describes the compatibility layer that enables the
existing platform ports to work with the PSCI 1.0 implementation. The
deprecated platform API is documented for reference.
Platform API modification due to PSCI framework changes
-------------------------------------------------------
This section describes changes to the platform APIs.
Power domain topology framework platform API modifications
----------------------------------------------------------
This framework removes the assumption in the PSCI implementation that
MPIDR-based affinity instances map directly to power domains. A power domain, as
described in section 4.2 of `PSCI`_, could contain a core or a logical group
of cores (a cluster) which share some state on which power management
operations can be performed. The existing affinity instance based APIs
``plat_get_aff_count()`` and ``plat_get_aff_state()`` are deprecated. The new
platform interfaces that are introduced for this framework are:
- ``plat_core_pos_by_mpidr()``
- ``plat_my_core_pos()``
- ``plat_get_power_domain_tree_desc()``
``plat_my_core_pos()`` and ``plat_core_pos_by_mpidr()`` are mandatory
and are meant to replace the existing ``platform_get_core_pos()`` API.
The description of these APIs can be found in the `Porting Guide`_.
These are used by the power domain topology framework such that:
#. The generic PSCI code does not generate MPIDRs or use them to query the
platform about the number of power domains at a particular power level. The
``plat_get_power_domain_tree_desc()`` provides a description of the power
domain tree on the SoC through a pointer to the byte array containing the
power domain topology tree description data structure.
#. The linear indices returned by ``plat_core_pos_by_mpidr()`` and
``plat_my_core_pos()`` are used to retrieve core power domain nodes from
the power domain tree. These core indices are unique to a core and lie
between ``0`` and ``PLATFORM_CORE_COUNT - 1``. The platform can choose
to implement a static mapping between ``MPIDR`` and core index or implement
a dynamic mapping, choosing to skip the unavailable/unused cores to compact
the core indices.
In addition, the platforms must define the macros ``PLAT_NUM_PWR_DOMAINS`` and
``PLAT_MAX_PWR_LVL`` which replace the macros ``PLAT_NUM_AFFS`` and
``PLATFORM_MAX_AFFLVL`` respectively. On platforms where the affinity instances
correspond to power domains, the values of new macros remain the same as the
old ones.
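For instance, a hypothetical SoC with one system power domain containing two
clusters, of four and two cores respectively, might describe itself with the
following sketch (the values are illustrative, not taken from a real port):

.. code:: c

    /* platform_def.h: core (0), cluster (1) and system (2) levels */
    #define PLAT_MAX_PWR_LVL        2
    #define PLAT_NUM_PWR_DOMAINS    (1 + 2 + 6)

    /* Topology tree: 1 system node, 2 clusters, 4 + 2 cores */
    static const unsigned char plat_power_domain_tree_desc[] = {
        1,      /* Number of root nodes at the system power level */
        2,      /* Number of clusters */
        4,      /* Number of cores in cluster 0 */
        2       /* Number of cores in cluster 1 */
    };

    const unsigned char *plat_get_power_domain_tree_desc(void)
    {
        return plat_power_domain_tree_desc;
    }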
More details on the power domain topology description and its platform
interface can be found in `psci pd tree`_.
Composite power state framework platform API modifications
----------------------------------------------------------
The state-ID field in the power-state parameter of a CPU\_SUSPEND call can be
used to describe the composite power states specific to a platform. The existing
PSCI state coordination had the limitation that it operated on a run/off
granularity of power states and did not interpret the state-ID field. This
was acceptable for the PSCI 0.2 specification, where the framework's
approach to coordination only required maintaining a reference
count of the number of cores that have requested the cluster to remain powered.

For the PSCI 1.0 specification, this approach is no longer sufficient. If
composite power states are used, the PSCI implementation cannot make the
required global decisions about state coordination because it does not
understand the platform-specific states.
The PSCI 1.0 implementation now defines a generic representation of the
power-state parameter:
.. code:: c
typedef struct psci_power_state {
plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
} psci_power_state_t;
``pwr_domain_state`` is an array where each index corresponds to a power level.
Each entry in the array contains the local power state the power domain at
that power level could enter. The meaning of the local power state value is
platform defined, and can vary between levels in a single platform. The PSCI
implementation constrains the values only so that it can classify the state
as RUN, RETENTION or OFF as required by the specification:
#. Zero means RUN
#. All OFF state values at all levels must be higher than all
RETENTION state values at all levels
The platform is required to define the macros ``PLAT_MAX_RET_STATE`` and
``PLAT_MAX_OFF_STATE`` to the framework. The requirement for these macros can
be found in the `Porting Guide <porting-guide.rst>`__.
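For example, the ``validate_power_state`` handler described later might build
this representation as in the sketch below; the generic ``psci_get_pstate_*``
accessors are standard, but the uniform mapping across levels is an assumption:

.. code:: c

    static int plat_validate_power_state(unsigned int power_state,
            psci_power_state_t *req_state)
    {
        unsigned int type = psci_get_pstate_type(power_state);
        unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
        unsigned int i;

        if (pwr_lvl > PLAT_MAX_PWR_LVL)
            return PSCI_E_INVALID_PARAMS;

        /* Request the same local state at each affected power level */
        for (i = 0; i <= pwr_lvl; i++)
            req_state->pwr_domain_state[i] =
                (type == PSTATE_TYPE_POWERDOWN) ?
                PLAT_MAX_OFF_STATE : PLAT_MAX_RET_STATE;

        return PSCI_E_SUCCESS;
    }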
The PSCI 1.0 implementation adds support to involve the platform in state
coordination. This enables the platform to decide the final target state.
During a request to place a power domain in a low power state, the platform
is passed an array of requested ``plat_local_state_t`` for that power domain by
each core within it through the ``plat_get_target_pwr_state()`` API. This API
coordinates amongst these requested states to determine a target
``plat_local_state_t`` for that power domain. A default weak implementation of
this API is provided in the platform layer which returns the minimum of the
requested local states back to the PSCI state coordination. More details
of ``plat_get_target_pwr_state()`` API can be found in the
`Porting Guide <porting-guide.rst#user-content-function--plat_get_target_pwr_state-optional>`__.
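A sketch of a coordination handler along the same lines as that default,
returning the shallowest (numerically smallest) of the requested states:

.. code:: c

    plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
            const plat_local_state_t *states,
            unsigned int ncpu)
    {
        plat_local_state_t target = PLAT_MAX_OFF_STATE;

        do {
            /* Smaller values are shallower; any RUN request wins */
            if (*states < target)
                target = *states;
            states++;
        } while (--ncpu);

        return target;
    }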
The PSCI Generic implementation expects platform ports to populate the handlers
for the ``plat_psci_ops`` structure, which is declared as:
.. code:: c
typedef struct plat_psci_ops {
void (*cpu_standby)(plat_local_state_t cpu_state);
int (*pwr_domain_on)(u_register_t mpidr);
void (*pwr_domain_off)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_early)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_finish)(
const psci_power_state_t *target_state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state,
psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
void (*get_sys_suspend_power_state)(
psci_power_state_t *req_state);
int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
int pwrlvl);
int (*translate_power_state_by_mpidr)(u_register_t mpidr,
unsigned int power_state,
psci_power_state_t *output_state);
int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level);
int (*mem_protect_chk)(uintptr_t base, u_register_t length);
int (*read_mem_protect)(int *val);
int (*write_mem_protect)(int val);
int (*system_reset2)(int is_vendor,
int reset_type, u_register_t cookie);
} plat_psci_ops_t;
The description of these handlers can be found in the `Porting Guide <porting-guide.rst#user-content-function--plat_setup_psci_ops-mandatory>`__.
The previous ``plat_pm_ops`` structure is deprecated. Compared with the previous
handlers, the major differences are:
- Difference in parameters
The PSCI 1.0 implementation depends on the ``validate_power_state`` handler to
convert the power-state parameter (possibly encoding a composite power state)
passed in a PSCI ``CPU_SUSPEND`` to the ``psci_power_state`` format. This handler
is now mandatory for PSCI ``CPU_SUSPEND`` support.
The ``plat_psci_ops`` handlers, ``pwr_domain_off``, ``pwr_domain_suspend_early``
and ``pwr_domain_suspend``, are passed the target local state for each affected
power domain. The platform must execute operations specific to these target
states. Similarly, ``pwr_domain_on_finish`` and ``pwr_domain_suspend_finish``
are passed the local states of the affected power domains before wakeup. The
platform must execute actions to restore these power domains from these specific
local states.
- Difference in invocation
Whereas the power management handlers in ``plat_pm_ops`` used to be invoked
for each affinity level up to the target affinity level, the new handlers
are only invoked once. The ``target_state`` encodes the target low power
state or the low power state woken up from for each affected power domain.
- Difference in semantics
Although the previous ``suspend`` handlers could be used for power down as well
as retention at different affinity levels, the new handlers make this support
explicit. The ``pwr_domain_suspend`` can be used to specify powerdown and
retention at various power domain levels subject to the conditions mentioned
in section 4.2.1 of `PSCI`_.
Unlike the previous ``standby`` handler, the ``cpu_standby()`` handler is only used
as a fast path for placing a core power domain into a standby or retention
state.
The below diagram shows the sequence of a PSCI SUSPEND call and the interaction
with the platform layer, depicting the exchange of data between the PSCI
Generic layer and the platform layer.
|Image 1|
Refer to `plat/arm/board/fvp/fvp\_pm.c`_ for the implementation details of
these handlers for the FVP. The commit `38dce70f51fb83b27958ba3e2ad15f5635cb1061`_
demonstrates the migration of Arm reference platforms to the new platform API.
Miscellaneous modifications
---------------------------
In addition to the framework changes, unification of warm reset entry points on
wakeup from low power modes has led to a change in the platform API. In the
earlier implementation, the warm reset entry used to be programmed into the
mailboxes by the 'ON' and 'SUSPEND' power management hooks. In the PSCI 1.0
implementation, this information is not required, because the framework can
determine whether to execute the ``suspend_finisher`` or the ``on_finisher``
by querying the affinity info state.
As a result, the warm reset entry point must be programmed only once. The
``plat_setup_psci_ops()`` API takes the secure entry point as an
additional parameter to enable the platforms to configure their mailbox. The
plat\_psci\_ops handlers ``pwr_domain_on`` and ``pwr_domain_suspend`` no longer take
the warm reset entry point as a parameter.
Also, some platform APIs which took ``MPIDR`` as an argument were only ever
invoked to perform actions specific to the calling core, which makes the
argument redundant. Therefore the platform APIs ``plat_get_my_entrypoint()``,
``plat_is_my_cpu_primary()``, ``plat_set_my_stack()`` and
``plat_get_my_stack()`` are defined; they are meant to be invoked only for
operations on the current calling core, and they replace
``platform_get_entrypoint()``, ``platform_is_primary_cpu()``,
``platform_set_stack()`` and ``platform_get_stack()``.
Compatibility layer
-------------------
To ease the migration of the platform ports to the new porting interface,
a compatibility layer is introduced that essentially implements a glue layer
between the old platform API and the new API. The build flag
``ENABLE_PLAT_COMPAT`` (enabled by default) specifies whether to enable this
layer or not. A platform port which has migrated to the new API can disable
this flag within the platform specific makefile.
The compatibility layer works on the assumption that the onus of
state coordination, in case multiple low power states are supported,
is with the platform. The generic PSCI implementation only takes into
account whether the suspend request is power down or not. This corresponds
with the behavior of the PSCI implementation before the introduction of
new frameworks. Also, it assumes that the affinity levels of the platform
correspond directly to the power domain levels.
The compatibility layer dynamically constructs the new topology
description array by querying the platform using ``plat_get_aff_count()``
and ``plat_get_aff_state()`` APIs. The linear index returned by
``platform_get_core_pos()`` is used as the core index for the cores. The
higher level (non-core) power domain nodes must know the cores contained
within their domains. Each node does so by storing the core index of the
first core within it and the number of core indices following it. This means
that the core indices returned by ``platform_get_core_pos()`` for cores within
a particular power domain must be consecutive. We expect that this is the case for most
platform ports including Arm reference platforms.
The old PSCI helpers like ``psci_get_suspend_powerstate()``,
``psci_get_suspend_stateid()``, ``psci_get_suspend_stateid_by_mpidr()``,
``psci_get_max_phys_off_afflvl()`` and ``psci_get_suspend_afflvl()`` are also
implemented for the compatibility layer. This allows the existing
platform ports to work with the new PSCI frameworks without significant
rework.
Deprecated Platform API
-----------------------
This section documents the deprecated platform porting API.
Common mandatory modifications
------------------------------
The mandatory macros to be defined by the platform port in ``platform_def.h`` are:
- **#define : PLATFORM\_NUM\_AFFS**
Defines the total number of nodes in the affinity hierarchy at all affinity
levels used by the platform.
- **#define : PLATFORM\_MAX\_AFFLVL**
Defines the maximum affinity level that the power management operations
should apply to. Armv8-A has support for four affinity levels. It is likely
that hardware will implement fewer affinity levels. This macro allows the
PSCI implementation to consider only those affinity levels in the system
that the platform implements. For example, the Base AEM FVP implements two
clusters with a configurable number of cores. It reports the maximum
affinity level as 1, resulting in PSCI power control up to the cluster
level.
The following functions must be implemented by the platform port to enable
the reset vector code to perform the required tasks.
Function : platform\_get\_entrypoint() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : unsigned long
This function is called with the ``SCTLR.M`` and ``SCTLR.C`` bits disabled. The core
is identified by its ``MPIDR``, which is passed as the argument. The function is
responsible for distinguishing between a warm and cold reset using platform-
specific means. If it is a warm reset, it returns the entrypoint into the
BL31 image that the core must jump to. If it is a cold reset, this function
must return zero.
This function is also responsible for implementing a platform-specific mechanism
to handle the condition where the core has been warm reset but there is no
entrypoint to jump to.
This function does not follow the Procedure Call Standard used by the
Application Binary Interface for the Arm 64-bit architecture. The caller should
not assume that callee saved registers are preserved across a call to this
function.
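A minimal sketch of such an implementation, assuming a hypothetical
``PLAT_MAILBOX_BASE`` array that is zeroed on a cold boot and programmed with
the BL31 entrypoint before a warm reset:

.. code:: c

    unsigned long platform_get_entrypoint(unsigned long mpidr)
    {
        /* PLAT_MAILBOX_BASE is a hypothetical platform-defined address */
        unsigned long *mailbox = (unsigned long *)PLAT_MAILBOX_BASE;

        /* Zero means cold reset; otherwise this is the warm reset
         * entrypoint into BL31 for the core identified by mpidr. */
        return mailbox[platform_get_core_pos(mpidr)];
    }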
Function : platform\_is\_primary\_cpu() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : unsigned int
This function identifies a core by its ``MPIDR``, which is passed as the argument,
to determine whether this core is the primary core or a secondary core. A return
value of zero indicates that the core is not the primary core, while a non-zero
return value indicates that the core is the primary core.
Common optional modifications
-----------------------------
Function : platform\_get\_core\_pos()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : int
A platform may need to convert the ``MPIDR`` of a core to an absolute number, which
can be used as a core-specific linear index into blocks of memory (for example
while allocating per-core stacks). This routine contains a simple mechanism
to perform this conversion, using the assumption that each cluster contains a
maximum of four cores:
::
linear index = cpu_id + (cluster_id * 4)
cpu_id = 8-bit value in MPIDR at affinity level 0
cluster_id = 8-bit value in MPIDR at affinity level 1
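Expressed in C, the default conversion might look like the following sketch,
using the standard ``MPIDR_*`` field definitions:

.. code:: c

    unsigned int platform_get_core_pos(unsigned long mpidr)
    {
        unsigned int cpu_id = mpidr & MPIDR_CPU_MASK;
        unsigned int cluster_id = (mpidr & MPIDR_CLUSTER_MASK) >>
                MPIDR_AFFINITY_BITS;

        /* Each cluster is assumed to hold at most four cores */
        return cpu_id + (cluster_id * 4);
    }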
Function : platform\_set\_stack()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : void
This function sets the current stack pointer to the normal memory stack that
has been allocated for the core specified by MPIDR. For BL images that only
require a stack for the primary core the parameter is ignored. The size of
the stack allocated to each core is specified by the platform defined constant
``PLATFORM_STACK_SIZE``.
Common implementations of this function for the UP and MP BL images are
provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
`plat/common/aarch64/platform\_mp\_stack.S`_
Function : platform\_get\_stack()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned long
Return : unsigned long
This function returns the base address of the normal memory stack that
has been allocated for the core specified by MPIDR. For BL images that only
require a stack for the primary core the parameter is ignored. The size of
the stack allocated to each core is specified by the platform defined constant
``PLATFORM_STACK_SIZE``.
Common implementations of this function for the UP and MP BL images are
provided in `plat/common/aarch64/platform\_up\_stack.S`_ and
`plat/common/aarch64/platform\_mp\_stack.S`_
Modifications for Power State Coordination Interface (in BL31)
--------------------------------------------------------------
The following functions must be implemented to initialize PSCI functionality in
TF-A.
Function : plat\_get\_aff\_count() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned int, unsigned long
Return : unsigned int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
called by the primary core.
This function is called by the PSCI initialization code to detect the system
topology. Its purpose is to return the number of affinity instances implemented
at a given ``affinity level`` (specified by the first argument) and a given
``MPIDR`` (specified by the second argument). For example, on a dual-cluster
system where first cluster implements two cores and the second cluster
implements four cores, a call to this function with an ``MPIDR`` corresponding
to the first cluster (``0x0``) and affinity level 0, would return 2. A call
to this function with an ``MPIDR`` corresponding to the second cluster (``0x100``)
and affinity level 0, would return 4.
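For that example topology, an implementation might look like this sketch:

.. code:: c

    unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
    {
        /* Dual-cluster example: cluster 0 has two cores, cluster 1 four */
        if (aff_lvl > 1)
            return 1;       /* A single instance above the cluster level */

        if (aff_lvl == 1)
            return 2;       /* Two clusters at affinity level 1 */

        /* Affinity level 0: the core count depends on the cluster */
        return ((mpidr & MPIDR_CLUSTER_MASK) == 0) ? 2 : 4;
    }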
Function : plat\_get\_aff\_state() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned int, unsigned long
Return : unsigned int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
called by the primary core.
This function is called by the PSCI initialization code. Its purpose is to
return the state of an affinity instance. The affinity instance is determined by
the affinity ID at a given ``affinity level`` (specified by the first argument)
and an ``MPIDR`` (specified by the second argument). The state can be one of
``PSCI_AFF_PRESENT`` or ``PSCI_AFF_ABSENT``. The latter state is used to cater for
system topologies where certain affinity instances are unimplemented. For
example, consider a platform that implements a single cluster with four cores and
another core implemented directly on the interconnect with the cluster. The
``MPIDR``\ s of the cluster would range from ``0x0-0x3``. The ``MPIDR`` of the single
core is 0x100 to indicate that it does not belong to cluster 0. Cluster 1
is missing but needs to be accounted for to reach this single core in the
topology tree. Therefore it is marked as ``PSCI_AFF_ABSENT``.
Function : platform\_setup\_pm() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : const plat_pm_ops **
Return : int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in ``bl31_plat_arch_setup()``. It is only
called by the primary core.
This function is called by PSCI initialization code. Its purpose is to export
handler routines for platform-specific power management actions by populating
the passed pointer with a pointer to the private ``plat_pm_ops`` structure of
BL31.
A description of each member of this structure is given below. A platform port
is expected to implement these handlers if the corresponding PSCI operation
is to be supported and these handlers are expected to succeed if the return
type is ``void``.
plat\_pm\_ops.affinst\_standby()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform-specific setup to enter the standby state indicated by the
passed argument. The generic code expects the handler to succeed.
plat\_pm\_ops.affinst\_on()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform specific setup to power on an affinity instance, specified
by the ``MPIDR`` (first argument) and ``affinity level`` (third argument). The
``state`` (fourth argument) contains the current state of that affinity instance
(ON or OFF). This is useful to determine whether any action must be taken. For
example, while powering on a core, the cluster that contains this core might
already be in the ON state. The platform decides what actions must be taken to
transition from the current state to the target state (indicated by the power
management operation). The generic code expects the platform to return
E\_SUCCESS on success or E\_INTERN\_FAIL for any failure.
plat\_pm\_ops.affinst\_off()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform specific setup to power off an affinity instance of the
calling core. It is called by the PSCI ``CPU_OFF`` API implementation.
The ``affinity level`` (first argument) and ``state`` (second argument) have
a similar meaning as described in the ``affinst_on()`` operation. They
identify the affinity instance on which the call is made and its
current state. This gives the platform port an indication of the
state transition it must make to perform the requested action. For example, if
the calling core is the last powered on core in the cluster, after powering down
affinity level 0 (the core), the platform port should power down affinity
level 1 (the cluster) as well. The generic code expects the handler to succeed.
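A sketch of the last-core handling described above is given here. This is
illustrative only: the power-down helpers are hypothetical and
``PSCI_STATE_OFF`` is assumed to denote the OFF state.

::

    static int plat_affinst_off(unsigned int afflvl, unsigned int state)
    {
        /* Level 0: power down the calling core (hypothetical helper) */
        if (afflvl == MPIDR_AFFLVL0)
            plat_core_pwrdwn();

        /* Level 1: also power down the cluster when its state indicates
         * that the calling core was the last one powered on in it. */
        if ((afflvl == MPIDR_AFFLVL1) && (state == PSCI_STATE_OFF))
            plat_cluster_pwrdwn();

        return PSCI_E_SUCCESS;
    }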
plat\_pm\_ops.affinst\_suspend()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Perform the platform-specific setup to suspend an affinity instance of the
calling core. It is called by the PSCI ``CPU_SUSPEND`` and ``SYSTEM_SUSPEND``
API implementations.
The ``affinity level`` (second argument) and ``state`` (third argument) have a
similar meaning as described in the ``affinst_on()`` operation. They are used to
identify the affinity instance on which the call is made and its current state.
This gives the platform port an indication of the state transition it must
make to perform the requested action. For example, if the calling core is the
last powered on core in the cluster, after powering down affinity level 0
(the core), the platform port should power down affinity level 1 (the cluster)
as well.
The difference between turning an affinity instance off and suspending it
is that in the former case, the affinity instance is expected to re-initialize
its state when it is next powered on (see ``affinst_on_finish()``). In the latter
case, the affinity instance is expected to save enough state so that it can
resume execution by restoring this state when it is powered on (see
``affinst_suspend_finish()``). The generic code expects the handler to succeed.
plat\_pm\_ops.affinst\_on\_finish()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation after the calling core is
powered on and released from reset in response to an earlier PSCI ``CPU_ON`` call.
It performs the platform-specific setup required to initialize enough state for
this core to enter the Normal world and also provide secure runtime firmware
services.
The ``affinity level`` (first argument) and ``state`` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
plat\_pm\_ops.affinst\_suspend\_finish()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation after the calling core is
powered on and released from reset in response to an asynchronous wakeup
event, for example a timer interrupt that was programmed by the core during the
``CPU_SUSPEND`` call or ``SYSTEM_SUSPEND`` call. It performs the platform-specific
setup required to restore the saved state for this core to resume execution
in the Normal world and also provide secure runtime firmware services.
The ``affinity level`` (first argument) and ``state`` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
plat\_pm\_ops.validate\_power\_state()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation during the ``CPU_SUSPEND``
call to validate the ``power_state`` parameter of the PSCI API. If the
``power_state`` is known to be invalid, the platform must return
PSCI\_E\_INVALID\_PARAMS as an error, which is propagated back to the Normal
world PSCI client.
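For example, a platform might reject any State-ID it does not implement. In
the sketch below, ``psci_get_pstate_id()`` is assumed to extract the State-ID
field and ``PLAT_MAX_PWR_STATE_ID`` is a hypothetical platform constant.

::

    static int plat_validate_power_state(unsigned int power_state)
    {
        if (psci_get_pstate_id(power_state) > PLAT_MAX_PWR_STATE_ID)
            return PSCI_E_INVALID_PARAMS;

        return PSCI_E_SUCCESS;
    }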
plat\_pm\_ops.validate\_ns\_entrypoint()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation during the ``CPU_SUSPEND``,
``SYSTEM_SUSPEND`` and ``CPU_ON`` calls to validate the Non-secure ``entry_point``
parameter passed by the Normal world. If the ``entry_point`` is known to be
invalid, the platform must return PSCI\_E\_INVALID\_PARAMS as an error, which is
propagated back to the Normal world PSCI client.
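A typical implementation simply range-checks the address, as in the sketch
below; the Non-secure DRAM bounds are hypothetical platform constants.

::

    static int plat_validate_ns_entrypoint(uintptr_t entrypoint)
    {
        if ((entrypoint >= NS_DRAM_BASE) &&
            (entrypoint < (NS_DRAM_BASE + NS_DRAM_SIZE)))
            return PSCI_E_SUCCESS;

        return PSCI_E_INVALID_PARAMS;
    }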
plat\_pm\_ops.get\_sys\_suspend\_power\_state()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This function is called by the PSCI implementation during the ``SYSTEM_SUSPEND``
call to return the ``power_state`` parameter. This allows the platform to encode
the appropriate State-ID field within the ``power_state`` parameter which can be
used in ``affinst_suspend()`` to suspend to the system affinity level. The
``power_state`` parameter should be in the same format as specified by the
PSCI specification for the CPU\_SUSPEND API.
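A sketch of the encoding follows. This is illustrative only:
``psci_make_powerstate()`` is assumed to pack the State-ID, state type and
affinity level into the ``power_state`` format, and the State-ID value is a
hypothetical platform constant.

::

    static unsigned int plat_get_sys_suspend_power_state(void)
    {
        /* Power-down state targeting the system affinity level */
        return psci_make_powerstate(PLAT_SYS_SUSPEND_STATE_ID,
                                    PSTATE_TYPE_POWERDOWN,
                                    MPIDR_AFFLVL2);
    }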
--------------
*Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.*
.. _PSCI: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
.. _Porting Guide: porting-guide.rst#user-content-function--plat_my_core_pos
.. _psci pd tree: psci-pd-tree.rst
.. _plat/arm/board/fvp/fvp\_pm.c: ../plat/arm/board/fvp/fvp_pm.c
.. _38dce70f51fb83b27958ba3e2ad15f5635cb1061: https://github.com/ARM-software/arm-trusted-firmware/commit/38dce70f51fb83b27958ba3e2ad15f5635cb1061
.. _plat/common/aarch64/platform\_up\_stack.S: ../plat/common/aarch64/platform_up_stack.S
.. _plat/common/aarch64/platform\_mp\_stack.S: ../plat/common/aarch64/platform_mp_stack.S
.. |Image 1| image:: diagrams/psci-suspend-sequence.png?raw=true
......@@ -12,10 +12,6 @@ Trusted Firmware-A Porting Guide
Introduction
------------
Please note that this document has been updated for the new platform API
as required by the PSCI v1.0 implementation. Please refer to the
`Migration Guide`_ for the previous platform API.
Porting Trusted Firmware-A (TF-A) to a new platform involves making some
mandatory and optional modifications for both the cold and warm boot paths.
Modifications consist of:
......@@ -481,13 +477,6 @@ constants must also be defined:
enabled for a BL image, ``MAX_MMAP_REGIONS`` must be defined to accommodate
the dynamic regions as well.
- **#define : ADDR\_SPACE\_SIZE**
Defines the total size of the address space in bytes. For example, for a
32-bit address space, this value should be ``(1ULL << 32)``. This definition is
now deprecated, platforms should use ``PLAT_PHY_ADDR_SPACE_SIZE`` and
``PLAT_VIRT_ADDR_SPACE_SIZE`` instead.
- **#define : PLAT\_VIRT\_ADDR\_SPACE\_SIZE**
Defines the total size of the virtual address space in bytes. For example,
......@@ -2976,12 +2965,6 @@ The default implementation of this function calls
Build flags
-----------
- **ENABLE\_PLAT\_COMPAT**
All platform ports conforming to this API specification should define
the build flag ``ENABLE_PLAT_COMPAT`` to 0, as the compatibility layer should
be disabled. For more details on the compatibility layer, refer to the
`Migration Guide`_.
There are some build flags which can be defined by the platform to control
inclusion or exclusion of certain BL stages from the FIP image. These flags
need to be defined in the platform makefile which will get included by the
......@@ -3067,7 +3050,6 @@ amount of open resources per driver.
*Copyright (c) 2013-2018, Arm Limited and Contributors. All rights reserved.*
.. _Migration Guide: platform-migration-guide.rst
.. _include/plat/common/platform.h: ../include/plat/common/platform.h
.. _include/plat/arm/common/plat\_arm.h: ../include/plat/arm/common/plat_arm.h%5D
.. _User Guide: user-guide.rst
......
......@@ -234,11 +234,6 @@ Common build options
compiling TF-A. Its value must be numeric and defaults to 0. See also,
*Armv8 Architecture Extensions* in `Firmware Design`_.
- ``ARM_GIC_ARCH``: Choice of Arm GIC architecture version used by the Arm
Legacy GIC driver for implementing the platform GIC API. This API is used
by the interrupt management framework. Default is 2 (that is, version 2.0).
This build option is deprecated.
- ``ARM_PLAT_MT``: This flag determines whether the Arm platform layer has to
cater for the multi-threading ``MT`` bit when accessing MPIDR. When this flag
is set, the functions which deal with MPIDR assume that the ``MT`` bit in
......@@ -334,8 +329,8 @@ Common build options
- ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
Board Boot authentication at runtime. This option is meant to be enabled only
for development platforms. Both TRUSTED_BOARD_BOOT and LOAD_IMAGE_V2 flags
must be set if this flag has to be enabled. 0 is the default.
for development platforms. The ``TRUSTED_BOARD_BOOT`` flag must be set if this
flag is to be enabled. 0 is the default.
- ``EL3_PAYLOAD_BASE``: This option enables booting an EL3 payload instead of
the normal boot flow. It must specify the entry point address of the EL3
......@@ -514,12 +509,6 @@ Common build options
- ``LDFLAGS``: Extra user options appended to the linkers' command line in
addition to the one set by the build system.
- ``LOAD_IMAGE_V2``: Boolean option to enable support for new version (v2) of
image loading, which provides more flexibility and scalability around what
images are loaded and executed during boot. Default is 0.
Note: this flag must be enabled for AArch32 builds.
- ``LOG_LEVEL``: Chooses the log level, which controls the amount of console log
output compiled into the build. This should be one of the following:
......@@ -844,9 +833,6 @@ Arm FVP platform specific build options
- ``FVP_GIC600`` : The GIC600 implementation of GICv3 is selected
- ``FVP_GICV2`` : The GICv2 only driver is selected
- ``FVP_GICV3`` : The GICv3 only driver is selected (default option)
- ``FVP_GICV3_LEGACY``: The Legacy GICv3 driver is selected (deprecated)
Note: If TF-A is compiled with this option on FVPs with GICv3 hardware,
then it configures the hardware to run in GICv2 emulation mode.
- ``FVP_USE_SP804_TIMER`` : Use the SP804 timer instead of the Generic Timer
for functions that wait for an arbitrary time length (udelay and mdelay).
......@@ -1085,18 +1071,6 @@ destination. In that case, use -f or --force to continue.
More information about FIP can be found in the `Firmware Design`_ document.
Migrating from fip\_create to fiptool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The previous version of fiptool was called fip\_create. A compatibility script
that emulates the basic functionality of the previous fip\_create is provided.
However, users are strongly encouraged to migrate to fiptool.
- To create a new FIP file, replace "fip\_create" with "fiptool create".
- To update a FIP file, replace "fip\_create" with "fiptool update".
- To dump the contents of a FIP file, replace "fip\_create --dump"
with "fiptool info".
Building FIP images with support for Trusted Board Boot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
......@@ -1215,12 +1189,12 @@ command:
make PLAT=<platform> [DEBUG=1] [V=1] certtool
For platforms that do not require their own IDs in certificate files,
the generic 'cert\_create' tool can be built with the following command:
For platforms that require their own IDs in certificate files, the generic
'cert\_create' tool can be built with the following command:
::
make USE_TBBR_DEFS=1 [DEBUG=1] [V=1] certtool
make USE_TBBR_DEFS=0 [DEBUG=1] [V=1] certtool
``DEBUG=1`` builds the tool in debug mode. ``V=1`` makes the build process more
verbose. The following command should be used to obtain help about the tool:
......
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <assert.h>
#include <cci400.h>
#include <debug.h>
#include <mmio.h>
#include <stdint.h>
#define MAX_CLUSTERS 2
static uintptr_t cci_base_addr;
static unsigned int cci_cluster_ix_to_iface[MAX_CLUSTERS];
void cci_init(uintptr_t cci_base,
int slave_iface3_cluster_ix,
int slave_iface4_cluster_ix)
{
/*
* Check the passed arguments are valid. The cluster indices must be
* less than MAX_CLUSTERS, not the same as each other and at least one
* of them must refer to a valid cluster index.
*/
assert(cci_base);
assert(slave_iface3_cluster_ix < MAX_CLUSTERS);
assert(slave_iface4_cluster_ix < MAX_CLUSTERS);
assert(slave_iface3_cluster_ix != slave_iface4_cluster_ix);
assert((slave_iface3_cluster_ix >= 0) ||
(slave_iface4_cluster_ix >= 0));
WARN("Please migrate to common cci driver, This driver will be" \
" deprecated in future\n");
cci_base_addr = cci_base;
if (slave_iface3_cluster_ix >= 0)
cci_cluster_ix_to_iface[slave_iface3_cluster_ix] =
SLAVE_IFACE3_OFFSET;
if (slave_iface4_cluster_ix >= 0)
cci_cluster_ix_to_iface[slave_iface4_cluster_ix] =
SLAVE_IFACE4_OFFSET;
}
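/*
 * Example usage (illustrative; CCI400_BASE is a platform-specific
 * assumption): a platform with cluster 0 wired to slave interface 3 and
 * cluster 1 wired to slave interface 4 would call:
 *
 *	cci_init(CCI400_BASE, 0, 1);
 */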
static inline unsigned long get_slave_iface_base(unsigned long mpidr)
{
/*
* We assume the TF topology code allocates affinity instances
* consecutively from zero.
* It is a programming error if this is called without initializing
* the slave interface to use for this cluster.
*/
unsigned int cluster_id =
(mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(cluster_id < MAX_CLUSTERS);
assert(cci_cluster_ix_to_iface[cluster_id] != 0);
return cci_base_addr + cci_cluster_ix_to_iface[cluster_id];
}
void cci_enable_cluster_coherency(unsigned long mpidr)
{
assert(cci_base_addr);
/* Enable Snoops and DVM messages */
mmio_write_32(get_slave_iface_base(mpidr) + SNOOP_CTRL_REG,
DVM_EN_BIT | SNOOP_EN_BIT);
/* Wait for the dust to settle down */
while (mmio_read_32(cci_base_addr + STATUS_REG) & CHANGE_PENDING_BIT)
;
}
void cci_disable_cluster_coherency(unsigned long mpidr)
{
assert(cci_base_addr);
/* Disable Snoops and DVM messages */
mmio_write_32(get_slave_iface_base(mpidr) + SNOOP_CTRL_REG,
~(DVM_EN_BIT | SNOOP_EN_BIT));
/* Wait for the dust to settle down */
while (mmio_read_32(cci_base_addr + STATUS_REG) & CHANGE_PENDING_BIT)
;
}
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <arm_gic.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <gic_v2.h>
#include <gic_v3.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <stdint.h>
/* Value used to initialize Non-Secure IRQ priorities four at a time */
#define GICD_IPRIORITYR_DEF_VAL \
(GIC_HIGHEST_NS_PRIORITY | \
(GIC_HIGHEST_NS_PRIORITY << 8) | \
(GIC_HIGHEST_NS_PRIORITY << 16) | \
(GIC_HIGHEST_NS_PRIORITY << 24))
static uintptr_t g_gicc_base;
static uintptr_t g_gicd_base;
static uintptr_t g_gicr_base;
static const unsigned int *g_irq_sec_ptr;
static unsigned int g_num_irqs;
/*******************************************************************************
* This function does some minimal GICv3 configuration. The Firmware itself does
* not fully support GICv3 at this time and relies on GICv2 emulation as
* provided by GICv3. This function allows software (like Linux) in later stages
* to use full GICv3 features.
******************************************************************************/
static void gicv3_cpuif_setup(void)
{
unsigned int val;
uintptr_t base;
/*
* When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep
* bit set. In order to allow interrupts to get routed to the CPU we
* need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep
* to clear (GICv3 Architecture specification 5.4.23).
* GICR_WAKER is NOT banked per CPU, compute the correct base address
* per CPU.
*/
assert(g_gicr_base);
base = gicv3_get_rdist(g_gicr_base, read_mpidr());
if (base == (uintptr_t)NULL) {
/* No re-distributor base address. This interface cannot be
* configured.
*/
panic();
}
val = gicr_read_waker(base);
val &= ~WAKER_PS;
gicr_write_waker(base, val);
dsb();
/* We need to wait for ChildrenAsleep to clear. */
val = gicr_read_waker(base);
while (val & WAKER_CA)
val = gicr_read_waker(base);
val = read_icc_sre_el3();
write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);
isb();
}
/*******************************************************************************
* This function does some minimal GICv3 configuration when cores go
* down.
******************************************************************************/
static void gicv3_cpuif_deactivate(void)
{
unsigned int val;
uintptr_t base;
/*
* When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and
* wait for GICR_WAKER.ChildrenAsleep to get set.
* (GICv3 Architecture specification 5.4.23).
* GICR_WAKER is NOT banked per CPU, compute the correct base address
* per CPU.
*/
assert(g_gicr_base);
base = gicv3_get_rdist(g_gicr_base, read_mpidr());
if (base == (uintptr_t)NULL) {
/* No re-distributor base address. This interface cannot be
* configured.
*/
panic();
}
val = gicr_read_waker(base);
val |= WAKER_PS;
gicr_write_waker(base, val);
dsb();
/* We need to wait for ChildrenAsleep to set. */
val = gicr_read_waker(base);
while ((val & WAKER_CA) == 0)
val = gicr_read_waker(base);
}
/*******************************************************************************
* Enable secure interrupts and use FIQs to route them. Disable legacy bypass
* and set the priority mask register to allow all interrupts to trickle in.
******************************************************************************/
void arm_gic_cpuif_setup(void)
{
unsigned int val;
assert(g_gicc_base);
val = gicc_read_iidr(g_gicc_base);
/*
* If this is a GICv3, we need to do a bit of additional setup. We want to
* allow default GICv2 behaviour but allow the next stage to
* enable full GICv3 features.
*/
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3)
gicv3_cpuif_setup();
val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0;
val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;
gicc_write_pmr(g_gicc_base, GIC_PRI_MASK);
gicc_write_ctlr(g_gicc_base, val);
}
/*******************************************************************************
* Place the cpu interface in a state where it can never make a cpu exit wfi
* as a result of an asserted interrupt. This is critical for powering down a
* cpu.
******************************************************************************/
void arm_gic_cpuif_deactivate(void)
{
unsigned int val;
/* Disable secure, non-secure interrupts and disable their bypass */
assert(g_gicc_base);
val = gicc_read_ctlr(g_gicc_base);
val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
gicc_write_ctlr(g_gicc_base, val);
val = gicc_read_iidr(g_gicc_base);
/*
* If this is a GICv3, we need to do a bit of additional setup. Make sure the
* RDIST is put to sleep.
*/
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3)
gicv3_cpuif_deactivate();
}
/*******************************************************************************
* Per cpu gic distributor setup which will be done by all cpus after a cold
* boot/hotplug. This marks out the secure interrupts & enables them.
******************************************************************************/
void arm_gic_pcpu_distif_setup(void)
{
unsigned int index, irq_num, sec_ppi_sgi_mask;
assert(g_gicd_base);
/* Setup PPI priorities doing four at a time */
for (index = 0; index < 32; index += 4) {
gicd_write_ipriorityr(g_gicd_base, index,
GICD_IPRIORITYR_DEF_VAL);
}
assert(g_irq_sec_ptr);
sec_ppi_sgi_mask = 0;
/* Ensure all SGIs and PPIs are Group0 to begin with */
gicd_write_igroupr(g_gicd_base, 0, 0);
for (index = 0; index < g_num_irqs; index++) {
irq_num = g_irq_sec_ptr[index];
if (irq_num < MIN_SPI_ID) {
/* We have an SGI or a PPI */
sec_ppi_sgi_mask |= 1U << irq_num;
gicd_set_ipriorityr(g_gicd_base, irq_num,
GIC_HIGHEST_SEC_PRIORITY);
gicd_set_isenabler(g_gicd_base, irq_num);
}
}
/*
* Invert the bitmask to create a mask for non-secure PPIs and
* SGIs. Program the GICD_IGROUPR0 with this bit mask. This write will
* update the GICR_IGROUPR0 as well in case we are running on a GICv3
* system. This is critical if GICD_CTLR.ARE_NS=1.
*/
gicd_write_igroupr(g_gicd_base, 0, ~sec_ppi_sgi_mask);
}
/*******************************************************************************
* Get the current CPU bit mask from GICD_ITARGETSR0
******************************************************************************/
static unsigned int arm_gic_get_cpuif_id(void)
{
unsigned int val;
val = gicd_read_itargetsr(g_gicd_base, 0);
return val & GIC_TARGET_CPU_MASK;
}
/*******************************************************************************
* Global gic distributor setup which will be done by the primary cpu after a
* cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
* then enables the secure GIC distributor interface.
******************************************************************************/
static void arm_gic_distif_setup(void)
{
unsigned int num_ints, ctlr, index, irq_num;
uint8_t target_cpu;
/* Disable the distributor before going further */
assert(g_gicd_base);
ctlr = gicd_read_ctlr(g_gicd_base);
ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1);
gicd_write_ctlr(g_gicd_base, ctlr);
/*
* Mark out non-secure SPI interrupts. The number of interrupts is
* calculated as 32 * (IT_LINES + 1). We do 32 at a time.
*/
num_ints = gicd_read_typer(g_gicd_base) & IT_LINES_NO_MASK;
num_ints = (num_ints + 1) << 5;
for (index = MIN_SPI_ID; index < num_ints; index += 32)
gicd_write_igroupr(g_gicd_base, index, ~0);
/* Setup SPI priorities doing four at a time */
for (index = MIN_SPI_ID; index < num_ints; index += 4) {
gicd_write_ipriorityr(g_gicd_base, index,
GICD_IPRIORITYR_DEF_VAL);
}
/* Read the target CPU mask */
target_cpu = arm_gic_get_cpuif_id();
/* Configure SPI secure interrupts now */
assert(g_irq_sec_ptr);
for (index = 0; index < g_num_irqs; index++) {
irq_num = g_irq_sec_ptr[index];
if (irq_num >= MIN_SPI_ID) {
/* We have an SPI */
gicd_clr_igroupr(g_gicd_base, irq_num);
gicd_set_ipriorityr(g_gicd_base, irq_num,
GIC_HIGHEST_SEC_PRIORITY);
gicd_set_itargetsr(g_gicd_base, irq_num, target_cpu);
gicd_set_isenabler(g_gicd_base, irq_num);
}
}
/*
* Configure the SGIs and PPIs. This is done in a separate function
* because each CPU is responsible for initializing its own private
* interrupts.
*/
arm_gic_pcpu_distif_setup();
gicd_write_ctlr(g_gicd_base, ctlr | ENABLE_GRP0);
}
/*******************************************************************************
* Initialize the ARM GIC driver with the provided platform inputs
******************************************************************************/
void arm_gic_init(uintptr_t gicc_base,
uintptr_t gicd_base,
uintptr_t gicr_base,
const unsigned int *irq_sec_ptr,
unsigned int num_irqs)
{
unsigned int val;
assert(gicc_base);
assert(gicd_base);
assert(irq_sec_ptr);
g_gicc_base = gicc_base;
g_gicd_base = gicd_base;
val = gicc_read_iidr(g_gicc_base);
if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
assert(gicr_base);
g_gicr_base = gicr_base;
}
g_irq_sec_ptr = irq_sec_ptr;
g_num_irqs = num_irqs;
}
/*******************************************************************************
* Setup the ARM GIC CPU and distributor interfaces.
******************************************************************************/
void arm_gic_setup(void)
{
arm_gic_cpuif_setup();
arm_gic_distif_setup();
}
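/*
 * Typical call sequence from a platform port (illustrative; the base
 * addresses and the secure interrupt list are platform-specific
 * assumptions):
 *
 *	static const unsigned int irq_sec_array[] = { IRQ_SEC_PHY_TIMER };
 *
 *	arm_gic_init(GICC_BASE, GICD_BASE, GICR_BASE, irq_sec_array,
 *		     ARRAY_SIZE(irq_sec_array));
 *	arm_gic_setup();
 */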
/*******************************************************************************
* An ARM processor signals interrupt exceptions through the IRQ and FIQ pins.
* The interrupt controller knows which pin/line it uses to signal a type of
* interrupt. This function provides a common implementation of
* plat_interrupt_type_to_line() in an ARM GIC environment for optional re-use
across platforms. It lets the interrupt management framework determine,
for a given type of interrupt and security state, which line should be used in
the SCR_EL3 to control its routing to EL3. The interrupt line is represented as
* the bit position of the IRQ or FIQ bit in the SCR_EL3.
******************************************************************************/
uint32_t arm_gic_interrupt_type_to_line(uint32_t type,
uint32_t security_state)
{
assert(type == INTR_TYPE_S_EL1 ||
type == INTR_TYPE_EL3 ||
type == INTR_TYPE_NS);
assert(sec_state_is_valid(security_state));
/*
* We ignore the security state parameter under the assumption that
* both normal and secure worlds are using ARM GICv2. This parameter
* will be used when the secure world starts using GICv3.
*/
#if ARM_GIC_ARCH == 2
return gicv2_interrupt_type_to_line(g_gicc_base, type);
#else
#error "Invalid ARM GIC architecture version specified for platform port"
#endif /* ARM_GIC_ARCH */
}
#if ARM_GIC_ARCH == 2
/*******************************************************************************
* This function returns the type of the highest priority pending interrupt at
* the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no
* interrupt pending.
******************************************************************************/
uint32_t arm_gic_get_pending_interrupt_type(void)
{
uint32_t id;
assert(g_gicc_base);
id = gicc_read_hppir(g_gicc_base) & INT_ID_MASK;
/* Assume that all secure interrupts are S-EL1 interrupts */
if (id < 1022)
return INTR_TYPE_S_EL1;
if (id == GIC_SPURIOUS_INTERRUPT)
return INTR_TYPE_INVAL;
return INTR_TYPE_NS;
}
/*******************************************************************************
* This function returns the id of the highest priority pending interrupt at
* the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no
* interrupt pending.
******************************************************************************/
uint32_t arm_gic_get_pending_interrupt_id(void)
{
uint32_t id;
assert(g_gicc_base);
id = gicc_read_hppir(g_gicc_base) & INT_ID_MASK;
if (id < 1022)
return id;
if (id == GIC_SPURIOUS_INTERRUPT)
return INTR_ID_UNAVAILABLE;
/*
* Find out which non-secure interrupt it is under the assumption that
* the GICC_CTLR.AckCtl bit is 0.
*/
return gicc_read_ahppir(g_gicc_base) & INT_ID_MASK;
}
/*******************************************************************************
* This function reads the GIC cpu interface Interrupt Acknowledge register
* to start handling the pending interrupt. It returns the contents of the IAR.
******************************************************************************/
uint32_t arm_gic_acknowledge_interrupt(void)
{
assert(g_gicc_base);
return gicc_read_IAR(g_gicc_base);
}
/*******************************************************************************
* This function writes the GIC cpu interface End Of Interrupt register with
* the passed value to finish handling the active interrupt.
******************************************************************************/
void arm_gic_end_of_interrupt(uint32_t id)
{
assert(g_gicc_base);
gicc_write_EOIR(g_gicc_base, id);
}
/*******************************************************************************
* This function returns the type of the interrupt id, depending upon the
* group under which the interrupt controller has configured it, i.e. Group 0
* or Group 1.
******************************************************************************/
uint32_t arm_gic_get_interrupt_type(uint32_t id)
{
uint32_t group;
assert(g_gicd_base);
group = gicd_get_igroupr(g_gicd_base, id);
/* Assume that all secure interrupts are S-EL1 interrupts */
if (group == GRP0)
return INTR_TYPE_S_EL1;
else
return INTR_TYPE_NS;
}
#else
#error "Invalid ARM GIC architecture version specified for platform port"
#endif /* ARM_GIC_ARCH */
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <assert.h>
#include <gic_v2.h>
#include <interrupt_mgmt.h>
#include <mmio.h>
/*******************************************************************************
* GIC Distributor interface accessors for reading entire registers
******************************************************************************/
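/*
 * Each of these register banks packs a fixed number of interrupt IDs into
 * every 32-bit word (e.g. 32 for IGROUPR, 4 for IPRIORITYR), so id >> SHIFT
 * selects the word within the bank and (n << 2) converts that word index
 * into a byte offset from the first register of the bank.
 */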
unsigned int gicd_read_igroupr(uintptr_t base, unsigned int id)
{
unsigned n = id >> IGROUPR_SHIFT;
return mmio_read_32(base + GICD_IGROUPR + (n << 2));
}
unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id)
{
unsigned n = id >> ISENABLER_SHIFT;
return mmio_read_32(base + GICD_ISENABLER + (n << 2));
}
unsigned int gicd_read_icenabler(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICENABLER_SHIFT;
return mmio_read_32(base + GICD_ICENABLER + (n << 2));
}
unsigned int gicd_read_ispendr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ISPENDR_SHIFT;
return mmio_read_32(base + GICD_ISPENDR + (n << 2));
}
unsigned int gicd_read_icpendr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICPENDR_SHIFT;
return mmio_read_32(base + GICD_ICPENDR + (n << 2));
}
unsigned int gicd_read_isactiver(uintptr_t base, unsigned int id)
{
unsigned n = id >> ISACTIVER_SHIFT;
return mmio_read_32(base + GICD_ISACTIVER + (n << 2));
}
unsigned int gicd_read_icactiver(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICACTIVER_SHIFT;
return mmio_read_32(base + GICD_ICACTIVER + (n << 2));
}
unsigned int gicd_read_ipriorityr(uintptr_t base, unsigned int id)
{
unsigned n = id >> IPRIORITYR_SHIFT;
return mmio_read_32(base + GICD_IPRIORITYR + (n << 2));
}
unsigned int gicd_read_itargetsr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ITARGETSR_SHIFT;
return mmio_read_32(base + GICD_ITARGETSR + (n << 2));
}
unsigned int gicd_read_icfgr(uintptr_t base, unsigned int id)
{
unsigned n = id >> ICFGR_SHIFT;
return mmio_read_32(base + GICD_ICFGR + (n << 2));
}
unsigned int gicd_read_cpendsgir(uintptr_t base, unsigned int id)
{
unsigned n = id >> CPENDSGIR_SHIFT;
return mmio_read_32(base + GICD_CPENDSGIR + (n << 2));
}
unsigned int gicd_read_spendsgir(uintptr_t base, unsigned int id)
{
unsigned n = id >> SPENDSGIR_SHIFT;
return mmio_read_32(base + GICD_SPENDSGIR + (n << 2));
}
/*******************************************************************************
* GIC Distributor interface accessors for writing entire registers
******************************************************************************/
void gicd_write_igroupr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> IGROUPR_SHIFT;
mmio_write_32(base + GICD_IGROUPR + (n << 2), val);
}
void gicd_write_isenabler(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISENABLER_SHIFT;
mmio_write_32(base + GICD_ISENABLER + (n << 2), val);
}
void gicd_write_icenabler(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICENABLER_SHIFT;
mmio_write_32(base + GICD_ICENABLER + (n << 2), val);
}
void gicd_write_ispendr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISPENDR_SHIFT;
mmio_write_32(base + GICD_ISPENDR + (n << 2), val);
}
void gicd_write_icpendr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICPENDR_SHIFT;
mmio_write_32(base + GICD_ICPENDR + (n << 2), val);
}
void gicd_write_isactiver(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ISACTIVER_SHIFT;
mmio_write_32(base + GICD_ISACTIVER + (n << 2), val);
}
void gicd_write_icactiver(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICACTIVER_SHIFT;
mmio_write_32(base + GICD_ICACTIVER + (n << 2), val);
}
void gicd_write_ipriorityr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> IPRIORITYR_SHIFT;
mmio_write_32(base + GICD_IPRIORITYR + (n << 2), val);
}
void gicd_write_itargetsr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ITARGETSR_SHIFT;
mmio_write_32(base + GICD_ITARGETSR + (n << 2), val);
}
void gicd_write_icfgr(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> ICFGR_SHIFT;
mmio_write_32(base + GICD_ICFGR + (n << 2), val);
}
void gicd_write_cpendsgir(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> CPENDSGIR_SHIFT;
mmio_write_32(base + GICD_CPENDSGIR + (n << 2), val);
}
void gicd_write_spendsgir(uintptr_t base, unsigned int id, unsigned int val)
{
unsigned n = id >> SPENDSGIR_SHIFT;
mmio_write_32(base + GICD_SPENDSGIR + (n << 2), val);
}
/*******************************************************************************
* GIC Distributor interface accessors for individual interrupt manipulation
******************************************************************************/
unsigned int gicd_get_igroupr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
return (reg_val >> bit_num) & 0x1;
}
void gicd_set_igroupr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
gicd_write_igroupr(base, id, reg_val | (1 << bit_num));
}
void gicd_clr_igroupr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << IGROUPR_SHIFT) - 1);
unsigned int reg_val = gicd_read_igroupr(base, id);
gicd_write_igroupr(base, id, reg_val & ~(1 << bit_num));
}
void gicd_set_isenabler(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISENABLER_SHIFT) - 1);
gicd_write_isenabler(base, id, (1 << bit_num));
}
void gicd_set_icenabler(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICENABLER_SHIFT) - 1);
gicd_write_icenabler(base, id, (1 << bit_num));
}
void gicd_set_ispendr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISPENDR_SHIFT) - 1);
gicd_write_ispendr(base, id, (1 << bit_num));
}
void gicd_set_icpendr(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICPENDR_SHIFT) - 1);
gicd_write_icpendr(base, id, (1 << bit_num));
}
void gicd_set_isactiver(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ISACTIVER_SHIFT) - 1);
gicd_write_isactiver(base, id, (1 << bit_num));
}
void gicd_set_icactiver(uintptr_t base, unsigned int id)
{
unsigned bit_num = id & ((1 << ICACTIVER_SHIFT) - 1);
gicd_write_icactiver(base, id, (1 << bit_num));
}
/*
* Make sure that the interrupt's group is set before expecting
* this function to do its job correctly.
*/
void gicd_set_ipriorityr(uintptr_t base, unsigned int id, unsigned int pri)
{
/*
* Enforce ARM recommendation to manage priority values such
* that group1 interrupts always have a lower priority than
* group0 interrupts.
* Note, lower numerical values are higher priorities so the comparison
* checks below are reversed from what might be expected.
*/
assert(gicd_get_igroupr(base, id) == GRP1 ?
pri >= GIC_HIGHEST_NS_PRIORITY &&
pri <= GIC_LOWEST_NS_PRIORITY :
pri >= GIC_HIGHEST_SEC_PRIORITY &&
pri <= GIC_LOWEST_SEC_PRIORITY);
mmio_write_8(base + GICD_IPRIORITYR + id, pri & GIC_PRI_MASK);
}
void gicd_set_itargetsr(uintptr_t base, unsigned int id, unsigned int target)
{
mmio_write_8(base + GICD_ITARGETSR + id, target & GIC_TARGET_CPU_MASK);
}
/*******************************************************************************
* This function allows the interrupt management framework to determine (through
* the platform) which interrupt line (IRQ/FIQ) to use for an interrupt type to
* route it to EL3. The interrupt line is represented as the bit position of the
* IRQ or FIQ bit in the SCR_EL3.
******************************************************************************/
uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type)
{
uint32_t gicc_ctlr;
/* Non-secure interrupts are signalled on the IRQ line always */
if (type == INTR_TYPE_NS)
return __builtin_ctz(SCR_IRQ_BIT);
/*
* Secure interrupts are signalled using the IRQ line if the FIQ_EN
* bit is not set else they are signalled using the FIQ line.
*/
gicc_ctlr = gicc_read_ctlr(cpuif_base);
if (gicc_ctlr & FIQ_EN)
return __builtin_ctz(SCR_FIQ_BIT);
else
return __builtin_ctz(SCR_IRQ_BIT);
}
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <debug.h>
#include <gic_v3.h>
uintptr_t gicv3_get_rdist(uintptr_t gicr_base, u_register_t mpidr)
{
uint32_t cpu_aff, gicr_aff;
uint64_t gicr_typer;
uintptr_t addr;
/* Construct the affinity as used by GICv3. The MPIDR and GIC affinity level
masks are the same.
*/
cpu_aff = ((mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF0_SHIFT;
cpu_aff |= ((mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF1_SHIFT;
cpu_aff |= ((mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF2_SHIFT;
cpu_aff |= ((mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK) <<
GICV3_AFF3_SHIFT;
addr = gicr_base;
do {
gicr_typer = gicr_read_typer(addr);
gicr_aff = (gicr_typer >> GICR_TYPER_AFF_SHIFT) &
GICR_TYPER_AFF_MASK;
if (cpu_aff == gicr_aff) {
/* Disable this print for now as it appears every time
* when using PSCI CPU_SUSPEND.
* TODO: Print this only the first time for each CPU.
* INFO("GICv3 - Found RDIST for MPIDR(0x%lx) at %p\n",
* mpidr, (void *) addr);
*/
return addr;
}
/* TODO:
* For GICv4 we need to adjust the Base address based on
* GICR_TYPER.VLPIS
*/
addr += (1 << GICR_PCPUBASE_SHIFT);
} while (!(gicr_typer & GICR_TYPER_LAST));
/* If we get here we did not find a match. */
ERROR("GICv3 - Did not find RDIST for CPU with MPIDR 0x%lx\n", mpidr);
return (uintptr_t)NULL;
}
......@@ -114,43 +114,6 @@ void gicv2_spis_configure_defaults(uintptr_t gicd_base)
gicd_write_icfgr(gicd_base, index, 0U);
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Helper function to configure secure G0 SPIs.
******************************************************************************/
void gicv2_secure_spis_configure(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list)
{
unsigned int index, irq_num;
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
if (num_ints != 0U)
assert(sec_intr_list != NULL);
for (index = 0; index < num_ints; index++) {
irq_num = sec_intr_list[index];
if (irq_num >= MIN_SPI_ID) {
/* Configure this interrupt as a secure interrupt */
gicd_clr_igroupr(gicd_base, irq_num);
/* Set the priority of this interrupt */
gicd_set_ipriorityr(gicd_base,
irq_num,
GIC_HIGHEST_SEC_PRIORITY);
/* Target the secure interrupts to primary CPU */
gicd_set_itargetsr(gicd_base, irq_num,
gicv2_get_cpuif_id(gicd_base));
/* Enable this interrupt */
gicd_set_isenabler(gicd_base, irq_num);
}
}
}
#endif
/*******************************************************************************
* Helper function to configure properties of secure G0 SPIs.
******************************************************************************/
......@@ -192,56 +155,6 @@ void gicv2_secure_spis_configure_props(uintptr_t gicd_base,
}
}
#if !ERROR_DEPRECATED
/*******************************************************************************
* Helper function to configure secure G0 SGIs and PPIs.
******************************************************************************/
void gicv2_secure_ppi_sgi_setup(uintptr_t gicd_base,
unsigned int num_ints,
const unsigned int *sec_intr_list)
{
unsigned int index, irq_num, sec_ppi_sgi_mask = 0;
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */
assert(num_ints ? (uintptr_t)sec_intr_list : 1);
/*
* Disable all SGIs (imp. def.)/PPIs before configuring them. This is a
* more scalable approach as it avoids clearing the enable bits in the
* GICD_CTLR.
*/
gicd_write_icenabler(gicd_base, 0, ~0);
/* Setup the default PPI/SGI priorities doing four at a time */
for (index = 0; index < MIN_SPI_ID; index += 4)
gicd_write_ipriorityr(gicd_base,
index,
GICD_IPRIORITYR_DEF_VAL);
for (index = 0; index < num_ints; index++) {
irq_num = sec_intr_list[index];
if (irq_num < MIN_SPI_ID) {
/* We have an SGI or a PPI. They are Group0 at reset */
sec_ppi_sgi_mask |= 1U << irq_num;
/* Set the priority of this interrupt */
gicd_set_ipriorityr(gicd_base,
irq_num,
GIC_HIGHEST_SEC_PRIORITY);
}
}
/*
* Invert the bitmask to create a mask for non-secure PPIs and
* SGIs. Program the GICD_IGROUPR0 with this bit mask.
*/
gicd_write_igroupr(gicd_base, 0, ~sec_ppi_sgi_mask);
/* Enable the Group 0 SGIs and PPIs */
gicd_write_isenabler(gicd_base, 0, sec_ppi_sgi_mask);
}
#endif
/*******************************************************************************
* Helper function to configure properties of secure G0 SGIs and PPIs.
******************************************************************************/
......
......@@ -79,27 +79,9 @@ void gicv2_pcpu_distif_init(void)
assert(driver_data != NULL);
assert(driver_data->gicd_base != 0U);
#if !ERROR_DEPRECATED
if (driver_data->interrupt_props != NULL) {
#endif
gicv2_secure_ppi_sgi_setup_props(driver_data->gicd_base,
driver_data->interrupt_props,
driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
} else {
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
assert(driver_data->g0_interrupt_array);
gicv2_secure_ppi_sgi_setup(driver_data->gicd_base,
driver_data->g0_interrupt_num,
driver_data->g0_interrupt_array);
#pragma GCC diagnostic pop
}
#endif
/* Enable G0 interrupts if not already */
ctlr = gicd_read_ctlr(driver_data->gicd_base);
......@@ -129,30 +111,10 @@ void gicv2_distif_init(void)
/* Set the default attribute of all SPIs */
gicv2_spis_configure_defaults(driver_data->gicd_base);
#if !ERROR_DEPRECATED
if (driver_data->interrupt_props != NULL) {
#endif
gicv2_secure_spis_configure_props(driver_data->gicd_base,
driver_data->interrupt_props,
driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
} else {
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
assert(driver_data->g0_interrupt_array);
/* Configure the G0 SPIs */
gicv2_secure_spis_configure(driver_data->gicd_base,
driver_data->g0_interrupt_num,
driver_data->g0_interrupt_array);
#pragma GCC diagnostic pop
}
#endif
/* Re-enable the secure SPIs now that they have been configured */
gicd_write_ctlr(driver_data->gicd_base, ctlr | CTLR_ENABLE_G0_BIT);
......@@ -169,35 +131,8 @@ void gicv2_driver_init(const gicv2_driver_data_t *plat_driver_data)
assert(plat_driver_data->gicd_base != 0U);
assert(plat_driver_data->gicc_base != 0U);
#if !ERROR_DEPRECATED
if (plat_driver_data->interrupt_props == NULL) {
/* Interrupt properties array size must be 0 */
assert(plat_driver_data->interrupt_props_num == 0);
/*
* Suppress deprecated declaration warnings in compatibility
* function
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
/*
* If there are no interrupts of a particular type, then the
* number of interrupts of that type should be 0 and vice-versa.
*/
assert(plat_driver_data->g0_interrupt_array ?
plat_driver_data->g0_interrupt_num :
plat_driver_data->g0_interrupt_num == 0);
#pragma GCC diagnostic pop
WARN("Using deprecated integer interrupt array in "
"gicv2_driver_data_t\n");
WARN("Please migrate to using an interrupt_prop_t array\n");
}
#else
assert(plat_driver_data->interrupt_props_num > 0 ?
plat_driver_data->interrupt_props != NULL : 1);
#endif
/* Ensure that this is a GICv2 system */
gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);
......