Commit 9719e19a authored by Joanna Farley's avatar Joanna Farley Committed by TrustedFirmware Code Review

Merge changes I500ddbe9,I9c10dac9,I53bfff85,I06f7594d,I24bff8d4, ... into integration

* changes:
  nxp lx2160a-aqds: new plat based on soc lx2160a
  NXP lx2160a-rdb: new plat based on SoC lx2160a
  nxp lx2162aqds: new plat based on soc lx2160a
  nxp: errata handling at soc level for lx2160a
  nxp: make file for loading additional ddr image
  nxp: adding support of soc lx2160a
  nxp: deflt hdr files for soc & their platforms
  nxp: platform files for bl2 and bl31 setup
  nxp: warm reset support to retain ddr content
  nxp: nv storage api on platforms
  nxp: supports two mode of trusted board boot
  nxp: fip-handler for additional fip_fuse.bin
  nxp: fip-handler for additional ddr-fip.bin
  nxp: image loader for loading fip image
  nxp: svp & sip smc handling
  nxp: psci platform functions used by lib/psci
  nxp: helper function used by plat & common code
  nxp: add data handler used by bl31
  nxp: adding the driver.mk file
  nxp-tool: for creating pbl file from bl2
  nxp: adding the smmu driver
  nxp: cot using nxp internal and mbedtls
  nxp:driver for crypto h/w accelerator caam
  nxp:add driver support for sd and emmc
  nxp:add qspi driver
  nxp: add flexspi driver support
  nxp: adding gic apis for nxp soc
  nxp: gpio driver support
  nxp: added csu driver
  nxp: driver pmu for nxp soc
  nxp: ddr driver enablement for nxp layerscape soc
  nxp: i2c driver support.
  NXP: Driver for NXP Security Monitor
  NXP: SFP driver support for NXP SoC
  NXP: Interconnect API based on ARM CCN-CCI driver
  NXP: TZC API to configure ddr region
  NXP: Timer API added to enable ARM generic timer
  nxp: add dcfg driver
  nxp:add console driver for nxp platform
  tools: add mechanism to allow platform specific image UUID
  tbbr-cot: conditional definition for the macro
  tbbr-cot: fix the issue of compiling time define
  cert_create: updated tool for platform defined certs, keys & extensions
  tbbr-tools: enable override TRUSTED_KEY_CERT
parents b59444ea f359a382
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef MMU_MAP_DEF_H
#define MMU_MAP_DEF_H
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <platform_def.h>
#define LS_MAP_CCSR MAP_REGION_FLAT(NXP_CCSR_ADDR, \
NXP_CCSR_SIZE, \
MT_DEVICE | MT_RW | MT_SECURE)
#ifdef NXP_DCSR_ADDR
#define LS_MAP_DCSR MAP_REGION_FLAT(NXP_DCSR_ADDR, \
NXP_DCSR_SIZE, \
MT_DEVICE | MT_RW | MT_SECURE)
#endif
#define LS_MAP_CONSOLE MAP_REGION_FLAT(NXP_DUART1_ADDR, \
NXP_DUART_SIZE, \
MT_DEVICE | MT_RW | MT_NS)
#define LS_MAP_OCRAM MAP_REGION_FLAT(NXP_OCRAM_ADDR, \
NXP_OCRAM_SIZE, \
MT_DEVICE | MT_RW | MT_SECURE)
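/*
 * The NXP_*_ADDR/_SIZE inputs above come from each SoC's platform_def.h.
 * A minimal sketch of what a SoC would provide (values here are
 * illustrative only, not taken from any specific SoC):
 *
 *	#define NXP_CCSR_ADDR		0x1000000
 *	#define NXP_CCSR_SIZE		0xF000000
 */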
#endif /* MMU_MAP_DEF_H */
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_COMMON_H
#define PLAT_COMMON_H
#include <stdbool.h>
#include <lib/el3_runtime/cpu_data.h>
#include <platform_def.h>
#ifdef IMAGE_BL31
#define BL31_END (uintptr_t)(&__BL31_END__)
/*******************************************************************************
* This structure represents the superset of information that can be passed to
* BL31 e.g. while passing control to it from BL2. The BL32 parameters will be
* populated only if BL2 detects its presence. A pointer to a structure of this
* type should be passed in X0 to BL31's cold boot entrypoint.
*
* Use of this structure and the X0 parameter is not mandatory: the BL31
* platform code can use other mechanisms to provide the necessary information
* about BL32 and BL33 to the common and SPD code.
*
* BL31 image information is mandatory if this structure is used. If either of
* the optional BL32 and BL33 image information is not provided, this is
* indicated by the respective image_info pointers being zero.
******************************************************************************/
typedef struct bl31_params {
param_header_t h;
image_info_t *bl31_image_info;
entry_point_info_t *bl32_ep_info;
image_info_t *bl32_image_info;
entry_point_info_t *bl33_ep_info;
image_info_t *bl33_image_info;
} bl31_params_t;
/* BL3 utility functions */
void ls_bl31_early_platform_setup(void *from_bl2,
void *plat_params_from_bl2);
/* LS Helper functions */
unsigned int plat_my_core_mask(void);
unsigned int plat_core_mask(u_register_t mpidr);
unsigned int plat_core_pos(u_register_t mpidr);
//unsigned int plat_my_core_pos(void);
/* BL31 Data API(s) */
void _init_global_data(void);
void _initialize_psci(void);
uint32_t _getCoreState(u_register_t core_mask);
void _setCoreState(u_register_t core_mask, u_register_t core_state);
/* SoC defined structure and API(s) */
void soc_runtime_setup(void);
void soc_init(void);
void soc_platform_setup(void);
void soc_early_platform_setup2(void);
#endif /* IMAGE_BL31 */
#ifdef IMAGE_BL2
void soc_early_init(void);
void soc_mem_access(void);
void soc_preload_setup(void);
void soc_bl2_prepare_exit(void);
/* IO storage utility functions */
int plat_io_setup(void);
int open_backend(const uintptr_t spec);
void ls_bl2_plat_arch_setup(void);
void ls_bl2_el3_plat_arch_setup(void);
enum boot_device {
BOOT_DEVICE_IFC_NOR,
BOOT_DEVICE_IFC_NAND,
BOOT_DEVICE_QSPI,
BOOT_DEVICE_EMMC,
BOOT_DEVICE_SDHC2_EMMC,
BOOT_DEVICE_FLEXSPI_NOR,
BOOT_DEVICE_FLEXSPI_NAND,
BOOT_DEVICE_NONE
};
enum boot_device get_boot_dev(void);
/* DDR Related functions */
#if DDR_INIT
#ifdef NXP_WARM_BOOT
long long init_ddr(uint32_t wrm_bt_flg);
#else
long long init_ddr(void);
#endif
#endif
/* Board specific weak functions */
bool board_enable_povdd(void);
bool board_disable_povdd(void);
void mmap_add_ddr_region_dynamically(void);
#endif /* IMAGE_BL2 */
typedef struct {
uint64_t addr;
uint64_t size;
} region_info_t;
typedef struct {
uint64_t num_dram_regions;
uint64_t total_dram_size;
region_info_t region[NUM_DRAM_REGIONS];
} dram_regions_info_t;
dram_regions_info_t *get_dram_regions_info(void);
void ls_setup_page_tables(uintptr_t total_base,
size_t total_size,
uintptr_t code_start,
uintptr_t code_limit,
uintptr_t rodata_start,
uintptr_t rodata_limit
#if USE_COHERENT_MEM
, uintptr_t coh_start,
uintptr_t coh_limit
#endif
);
/* Structure to define SoC personality */
struct soc_type {
char name[10];
uint32_t personality;
uint32_t num_clusters;
uint32_t cores_per_cluster;
};
#define SOC_ENTRY(n, v, ncl, nc) { \
.name = #n, \
.personality = SVR_##v, \
.num_clusters = (ncl), \
.cores_per_cluster = (nc)}
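/*
 * Illustrative use of SOC_ENTRY (the SoC name, SVR suffix and the
 * cluster/core counts below are hypothetical):
 *
 *	static const struct soc_type soc_list[] = {
 *		SOC_ENTRY(LX2160A, LX2160A, 8, 2),
 *	};
 */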
#endif /* PLAT_COMMON_H */
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_MACROS_S
#define PLAT_MACROS_S
/* ---------------------------------------------
* The below required platform porting macro
* prints out relevant GIC and CCI registers
* whenever an unhandled exception is taken in
* BL31.
* Clobbers: x0 - x10, x16, x17, sp
* ---------------------------------------------
*/
.macro plat_crash_print_regs
.endm
#endif /* PLAT_MACROS_S */
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <common/desc_image_load.h>
#include <dcfg.h>
#ifdef POLICY_FUSE_PROVISION
#include <fuse_io.h>
#endif
#include <mmu_def.h>
#include <plat_common.h>
#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
#include <plat_nv_storage.h>
#endif
#pragma weak bl2_el3_early_platform_setup
#pragma weak bl2_el3_plat_arch_setup
#pragma weak bl2_el3_plat_prepare_exit
static dram_regions_info_t dram_regions_info = {0};
/*******************************************************************************
* Return the pointer to the 'dram_regions_info' structure of the DRAM.
* This structure is populated after init_ddr().
******************************************************************************/
dram_regions_info_t *get_dram_regions_info(void)
{
return &dram_regions_info;
}
#ifdef DDR_INIT
static void populate_dram_regions_info(void)
{
long long dram_remain_size = dram_regions_info.total_dram_size;
uint8_t reg_id = 0U;
dram_regions_info.region[reg_id].addr = NXP_DRAM0_ADDR;
dram_regions_info.region[reg_id].size =
dram_remain_size > NXP_DRAM0_MAX_SIZE ?
NXP_DRAM0_MAX_SIZE : dram_remain_size;
if (dram_regions_info.region[reg_id].size != NXP_DRAM0_SIZE) {
ERROR("Incorrect DRAM0 size is defined in platform_def.h\n");
}
dram_remain_size -= dram_regions_info.region[reg_id].size;
dram_regions_info.region[reg_id].size -= (NXP_SECURE_DRAM_SIZE
+ NXP_SP_SHRD_DRAM_SIZE);
assert(dram_regions_info.region[reg_id].size > 0);
/* Reduce the total DRAM size by the secure and shared carve-out (66MB) */
dram_regions_info.total_dram_size -= (NXP_SECURE_DRAM_SIZE
+ NXP_SP_SHRD_DRAM_SIZE);
#if defined(NXP_DRAM1_ADDR) && defined(NXP_DRAM1_MAX_SIZE)
if (dram_remain_size > 0) {
reg_id++;
dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR;
dram_regions_info.region[reg_id].size =
dram_remain_size > NXP_DRAM1_MAX_SIZE ?
NXP_DRAM1_MAX_SIZE : dram_remain_size;
dram_remain_size -= dram_regions_info.region[reg_id].size;
}
#endif
#if defined(NXP_DRAM2_ADDR) && defined(NXP_DRAM2_MAX_SIZE)
if (dram_remain_size > 0) {
reg_id++;
dram_regions_info.region[reg_id].addr = NXP_DRAM2_ADDR;
dram_regions_info.region[reg_id].size =
dram_remain_size > NXP_DRAM2_MAX_SIZE ?
NXP_DRAM2_MAX_SIZE : dram_remain_size;
dram_remain_size -= dram_regions_info.region[reg_id].size;
}
#endif
reg_id++;
dram_regions_info.num_dram_regions = reg_id;
}
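/*
 * Worked example of the splitting above, assuming NXP_DRAM0_MAX_SIZE is
 * 2GB and total_dram_size is 4GB: region 0 covers the first 2GB at
 * NXP_DRAM0_ADDR (with the secure + shared carve-out subtracted from
 * its reported size) and the remaining 2GB lands in region 1 at
 * NXP_DRAM1_ADDR.
 */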
#endif
#ifdef IMAGE_BL32
/*******************************************************************************
* Gets SPSR for BL32 entry
******************************************************************************/
static uint32_t ls_get_spsr_for_bl32_entry(void)
{
/*
* The Secure Payload Dispatcher service is responsible for
* setting the SPSR prior to entry into the BL32 image.
*/
return 0U;
}
#endif
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
static uint32_t ls_get_spsr_for_bl33_entry(void)
{
unsigned int mode;
uint32_t spsr;
/* Figure out what mode we enter the non-secure world in */
mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
/*
* TODO: Consider the possibility of specifying the SPSR in
* the FIP ToC and allowing the platform to have a say as
* well.
*/
spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#else
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
static uint32_t ls_get_spsr_for_bl33_entry(void)
{
unsigned int hyp_status, mode, spsr;
hyp_status = GET_VIRT_EXT(read_id_pfr1());
mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
/*
* TODO: Consider the possibility of specifying the SPSR in
* the FIP ToC and allowing the platform to have a say as
* well.
*/
spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
void bl2_el3_early_platform_setup(u_register_t arg0 __unused,
u_register_t arg1 __unused,
u_register_t arg2 __unused,
u_register_t arg3 __unused)
{
/*
* SoC specific early init. Any errata handling or SoC specific
* early initialization can be done here, e.g.:
* - Set the counter base frequency in CNTFID0 and in cntfrq_el0.
* - Initialize the interconnect.
* - Enable coherency for the primary CPU cluster.
*/
soc_early_init();
/* Initialise the IO layer and register platform IO devices */
plat_io_setup();
if (dram_regions_info.total_dram_size > 0) {
populate_dram_regions_info();
}
#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
read_nv_app_data();
#if DEBUG
const nv_app_data_t *nv_app_data = get_nv_data();
INFO("Value of warm_reset flag = 0x%x\n", nv_app_data->warm_rst_flag);
INFO("Value of WDT flag = 0x%x\n", nv_app_data->wdt_rst_flag);
#endif
#endif
}
/*******************************************************************************
* Perform the very early platform specific architectural setup here. At the
* moment this only initializes the MMU in a quick and dirty way.
******************************************************************************/
void ls_bl2_el3_plat_arch_setup(void)
{
unsigned int flags = 0U;
/* Set up the page tables for the memory regions used by this BL image */
ls_setup_page_tables(
#if SEPARATE_RW_AND_NOLOAD
BL2_START,
BL2_LIMIT - BL2_START,
#else
BL2_BASE,
(unsigned long)(&__BL2_END__) - BL2_BASE,
#endif
BL_CODE_BASE,
BL_CODE_END,
BL_RO_DATA_BASE,
BL_RO_DATA_END
#if USE_COHERENT_MEM
, BL_COHERENT_RAM_BASE,
BL_COHERENT_RAM_END
#endif
);
if ((dram_regions_info.region[0].addr == 0)
&& (dram_regions_info.total_dram_size == 0)) {
flags = XLAT_TABLE_NC;
}
#ifdef AARCH32
enable_mmu_secure(0);
#else
enable_mmu_el3(flags);
#endif
}
void bl2_el3_plat_arch_setup(void)
{
ls_bl2_el3_plat_arch_setup();
}
void bl2_platform_setup(void)
{
/*
* Perform platform setup before loading the image.
*/
}
/* Handling image information by platform. */
int ls_bl2_handle_post_image_load(unsigned int image_id)
{
int err = 0;
bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
assert(bl_mem_params);
switch (image_id) {
case BL31_IMAGE_ID:
bl_mem_params->ep_info.args.arg3 =
(u_register_t) &dram_regions_info;
/* Pass the value of PORSR1 register in Argument 4 */
bl_mem_params->ep_info.args.arg4 =
(u_register_t)read_reg_porsr1();
flush_dcache_range((uintptr_t)&dram_regions_info,
sizeof(dram_regions_info));
break;
#if defined(AARCH64) && defined(IMAGE_BL32)
case BL32_IMAGE_ID:
bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry();
break;
#endif
case BL33_IMAGE_ID:
/* BL33 expects to receive the primary CPU MPID (through r0) */
bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl33_entry();
break;
}
return err;
}
/*******************************************************************************
* This function can be used by the platforms to update/use image
* information for given `image_id`.
******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
return ls_bl2_handle_post_image_load(image_id);
}
void bl2_el3_plat_prepare_exit(void)
{
return soc_bl2_prepare_exit();
}
/* Called to do the dynamic initialization required
* before loading the next image.
*/
void bl2_plat_preload_setup(void)
{
soc_preload_setup();
if (dram_regions_info.total_dram_size < NXP_DRAM0_SIZE) {
NOTICE("ERROR: DRAM0 Size is not correctly configured.");
assert(false);
}
if ((dram_regions_info.region[0].addr == 0)
&& (dram_regions_info.total_dram_size > 0)) {
populate_dram_regions_info();
mmap_add_ddr_region_dynamically();
}
/* setup the memory region access permissions */
soc_mem_access();
#ifdef POLICY_FUSE_PROVISION
fip_fuse_provisioning((uintptr_t)FUSE_BUF, FUSE_SZ);
#endif
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#ifdef LS_EL3_INTERRUPT_HANDLER
#include <ls_interrupt_mgmt.h>
#endif
#include <mmu_def.h>
#include <plat_common.h>
/*
* Placeholder variables for copying the arguments that have been passed to
* BL31 from BL2.
*/
#ifdef TEST_BL31
#define SPSR_FOR_EL2H 0x3C9
#define SPSR_FOR_EL1H 0x3C5
#else
static entry_point_info_t bl31_image_ep_info;
#endif
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
static dram_regions_info_t dram_regions_info = {0};
static uint64_t rcw_porsr1;
/* Return the pointer to the 'dram_regions_info' structure of the DRAM.
* This structure is populated after init_ddr().
*/
dram_regions_info_t *get_dram_regions_info(void)
{
return &dram_regions_info;
}
/* Return the RCW.PORSR1 value which was passed in from BL2
*/
uint64_t bl31_get_porsr1(void)
{
return rcw_porsr1;
}
/*
* Return pointer to the 'entry_point_info' structure of the next image for the
* security state specified:
* - BL33 corresponds to the non-secure image type; while
* - BL32 corresponds to the secure image type.
* - A NULL pointer is returned if the image does not exist.
*/
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
entry_point_info_t *next_image_info;
assert(sec_state_is_valid(type));
next_image_info = (type == NON_SECURE)
? &bl33_image_ep_info : &bl32_image_ep_info;
#ifdef TEST_BL31
next_image_info->pc = _get_test_entry();
next_image_info->spsr = SPSR_FOR_EL2H;
next_image_info->h.attr = NON_SECURE;
#endif
if (next_image_info->pc != 0U) {
return next_image_info;
} else {
return NULL;
}
}
/*
* Perform any BL31 early platform setup common to NXP platforms.
* - Here is an opportunity to copy parameters passed by the calling EL (S-EL1
* in BL2 & S-EL3 in BL1) before they are lost (potentially).
* - This needs to be done before the MMU is initialized so that the
* memory layout can be used while creating page tables.
* - BL2 has flushed this information to memory, so the latest data is fetched.
*/
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
#ifndef TEST_BL31
int i = 0;
void *from_bl2 = (void *)arg0;
#endif
soc_early_platform_setup2();
#ifdef TEST_BL31
dram_regions_info.num_dram_regions = 2;
dram_regions_info.total_dram_size = 0x100000000;
dram_regions_info.region[0].addr = 0x80000000;
dram_regions_info.region[0].size = 0x80000000;
dram_regions_info.region[1].addr = 0x880000000;
dram_regions_info.region[1].size = 0x80000000;
bl33_image_ep_info.pc = _get_test_entry();
#else
/*
* Check that the params passed from BL2 are not NULL.
*/
bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
assert(params_from_bl2 != NULL);
assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
assert(params_from_bl2->h.version >= VERSION_2);
bl_params_node_t *bl_params = params_from_bl2->head;
/*
* Copy BL33 and BL32 (if present), entry point information.
* They are stored in Secure RAM, in BL2's address space.
*/
while (bl_params != NULL) {
if (bl_params->image_id == BL31_IMAGE_ID) {
bl31_image_ep_info = *bl_params->ep_info;
dram_regions_info_t *loc_dram_regions_info =
(dram_regions_info_t *) bl31_image_ep_info.args.arg3;
dram_regions_info.num_dram_regions =
loc_dram_regions_info->num_dram_regions;
dram_regions_info.total_dram_size =
loc_dram_regions_info->total_dram_size;
VERBOSE("Number of DRAM Regions = %llx\n",
dram_regions_info.num_dram_regions);
for (i = 0; i < dram_regions_info.num_dram_regions;
i++) {
dram_regions_info.region[i].addr =
loc_dram_regions_info->region[i].addr;
dram_regions_info.region[i].size =
loc_dram_regions_info->region[i].size;
VERBOSE("DRAM%d Size = %llx\n", i,
dram_regions_info.region[i].size);
}
rcw_porsr1 = bl31_image_ep_info.args.arg4;
}
if (bl_params->image_id == BL32_IMAGE_ID) {
bl32_image_ep_info = *bl_params->ep_info;
}
if (bl_params->image_id == BL33_IMAGE_ID) {
bl33_image_ep_info = *bl_params->ep_info;
}
bl_params = bl_params->next_params_info;
}
#endif /* TEST_BL31 */
if (bl33_image_ep_info.pc == 0) {
panic();
}
/*
* Perform basic initialization of the SoC.
*/
soc_init();
}
/*******************************************************************************
* Perform any BL31 platform setup common to ARM standard platforms
******************************************************************************/
void bl31_platform_setup(void)
{
NOTICE("Welcome to %s BL31 Phase\n", BOARD);
soc_platform_setup();
/* Console logs can go missing while control passes to
* EL1 to initialize BL32, if present.
* A console flush is necessary to avoid losing them.
*/
(void)console_flush();
}
void bl31_plat_runtime_setup(void)
{
#ifdef LS_EL3_INTERRUPT_HANDLER
ls_el3_interrupt_config();
#endif
soc_runtime_setup();
}
/*******************************************************************************
* Perform the very early platform specific architectural setup shared between
* ARM standard platforms. This only does basic initialization. Later
* architectural setup (bl31_arch_setup()) does not do anything platform
* specific.
******************************************************************************/
void bl31_plat_arch_setup(void)
{
ls_setup_page_tables(BL31_BASE,
BL31_END - BL31_BASE,
BL_CODE_BASE,
BL_CODE_END,
BL_RO_DATA_BASE,
BL_RO_DATA_END
#if USE_COHERENT_MEM
, BL_COHERENT_RAM_BASE,
BL_COHERENT_RAM_END
#endif
);
enable_mmu_el3(0);
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <mmu_def.h>
#include <plat/common/platform.h>
#include "plat_common.h"
#include "platform_def.h"
const mmap_region_t *plat_ls_get_mmap(void);
/*
* Table of memory regions for various BL stages to map using the MMU.
* This doesn't include Trusted SRAM as arm_setup_page_tables() already
* takes care of mapping it.
*
* The flash needs to be mapped as writable in order to erase the FIP's Table of
* Contents in case of unrecoverable error (see plat_error_handler()).
*/
#ifdef IMAGE_BL2
const mmap_region_t plat_ls_mmap[] = {
LS_MAP_CCSR,
{0}
};
#endif
#ifdef IMAGE_BL31
const mmap_region_t plat_ls_mmap[] = {
LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
LS_MAP_DCSR,
#endif
LS_MAP_OCRAM,
{0}
};
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_ls_mmap[] = {
LS_MAP_CCSR,
LS_MAP_BL32_SEC_MEM,
{0}
};
#endif
/* Weak definitions may be overridden in specific NXP SoC */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_ls_get_mmap
#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
static void mmap_add_ddr_regions_statically(void)
{
int i = 0;
dram_regions_info_t *info_dram_regions = get_dram_regions_info();
/* MMU map for Non-Secure DRAM Regions */
VERBOSE("DRAM Region %d: %p - %p\n", i,
(void *) info_dram_regions->region[i].addr,
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size
- 1));
mmap_add_region(info_dram_regions->region[i].addr,
info_dram_regions->region[i].addr,
info_dram_regions->region[i].size,
MT_MEMORY | MT_RW | MT_NS);
/* MMU map for Secure DDR Region on DRAM-0 */
if (info_dram_regions->region[i].size >
(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size),
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size
+ NXP_SECURE_DRAM_SIZE
+ NXP_SP_SHRD_DRAM_SIZE
- 1));
mmap_add_region((info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size),
(info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size),
(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
MT_MEMORY | MT_RW | MT_SECURE);
}
#ifdef IMAGE_BL31
for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
if (info_dram_regions->region[i].size == 0)
break;
VERBOSE("DRAM Region %d: %p - %p\n", i,
(void *) info_dram_regions->region[i].addr,
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size
- 1));
mmap_add_region(info_dram_regions->region[i].addr,
info_dram_regions->region[i].addr,
info_dram_regions->region[i].size,
MT_MEMORY | MT_RW | MT_NS);
}
#endif
}
#endif
#if defined(PLAT_XLAT_TABLES_DYNAMIC)
void mmap_add_ddr_region_dynamically(void)
{
int i = 0;
dram_regions_info_t *info_dram_regions = get_dram_regions_info();
/* MMU map for Non-Secure DRAM Regions */
VERBOSE("DRAM Region %d: %p - %p\n", i,
(void *) info_dram_regions->region[i].addr,
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size
- 1));
mmap_add_dynamic_region(info_dram_regions->region[i].addr,
info_dram_regions->region[i].addr,
info_dram_regions->region[i].size,
MT_MEMORY | MT_RW | MT_NS);
/* MMU map for Secure DDR Region on DRAM-0 */
if (info_dram_regions->region[i].size >
(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size),
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size
+ NXP_SECURE_DRAM_SIZE
+ NXP_SP_SHRD_DRAM_SIZE
- 1));
mmap_add_dynamic_region((info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size),
(info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size),
(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
MT_MEMORY | MT_RW | MT_SECURE);
}
#ifdef IMAGE_BL31
for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
if (info_dram_regions->region[i].size == 0) {
break;
}
VERBOSE("DRAM Region %d: %p - %p\n", i,
(void *) info_dram_regions->region[i].addr,
(void *) (info_dram_regions->region[i].addr
+ info_dram_regions->region[i].size
- 1));
mmap_add_dynamic_region(info_dram_regions->region[i].addr,
info_dram_regions->region[i].addr,
info_dram_regions->region[i].size,
MT_MEMORY | MT_RW | MT_NS);
}
#endif
}
#endif
/*
* Set up the page tables for the generic and platform-specific memory regions.
* The extents of the generic memory regions are specified by the function
* arguments and consist of:
* - Trusted SRAM seen by the BL image;
* - Code section;
* - Read-only data section;
* - Coherent memory region, if applicable.
*/
void ls_setup_page_tables(uintptr_t total_base,
size_t total_size,
uintptr_t code_start,
uintptr_t code_limit,
uintptr_t rodata_start,
uintptr_t rodata_limit
#if USE_COHERENT_MEM
,
uintptr_t coh_start,
uintptr_t coh_limit
#endif
)
{
/*
* Map the Trusted SRAM with appropriate memory attributes.
* Subsequent mappings will adjust the attributes for specific regions.
*/
VERBOSE("Memory seen by this BL image: %p - %p\n",
(void *) total_base, (void *) (total_base + total_size));
mmap_add_region(total_base, total_base,
total_size,
MT_MEMORY | MT_RW | MT_SECURE);
/* Re-map the code section */
VERBOSE("Code region: %p - %p\n",
(void *) code_start, (void *) code_limit);
mmap_add_region(code_start, code_start,
code_limit - code_start,
MT_CODE | MT_SECURE);
/* Re-map the read-only data section */
VERBOSE("Read-only data region: %p - %p\n",
(void *) rodata_start, (void *) rodata_limit);
mmap_add_region(rodata_start, rodata_start,
rodata_limit - rodata_start,
MT_RO_DATA | MT_SECURE);
#if USE_COHERENT_MEM
/* Re-map the coherent memory region */
VERBOSE("Coherent region: %p - %p\n",
(void *) coh_start, (void *) coh_limit);
mmap_add_region(coh_start, coh_start,
coh_limit - coh_start,
MT_DEVICE | MT_RW | MT_SECURE);
#endif
/* Now (re-)map the platform-specific memory regions */
mmap_add(plat_ls_get_mmap());
#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
mmap_add_ddr_regions_statically();
#endif
/* Create the page tables to reflect the above mappings */
init_xlat_tables();
}
/*******************************************************************************
* Returns NXP platform specific memory map regions.
******************************************************************************/
const mmap_region_t *plat_ls_get_mmap(void)
{
return plat_ls_mmap;
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <arch_helpers.h>
#include <common/debug.h>
#if TRUSTED_BOARD_BOOT
#include <dcfg.h>
#include <snvs.h>
#endif
#include "plat_common.h"
/*
* Error handler
*/
void plat_error_handler(int err)
{
#if TRUSTED_BOARD_BOOT
uint32_t mode;
bool sb = check_boot_mode_secure(&mode);
#endif
switch (err) {
case -ENOENT:
case -EAUTH:
printf("Authentication failure\n");
#if TRUSTED_BOARD_BOOT
/* For SB production mode, i.e. ITS = 1 */
if (sb == true) {
if (mode == 1U) {
transition_snvs_soft_fail();
} else {
transition_snvs_non_secure();
}
}
#endif
break;
default:
/* Unexpected error */
break;
}
/* Loop until the watchdog resets the system */
for (;;)
wfi();
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <common/desc_image_load.h>
/*******************************************************************************
* This function flushes the data structures so that they are visible
* in memory for the next BL image.
******************************************************************************/
void plat_flush_next_bl_params(void)
{
flush_bl_params_desc();
}
/*******************************************************************************
* This function returns the list of loadable images.
******************************************************************************/
bl_load_info_t *plat_get_bl_image_load_info(void)
{
return get_bl_load_info_from_mem_params_desc();
}
/*******************************************************************************
* This function returns the list of executable images.
******************************************************************************/
bl_params_t *plat_get_next_bl_params(void)
{
return get_next_bl_params_from_mem_params_desc();
}
/*
* Copyright 2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <ls_interrupt_mgmt.h>
#include <plat/common/platform.h>
static interrupt_type_handler_t type_el3_interrupt_table[MAX_INTR_EL3];
int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler)
{
/* Validate 'handler' and 'id' parameters */
if (!handler || id >= MAX_INTR_EL3) {
return -EINVAL;
}
/* Check if a handler has already been registered */
if (type_el3_interrupt_table[id] != NULL) {
return -EALREADY;
}
type_el3_interrupt_table[id] = handler;
return 0;
}
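/*
 * Illustrative registration from SoC code (the interrupt ID and the
 * handler below are hypothetical):
 *
 *	static uint64_t wdog_handler(uint32_t id, uint32_t flags,
 *				     void *handle, void *cookie)
 *	{
 *		... service the interrupt ...
 *		return 0U;
 *	}
 *
 *	rc = request_intr_type_el3(WDOG_INTR_ID, wdog_handler);
 */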
static uint64_t ls_el3_interrupt_handler(uint32_t id, uint32_t flags,
void *handle, void *cookie)
{
uint32_t intr_id;
interrupt_type_handler_t handler;
intr_id = plat_ic_get_pending_interrupt_id();
INFO("Interrupt recvd is %d\n", intr_id);
handler = type_el3_interrupt_table[intr_id];
if (handler != NULL) {
handler(intr_id, flags, handle, cookie);
}
/*
* Mark this interrupt as complete to avoid an interrupt storm.
*/
plat_ic_end_of_interrupt(intr_id);
return 0U;
}
void ls_el3_interrupt_config(void)
{
uint64_t flags = 0U;
uint64_t rc;
set_interrupt_rm_flag(flags, NON_SECURE);
rc = register_interrupt_type_handler(INTR_TYPE_EL3,
ls_el3_interrupt_handler, flags);
if (rc != 0U) {
panic();
}
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <endian.h>
#include <string.h>
#include <common/debug.h>
#include <common/tbbr/tbbr_img_def.h>
#include <drivers/io/io_block.h>
#include <drivers/io/io_driver.h>
#include <drivers/io/io_fip.h>
#include <drivers/io/io_memmap.h>
#include <drivers/io/io_storage.h>
#ifdef FLEXSPI_NOR_BOOT
#include <flexspi_nor.h>
#endif
#if defined(QSPI_BOOT)
#include <qspi.h>
#endif
#if defined(SD_BOOT) || defined(EMMC_BOOT)
#include <sd_mmc.h>
#endif
#include <tools_share/firmware_image_package.h>
#ifdef CONFIG_DDR_FIP_IMAGE
#include <ddr_io_storage.h>
#endif
#ifdef POLICY_FUSE_PROVISION
#include <fuse_io.h>
#endif
#include "plat_common.h"
#include "platform_def.h"
uint32_t fip_device;
/* IO devices */
uintptr_t backend_dev_handle;
static const io_dev_connector_t *fip_dev_con;
static uintptr_t fip_dev_handle;
static const io_dev_connector_t *backend_dev_con;
static io_block_spec_t fip_block_spec = {
.offset = PLAT_FIP_OFFSET,
.length = PLAT_FIP_MAX_SIZE
};
static const io_uuid_spec_t bl2_uuid_spec = {
.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
};
static const io_uuid_spec_t fuse_bl2_uuid_spec = {
.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
};
static const io_uuid_spec_t bl31_uuid_spec = {
.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
};
static const io_uuid_spec_t bl32_uuid_spec = {
.uuid = UUID_SECURE_PAYLOAD_BL32,
};
static const io_uuid_spec_t bl33_uuid_spec = {
.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
};
static const io_uuid_spec_t tb_fw_config_uuid_spec = {
.uuid = UUID_TB_FW_CONFIG,
};
static const io_uuid_spec_t hw_config_uuid_spec = {
.uuid = UUID_HW_CONFIG,
};
#if TRUSTED_BOARD_BOOT
static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
.uuid = UUID_TRUSTED_BOOT_FW_CERT,
};
static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
.uuid = UUID_TRUSTED_KEY_CERT,
};
static const io_uuid_spec_t fuse_key_cert_uuid_spec = {
.uuid = UUID_SCP_FW_KEY_CERT,
};
static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
.uuid = UUID_SOC_FW_KEY_CERT,
};
static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
};
static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
};
static const io_uuid_spec_t fuse_cert_uuid_spec = {
.uuid = UUID_SCP_FW_CONTENT_CERT,
};
static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
.uuid = UUID_SOC_FW_CONTENT_CERT,
};
static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
};
static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
};
#endif /* TRUSTED_BOARD_BOOT */
static int open_fip(const uintptr_t spec);
struct plat_io_policy {
uintptr_t *dev_handle;
uintptr_t image_spec;
int (*check)(const uintptr_t spec);
};
/* By default, ARM platforms load images from the FIP */
static const struct plat_io_policy policies[] = {
[FIP_IMAGE_ID] = {
&backend_dev_handle,
(uintptr_t)&fip_block_spec,
open_backend
},
[BL2_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl2_uuid_spec,
open_fip
},
[SCP_BL2_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&fuse_bl2_uuid_spec,
open_fip
},
[BL31_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl31_uuid_spec,
open_fip
},
[BL32_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl32_uuid_spec,
open_fip
},
[BL33_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl33_uuid_spec,
open_fip
},
[TB_FW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&tb_fw_config_uuid_spec,
open_fip
},
[HW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&hw_config_uuid_spec,
open_fip
},
#if TRUSTED_BOARD_BOOT
[TRUSTED_BOOT_FW_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&tb_fw_cert_uuid_spec,
open_fip
},
[TRUSTED_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&trusted_key_cert_uuid_spec,
open_fip
},
[SCP_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&fuse_key_cert_uuid_spec,
open_fip
},
[SOC_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&soc_fw_key_cert_uuid_spec,
open_fip
},
[TRUSTED_OS_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&tos_fw_key_cert_uuid_spec,
open_fip
},
[NON_TRUSTED_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&nt_fw_key_cert_uuid_spec,
open_fip
},
[SCP_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&fuse_cert_uuid_spec,
open_fip
},
[SOC_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&soc_fw_cert_uuid_spec,
open_fip
},
[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&tos_fw_cert_uuid_spec,
open_fip
},
[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&nt_fw_cert_uuid_spec,
open_fip
},
#endif /* TRUSTED_BOARD_BOOT */
};
/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak plat_io_setup
/*
* Return an IO device handle and specification which can be used to
* access an image
*/
static int open_fip(const uintptr_t spec)
{
int result;
uintptr_t local_image_handle;
/* See if a Firmware Image Package is available */
result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
if (result == 0) {
result = io_open(fip_dev_handle, spec, &local_image_handle);
if (result == 0) {
VERBOSE("Using FIP\n");
io_close(local_image_handle);
}
}
return result;
}
int open_backend(const uintptr_t spec)
{
int result;
uintptr_t local_image_handle;
result = io_dev_init(backend_dev_handle, (uintptr_t)NULL);
if (result == 0) {
result = io_open(backend_dev_handle, spec, &local_image_handle);
if (result == 0) {
io_close(local_image_handle);
}
}
return result;
}
#if defined(SD_BOOT) || defined(EMMC_BOOT)
static int plat_io_block_setup(size_t fip_offset, uintptr_t block_dev_spec)
{
int io_result;
fip_block_spec.offset = fip_offset;
io_result = register_io_dev_block(&backend_dev_con);
assert(io_result == 0);
/* Open connections to devices and cache the handles */
io_result = io_dev_open(backend_dev_con, block_dev_spec,
&backend_dev_handle);
assert(io_result == 0);
return io_result;
}
#endif
#if defined(FLEXSPI_NOR_BOOT) || defined(QSPI_BOOT)
static int plat_io_memmap_setup(size_t fip_offset)
{
int io_result;
fip_block_spec.offset = fip_offset;
io_result = register_io_dev_memmap(&backend_dev_con);
assert(io_result == 0);
/* Open connections to devices and cache the handles */
io_result = io_dev_open(backend_dev_con, (uintptr_t)NULL,
&backend_dev_handle);
assert(io_result == 0);
return io_result;
}
#endif
static int ls_io_fip_setup(unsigned int boot_dev)
{
int io_result;
io_result = register_io_dev_fip(&fip_dev_con);
assert(io_result == 0);
/* Open connections to devices and cache the handles */
io_result = io_dev_open(fip_dev_con, (uintptr_t)&fip_device,
&fip_dev_handle);
assert(io_result == 0);
#ifdef CONFIG_DDR_FIP_IMAGE
/* Open connection to DDR FIP image if available */
io_result = ddr_fip_setup(fip_dev_con, boot_dev);
assert(io_result == 0);
#endif
#ifdef POLICY_FUSE_PROVISION
/* Open connection to FUSE FIP image if available */
io_result = fuse_fip_setup(fip_dev_con, boot_dev);
assert(io_result == 0);
#endif
return io_result;
}
int ls_qspi_io_setup(void)
{
#ifdef QSPI_BOOT
qspi_io_setup(NXP_QSPI_FLASH_ADDR,
NXP_QSPI_FLASH_SIZE,
PLAT_FIP_OFFSET);
return plat_io_memmap_setup(NXP_QSPI_FLASH_ADDR + PLAT_FIP_OFFSET);
#else
ERROR("QSPI driver not present. Check your BUILD\n");
/* Should never reach here */
assert(false);
return -1;
#endif
}
int emmc_sdhc2_io_setup(void)
{
#if defined(EMMC_BOOT) && defined(NXP_ESDHC2_ADDR)
uintptr_t block_dev_spec;
int ret;
ret = sd_emmc_init(&block_dev_spec,
NXP_ESDHC2_ADDR,
NXP_SD_BLOCK_BUF_ADDR,
NXP_SD_BLOCK_BUF_SIZE,
false);
if (ret != 0) {
return ret;
}
return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec);
#else
ERROR("EMMC driver not present. Check your BUILD\n");
/* Should never reach here */
assert(false);
return -1;
#endif
}
int emmc_io_setup(void)
{
/* On platforms that have only one ESDHC controller,
* eMMC boot will use the first ESDHC controller.
*/
#if defined(SD_BOOT) || defined(EMMC_BOOT)
uintptr_t block_dev_spec;
int ret;
ret = sd_emmc_init(&block_dev_spec,
NXP_ESDHC_ADDR,
NXP_SD_BLOCK_BUF_ADDR,
NXP_SD_BLOCK_BUF_SIZE,
true);
if (ret != 0) {
return ret;
}
return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec);
#else
ERROR("SD driver not present. Check your BUILD\n");
/* Should never reach here */
assert(false);
return -1;
#endif
}
int ifc_nor_io_setup(void)
{
ERROR("NOR driver not present. Check your BUILD\n");
/* Should never reach here */
assert(false);
return -1;
}
int ifc_nand_io_setup(void)
{
ERROR("NAND driver not present. Check your BUILD\n");
/* Should never reach here */
assert(false);
return -1;
}
int ls_flexspi_nor_io_setup(void)
{
#ifdef FLEXSPI_NOR_BOOT
int ret = 0;
ret = flexspi_nor_io_setup(NXP_FLEXSPI_FLASH_ADDR,
NXP_FLEXSPI_FLASH_SIZE,
NXP_FLEXSPI_ADDR);
if (ret != 0) {
ERROR("FlexSPI NOR driver initialization error.\n");
/* Should never reach here */
assert(0);
panic();
return -1;
}
return plat_io_memmap_setup(NXP_FLEXSPI_FLASH_ADDR + PLAT_FIP_OFFSET);
#else
ERROR("FlexSPI NOR driver not present. Check your BUILD\n");
/* Should never reach here */
assert(false);
return -1;
#endif
}
static int (* const ls_io_setup_table[])(void) = {
[BOOT_DEVICE_IFC_NOR] = ifc_nor_io_setup,
[BOOT_DEVICE_IFC_NAND] = ifc_nand_io_setup,
[BOOT_DEVICE_QSPI] = ls_qspi_io_setup,
[BOOT_DEVICE_EMMC] = emmc_io_setup,
[BOOT_DEVICE_SDHC2_EMMC] = emmc_sdhc2_io_setup,
[BOOT_DEVICE_FLEXSPI_NOR] = ls_flexspi_nor_io_setup,
[BOOT_DEVICE_FLEXSPI_NAND] = ls_flexspi_nor_io_setup,
};
int plat_io_setup(void)
{
int (*io_setup)(void);
unsigned int boot_dev = BOOT_DEVICE_NONE;
int ret;
boot_dev = get_boot_dev();
if (boot_dev == BOOT_DEVICE_NONE) {
ERROR("Boot Device detection failed, Check RCW_SRC\n");
return -EINVAL;
}
io_setup = ls_io_setup_table[boot_dev];
ret = io_setup();
if (ret != 0) {
return ret;
}
ret = ls_io_fip_setup(boot_dev);
if (ret != 0) {
return ret;
}
return 0;
}
/* Return an IO device handle and specification which can be used to access
* an image. Use this to enforce platform load policy
*/
int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
uintptr_t *image_spec)
{
int result = -1;
const struct plat_io_policy *policy;
if (image_id < ARRAY_SIZE(policies)) {
policy = &policies[image_id];
result = policy->check(policy->image_spec);
if (result == 0) {
*image_spec = policy->image_spec;
*dev_handle = *(policy->dev_handle);
}
}
#ifdef CONFIG_DDR_FIP_IMAGE
else {
VERBOSE("Trying alternative IO\n");
result = plat_get_ddr_fip_image_source(image_id, dev_handle,
image_spec, open_backend);
}
#endif
#ifdef POLICY_FUSE_PROVISION
if (result != 0) {
VERBOSE("Trying FUSE IO\n");
result = plat_get_fuse_image_source(image_id, dev_handle,
image_spec, open_backend);
}
#endif
return result;
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <stdint.h>
#include <arch_helpers.h>
#include <plat/common/platform.h>
#define RANDOM_CANARY_VALUE ((u_register_t) 3288484550995823360ULL)
u_register_t plat_get_stack_protector_canary(void)
{
/*
* TBD: Generate Random Number from NXP CAAM Block.
*/
return RANDOM_CANARY_VALUE ^ read_cntpct_el0();
}
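/*
 * This hook is only pulled in when stack protection is enabled at build
 * time via TF-A's standard option, e.g. (illustrative invocation):
 *	make PLAT=<plat> ENABLE_STACK_PROTECTOR=strong ...
 */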
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <asm_macros.S>
#include <bl31_data.h>
.global el2_2_aarch32
.global prefetch_disable
#define SPSR_EL3_M4 0x10
#define SPSR_EL_MASK 0xC
#define SPSR_EL2 0x8
#define SCR_EL3_4_EL2_AARCH32 0x131
#define SPSR32_EL2_LE 0x1DA
#define MIDR_PARTNUM_START 4
#define MIDR_PARTNUM_WIDTH 12
#define MIDR_PARTNUM_A53 0xD03
#define MIDR_PARTNUM_A57 0xD07
#define MIDR_PARTNUM_A72 0xD08
/*
* uint64_t el2_2_aarch32(u_register_t smc_id,
* u_register_t start_addr,
* u_register_t parm1,
* u_register_t parm2)
* this function allows changing the execution width of EL2 from Aarch64
* to Aarch32
* Note: MUST be called from EL2 @ Aarch64
* in: x0 = smc function id
* x1 = start address for EL2 @ Aarch32
* x2 = first parameter to pass to EL2 @ Aarch32
* x3 = second parameter to pass to EL2 @ Aarch32
* out: x0 = 0, on success
* x0 = -1, on failure
* uses x0, x1, x2, x3
*/
func el2_2_aarch32
/* check that caller is EL2 @ Aarch64 - err return if not */
mrs x0, spsr_el3
/* see if we were called from Aarch32 */
tst x0, #SPSR_EL3_M4
b.ne 2f
/* see if we were called from EL2 */
and x0, x0, SPSR_EL_MASK
cmp x0, SPSR_EL2
b.ne 2f
/* set ELR_EL3 */
msr elr_el3, x1
/* set scr_el3 */
mov x0, #SCR_EL3_4_EL2_AARCH32
msr scr_el3, x0
/* set sctlr_el2 */
ldr x1, =SCTLR_EL2_RES1
msr sctlr_el2, x1
/* set spsr_el3 */
ldr x0, =SPSR32_EL2_LE
msr spsr_el3, x0
/* x2 = parm 1
* x3 = parm2
*/
/* set the parameters to be passed-thru to EL2 @ Aarch32 */
mov x1, x2
mov x2, x3
/* x1 = parm 1
* x2 = parm2
*/
mov x0, xzr
/* invalidate the icache */
ic iallu
dsb sy
isb
b 1f
2:
/* error return */
mvn x0, xzr
ret
1:
eret
endfunc el2_2_aarch32
/*
* int prefetch_disable(u_register_t smc_id, u_register_t mask)
* this function marks cores which need to have the prefetch disabled -
* secondary cores have prefetch disabled when they are released from reset -
* the bootcore has prefetch disabled when this call is made
* in: x0 = function id
* x1 = core mask, where bit[0]=core0, bit[1]=core1, etc
* if a bit in the mask is set, then prefetch is disabled for that
* core
* out: x0 = SMC_SUCCESS
*/
func prefetch_disable
stp x4, x30, [sp, #-16]!
mov x3, x1
/* x1 = core prefetch disable mask */
/* x3 = core prefetch disable mask */
/* store the mask */
mov x0, #PREFETCH_DIS_OFFSET
bl _set_global_data
/* x3 = core prefetch disable mask */
/* see if we need to disable prefetch on THIS core */
bl plat_my_core_mask
/* x0 = core mask lsb */
/* x3 = core prefetch disable mask */
tst x3, x0
b.eq 1f
/* read midr_el1 */
mrs x1, midr_el1
/* x1 = midr_el1 */
mov x0, xzr
bfxil x0, x1, #MIDR_PARTNUM_START, #MIDR_PARTNUM_WIDTH
/* x0 = part number (a53, a57, a72, etc) */
/* branch on cpu-specific */
cmp x0, #MIDR_PARTNUM_A57
b.eq 1f
cmp x0, #MIDR_PARTNUM_A72
b.ne 1f
bl _disable_ldstr_pfetch_A72
b 1f
1:
ldp x4, x30, [sp], #16
mov x0, xzr
ret
endfunc prefetch_disable
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef SIPSVC_H
#define SIPSVC_H
#include <stdint.h>
#define SMC_FUNC_MASK 0x0000ffff
#define SMC32_PARAM_MASK 0xffffffff
/* SMC function IDs for SiP Service queries */
#define SIP_SVC_CALL_COUNT 0xff00
#define SIP_SVC_UID 0xff01
#define SIP_SVC_VERSION 0xff03
#define SIP_SVC_PRNG 0xff10
#define SIP_SVC_RNG 0xff11
#define SIP_SVC_MEM_BANK 0xff12
#define SIP_SVC_PREFETCH_DIS 0xff13
#define SIP_SVC_HUK 0xff14
#define SIP_SVC_ALLOW_L1L2_ERR 0xff15
#define SIP_SVC_ALLOW_L2_CLR 0xff16
#define SIP_SVC_2_AARCH32 0xff17
#define SIP_SVC_PORSR1 0xff18
/* Layerscape SiP Service Calls version numbers */
#define LS_SIP_SVC_VERSION_MAJOR 0x0
#define LS_SIP_SVC_VERSION_MINOR 0x1
/* Number of Layerscape SiP Calls implemented */
#define LS_COMMON_SIP_NUM_CALLS 10
/* Parameter Type Constants */
#define SIP_PARAM_TYPE_NONE 0x0
#define SIP_PARAM_TYPE_VALUE_INPUT 0x1
#define SIP_PARAM_TYPE_VALUE_OUTPUT 0x2
#define SIP_PARAM_TYPE_VALUE_INOUT 0x3
#define SIP_PARAM_TYPE_MEMREF_INPUT 0x5
#define SIP_PARAM_TYPE_MEMREF_OUTPUT 0x6
#define SIP_PARAM_TYPE_MEMREF_INOUT 0x7
#define SIP_PARAM_TYPE_MASK 0xF
/*
* The macro SIP_PARAM_TYPES can be used to construct a value that you can
* compare against an incoming paramTypes to check the type of all the
* parameters in one comparison.
*/
#define SIP_PARAM_TYPES(t0, t1, t2, t3) \
((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
/*
* The macro SIP_PARAM_TYPE_GET can be used to extract the type of a given
* parameter from paramTypes if you need more fine-grained type checking.
*/
#define SIP_PARAM_TYPE_GET(t, i) ((((uint32_t)(t)) >> ((i) * 4)) & 0xF)
/*
* The macro SIP_PARAM_TYPE_SET can be used to load the type of a given
* parameter from paramTypes without specifying all types (SIP_PARAM_TYPES)
*/
#define SIP_PARAM_TYPE_SET(t, i) (((uint32_t)(t) & 0xF) << ((i) * 4))
#define SIP_SVC_RNG_PARAMS (SIP_PARAM_TYPE_VALUE_INPUT, \
SIP_PARAM_TYPE_MEMREF_OUTPUT, \
SIP_PARAM_TYPE_NONE, \
SIP_PARAM_TYPE_NONE)
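/*
 * Worked example: SIP_PARAM_TYPES SIP_SVC_RNG_PARAMS packs to 0x0061
 * (VALUE_INPUT in nibble 0, MEMREF_OUTPUT in nibble 1), and
 * SIP_PARAM_TYPE_GET(0x0061, 1) recovers SIP_PARAM_TYPE_MEMREF_OUTPUT.
 */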
/* Layerscape SiP Calls error code */
enum {
LS_SIP_SUCCESS = 0,
LS_SIP_INVALID_PARAM = -1,
LS_SIP_NOT_SUPPORTED = -2,
};
#endif /* SIPSVC_H */
/*
* Copyright 2018-2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <string.h>
#include <caam.h>
#include <common/runtime_svc.h>
#include <dcfg.h>
#include <lib/mmio.h>
#include <tools_share/uuid.h>
#include <plat_common.h>
#include <sipsvc.h>
/* Layerscape SiP Service UUID */
DEFINE_SVC_UUID2(nxp_sip_svc_uid,
0x871de4ef, 0xedfc, 0x4209, 0xa4, 0x23,
0x8d, 0x23, 0x75, 0x9d, 0x3b, 0x9f);
#pragma weak nxp_plat_sip_handler
static uintptr_t nxp_plat_sip_handler(unsigned int smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
u_register_t flags)
{
ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
SMC_RET1(handle, SMC_UNK);
}
uint64_t el2_2_aarch32(u_register_t smc_id, u_register_t start_addr,
u_register_t parm1, u_register_t parm2);
uint64_t prefetch_disable(u_register_t smc_id, u_register_t mask);
uint64_t bl31_get_porsr1(void);
static void clean_top_32b_of_param(uint32_t smc_fid,
u_register_t *px1,
u_register_t *px2,
u_register_t *px3,
u_register_t *px4)
{
/* If the parameters came via SMC32, clean the top 32 bits */
if (GET_SMC_CC(smc_fid) == SMC_32) {
*px1 = *px1 & SMC32_PARAM_MASK;
*px2 = *px2 & SMC32_PARAM_MASK;
*px3 = *px3 & SMC32_PARAM_MASK;
*px4 = *px4 & SMC32_PARAM_MASK;
}
}
/* This function handles Layerscape defined SiP Calls */
static uintptr_t nxp_sip_handler(unsigned int smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
u_register_t flags)
{
uint32_t ns;
uint64_t ret;
dram_regions_info_t *info_dram_regions;
/* If the parameters came via SMC32, clean the top 32 bits */
clean_top_32b_of_param(smc_fid, &x1, &x2, &x3, &x4);
/* Determine which security state this SMC originated from */
ns = is_caller_non_secure(flags);
if (ns == 0) {
/* SiP SMC service secure world's call */
;
} else {
/* SiP SMC service normal world's call */
;
}
switch (smc_fid & SMC_FUNC_MASK) {
case SIP_SVC_RNG:
if (is_sec_enabled() == false) {
NOTICE("SEC is disabled.\n");
SMC_RET1(handle, SMC_UNK);
}
/* Return zero on failure */
ret = get_random((int)x1);
if (ret != 0) {
SMC_RET2(handle, SMC_OK, ret);
} else {
SMC_RET1(handle, SMC_UNK);
}
/* break is not required as SMC_RETx return */
case SIP_SVC_HUK:
if (is_sec_enabled() == false) {
NOTICE("SEC is disabled.\n");
SMC_RET1(handle, SMC_UNK);
}
ret = get_hw_unq_key_blob_hw((uint8_t *) x1, (uint32_t) x2);
if (ret == SMC_OK) {
SMC_RET1(handle, SMC_OK);
} else {
SMC_RET1(handle, SMC_UNK);
}
/* break is not required as SMC_RETx return */
case SIP_SVC_MEM_BANK:
VERBOSE("Handling SMC SIP_SVC_MEM_BANK.\n");
info_dram_regions = get_dram_regions_info();
if (x1 == -1) {
SMC_RET2(handle, SMC_OK,
info_dram_regions->total_dram_size);
} else if (x1 >= info_dram_regions->num_dram_regions) {
SMC_RET1(handle, SMC_UNK);
} else {
SMC_RET3(handle, SMC_OK,
info_dram_regions->region[x1].addr,
info_dram_regions->region[x1].size);
}
/* break is not required as SMC_RETx return */
case SIP_SVC_PREFETCH_DIS:
VERBOSE("In SIP_SVC_PREFETCH_DIS call\n");
ret = prefetch_disable(smc_fid, x1);
if (ret == SMC_OK) {
SMC_RET1(handle, SMC_OK);
} else {
SMC_RET1(handle, SMC_UNK);
}
/* break is not required as SMC_RETx return */
case SIP_SVC_2_AARCH32:
ret = el2_2_aarch32(smc_fid, x1, x2, x3);
/* In success case, control should not reach here. */
NOTICE("SMC: SIP_SVC_2_AARCH32 Failed.\n");
SMC_RET1(handle, SMC_UNK);
/* break is not required as SMC_RETx return */
case SIP_SVC_PORSR1:
ret = bl31_get_porsr1();
SMC_RET2(handle, SMC_OK, ret);
/* break is not required as SMC_RETx return */
default:
return nxp_plat_sip_handler(smc_fid, x1, x2, x3, x4,
cookie, handle, flags);
}
}
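/*
 * Illustrative normal-world use of SIP_SVC_MEM_BANK, as handled above
 * (SMC function ID composition per the SMC Calling Convention is
 * assumed):
 *	x1 == -1 : returns SMC_OK and the total DRAM size;
 *	x1 == n  : returns SMC_OK plus region n's address and size,
 *		   or SMC_UNK if n is out of range.
 */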
/* This function is responsible for handling all SiP calls */
static uintptr_t sip_smc_handler(unsigned int smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
u_register_t flags)
{
switch (smc_fid & SMC_FUNC_MASK) {
case SIP_SVC_CALL_COUNT:
/* Return the number of Layerscape SiP Service Calls. */
SMC_RET1(handle, LS_COMMON_SIP_NUM_CALLS);
break;
case SIP_SVC_UID:
/* Return UID to the caller */
SMC_UUID_RET(handle, nxp_sip_svc_uid);
break;
case SIP_SVC_VERSION:
/* Return the version of current implementation */
SMC_RET2(handle, LS_SIP_SVC_VERSION_MAJOR,
LS_SIP_SVC_VERSION_MINOR);
break;
default:
return nxp_sip_handler(smc_fid, x1, x2, x3, x4,
cookie, handle, flags);
}
}
/* Define a runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
nxp_sip_svc,
OEN_SIP_START,
OEN_SIP_END,
SMC_TYPE_FAST,
NULL,
sip_smc_handler
);
#
# Copyright 2018-2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#
#------------------------------------------------------------------------------
#
# Select the SIP SVC files
#
# -----------------------------------------------------------------------------
ifeq (${ADD_SIPSVC},)
ADD_SIPSVC := 1
PLAT_SIPSVC_PATH := $(PLAT_COMMON_PATH)/sip_svc
SIPSVC_SOURCES := ${PLAT_SIPSVC_PATH}/sip_svc.c \
${PLAT_SIPSVC_PATH}/$(ARCH)/sipsvc.S
PLAT_INCLUDES += -I${PLAT_SIPSVC_PATH}/include
ifeq (${BL_COMM_SIPSVC_NEEDED},yes)
BL_COMMON_SOURCES += ${SIPSVC_SOURCES}
else
ifeq (${BL2_SIPSVC_NEEDED},yes)
BL2_SOURCES += ${SIPSVC_SOURCES}
endif
ifeq (${BL31_SIPSVC_NEEDED},yes)
BL31_SOURCES += ${SIPSVC_SOURCES}
endif
endif
endif
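# Example (illustrative): a platform makefile would typically set
# BL31_SIPSVC_NEEDED := yes (and/or BL2_SIPSVC_NEEDED) before including
# this file, so the SiP service sources are compiled into that image only.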
# -----------------------------------------------------------------------------
/*
* Copyright 2018-2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*
*/
#include <errno.h>
#include <common/debug.h>
#include <csf_hdr.h>
#include <dcfg.h>
#include <drivers/auth/crypto_mod.h>
#include <snvs.h>
#include <plat/common/platform.h>
#include "plat_common.h"
extern bool rotpk_not_dpld;
extern uint8_t rotpk_hash_table[MAX_KEY_ENTRIES][SHA256_BYTES];
extern uint32_t num_rotpk_hash_entries;
/*
* In case of secure boot, return a pointer to the rotpk_hash table in
* key_ptr and the number of hashes in key_len
*/
int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
unsigned int *flags)
{
uint32_t mode = 0U;
*flags = ROTPK_NOT_DEPLOYED;
/* ROTPK hash table must be available for secure boot */
if (rotpk_not_dpld == true) {
if (check_boot_mode_secure(&mode) == true) {
/* Production mode, don't continue further */
if (mode == 1U) {
return -EAUTH;
}
/* In development mode, the rotpk flag being false
* indicates that the SRK hash comparison might
* have failed. This is not a fatal error.
* Continue in this case, but transition SNVS
* to the non-secure state.
*/
transition_snvs_non_secure();
return 0;
} else {
return 0;
}
}
/*
* We return the complete hash table and the number of entries in the
* table for the NXP platform specific implementation.
* Here the hash is always assumed to be SHA-256.
*/
*key_ptr = rotpk_hash_table;
*key_len = num_rotpk_hash_entries;
*flags = ROTPK_IS_HASH;
return 0;
}
int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
{
/*
* No support for non-volatile counter. Update the ROT key to protect
* the system against rollback.
*/
*nv_ctr = 0U;
return 0;
}
int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
{
return 0;
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*
*/
#ifndef _CSF_HDR_H_
.global nxp_rotpk_hash
.global nxp_rotpk_hash_end
.section .rodata.nxp_rotpk_hash, "a"
nxp_rotpk_hash:
/* DER header */
.byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
.byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
/* SHA256 */
.incbin ROTPK_HASH
nxp_rotpk_hash_end:
#endif
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Platforms using TRUSTED_BOARD_BOOT need to include this makefile.
# The following definitions are to be provided by the platform.mk file
# or by the user: BL33_INPUT_FILE, BL32_INPUT_FILE, BL31_INPUT_FILE
ifeq ($(CHASSIS), 2)
include $(PLAT_DRIVERS_PATH)/csu/csu.mk
CSF_FILE := input_blx_ch${CHASSIS}
BL2_CSF_FILE := input_bl2_ch${CHASSIS}
else
ifeq ($(CHASSIS), 3_2)
CSF_FILE := input_blx_ch3
BL2_CSF_FILE := input_bl2_ch${CHASSIS}
PBI_CSF_FILE := input_pbi_ch${CHASSIS}
$(eval $(call add_define, CSF_HDR_CH3))
else
$(error -> CHASSIS not set!)
endif
endif
PLAT_AUTH_PATH := $(PLAT_DRIVERS_PATH)/auth
ifeq (${BL2_INPUT_FILE},)
BL2_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${BL2_CSF_FILE}
endif
ifeq (${PBI_INPUT_FILE},)
PBI_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${PBI_CSF_FILE}
endif
# If MBEDTLS_DIR is not specified, use CSF Header option
ifeq (${MBEDTLS_DIR},)
# Generic image processing filters to prepend CSF header
ifeq (${BL33_INPUT_FILE},)
BL33_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
endif
ifeq (${BL31_INPUT_FILE},)
BL31_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
endif
ifeq (${BL32_INPUT_FILE},)
BL32_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
endif
ifeq (${FUSE_INPUT_FILE},)
FUSE_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE}
endif
PLAT_INCLUDES += -I$(PLAT_DRIVERS_PATH)/sfp
PLAT_TBBR_SOURCES += $(PLAT_AUTH_PATH)/csf_hdr_parser/cot.c \
$(PLAT_COMMON_PATH)/tbbr/csf_tbbr.c
# IMG PARSER here is CSF header parser
include $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/csf_hdr.mk
PLAT_TBBR_SOURCES += $(CSF_HDR_SOURCES)
SCP_BL2_PRE_TOOL_FILTER := CST_SCP_BL2
BL31_PRE_TOOL_FILTER := CST_BL31
BL32_PRE_TOOL_FILTER := CST_BL32
BL33_PRE_TOOL_FILTER := CST_BL33
else
ifeq (${DISABLE_FUSE_WRITE}, 1)
$(eval $(call add_define,DISABLE_FUSE_WRITE))
endif
# For mbed TLS, crypto via CAAM is currently not supported;
# enable it once that support is available
CAAM_INTEG := 0
KEY_ALG := rsa
KEY_SIZE := 2048
$(eval $(call add_define,MBEDTLS_X509))
ifeq (${PLAT_DDR_PHY},PHY_GEN2)
$(eval $(call add_define,PLAT_DEF_OID))
endif
include drivers/auth/mbedtls/mbedtls_x509.mk
PLAT_TBBR_SOURCES += $(PLAT_AUTH_PATH)/tbbr/tbbr_cot.c \
$(PLAT_COMMON_PATH)/tbbr/nxp_rotpk.S \
$(PLAT_COMMON_PATH)/tbbr/x509_tbbr.c
# The ROTPK is embedded in the BL2 image
ifeq (${ROT_KEY},)
ROT_KEY = $(BUILD_PLAT)/rot_key.pem
endif
ifeq (${SAVE_KEYS},1)
ifeq (${TRUSTED_WORLD_KEY},)
TRUSTED_WORLD_KEY = ${BUILD_PLAT}/trusted.pem
endif
ifeq (${NON_TRUSTED_WORLD_KEY},)
NON_TRUSTED_WORLD_KEY = ${BUILD_PLAT}/non-trusted.pem
endif
ifeq (${BL31_KEY},)
BL31_KEY = ${BUILD_PLAT}/soc.pem
endif
ifeq (${BL32_KEY},)
BL32_KEY = ${BUILD_PLAT}/trusted_os.pem
endif
ifeq (${BL33_KEY},)
BL33_KEY = ${BUILD_PLAT}/non-trusted_os.pem
endif
endif
ROTPK_HASH = $(BUILD_PLAT)/rotpk_sha256.bin
$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
$(BUILD_PLAT)/bl2/nxp_rotpk.o: $(ROTPK_HASH)
certificates: $(ROT_KEY)
$(ROT_KEY): | $(BUILD_PLAT)
@echo " OPENSSL $@"
@if [ ! -f $(ROT_KEY) ]; then \
openssl genrsa 2048 > $@ 2>/dev/null; \
fi
$(ROTPK_HASH): $(ROT_KEY)
@echo " OPENSSL $@"
$(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
openssl dgst -sha256 -binary > $@ 2>/dev/null
endif #MBEDTLS_DIR
PLAT_INCLUDES += -Iinclude/common/tbbr
# Generic files for authentication framework
TBBR_SOURCES += drivers/auth/auth_mod.c \
drivers/auth/crypto_mod.c \
drivers/auth/img_parser_mod.c \
plat/common/tbbr/plat_tbbr.c \
${PLAT_TBBR_SOURCES}
# If CAAM_INTEG is not set (the case when mbed TLS is used),
# include mbedtls_crypto
ifeq (${CAAM_INTEG},0)
include drivers/auth/mbedtls/mbedtls_crypto.mk
else
include $(PLAT_DRIVERS_PATH)/crypto/caam/src/auth/auth.mk
TBBR_SOURCES += ${AUTH_SOURCES}
endif
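# Example invocation (illustrative; the platform name and paths are
# placeholders, and the options are TF-A's standard TBB build options):
#   make PLAT=<plat> TRUSTED_BOARD_BOOT=1 MBEDTLS_DIR=<path/to/mbedtls> \
#        GENERATE_COT=1 all fip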
/*
* Copyright 2018-2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <common/debug.h>
#include <lib/cassert.h>
#include <sfp.h>
#include <tools_share/tbbr_oid.h>
#include <plat/common/platform.h>
#include "plat_common.h"
extern char nxp_rotpk_hash[], nxp_rotpk_hash_end[];
int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
unsigned int *flags)
{
*key_ptr = nxp_rotpk_hash;
*key_len = nxp_rotpk_hash_end - nxp_rotpk_hash;
*flags = ROTPK_IS_HASH;
return 0;
}
int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
{
const char *oid;
uint32_t uid_num;
uint32_t val = 0U;
assert(cookie != NULL);
assert(nv_ctr != NULL);
oid = (const char *)cookie;
if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
uid_num = 3U;
} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
uid_num = 4U;
} else {
return 1;
}
val = sfp_read_oem_uid(uid_num);
INFO("SFP Value read is %x from UID %d\n", val, uid_num);
if (val == 0U) {
*nv_ctr = 0U;
} else {
*nv_ctr = (32U - __builtin_clz(val));
}
INFO("NV Counter value for UID %d is %d\n", uid_num, *nv_ctr);
return 0;
}
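/*
 * The OEM UID fuse encodes the counter in unary: counter value N is
 * represented by bit (N - 1) being the highest bit set. For example, a
 * fuse value of 0x4 decodes to nv_ctr = 32 - clz(0x4) = 3 above, and
 * programming nv_ctr = 3 below writes sfp_val = 1 << 2 = 0x4.
 */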
int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
{
const char *oid;
uint32_t uid_num, sfp_val;
assert(cookie != NULL);
/* Counter values up to 32 are supported */
if (nv_ctr > 32U) {
return 1;
}
oid = (const char *)cookie;
if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) {
uid_num = 3U;
} else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) {
uid_num = 4U;
} else {
return 1;
}
sfp_val = (1U << (nv_ctr - 1));
if (sfp_write_oem_uid(uid_num, sfp_val) == 1) {
/* Enable POVDD on board */
if (board_enable_povdd()) {
sfp_program_fuses();
}
/* Disable POVDD on board */
board_disable_povdd();
} else {
ERROR("Invalid OEM UID sent.\n");
return 1;
}
return 0;
}
int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
{
return get_mbedtls_heap_helper(heap_addr, heap_size);
}
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <errno.h>
#include <common/debug.h>
#include <ddr.h>
#ifndef NXP_COINED_BB
#include <flash_info.h>
#include <fspi.h>
#include <fspi_api.h>
#endif
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#ifdef NXP_COINED_BB
#include <snvs.h>
#endif
#include <plat_nv_storage.h>
#include "plat_warm_rst.h"
#include "platform_def.h"
#if defined(IMAGE_BL2)
uint32_t is_warm_boot(void)
{
uint32_t ret = mmio_read_32(NXP_RESET_ADDR + RST_RSTRQSR1_OFFSET)
& ~(RSTRQSR1_SWRR);
const nv_app_data_t *nv_app_data = get_nv_data();
if (ret == 0U) {
INFO("Not a SW(Warm) triggered reset.\n");
return 0U;
}
ret = (nv_app_data->warm_rst_flag == WARM_BOOT_SUCCESS) ? 1 : 0;
if (ret != 0U) {
INFO("Warm Reset was triggered..\n");
} else {
INFO("Warm Reset was not triggered..\n");
}
return ret;
}
#endif
#if defined(IMAGE_BL31)
int prep_n_execute_warm_reset(void)
{
#ifdef NXP_COINED_BB
#if !TRUSTED_BOARD_BOOT
snvs_disable_zeroize_lp_gpr();
#endif
#else
int ret;
uint8_t warm_reset = WARM_BOOT_SUCCESS;
ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR);
if (ret != 0) {
ERROR("Failed to initialized driver flexspi-nor.\n");
ERROR("exiting warm-reset request.\n");
return PSCI_E_INTERN_FAIL;
}
/* The sector starting at NV_STORAGE_BASE_ADDR has already
* been erased for writing.
*/
#if (ERLY_WRM_RST_FLG_FLSH_UPDT)
ret = xspi_write((uint32_t)NV_STORAGE_BASE_ADDR,
&warm_reset,
sizeof(warm_reset));
#else
/* Preparation for writing the Warm reset flag. */
ret = xspi_wren((uint32_t)NV_STORAGE_BASE_ADDR);
/* IP Control Register0 - SF Address to be read */
fspi_out32((NXP_FLEXSPI_ADDR + FSPI_IPCR0),
(uint32_t) NV_STORAGE_BASE_ADDR);
while ((fspi_in32(NXP_FLEXSPI_ADDR + FSPI_INTR) &
FSPI_INTR_IPTXWE_MASK) == 0) {
;
}
/* Write TX FIFO Data Register */
fspi_out32(NXP_FLEXSPI_ADDR + FSPI_TFDR, (uint32_t) warm_reset);
fspi_out32(NXP_FLEXSPI_ADDR + FSPI_INTR, FSPI_INTR_IPTXWE);
/* IP Control Register1 - SEQID_WRITE operation, Size = 1 Byte */
fspi_out32(NXP_FLEXSPI_ADDR + FSPI_IPCR1,
(uint32_t)(FSPI_WRITE_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) |
(uint16_t) sizeof(warm_reset));
/* Trigger the XSPI IP-write command only once
* putting DDR into self-refresh mode has succeeded,
* so as to complete the writing of the warm-reset flag
* to flash.
*
* That code is part of the assembly path.
*/
#endif
#endif
INFO("Doing DDR Self refresh.\n");
_soc_sys_warm_reset();
/* Expected behaviour is to do the power cycle */
while (1 != 0)
;
return -1;
}
#endif