Commit 9719e19a authored by Joanna Farley, committed by TrustedFirmware Code Review

Merge changes I500ddbe9,I9c10dac9,I53bfff85,I06f7594d,I24bff8d4, ... into integration

* changes:
  nxp lx2160a-aqds: new plat based on soc lx2160a
  NXP lx2160a-rdb: new plat based on SoC lx2160a
  nxp lx2162aqds: new plat based on soc lx2160a
  nxp: errata handling at soc level for lx2160a
  nxp: make file for loading additional ddr image
  nxp: adding support of soc lx2160a
  nxp: deflt hdr files for soc & their platforms
  nxp: platform files for bl2 and bl31 setup
  nxp: warm reset support to retain ddr content
  nxp: nv storage api on platforms
  nxp: supports two mode of trusted board boot
  nxp: fip-handler for additional fip_fuse.bin
  nxp: fip-handler for additional ddr-fip.bin
  nxp: image loader for loading fip image
  nxp: svp & sip smc handling
  nxp: psci platform functions used by lib/psci
  nxp: helper function used by plat & common code
  nxp: add data handler used by bl31
  nxp: adding the driver.mk file
  nxp-tool: for creating pbl file from bl2
  nxp: adding the smmu driver
  nxp: cot using nxp internal and mbedtls
  nxp:driver for crypto h/w accelerator caam
  nxp:add driver support for sd and emmc
  nxp:add qspi driver
  nxp: add flexspi driver support
  nxp: adding gic apis for nxp soc
  nxp: gpio driver support
  nxp: added csu driver
  nxp: driver pmu for nxp soc
  nxp: ddr driver enablement for nxp layerscape soc
  nxp: i2c driver support.
  NXP: Driver for NXP Security Monitor
  NXP: SFP driver support for NXP SoC
  NXP: Interconnect API based on ARM CCN-CCI driver
  NXP: TZC API to configure ddr region
  NXP: Timer API added to enable ARM generic timer
  nxp: add dcfg driver
  nxp:add console driver for nxp platform
  tools: add mechanism to allow platform specific image UUID
  tbbr-cot: conditional definition for the macro
  tbbr-cot: fix the issue of compiling time define
  cert_create: updated tool for platform defined certs, keys & extensions
  tbbr-tools: enable override TRUSTED_KEY_CERT
parents b59444ea f359a382
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <common/bl_common.h>
#include <common/desc_image_load.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include "load_img.h"
/******************************************************************************
* This function can be used to load DDR PHY/FUSE Images
*
* @param [in] image_id Image ID to be loaded
*
* @param [in,out] image_base Location at which the image should be loaded.
* In case the image is prepended by a CSF header,
* image_base points to the actual image after
* the header.
*
* @param [in,out] image_size Caller passes the maximum possible size of the
* image (buffer size starting from image_base).
* The actual size of the loaded image is returned
* back.
*****************************************************************************/
int load_img(unsigned int image_id, uintptr_t *image_base,
uint32_t *image_size)
{
int err = 0;
image_desc_t img_info = {
.image_id = image_id,
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
VERSION_2, image_info_t, 0),
#ifdef CSF_HEADER_PREPENDED
.image_info.image_base = *image_base - CSF_HDR_SZ,
.image_info.image_max_size = *image_size + CSF_HDR_SZ,
#else
.image_info.image_base = *image_base,
.image_info.image_max_size = *image_size,
#endif
};
/* Create MMU entry for the CSF header */
#if PLAT_XLAT_TABLES_DYNAMIC
#ifdef CSF_HEADER_PREPENDED
mmap_add_dynamic_region(img_info.image_info.image_base,
img_info.image_info.image_base,
CSF_HDR_SZ,
MT_MEMORY | MT_RW | MT_SECURE);
#endif
#endif
VERBOSE("BL2: Loading IMG %d\n", image_id);
err = load_auth_image(image_id, &img_info.image_info);
if (err != 0) {
VERBOSE("Failed to load IMG %d\n", image_id);
return err;
}
#ifdef CSF_HEADER_PREPENDED
*image_base = img_info.image_info.image_base + CSF_HDR_SZ;
*image_size = img_info.image_info.image_size - CSF_HDR_SZ;
#if PLAT_XLAT_TABLES_DYNAMIC
mmap_remove_dynamic_region(img_info.image_info.image_base,
CSF_HDR_SZ);
#endif
#else
*image_base = img_info.image_info.image_base;
*image_size = img_info.image_info.image_size;
#endif
return err;
}
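/*
 * Illustrative usage sketch (not part of this commit): a BL2 platform
 * hook could pull an additional DDR PHY image into a scratch buffer as
 * below. DDR_PHY_IMG_ID, scratch_buf and SCRATCH_BUF_SIZE are
 * hypothetical names used only for this example.
 */
#if 0	/* example only */
static int load_ddr_phy_img_example(void)
{
	/* in: start of the scratch buffer the image may be loaded into */
	uintptr_t img_base = (uintptr_t)scratch_buf;
	/* in: maximum buffer size; out: actual size of the loaded image */
	uint32_t img_size = SCRATCH_BUF_SIZE;
	int ret = load_img(DDR_PHY_IMG_ID, &img_base, &img_size);

	if (ret != 0) {
		ERROR("Failed to load the DDR PHY image\n");
	}
	return ret;
}
#endif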
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef LOAD_IMAGE_H
#define LOAD_IMAGE_H
int load_img(unsigned int image_id, uintptr_t *image_base,
uint32_t *image_size);
#endif /* LOAD_IMAGE_H */
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef SOC_DEFAULT_BASE_ADDR_H
#define SOC_DEFAULT_BASE_ADDR_H
/* CCSR mmu_def.h */
#define NXP_CCSR_ADDR 0x01000000
#define NXP_CCSR_SIZE 0x0F000000
#define NXP_DCSR_ADDR 0x20000000
#define NXP_DCSR_SIZE 0x4000000
/* Flex-SPI controller address */
#define NXP_FLEXSPI_ADDR 0x020C0000
/* QSPI Flash Start address */
#define NXP_QSPI_FLASH_ADDR 0x40000000
/* NOR Flash Start address */
#define NXP_IFC_REGION_ADDR 0x60000000
#define NXP_NOR_FLASH_ADDR NXP_IFC_REGION_ADDR
/* MMU 500 soc.c*/
#define NXP_SMMU_ADDR 0x09000000
#define NXP_SNVS_ADDR 0x01E90000
#define NXP_DCFG_ADDR 0x01EE0000
#define NXP_SFP_ADDR 0x01E80000
#define NXP_RCPM_ADDR 0x01EE2000
#define NXP_CSU_ADDR 0x01510000
#define NXP_SCFG_ADDR 0x01570000
#define NXP_DCSR_DCFG_ADDR (NXP_DCSR_ADDR + 0x00140000)
#define NXP_I2C_ADDR 0x02180000
#define NXP_ESDHC_ADDR 0x01560000
#define NXP_UART_ADDR 0x021C0500
#define NXP_UART1_ADDR 0x021C0600
#define NXP_GPIO1_ADDR 0x02300000
#define NXP_GPIO2_ADDR 0x02310000
#define NXP_GPIO3_ADDR 0x02320000
#define NXP_GPIO4_ADDR 0x02330000
#define NXP_WDOG1_NS_ADDR 0x02390000
#define NXP_WDOG2_NS_ADDR 0x023A0000
#define NXP_WDOG1_TZ_ADDR 0x023B0000
#define NXP_WDOG2_TZ_ADDR 0x023C0000
#define NXP_TIMER_STATUS_ADDR 0x023F0000
#define NXP_GICD_4K_ADDR 0x01401000
#define NXP_GICC_4K_ADDR 0x01402000
#define NXP_GICD_64K_ADDR 0x01410000
#define NXP_GICC_64K_ADDR 0x01420000
#define NXP_CAAM_ADDR 0x01700000
#define NXP_TZC_ADDR 0x01500000
#define NXP_DDR_ADDR 0x01080000
#define NXP_TIMER_ADDR 0x02B00000
#define NXP_CCI_ADDR 0x01180000
#define NXP_RESET_ADDR 0x01E60000
#define NXP_SEC_REGFILE_ADDR 0x01E88000
#endif /* SOC_DEFAULT_BASE_ADDR_H */
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef SOC_DEFAULT_HELPER_MACROS_H
#define SOC_DEFAULT_HELPER_MACROS_H
#ifdef NXP_OCRAM_TZPC_ADDR
/* 0x1: means 4 KB
* 0x2: means 8 KB
*/
#define TZPC_BLOCK_SIZE 0x1000
#endif
/* DDR controller offsets and defines */
#ifdef NXP_DDR_ADDR
#define DDR_CFG_2_OFFSET 0x114
#define CFG_2_FORCE_REFRESH 0x80000000
#endif /* NXP_DDR_ADDR */
/* Reset block register offsets */
#ifdef NXP_RESET_ADDR
/* Register Offset */
#define RST_RSTCR_OFFSET 0x0
#define RST_RSTRQMR1_OFFSET 0x10
#define RST_RSTRQSR1_OFFSET 0x18
#define BRR_OFFSET 0x60
/* helper macros */
#define RSTRQSR1_SWRR 0x800
#define RSTRQMR_RPTOE_MASK (1 << 19)
#endif /* NXP_RESET_ADDR */
/* Secure-Register-File register offsets and bit masks */
#ifdef NXP_RST_ADDR
/* Register Offset */
#define CORE_HOLD_OFFSET 0x140
#define RSTCNTL_OFFSET 0x180
/* Helper macros */
#define SW_RST_REQ_INIT 0x1
#endif
#ifdef NXP_RCPM_ADDR
/* RCPM Register Offsets */
#define RCPM_PCPH20SETR_OFFSET 0x0D4
#define RCPM_PCPH20CLRR_OFFSET 0x0D8
#define RCPM_POWMGTCSR_OFFSET 0x130
#define RCPM_IPPDEXPCR0_OFFSET 0x140
#define RCPM_POWMGTCSR_LPM20_REQ 0x00100000
#endif
#endif /* SOC_DEFAULT_HELPER_MACROS_H */
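/*
 * Illustrative sketch (an assumption, not this commit's warm-reset
 * code): DDR_CFG_2_OFFSET/CFG_2_FORCE_REFRESH above are the kind of
 * macros used to force DDR into self-refresh so that its content is
 * retained across a warm reset, e.g.:
 */
#if 0	/* example only */
#include <lib/mmio.h>

static void ddr_force_refresh_example(void)
{
	uint32_t val = mmio_read_32(NXP_DDR_ADDR + DDR_CFG_2_OFFSET);

	/* request forced self-refresh before triggering the reset */
	mmio_write_32(NXP_DDR_ADDR + DDR_CFG_2_OFFSET,
		      val | CFG_2_FORCE_REFRESH);
}
#endif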
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef SOC_DEFAULT_BASE_ADDR_H
#define SOC_DEFAULT_BASE_ADDR_H
/* CCSR mmu_def.h */
#define NXP_CCSR_ADDR 0x1000000
#define NXP_CCSR_SIZE 0xF000000
#define NXP_DCSR_ADDR 0x700000000
#define NXP_DCSR_SIZE 0x40000000
/* Flex-SPI controller address */
#define NXP_FLEXSPI_ADDR 0x020C0000
/* Flex-SPI Flash Start address */
#define NXP_FLEXSPI_FLASH_ADDR 0x20000000
/* MMU 500 soc.c*/
#define NXP_SMMU_ADDR 0x05000000
#define NXP_SNVS_ADDR 0x01E90000
#define NXP_DCFG_ADDR 0x01E00000
#define NXP_PMU_CCSR_ADDR 0x01E30000
#define NXP_PMU_DCSR_ADDR 0x700123000
#define NXP_PMU_ADDR NXP_PMU_CCSR_ADDR
#define NXP_SFP_ADDR 0x01E80000
#define NXP_SCFG_ADDR 0x01FC0000
#define NXP_I2C_ADDR 0x02000000
#define NXP_ESDHC_ADDR 0x02140000
#define NXP_ESDHC2_ADDR 0x02150000
#define NXP_UART_ADDR 0x021C0000
#define NXP_UART1_ADDR 0x021D0000
#define NXP_GPIO1_ADDR 0x02300000
#define NXP_GPIO2_ADDR 0x02310000
#define NXP_GPIO3_ADDR 0x02320000
#define NXP_GPIO4_ADDR 0x02330000
#define NXP_WDOG1_NS_ADDR 0x02390000
#define NXP_WDOG2_NS_ADDR 0x023A0000
#define NXP_WDOG1_TZ_ADDR 0x023B0000
#define NXP_WDOG2_TZ_ADDR 0x023C0000
#define NXP_TIMER_STATUS_ADDR 0x023F0000
#define NXP_GICD_ADDR 0x06000000
#define NXP_GICR_ADDR 0x06200000
#define NXP_GICR_SGI_ADDR 0x06210000
#define NXP_CAAM_ADDR 0x08000000
#define NXP_TZC_ADDR 0x01100000
#define NXP_TZC2_ADDR 0x01110000
#define NXP_TZC3_ADDR 0x01120000
#define NXP_RESET_ADDR 0x01E60000
#define NXP_SEC_REGFILE_ADDR 0x01E88000
#endif /* SOC_DEFAULT_BASE_ADDR_H */
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef SOC_DEFAULT_BASE_ADDR_H
#define SOC_DEFAULT_BASE_ADDR_H
/* CCSR mmu_def.h */
#define NXP_CCSR_ADDR 0x1000000
#define NXP_CCSR_SIZE 0xF000000
#define NXP_DCSR_ADDR 0x700000000
#define NXP_DCSR_SIZE 0x40000000
/* Flex-SPI controller address */
#define NXP_FLEXSPI_ADDR 0x020C0000
/* Flex-SPI Flash Start address */
#define NXP_FLEXSPI_FLASH_ADDR 0x20000000
/* MMU 500 soc.c*/
#define NXP_SMMU_ADDR 0x05000000
#define NXP_SNVS_ADDR 0x01E90000
#define NXP_DCFG_ADDR 0x01E00000
#define NXP_PMU_CCSR_ADDR 0x01E30000
#define NXP_PMU_DCSR_ADDR 0x700123000
#define NXP_PMU_ADDR NXP_PMU_CCSR_ADDR
#define NXP_SFP_ADDR 0x01E80000
#define NXP_SCFG_ADDR 0x01FC0000
#define NXP_I2C_ADDR 0x02000000
#define NXP_ESDHC_ADDR 0x02140000
#define NXP_ESDHC2_ADDR 0x02150000
#define NXP_UART_ADDR 0x021C0000
#define NXP_UART1_ADDR 0x021D0000
#define NXP_GPIO1_ADDR 0x02300000
#define NXP_GPIO2_ADDR 0x02310000
#define NXP_GPIO3_ADDR 0x02320000
#define NXP_GPIO4_ADDR 0x02330000
#define NXP_WDOG1_NS_ADDR 0x02390000
#define NXP_WDOG2_NS_ADDR 0x023A0000
#define NXP_WDOG1_TZ_ADDR 0x023B0000
#define NXP_WDOG2_TZ_ADDR 0x023C0000
#define NXP_TIMER_STATUS_ADDR 0x023F0000
#define NXP_GICD_ADDR 0x06000000
#define NXP_GICR_ADDR 0x06200000
#define NXP_GICR_SGI_ADDR 0x06210000
#define NXP_CAAM_ADDR 0x08000000
#define NXP_TZC_ADDR 0x01100000
#define NXP_TZC2_ADDR 0x01110000
#define NXP_TZC3_ADDR 0x01120000
#define NXP_TIMER_ADDR 0x023E0000
#define NXP_RESET_ADDR 0x01E60000
#define NXP_SEC_REGFILE_ADDR 0x01E88000
#define NXP_RST_ADDR 0x01E88000
#define TPMWAKEMR0_ADDR 0x700123c50
#define TZPC_BLOCK_SIZE 0x1000
#define NXP_TZC4_ADDR 0x01130000
#define NXP_DDR_ADDR 0x01080000
#define NXP_DDR2_ADDR 0x01090000
#define NXP_OCRAM_TZPC_ADDR 0x02200000
#define NXP_CCN_ADDR 0x04000000
#define NXP_CCN_HNI_ADDR 0x04080000
#define NXP_CCN_HN_F_0_ADDR 0x04200000
#endif /* SOC_DEFAULT_BASE_ADDR_H */
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef SOC_DEFAULT_HELPER_MACROS_H
#define SOC_DEFAULT_HELPER_MACROS_H
#ifdef NXP_OCRAM_TZPC_ADDR
/* 0x1: means 4 KB
* 0x2: means 8 KB
*/
#define TZPC_BLOCK_SIZE 0x1000
#endif
/* DDR controller offsets and defines */
#ifdef NXP_DDR_ADDR
#define DDR_CFG_2_OFFSET 0x114
#define CFG_2_FORCE_REFRESH 0x80000000
#endif /* NXP_DDR_ADDR */
/* Reset block register offsets */
#ifdef NXP_RESET_ADDR
/* Register Offset */
#define RST_RSTCR_OFFSET 0x0
#define RST_RSTRQMR1_OFFSET 0x10
#define RST_RSTRQSR1_OFFSET 0x18
#define BRR_OFFSET 0x60
/* helper macros */
#define RSTRQSR1_SWRR 0x800
#define RSTRQMR_RPTOE_MASK (1 << 19)
#endif /* NXP_RESET_ADDR */
/* Secure-Register-File register offsets and bit masks */
#ifdef NXP_RST_ADDR
/* Register Offset */
#define CORE_HOLD_OFFSET 0x140
#define RSTCNTL_OFFSET 0x180
/* Helper macros */
#define SW_RST_REQ_INIT 0x1
#endif
#ifdef NXP_CCN_ADDR
#define NXP_CCN_HN_F_1_ADDR 0x04210000
#define CCN_HN_F_SAM_NODEID_MASK 0x7f
#define CCN_HN_F_SNP_DMN_CTL_OFFSET 0x200
#define CCN_HN_F_SNP_DMN_CTL_SET_OFFSET 0x210
#define CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET 0x220
#define CCN_HN_F_SNP_DMN_CTL_MASK 0x80a00
#define CCN_HNF_NODE_COUNT 8
#define CCN_HNF_OFFSET 0x10000
#define SA_AUX_CTRL_REG_OFFSET 0x500
#define NUM_HNI_NODE 2
#define CCN_HNI_MEMORY_MAP_SIZE 0x10000
#define PCIeRC_RN_I_NODE_ID_OFFSET 0x8
#define PoS_CONTROL_REG_OFFSET 0x0
#define POS_EARLY_WR_COMP_EN 0x20
#define HNI_POS_EN 0x01
#define POS_TERMINATE_BARRIERS 0x10
#define SERIALIZE_DEV_nGnRnE_WRITES 0x200
#define ENABLE_ERR_SIGNAL_TO_MN 0x4
#define ENABLE_RESERVE_BIT53 0x400
#define ENABLE_WUO 0x10
#endif /* NXP_CCN_ADDR */
#endif /* SOC_DEFAULT_HELPER_MACROS_H */
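/*
 * Illustrative sketch (an assumption, not this commit's interconnect
 * code): the HN-F macros above address each of the CCN_HNF_NODE_COUNT
 * snoop-filter nodes at a fixed CCN_HNF_OFFSET stride, e.g.:
 */
#if 0	/* example only */
#include <lib/mmio.h>

static void ccn_hnf_snp_dmn_clr_example(void)
{
	unsigned int i;

	for (i = 0U; i < CCN_HNF_NODE_COUNT; i++) {
		uintptr_t hnf = NXP_CCN_HN_F_0_ADDR +
				((uintptr_t)i * CCN_HNF_OFFSET);

		/* clear the snoop/DVM domain bits of this HN-F node */
		mmio_write_32(hnf + CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET,
			      CCN_HN_F_SNP_DMN_CTL_MASK);
	}
}
#endif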
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_DEFAULT_DEF_H
#define PLAT_DEFAULT_DEF_H
/*
* Platform binary types for linking
*/
#ifdef __aarch64__
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
#else
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
#endif /* __aarch64__ */
#define LS_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
/* NXP platforms have DRAM divided into banks.
 * DRAM0 bank: maximum size of this bank is fixed at 2GB.
 * DRAM1 bank: memory above 2GB belongs to bank 1, whose size varies
 * from one platform to another.
 * DRAMn Bank:
 *
 * With a few exceptions, all platforms have a 2GB DRAM0 bank, so the
 * default below is common to all of them.
 * Platforms whose DRAM0 size is < 2GB define it in platform_def.h.
 */
#ifndef PLAT_DEF_DRAM0_SIZE
#define PLAT_DEF_DRAM0_SIZE 0x80000000 /* 2G */
#endif
/* Common default across all platforms, unless overridden: */
#ifndef NXP_NS_DRAM_ADDR
#define NXP_NS_DRAM_ADDR NXP_DRAM0_ADDR
#endif
/* 64MB is reserved for secure memory. */
#ifndef NXP_SECURE_DRAM_SIZE
#define NXP_SECURE_DRAM_SIZE (64 * 1024 * 1024)
#endif
/* 2M Secure EL1 Payload Shared Memory */
#ifndef NXP_SP_SHRD_DRAM_SIZE
#define NXP_SP_SHRD_DRAM_SIZE (2 * 1024 * 1024)
#endif
#ifndef NXP_NS_DRAM_SIZE
/* Non secure memory */
#define NXP_NS_DRAM_SIZE (PLAT_DEF_DRAM0_SIZE - \
(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE))
#endif
#ifndef NXP_SECURE_DRAM_ADDR
#ifdef TEST_BL31
#define NXP_SECURE_DRAM_ADDR 0
#else
#define NXP_SECURE_DRAM_ADDR (NXP_NS_DRAM_ADDR + PLAT_DEF_DRAM0_SIZE - \
(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE))
#endif
#endif
#ifndef NXP_SP_SHRD_DRAM_ADDR
#define NXP_SP_SHRD_DRAM_ADDR (NXP_NS_DRAM_ADDR + PLAT_DEF_DRAM0_SIZE \
- NXP_SP_SHRD_DRAM_SIZE)
#endif
#ifndef BL31_BASE
/* 2 MB reserved in secure memory for DDR */
#define BL31_BASE NXP_SECURE_DRAM_ADDR
#endif
#ifndef BL31_SIZE
#define BL31_SIZE (0x200000)
#endif
#ifndef BL31_LIMIT
#define BL31_LIMIT (BL31_BASE + BL31_SIZE)
#endif
/* Put BL32 in secure memory */
#ifndef BL32_BASE
#define BL32_BASE (NXP_SECURE_DRAM_ADDR + BL31_SIZE)
#endif
#ifndef BL32_LIMIT
#define BL32_LIMIT (NXP_SECURE_DRAM_ADDR + \
NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)
#endif
/* BL33 memory region */
/* Hardcoded based on current address in u-boot */
#ifndef BL33_BASE
#define BL33_BASE 0x82000000
#endif
#ifndef BL33_LIMIT
#define BL33_LIMIT (NXP_NS_DRAM_ADDR + NXP_NS_DRAM_SIZE)
#endif
/*
* FIP image defines - offset at which the FIP image would be present.
* The image would include BL31, BL33 and BL32 (optional).
*/
#ifdef POLICY_FUSE_PROVISION
#ifndef FUSE_BUF
#define FUSE_BUF ULL(0x81000000)
#endif
#ifndef FUSE_SZ
#define FUSE_SZ 0x80000
#endif
#endif
#ifndef MAX_FIP_DEVICES
#define MAX_FIP_DEVICES 2
#endif
#ifndef PLAT_FIP_OFFSET
#define PLAT_FIP_OFFSET 0x100000
#endif
#ifndef PLAT_FIP_MAX_SIZE
#define PLAT_FIP_MAX_SIZE 0x400000
#endif
/* Check if this size can be determined from array size */
#if defined(IMAGE_BL2)
#ifndef MAX_MMAP_REGIONS
#define MAX_MMAP_REGIONS 8
#endif
#ifndef MAX_XLAT_TABLES
#define MAX_XLAT_TABLES 6
#endif
#elif defined(IMAGE_BL31)
#ifndef MAX_MMAP_REGIONS
#define MAX_MMAP_REGIONS 9
#endif
#ifndef MAX_XLAT_TABLES
#define MAX_XLAT_TABLES 9
#endif
#elif defined(IMAGE_BL32)
#ifndef MAX_MMAP_REGIONS
#define MAX_MMAP_REGIONS 8
#endif
#ifndef MAX_XLAT_TABLES
#define MAX_XLAT_TABLES 9
#endif
#endif
/*
* ID of the secure physical generic timer interrupt used by the BL32.
*/
#ifndef BL32_IRQ_SEC_PHY_TIMER
#define BL32_IRQ_SEC_PHY_TIMER 29
#endif
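/*
 * Worked example (assuming NXP_DRAM0_ADDR = 0x80000000, as on the
 * Layerscape SoCs, with the default 2GB DRAM0 bank):
 * NXP_SECURE_DRAM_ADDR = 0x80000000 + 0x80000000
 *                        - (0x04000000 + 0x00200000) = 0xFBE00000
 * NXP_SP_SHRD_DRAM_ADDR = 0x80000000 + 0x80000000
 *                         - 0x00200000 = 0xFFE00000
 * BL31 then spans 0xFBE00000 - 0xFC000000 (2MB) and BL32 starts at
 * 0xFC000000.
 */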
#endif /* PLAT_DEFAULT_DEF_H */
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# NXP non-volatile data flag storage, used and then cleared by SW on boot-up
$(eval $(call add_define,NXP_NV_SW_MAINT_LAST_EXEC_DATA))
ifeq ($(NXP_COINED_BB),yes)
$(eval $(call add_define,NXP_COINED_BB))
# BL2 : To read the reset cause from LP SECMON GPR register
# BL31: To write the reset cause to LP SECMON GPR register
$(eval $(call SET_NXP_MAKE_FLAG,SNVS_NEEDED,BL_COMM))
# BL2: DDR training data is stored on Flexspi NOR.
ifneq (${BOOT_MODE},flexspi_nor)
$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL2))
endif
else
$(eval $(call add_define_val,DEFAULT_NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}'))
$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL_COMM))
endif
NV_STORAGE_INCLUDES += -I${PLAT_COMMON_PATH}/nv_storage
NV_STORAGE_SOURCES += ${PLAT_COMMON_PATH}/nv_storage/plat_nv_storage.c
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <common/debug.h>
#ifndef NXP_COINED_BB
#include <flash_info.h>
#include <fspi.h>
#include <fspi_api.h>
#endif
#include <lib/mmio.h>
#ifdef NXP_COINED_BB
#include <snvs.h>
#else
#include <xspi_error_codes.h>
#endif
#include <plat_nv_storage.h>
/* This static structure is populated as the first step of BL2 boot-up,
 * from the NV storage backend (fspi_storage.c, to be located in the
 * fspi driver folder).
*/
static nv_app_data_t nv_app_data;
int read_nv_app_data(void)
{
int ret = 0;
#ifdef NXP_COINED_BB
uint8_t *nv_app_data_array = (uint8_t *) &nv_app_data;
uint8_t offset = 0U;
ret = snvs_read_app_data();
do {
nv_app_data_array[offset] = snvs_read_app_data_bit(offset);
offset++;
} while (offset < APP_DATA_MAX_OFFSET);
snvs_clear_app_data();
#else
uintptr_t nv_base_addr = NV_STORAGE_BASE_ADDR;
ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR);
if (ret != XSPI_SUCCESS) {
ERROR("Failed to initialized driver flexspi-nor.\n");
ERROR("exiting warm-reset request.\n");
return -ENODEV;
}
xspi_read(nv_base_addr,
(uint32_t *)&nv_app_data, sizeof(nv_app_data_t));
xspi_sector_erase((uint32_t) nv_base_addr,
F_SECTOR_ERASE_SZ);
#endif
return ret;
}
int wr_nv_app_data(int data_offset,
uint8_t *data,
int data_size)
{
int ret = 0;
#ifdef NXP_COINED_BB
#if !TRUSTED_BOARD_BOOT
snvs_disable_zeroize_lp_gpr();
#endif
/* In the LP SecMon general-purpose register,
 * only 1-bit flags can be saved.
*/
if ((data_size > 1) || (*data != DEFAULT_SET_VALUE)) {
ERROR("Only binary value is allowed to be written.\n");
ERROR("Use flash instead of SNVS GPR as NV location.\n");
return -ENODEV;
}
snvs_write_app_data_bit(data_offset);
#else
uint8_t read_val[sizeof(nv_app_data_t)];
uint8_t ready_to_write_val[sizeof(nv_app_data_t)];
uintptr_t nv_base_addr = NV_STORAGE_BASE_ADDR;
/* The write must fit within the sector that gets erased. */
assert((nv_base_addr + data_offset + data_size) <=
       (nv_base_addr + F_SECTOR_ERASE_SZ));
ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR);
if (ret != XSPI_SUCCESS) {
ERROR("Failed to initialized driver flexspi-nor.\n");
ERROR("exiting warm-reset request.\n");
return -ENODEV;
}
ret = xspi_read(nv_base_addr + data_offset, (uint32_t *)read_val, data_size);
memset(ready_to_write_val, READY_TO_WRITE_VALUE, ARRAY_SIZE(ready_to_write_val));
if (memcmp(read_val, ready_to_write_val, data_size) == 0) {
xspi_write(nv_base_addr + data_offset, data, data_size);
}
#endif
return ret;
}
const nv_app_data_t *get_nv_data(void)
{
return (const nv_app_data_t *) &nv_app_data;
}
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_NV_STRG_H
#define PLAT_NV_STRG_H
#define DEFAULT_SET_VALUE 0xA1
#define READY_TO_WRITE_VALUE 0xFF
#ifndef NV_STORAGE_BASE_ADDR
#define NV_STORAGE_BASE_ADDR DEFAULT_NV_STORAGE_BASE_ADDR
#endif
typedef struct {
uint8_t warm_rst_flag;
uint8_t wdt_rst_flag;
uint8_t dummy[2];
} nv_app_data_t;
/* The enum below and the structure above must be kept in sync. */
enum app_data_offset {
WARM_RESET_FLAG_OFFSET,
WDT_RESET_FLAG_OFFSET,
APP_DATA_MAX_OFFSET,
};
int read_nv_app_data(void);
int wr_nv_app_data(int data_offset,
uint8_t *data,
int data_size);
const nv_app_data_t *get_nv_data(void);
#endif /* PLAT_NV_STRG_H */
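/*
 * Illustrative usage sketch (not part of this commit): warm-reset
 * handling could arm the flag before the reset and test it on the next
 * boot, e.g.:
 */
#if 0	/* example only */
static void warm_reset_flag_example(void)
{
	uint8_t flag = DEFAULT_SET_VALUE;

	/* populate the static copy from NV storage (clears the backend) */
	read_nv_app_data();

	if (get_nv_data()->warm_rst_flag != 0U) {
		/* the previous boot went down via a warm reset */
	}

	/* arm the flag before requesting the next warm reset */
	wr_nv_app_data(WARM_RESET_FLAG_OFFSET, &flag, sizeof(flag));
}
#endif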
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <asm_macros.S>
#include <assert_macros.S>
#include <lib/psci/psci.h>
#include <bl31_data.h>
#include <plat_psci.h>
#define RESET_RETRY_CNT 800
#define PSCI_ABORT_CNT 100
#if (SOC_CORE_RELEASE)
.global _psci_cpu_on
/*
* int _psci_cpu_on(u_register_t core_mask)
* x0 = target cpu core mask
*
* Called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*
*/
func _psci_cpu_on
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x6, x0
/* x0 = core mask (lsb)
* x6 = core mask (lsb)
*/
/* check if core disabled */
bl _soc_ck_disabled /* 0-2 */
cbnz w0, psci_disabled
/* check core data area to see if core cannot be turned on
* read the core state
*/
mov x0, x6
bl _getCoreState /* 0-5 */
mov x9, x0
/* x6 = core mask (lsb)
* x9 = core state (from data area)
*/
cmp x9, #CORE_DISABLED
mov x0, #PSCI_E_DISABLED
b.eq cpu_on_done
cmp x9, #CORE_PENDING
mov x0, #PSCI_E_ON_PENDING
b.eq cpu_on_done
cmp x9, #CORE_RELEASED
mov x0, #PSCI_E_ALREADY_ON
b.eq cpu_on_done
8:
/* x6 = core mask (lsb)
* x9 = core state (from data area)
*/
cmp x9, #CORE_WFE
b.eq core_in_wfe
cmp x9, #CORE_IN_RESET
b.eq core_in_reset
cmp x9, #CORE_OFF
b.eq core_is_off
cmp x9, #CORE_OFF_PENDING
/* if state == CORE_OFF_PENDING, set abort */
mov x0, x6
mov x1, #ABORT_FLAG_DATA
mov x2, #CORE_ABORT_OP
bl _setCoreData /* 0-3, [13-15] */
ldr x3, =PSCI_ABORT_CNT
7:
/* watch for abort to take effect */
mov x0, x6
bl _getCoreState /* 0-5 */
cmp x0, #CORE_OFF
b.eq core_is_off
cmp x0, #CORE_PENDING
mov x0, #PSCI_E_SUCCESS
b.eq cpu_on_done
/* loop til finished */
sub x3, x3, #1
cbnz x3, 7b
/* if we didn't see either CORE_OFF or CORE_PENDING, then this
* core is in CORE_OFF_PENDING - exit with success, as the core will
* respond to the abort request
*/
mov x0, #PSCI_E_SUCCESS
b cpu_on_done
/* this is where we start up a core out of reset */
core_in_reset:
/* see if the soc-specific module supports this op */
ldr x7, =SOC_CORE_RELEASE
cbnz x7, 3f
mov x0, #PSCI_E_NOT_SUPPORTED
b cpu_on_done
/* x6 = core mask (lsb) */
3:
/* set core state in data area */
mov x0, x6
mov x1, #CORE_PENDING
bl _setCoreState /* 0-3, [13-15] */
/* release the core from reset */
mov x0, x6
bl _soc_core_release /* 0-3 */
mov x0, #PSCI_E_SUCCESS
b cpu_on_done
/* Start up the core that has been powered-down via CPU_OFF
*/
core_is_off:
/* see if the soc-specific module supports this op
*/
ldr x7, =SOC_CORE_RESTART
cbnz x7, 2f
mov x0, #PSCI_E_NOT_SUPPORTED
b cpu_on_done
/* x6 = core mask (lsb) */
2:
/* set core state in data area */
mov x0, x6
mov x1, #CORE_WAKEUP
bl _setCoreState /* 0-3, [13-15] */
/* put the core back into service */
mov x0, x6
#if (SOC_CORE_RESTART)
bl _soc_core_restart /* 0-5 */
#endif
mov x0, #PSCI_E_SUCCESS
b cpu_on_done
/* this is where we release a core that is being held in wfe */
core_in_wfe:
/* x6 = core mask (lsb) */
/* set core state in data area */
mov x0, x6
mov x1, #CORE_PENDING
bl _setCoreState /* 0-3, [13-15] */
dsb sy
isb
/* put the core back into service */
sev
sev
isb
mov x0, #PSCI_E_SUCCESS
cpu_on_done:
/* restore the aarch32/64 non-volatile registers */
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_cpu_on
#endif
#if (SOC_CORE_OFF)
.global _psci_cpu_prep_off
.global _psci_cpu_off_wfi
/*
* void _psci_cpu_prep_off(u_register_t core_mask)
* this function performs the SoC-specific programming prior
* to shutting the core down
* x0 = core_mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_cpu_prep_off
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x10, x0 /* x10 = core_mask */
/* the core does not return from cpu_off, so no need
* to save/restore non-volatile registers
*/
/* mask interrupts by setting DAIF[7:4] to 'b1111 */
msr DAIFSet, #0xF
/* read cpuectlr and save current value */
mrs x4, CORTEX_A72_ECTLR_EL1
mov x1, #CPUECTLR_DATA
mov x2, x4
mov x0, x10
bl _setCoreData
/* remove the core from coherency */
bic x4, x4, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x4
/* save scr_el3 */
mov x0, x10
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* x4 = scr_el3 */
/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
orr x4, x4, #SCR_FIQ_MASK
msr scr_el3, x4
/* x10 = core_mask */
/* prep the core for shutdown */
mov x0, x10
bl _soc_core_prep_off
/* restore the aarch32/64 non-volatile registers */
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_cpu_prep_off
/*
* void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
* - this function shuts down the core
* - this function does not return!!
*/
func _psci_cpu_off_wfi
/* save the wakeup address */
mov x29, x1
/* x0 = core_mask */
/* shutdown the core */
bl _soc_core_entr_off
/* branch to resume execution */
br x29
endfunc _psci_cpu_off_wfi
#endif
#if (SOC_CORE_RESTART)
.global _psci_wakeup
/*
* void _psci_wakeup(u_register_t core_mask)
* this function performs the SoC-specific programming
* after a core wakes up from OFF
* x0 = core mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_wakeup
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x4, x0 /* x4 = core mask */
/* restore scr_el3 */
mov x0, x4
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x4 = core mask */
/* restore CPUECTLR */
mov x0, x4
mov x1, #CPUECTLR_DATA
bl _getCoreData
orr x0, x0, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x0
/* x4 = core mask */
/* start the core back up */
mov x0, x4
bl _soc_core_exit_off
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_wakeup
#endif
#if (SOC_SYSTEM_RESET)
.global _psci_system_reset
func _psci_system_reset
/* system reset is mandatory
* system reset is soc-specific
* Note: under no circumstances do we return from this call
*/
bl _soc_sys_reset
endfunc _psci_system_reset
#endif
#if (SOC_SYSTEM_OFF)
.global _psci_system_off
func _psci_system_off
/* system off is mandatory
* system off is soc-specific
* Note: under no circumstances do we return from this call */
b _soc_sys_off
endfunc _psci_system_off
#endif
#if (SOC_CORE_STANDBY)
.global _psci_core_entr_stdby
.global _psci_core_prep_stdby
.global _psci_core_exit_stdby
/*
* void _psci_core_entr_stdby(u_register_t core_mask) - this
* is the fast-path for simple core standby
*/
func _psci_core_entr_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0 /* x5 = core mask */
/* save scr_el3 */
mov x0, x5
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* x4 = SCR_EL3
* x5 = core mask
*/
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* x5 = core mask */
/* put the core into standby */
mov x0, x5
bl _soc_core_entr_stdby
/* restore scr_el3 */
mov x0, x5
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_core_entr_stdby
/*
* void _psci_core_prep_stdby(u_register_t core_mask) - this
* sets up the core to enter standby state thru the normal path
*/
func _psci_core_prep_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0
/* x5 = core mask */
/* save scr_el3 */
mov x0, x5
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* x5 = core mask */
/* call for any SoC-specific programming */
mov x0, x5
bl _soc_core_prep_stdby
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_core_prep_stdby
/*
* void _psci_core_exit_stdby(u_register_t core_mask) - this
* exits the core from standby state thru the normal path
*/
func _psci_core_exit_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0
/* x5 = core mask */
/* restore scr_el3 */
mov x0, x5
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x5 = core mask */
/* perform any SoC-specific programming after standby state */
mov x0, x5
bl _soc_core_exit_stdby
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_core_exit_stdby
#endif
#if (SOC_CORE_PWR_DWN)
.global _psci_core_prep_pwrdn
.global _psci_cpu_pwrdn_wfi
.global _psci_core_exit_pwrdn
/*
* void _psci_core_prep_pwrdn_(u_register_t core_mask)
* this function prepares the core for power-down
* x0 = core mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_core_prep_pwrdn
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x6, x0
/* x6 = core mask */
/* mask interrupts by setting DAIF[7:4] to 'b1111 */
msr DAIFSet, #0xF
/* save scr_el3 */
mov x0, x6
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* save cpuectlr */
mov x0, x6
mov x1, #CPUECTLR_DATA
mrs x2, CORTEX_A72_ECTLR_EL1
bl _setCoreData
/* x6 = core mask */
/* SoC-specific programming for power-down */
mov x0, x6
bl _soc_core_prep_pwrdn
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_core_prep_pwrdn
/*
* void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
* this function powers down the core
*/
func _psci_cpu_pwrdn_wfi
/* save the wakeup address */
mov x29, x1
/* x0 = core mask */
/* shutdown the core */
bl _soc_core_entr_pwrdn
/* branch to resume execution */
br x29
endfunc _psci_cpu_pwrdn_wfi
/*
* void _psci_core_exit_pwrdn_(u_register_t core_mask)
* this function cleans up after a core power-down
* x0 = core mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_core_exit_pwrdn
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x5, x0 /* x5 = core mask */
/* restore scr_el3 */
mov x0, x5
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x5 = core mask */
/* restore cpuectlr */
mov x0, x5
mov x1, #CPUECTLR_DATA
bl _getCoreData
/* make sure smp is set */
orr x0, x0, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x0
/* x5 = core mask */
/* SoC-specific cleanup */
mov x0, x5
bl _soc_core_exit_pwrdn
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_core_exit_pwrdn
#endif
#if (SOC_CLUSTER_STANDBY)
.global _psci_clstr_prep_stdby
.global _psci_clstr_exit_stdby
/*
* void _psci_clstr_prep_stdby(u_register_t core_mask) - this
* sets up the clstr to enter standby state thru the normal path
*/
func _psci_clstr_prep_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0
/* x5 = core mask */
/* save scr_el3 */
mov x0, x5
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* x5 = core mask */
/* call for any SoC-specific programming */
mov x0, x5
bl _soc_clstr_prep_stdby
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_clstr_prep_stdby
/*
* void _psci_clstr_exit_stdby(u_register_t core_mask) - this
* exits the clstr from standby state thru the normal path
*/
func _psci_clstr_exit_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0 /* x5 = core mask */
/* restore scr_el3 */
mov x0, x5
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x5 = core mask */
/* perform any SoC-specific programming after standby state */
mov x0, x5
bl _soc_clstr_exit_stdby
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_clstr_exit_stdby
#endif
#if (SOC_CLUSTER_PWR_DWN)
.global _psci_clstr_prep_pwrdn
.global _psci_clstr_exit_pwrdn
/*
* void _psci_clstr_prep_pwrdn_(u_register_t core_mask)
* this function prepares the cluster+core for power-down
* x0 = core mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_clstr_prep_pwrdn
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x6, x0 /* x6 = core mask */
/* mask interrupts by setting DAIF[7:4] to 'b1111 */
msr DAIFSet, #0xF
/* save scr_el3 */
mov x0, x6
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* save cpuectlr */
mov x0, x6
mov x1, #CPUECTLR_DATA
mrs x2, CORTEX_A72_ECTLR_EL1
mov x4, x2
bl _setCoreData
/* remove core from coherency */
bic x4, x4, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x4
/* x6 = core mask */
/* SoC-specific programming for power-down */
mov x0, x6
bl _soc_clstr_prep_pwrdn
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_clstr_prep_pwrdn
/*
* void _psci_clstr_exit_pwrdn_(u_register_t core_mask)
* this function cleans up after a cluster power-down
* x0 = core mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_clstr_exit_pwrdn
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x4, x0 /* x4 = core mask */
/* restore scr_el3 */
mov x0, x4
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x4 = core mask */
/* restore cpuectlr */
mov x0, x4
mov x1, #CPUECTLR_DATA
bl _getCoreData
/* make sure smp is set */
orr x0, x0, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x0
/* x4 = core mask */
/* SoC-specific cleanup */
mov x0, x4
bl _soc_clstr_exit_pwrdn
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_clstr_exit_pwrdn
#endif
#if (SOC_SYSTEM_STANDBY)
.global _psci_sys_prep_stdby
.global _psci_sys_exit_stdby
/*
* void _psci_sys_prep_stdby(u_register_t core_mask) - this
* sets up the system to enter standby state thru the normal path
*/
func _psci_sys_prep_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0 /* x5 = core mask */
/* save scr_el3 */
mov x0, x5
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* x5 = core mask */
/* call for any SoC-specific programming */
mov x0, x5
bl _soc_sys_prep_stdby
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_sys_prep_stdby
/*
* void _psci_sys_exit_stdby(u_register_t core_mask) - this
* exits the system from standby state thru the normal path
*/
func _psci_sys_exit_stdby
stp x4, x5, [sp, #-16]!
stp x6, x30, [sp, #-16]!
mov x5, x0
/* x5 = core mask */
/* restore scr_el3 */
mov x0, x5
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x5 = core mask */
/* perform any SoC-specific programming after standby state */
mov x0, x5
bl _soc_sys_exit_stdby
ldp x6, x30, [sp], #16
ldp x4, x5, [sp], #16
isb
ret
endfunc _psci_sys_exit_stdby
#endif
#if (SOC_SYSTEM_PWR_DWN)
.global _psci_sys_prep_pwrdn
.global _psci_sys_pwrdn_wfi
.global _psci_sys_exit_pwrdn
/*
* void _psci_sys_prep_pwrdn_(u_register_t core_mask)
* this function prepares the system+core for power-down
* x0 = core mask
*
* called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_sys_prep_pwrdn
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x6, x0 /* x6 = core mask */
/* mask interrupts by setting DAIF[7:4] to 'b1111 */
msr DAIFSet, #0xF
/* save scr_el3 */
mov x0, x6
mrs x4, SCR_EL3
mov x2, x4
mov x1, #SCR_EL3_DATA
bl _setCoreData
/* allow interrupts @ EL3 */
orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
msr SCR_EL3, x4
/* save cpuectlr */
mov x0, x6
mov x1, #CPUECTLR_DATA
mrs x2, CORTEX_A72_ECTLR_EL1
mov x4, x2
bl _setCoreData
/* remove core from coherency */
bic x4, x4, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x4
/* x6 = core mask */
/* SoC-specific programming for power-down */
mov x0, x6
bl _soc_sys_prep_pwrdn
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_sys_prep_pwrdn
/*
* void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
* this function powers down the system
*/
func _psci_sys_pwrdn_wfi
/* save the wakeup address */
mov x29, x1
/* x0 = core mask */
/* shutdown the system */
bl _soc_sys_pwrdn_wfi
/* branch to resume execution */
br x29
endfunc _psci_sys_pwrdn_wfi
/*
* void _psci_sys_exit_pwrdn_(u_register_t core_mask)
* this function cleans up after a system power-down
* x0 = core mask
*
* Called from C, so save the non-volatile regs
* save these as pairs of registers to maintain the
* required 16-byte alignment on the stack
*/
func _psci_sys_exit_pwrdn
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x14, x15, [sp, #-16]!
stp x16, x17, [sp, #-16]!
stp x18, x30, [sp, #-16]!
mov x4, x0 /* x4 = core mask */
/* restore scr_el3 */
mov x0, x4
mov x1, #SCR_EL3_DATA
bl _getCoreData
/* x0 = saved scr_el3 */
msr SCR_EL3, x0
/* x4 = core mask */
/* restore cpuectlr */
mov x0, x4
mov x1, #CPUECTLR_DATA
bl _getCoreData
/* make sure smp is set */
orr x0, x0, #CPUECTLR_SMPEN_MASK
msr CORTEX_A72_ECTLR_EL1, x0
/* x4 = core mask */
/* SoC-specific cleanup */
mov x0, x4
bl _soc_sys_exit_pwrdn
/* restore the aarch32/64 non-volatile registers
*/
ldp x18, x30, [sp], #16
ldp x16, x17, [sp], #16
ldp x14, x15, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
b psci_completed
endfunc _psci_sys_exit_pwrdn
#endif
/* psci std returns */
func psci_disabled
ldr w0, =PSCI_E_DISABLED
b psci_completed
endfunc psci_disabled
func psci_not_present
ldr w0, =PSCI_E_NOT_PRESENT
b psci_completed
endfunc psci_not_present
func psci_on_pending
ldr w0, =PSCI_E_ON_PENDING
b psci_completed
endfunc psci_on_pending
func psci_already_on
ldr w0, =PSCI_E_ALREADY_ON
b psci_completed
endfunc psci_already_on
func psci_failure
ldr w0, =PSCI_E_INTERN_FAIL
b psci_completed
endfunc psci_failure
func psci_unimplemented
ldr w0, =PSCI_E_NOT_SUPPORTED
b psci_completed
endfunc psci_unimplemented
func psci_denied
ldr w0, =PSCI_E_DENIED
b psci_completed
endfunc psci_denied
func psci_invalid
ldr w0, =PSCI_E_INVALID_PARAMS
b psci_completed
endfunc psci_invalid
func psci_success
mov x0, #PSCI_E_SUCCESS
endfunc psci_success
func psci_completed
/* x0 = status code */
ret
endfunc psci_completed
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_PSCI_H
#define PLAT_PSCI_H
/* core abort current op */
#define CORE_ABORT_OP 0x1
/* psci power levels - these are actually affinity levels
* in the psci_power_state_t array
*/
#define PLAT_CORE_LVL PSCI_CPU_PWR_LVL
#define PLAT_CLSTR_LVL U(1)
#define PLAT_SYS_LVL U(2)
#define PLAT_MAX_LVL PLAT_SYS_LVL
/* core state */
/* OFF states 0x0 - 0xF */
#define CORE_IN_RESET 0x0
#define CORE_DISABLED 0x1
#define CORE_OFF 0x2
#define CORE_STANDBY 0x3
#define CORE_PWR_DOWN 0x4
#define CORE_WFE 0x6
#define CORE_WFI 0x7
#define CORE_LAST 0x8
#define CORE_OFF_PENDING 0x9
#define CORE_WORKING_INIT 0xA
#define SYS_OFF_PENDING 0xB
#define SYS_OFF 0xC
/* ON states 0x10 - 0x1F */
#define CORE_PENDING 0x10
#define CORE_RELEASED 0x11
#define CORE_WAKEUP 0x12
/* highest off state */
#define CORE_OFF_MAX 0xF
/* lowest on state */
#define CORE_ON_MIN CORE_PENDING
#define DAIF_SET_MASK 0x3C0
#define SCTLR_I_C_M_MASK 0x00001005
#define SCTLR_C_MASK 0x00000004
#define SCTLR_I_MASK 0x00001000
#define CPUACTLR_L1PCTL_MASK 0x0000E000
#define DCSR_RCPM2_BASE 0x20170000
#define CPUECTLR_SMPEN_MASK 0x40
#define CPUECTLR_SMPEN_EN 0x40
#define CPUECTLR_RET_MASK 0x7
#define CPUECTLR_RET_SET 0x2
#define CPUECTLR_TIMER_MASK 0x7
#define CPUECTLR_TIMER_8TICKS 0x2
#define SCR_IRQ_MASK 0x2
#define SCR_FIQ_MASK 0x4
/* pwr mgmt features supported in the soc-specific code:
* value == 0x0, the soc code does not support this feature
* value != 0x0, the soc code supports this feature
*/
#define SOC_CORE_RELEASE 0x1
#define SOC_CORE_RESTART 0x1
#define SOC_CORE_OFF 0x1
#define SOC_CORE_STANDBY 0x1
#define SOC_CORE_PWR_DWN 0x1
#define SOC_CLUSTER_STANDBY 0x1
#define SOC_CLUSTER_PWR_DWN 0x1
#define SOC_SYSTEM_STANDBY 0x1
#define SOC_SYSTEM_PWR_DWN 0x1
#define SOC_SYSTEM_OFF 0x1
#define SOC_SYSTEM_RESET 0x1
#define SOC_SYSTEM_RESET2 0x1
#ifndef __ASSEMBLER__
void __dead2 _psci_system_reset(void);
void __dead2 _psci_system_off(void);
int _psci_cpu_on(u_register_t core_mask);
void _psci_cpu_prep_off(u_register_t core_mask);
void __dead2 _psci_cpu_off_wfi(u_register_t core_mask,
u_register_t wakeup_address);
void __dead2 _psci_cpu_pwrdn_wfi(u_register_t core_mask,
u_register_t wakeup_address);
void __dead2 _psci_sys_pwrdn_wfi(u_register_t core_mask,
u_register_t wakeup_address);
void _psci_wakeup(u_register_t core_mask);
void _psci_core_entr_stdby(u_register_t core_mask);
void _psci_core_prep_stdby(u_register_t core_mask);
void _psci_core_exit_stdby(u_register_t core_mask);
void _psci_core_prep_pwrdn(u_register_t core_mask);
void _psci_core_exit_pwrdn(u_register_t core_mask);
void _psci_clstr_prep_stdby(u_register_t core_mask);
void _psci_clstr_exit_stdby(u_register_t core_mask);
void _psci_clstr_prep_pwrdn(u_register_t core_mask);
void _psci_clstr_exit_pwrdn(u_register_t core_mask);
void _psci_sys_prep_stdby(u_register_t core_mask);
void _psci_sys_exit_stdby(u_register_t core_mask);
void _psci_sys_prep_pwrdn(u_register_t core_mask);
void _psci_sys_exit_pwrdn(u_register_t core_mask);
#endif
#endif /* PLAT_PSCI_H */
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <common/debug.h>
#include <plat_gic.h>
#include <plat_common.h>
#include <plat_psci.h>
#ifdef NXP_WARM_BOOT
#include <plat_warm_rst.h>
#endif
#include <platform_def.h>
#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
static void __dead2 _no_return_wfi(void)
{
_bl31_dead_wfi:
wfi();
goto _bl31_dead_wfi;
}
#endif
#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
/* the entry for core warm boot */
static uintptr_t warmboot_entry = (uintptr_t) NULL;
#endif
#if (SOC_CORE_RELEASE)
static int _pwr_domain_on(u_register_t mpidr)
{
int core_pos = plat_core_pos(mpidr);
int rc = PSCI_E_INVALID_PARAMS;
u_register_t core_mask;
if (core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT) {
_soc_set_start_addr(warmboot_entry);
dsb();
isb();
core_mask = (1 << core_pos);
rc = _psci_cpu_on(core_mask);
}
return (rc);
}
#endif
#if (SOC_CORE_OFF)
static void _pwr_domain_off(const psci_power_state_t *target_state)
{
u_register_t core_mask = plat_my_core_mask();
u_register_t core_state = _getCoreState(core_mask);
/* set core state in internal data */
core_state = CORE_OFF_PENDING;
_setCoreState(core_mask, core_state);
_psci_cpu_prep_off(core_mask);
}
#endif
#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
static void __dead2 _pwr_down_wfi(const psci_power_state_t *target_state)
{
u_register_t core_mask = plat_my_core_mask();
u_register_t core_state = _getCoreState(core_mask);
switch (core_state) {
#if (SOC_CORE_OFF)
case CORE_OFF_PENDING:
/* set core state in internal data */
core_state = CORE_OFF;
_setCoreState(core_mask, core_state);
/* turn the core off */
_psci_cpu_off_wfi(core_mask, warmboot_entry);
break;
#endif
#if (SOC_CORE_PWR_DWN)
case CORE_PWR_DOWN:
/* power-down the core */
_psci_cpu_pwrdn_wfi(core_mask, warmboot_entry);
break;
#endif
#if (SOC_SYSTEM_PWR_DWN)
case SYS_OFF_PENDING:
/* set core state in internal data */
core_state = SYS_OFF;
_setCoreState(core_mask, core_state);
/* power-down the system */
_psci_sys_pwrdn_wfi(core_mask, warmboot_entry);
break;
#endif
default:
_no_return_wfi();
break;
}
}
#endif
#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
static void _pwr_domain_wakeup(const psci_power_state_t *target_state)
{
u_register_t core_mask = plat_my_core_mask();
u_register_t core_state = _getCoreState(core_mask);
switch (core_state) {
case CORE_PENDING: /* this core is coming out of reset */
/* soc per cpu setup */
soc_init_percpu();
/* gic per cpu setup */
plat_gic_pcpu_init();
/* set core state in internal data */
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
break;
#if (SOC_CORE_RESTART)
case CORE_WAKEUP:
/* this core is waking up from OFF */
_psci_wakeup(core_mask);
/* set core state in internal data */
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
break;
#endif
}
}
#endif
#if (SOC_CORE_STANDBY)
static void _pwr_cpu_standby(plat_local_state_t cpu_state)
{
u_register_t core_mask = plat_my_core_mask();
u_register_t core_state;
if (cpu_state == PLAT_MAX_RET_STATE) {
/* set core state to standby */
core_state = CORE_STANDBY;
_setCoreState(core_mask, core_state);
_psci_core_entr_stdby(core_mask);
/* when we are here, the core is waking up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
}
}
#endif
#if (SOC_CORE_PWR_DWN)
static void _pwr_suspend(const psci_power_state_t *state)
{
u_register_t core_mask = plat_my_core_mask();
u_register_t core_state;
if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_SYSTEM_PWR_DWN)
_psci_sys_prep_pwrdn(core_mask);
/* set core state */
core_state = SYS_OFF_PENDING;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_MAX_LVL]
== PLAT_MAX_RET_STATE) {
#if (SOC_SYSTEM_STANDBY)
_psci_sys_prep_stdby(core_mask);
/* set core state */
core_state = CORE_STANDBY;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
PLAT_MAX_OFF_STATE) {
#if (SOC_CLUSTER_PWR_DWN)
_psci_clstr_prep_pwrdn(core_mask);
/* set core state */
core_state = CORE_PWR_DOWN;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
PLAT_MAX_RET_STATE) {
#if (SOC_CLUSTER_STANDBY)
_psci_clstr_prep_stdby(core_mask);
/* set core state */
core_state = CORE_STANDBY;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_CORE_PWR_DWN)
/* prep the core for power-down */
_psci_core_prep_pwrdn(core_mask);
/* set core state */
core_state = CORE_PWR_DOWN;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
#if (SOC_CORE_STANDBY)
_psci_core_prep_stdby(core_mask);
/* set core state */
core_state = CORE_STANDBY;
_setCoreState(core_mask, core_state);
#endif
}
}
#endif
#if (SOC_CORE_PWR_DWN)
static void _pwr_suspend_finish(const psci_power_state_t *state)
{
u_register_t core_mask = plat_my_core_mask();
u_register_t core_state;
if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_SYSTEM_PWR_DWN)
_psci_sys_exit_pwrdn(core_mask);
/* when we are here, the core is back up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_MAX_LVL]
== PLAT_MAX_RET_STATE) {
#if (SOC_SYSTEM_STANDBY)
_psci_sys_exit_stdby(core_mask);
/* when we are here, the core is waking up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
PLAT_MAX_OFF_STATE) {
#if (SOC_CLUSTER_PWR_DWN)
_psci_clstr_exit_pwrdn(core_mask);
/* when we are here, the core is waking up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CLSTR_LVL] ==
PLAT_MAX_RET_STATE) {
#if (SOC_CLUSTER_STANDBY)
_psci_clstr_exit_stdby(core_mask);
/* when we are here, the core is waking up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) {
#if (SOC_CORE_PWR_DWN)
_psci_core_exit_pwrdn(core_mask);
/* when we are here, the core is back up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
#endif
} else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) {
#if (SOC_CORE_STANDBY)
_psci_core_exit_stdby(core_mask);
/* when we are here, the core is waking up
* set core state to released
*/
core_state = CORE_RELEASED;
_setCoreState(core_mask, core_state);
#endif
}
}
#endif
#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
#define PWR_STATE_TYPE_MASK 0x00010000
#define PWR_STATE_TYPE_STNDBY 0x0
#define PWR_STATE_TYPE_PWRDWN 0x00010000
#define PWR_STATE_LVL_MASK 0x03000000
#define PWR_STATE_LVL_CORE 0x0
#define PWR_STATE_LVL_CLSTR 0x01000000
#define PWR_STATE_LVL_SYS 0x02000000
#define PWR_STATE_LVL_MAX 0x03000000
/* turns a requested power state into a target power state
* based on SoC capabilities
*/
static int _pwr_state_validate(uint32_t pwr_state,
psci_power_state_t *state)
{
int stat = PSCI_E_INVALID_PARAMS;
int pwrdn = (pwr_state & PWR_STATE_TYPE_MASK);
int lvl = (pwr_state & PWR_STATE_LVL_MASK);
switch (lvl) {
case PWR_STATE_LVL_MAX:
if (pwrdn && SOC_SYSTEM_PWR_DWN)
state->pwr_domain_state[PLAT_MAX_LVL] =
PLAT_MAX_OFF_STATE;
else if (SOC_SYSTEM_STANDBY)
state->pwr_domain_state[PLAT_MAX_LVL] =
PLAT_MAX_RET_STATE;
/* intentional fall-thru condition */
case PWR_STATE_LVL_SYS:
if (pwrdn && SOC_SYSTEM_PWR_DWN)
state->pwr_domain_state[PLAT_SYS_LVL] =
PLAT_MAX_OFF_STATE;
else if (SOC_SYSTEM_STANDBY)
state->pwr_domain_state[PLAT_SYS_LVL] =
PLAT_MAX_RET_STATE;
/* intentional fall-thru condition */
case PWR_STATE_LVL_CLSTR:
if (pwrdn && SOC_CLUSTER_PWR_DWN)
state->pwr_domain_state[PLAT_CLSTR_LVL] =
PLAT_MAX_OFF_STATE;
else if (SOC_CLUSTER_STANDBY)
state->pwr_domain_state[PLAT_CLSTR_LVL] =
PLAT_MAX_RET_STATE;
/* intentional fall-thru condition */
case PWR_STATE_LVL_CORE:
stat = PSCI_E_SUCCESS;
if (pwrdn && SOC_CORE_PWR_DWN)
state->pwr_domain_state[PLAT_CORE_LVL] =
PLAT_MAX_OFF_STATE;
else if (SOC_CORE_STANDBY)
state->pwr_domain_state[PLAT_CORE_LVL] =
PLAT_MAX_RET_STATE;
break;
}
return (stat);
}
#endif
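/*
 * Worked example: a CPU_SUSPEND power_state of 0x02010000 decodes above
 * as lvl = PWR_STATE_LVL_SYS and pwrdn = PWR_STATE_TYPE_PWRDWN, so the
 * intentional fall-through fills the system, cluster and core entries
 * of pwr_domain_state[] with PLAT_MAX_OFF_STATE (capabilities
 * permitting) and returns PSCI_E_SUCCESS.
 */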
#if (SOC_SYSTEM_PWR_DWN)
static void _pwr_state_sys_suspend(psci_power_state_t *req_state)
{
/* if we need to have per-SoC settings, then we need to
* extend this by calling into psci_utils.S and from there
* on down to the SoC.S files
*/
req_state->pwr_domain_state[PLAT_MAX_LVL] = PLAT_MAX_OFF_STATE;
req_state->pwr_domain_state[PLAT_SYS_LVL] = PLAT_MAX_OFF_STATE;
req_state->pwr_domain_state[PLAT_CLSTR_LVL] = PLAT_MAX_OFF_STATE;
req_state->pwr_domain_state[PLAT_CORE_LVL] = PLAT_MAX_OFF_STATE;
}
#endif
#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
static int psci_system_reset2(int is_vendor,
int reset_type,
u_register_t cookie)
{
int ret = 0;
INFO("Executing the sequence of warm reset.\n");
ret = prep_n_execute_warm_reset();
return ret;
}
#endif
static plat_psci_ops_t _psci_pm_ops = {
#if (SOC_SYSTEM_OFF)
.system_off = _psci_system_off,
#endif
#if (SOC_SYSTEM_RESET)
.system_reset = _psci_system_reset,
#endif
#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2)
.system_reset2 = psci_system_reset2,
#endif
#if (SOC_CORE_RELEASE || SOC_CORE_RESTART)
/* core released or restarted */
.pwr_domain_on_finish = _pwr_domain_wakeup,
#endif
#if (SOC_CORE_OFF)
/* core shutting down */
.pwr_domain_off = _pwr_domain_off,
#endif
#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN)
.pwr_domain_pwr_down_wfi = _pwr_down_wfi,
#endif
#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN)
/* cpu_suspend */
.validate_power_state = _pwr_state_validate,
#if (SOC_CORE_STANDBY)
.cpu_standby = _pwr_cpu_standby,
#endif
#if (SOC_CORE_PWR_DWN)
.pwr_domain_suspend = _pwr_suspend,
.pwr_domain_suspend_finish = _pwr_suspend_finish,
#endif
#endif
#if (SOC_SYSTEM_PWR_DWN)
.get_sys_suspend_power_state = _pwr_state_sys_suspend,
#endif
#if (SOC_CORE_RELEASE)
/* core executing psci_cpu_on */
.pwr_domain_on = _pwr_domain_on
#endif
};
#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN)
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
warmboot_entry = sec_entrypoint;
*psci_ops = &_psci_pm_ops;
return 0;
}
#else
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*psci_ops = &_psci_pm_ops;
return 0;
}
#endif
#
# Copyright 2018-2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#
#------------------------------------------------------------------------------
#
# Select the PSCI files
#
# -----------------------------------------------------------------------------
ifeq (${ADD_PSCI},)
ADD_PSCI := 1
PLAT_PSCI_PATH := $(PLAT_COMMON_PATH)/psci
PSCI_SOURCES := ${PLAT_PSCI_PATH}/plat_psci.c \
${PLAT_PSCI_PATH}/$(ARCH)/psci_utils.S \
plat/common/plat_psci_common.c
PLAT_INCLUDES += -I${PLAT_PSCI_PATH}/include
ifeq (${BL_COMM_PSCI_NEEDED},yes)
BL_COMMON_SOURCES += ${PSCI_SOURCES}
else
ifeq (${BL2_PSCI_NEEDED},yes)
BL2_SOURCES += ${PSCI_SOURCES}
endif
ifeq (${BL31_PSCI_NEEDED},yes)
BL31_SOURCES += ${PSCI_SOURCES}
endif
endif
endif
# -----------------------------------------------------------------------------
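# Illustrative usage (hypothetical fragment name and platform makefile):
# a platform selects which images receive the PSCI sources by setting
# the corresponding flag before including this file, e.g.:
#   BL31_PSCI_NEEDED := yes
#   include ${PLAT_COMMON_PATH}/psci/psci.mk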
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <common/bl_common.h>
#include <common/desc_image_load.h>
#ifdef CSF_HEADER_PREPENDED
#include <csf_hdr.h>
#endif
#include <plat/common/platform.h>
#include <platform_def.h>
/*******************************************************************************
* The following descriptor provides BL image/ep information that is used
* by BL2 to load the images; a subset of this information is also
* passed on to the next BL image. The image loading sequence is managed
* by populating the images in the required loading order. The image
* execution sequence is managed by populating `next_handoff_image_id`
* with the next executable image id.
******************************************************************************/
static bl_mem_params_node_t bl2_mem_params_descs[] = {
/* Fill BL31 related information */
{
.image_id = BL31_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_2, entry_point_info_t,
SECURE | EXECUTABLE | EP_FIRST_EXE),
.ep_info.pc = BL31_BASE,
.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS),
#if DEBUG
.ep_info.args.arg1 = LS_BL31_PLAT_PARAM_VAL,
#endif
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
#ifdef CSF_HEADER_PREPENDED
.image_info.image_base = BL31_BASE - CSF_HDR_SZ,
.image_info.image_max_size = (BL31_LIMIT - BL31_BASE) +
CSF_HDR_SZ,
#else
.image_info.image_base = BL31_BASE,
.image_info.image_max_size = (BL31_LIMIT - BL31_BASE),
#endif
# ifdef NXP_LOAD_BL32
.next_handoff_image_id = BL32_IMAGE_ID,
# else
.next_handoff_image_id = BL33_IMAGE_ID,
# endif
},
# ifdef NXP_LOAD_BL32
/* Fill BL32 related information */
{
.image_id = BL32_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
.ep_info.pc = BL32_BASE,
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, 0),
#ifdef CSF_HEADER_PREPENDED
.image_info.image_base = BL32_BASE - CSF_HDR_SZ,
.image_info.image_max_size = (BL32_LIMIT - BL32_BASE) +
CSF_HDR_SZ,
#else
.image_info.image_base = BL32_BASE,
.image_info.image_max_size = (BL32_LIMIT - BL32_BASE),
#endif
.next_handoff_image_id = BL33_IMAGE_ID,
},
# endif /* NXP_LOAD_BL32 */
/* Fill BL33 related information */
{
.image_id = BL33_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
.ep_info.pc = BL33_BASE,
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, 0),
#ifdef CSF_HEADER_PREPENDED
.image_info.image_base = BL33_BASE - CSF_HDR_SZ,
.image_info.image_max_size = (BL33_LIMIT - BL33_BASE) +
CSF_HDR_SZ,
#else
.image_info.image_base = BL33_BASE,
.image_info.image_max_size = BL33_LIMIT - BL33_BASE,
#endif
.ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS),
.next_handoff_image_id = INVALID_IMAGE_ID,
}
};
REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
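/*
 * Illustrative sketch only: common/desc_image_load.c walks the list
 * registered above and follows next_handoff_image_id to derive the
 * hand-off chain (BL31 -> [BL32] -> BL33). The hypothetical helper
 * below shows that traversal using the real get_bl_mem_params_node()
 * accessor; it assumes <common/debug.h> for INFO() and is an example,
 * not code from this port.
 */
#if 0
static void example_print_handoff_chain(void)
{
	unsigned int id = BL31_IMAGE_ID;

	while (id != INVALID_IMAGE_ID) {
		bl_mem_params_node_t *node = get_bl_mem_params_node(id);

		if (node == NULL) {
			break;
		}
		INFO("image %u hands off to image %u\n",
		     id, node->next_handoff_image_id);
		id = node->next_handoff_image_id;
	}
}
#endif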
#
# Copyright 2018-2021 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#
###############################################################################
# Boot flow begins in BL2 running at EL3
BL2_AT_EL3 := 1
# Although only one core is powered up by default, there are
# platform-specific ways to release more than one core
COLD_BOOT_SINGLE_CPU := 0
PROGRAMMABLE_RESET_ADDRESS := 1
USE_COHERENT_MEM := 0
# Use generic OID definition (tbbr_oid.h)
USE_TBBR_DEFS := 1
PLAT_XLAT_TABLES_DYNAMIC := 0
ENABLE_SVE_FOR_NS := 0
ENABLE_STACK_PROTECTOR := 0
ERROR_DEPRECATED := 0
LS_DISABLE_TRUSTED_WDOG := 1
# On ARM platforms, separate the code and read-only data sections to allow
# mapping the former as executable and the latter as execute-never.
SEPARATE_CODE_AND_RODATA := 1
# Enable new version of image loading on ARM platforms
LOAD_IMAGE_V2 := 1
RCW := ""
ifneq (${SPD},none)
$(eval $(call add_define, NXP_LOAD_BL32))
endif
###############################################################################
PLAT_TOOL_PATH := tools/nxp
CREATE_PBL_TOOL_PATH := ${PLAT_TOOL_PATH}/create_pbl
PLAT_SETUP_PATH := ${PLAT_PATH}/common/setup
PLAT_INCLUDES += -I${PLAT_SETUP_PATH}/include \
-Iinclude/plat/arm/common \
-Iinclude/drivers/arm \
-Iinclude/lib \
-Iinclude/drivers/io \
-Ilib/psci
# Required even without TBBR, to pick up the defines used by the
# DDR PHY images.
PLAT_INCLUDES += -Iinclude/common/tbbr
include ${PLAT_SETUP_PATH}/core.mk
PLAT_BL_COMMON_SOURCES += ${CPU_LIBS} \
plat/nxp/common/setup/ls_err.c \
plat/nxp/common/setup/ls_common.c
ifneq (${ENABLE_STACK_PROTECTOR},0)
PLAT_BL_COMMON_SOURCES += ${PLAT_SETUP_PATH}/ls_stack_protector.c
endif
include lib/xlat_tables_v2/xlat_tables.mk
PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS}
BL2_SOURCES += drivers/io/io_fip.c \
drivers/io/io_memmap.c \
drivers/io/io_storage.c \
common/desc_image_load.c \
plat/nxp/common/setup/ls_image_load.c \
plat/nxp/common/setup/ls_io_storage.c \
plat/nxp/common/setup/ls_bl2_el3_setup.c \
plat/nxp/common/setup/${ARCH}/ls_bl2_mem_params_desc.c
BL31_SOURCES	+=	plat/nxp/common/setup/ls_bl31_setup.c
ifeq (${LS_EL3_INTERRUPT_HANDLER}, yes)
$(eval $(call add_define, LS_EL3_INTERRUPT_HANDLER))
BL31_SOURCES += plat/nxp/common/setup/ls_interrupt_mgmt.c
endif
ifeq (${TEST_BL31}, 1)
BL31_SOURCES += ${TEST_SOURCES}
endif
# Verify build config
# -------------------
ifneq (${LOAD_IMAGE_V2}, 1)
$(error Error: Layerscape needs LOAD_IMAGE_V2=1)
else
$(eval $(call add_define,LOAD_IMAGE_V2))
endif
include $(CREATE_PBL_TOOL_PATH)/create_pbl.mk
#
# Copyright 2018-2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#
#------------------------------------------------------------------------------
#
# Select the CORE files
#
# -----------------------------------------------------------------------------
CPU_LIBS := lib/cpus/${ARCH}/aem_generic.S
ifeq (,$(filter $(CORE_TYPE),a53 a55 a57 a72 a75))
$(error "CORE_TYPE not specified or incorrect")
else
CPU_LIBS += lib/cpus/${ARCH}/cortex_$(CORE_TYPE).S
endif
# -----------------------------------------------------------------------------
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef BL31_DATA_H
#define BL31_DATA_H
#define SECURE_DATA_BASE NXP_OCRAM_ADDR
#define SECURE_DATA_SIZE NXP_OCRAM_SIZE
#define SECURE_DATA_TOP (SECURE_DATA_BASE + SECURE_DATA_SIZE)
#define SMC_REGION_SIZE 0x80
#define SMC_GLBL_BASE (SECURE_DATA_TOP - SMC_REGION_SIZE)
#define BC_PSCI_DATA_SIZE 0xC0
#define BC_PSCI_BASE (SMC_GLBL_BASE - BC_PSCI_DATA_SIZE)
#define SECONDARY_TOP BC_PSCI_BASE
#define SEC_PSCI_DATA_SIZE 0xC0
#define SEC_REGION_SIZE SEC_PSCI_DATA_SIZE
/* SMC global data */
#define BOOTLOC_OFFSET 0x0
#define BOOT_SVCS_OSET 0x8
/* offset to prefetch disable mask */
#define PREFETCH_DIS_OFFSET 0x10
/* must reference last smc global entry */
#define LAST_SMC_GLBL_OFFSET 0x18
#define SMC_TASK_OFFSET 0xC
#define TSK_START_OFFSET 0x0
#define TSK_DONE_OFFSET 0x4
#define TSK_CORE_OFFSET 0x8
#define SMC_TASK1_BASE (SMC_GLBL_BASE + 32)
#define SMC_TASK2_BASE (SMC_TASK1_BASE + SMC_TASK_OFFSET)
#define SMC_TASK3_BASE (SMC_TASK2_BASE + SMC_TASK_OFFSET)
#define SMC_TASK4_BASE (SMC_TASK3_BASE + SMC_TASK_OFFSET)
/* psci data area offsets */
#define CORE_STATE_DATA 0x0
#define SPSR_EL3_DATA 0x8
#define CNTXT_ID_DATA 0x10
#define START_ADDR_DATA 0x18
#define LINK_REG_DATA 0x20
#define GICC_CTLR_DATA 0x28
#define ABORT_FLAG_DATA 0x30
#define SCTLR_DATA 0x38
#define CPUECTLR_DATA 0x40
#define AUX_01_DATA 0x48 /* usage defined per SoC */
#define AUX_02_DATA 0x50 /* usage defined per SoC */
#define AUX_03_DATA 0x58 /* usage defined per SoC */
#define AUX_04_DATA 0x60 /* usage defined per SoC */
#define AUX_05_DATA 0x68 /* usage defined per SoC */
#define AUX_06_DATA 0x70 /* usage defined per SoC */
#define AUX_07_DATA 0x78 /* usage defined per SoC */
#define SCR_EL3_DATA 0x80
#define HCR_EL2_DATA 0x88
#endif /* BL31_DATA_H */
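/*
 * Illustrative sketch only: the offsets above index fixed-size records
 * of 8-byte slots kept in OCRAM; the record layout itself is set up by
 * the platform assembly, which is not shown here. The helper
 * example_read_start_addr() and its core_data_base parameter are
 * hypothetical and only demonstrate how a field such as the saved
 * start address would be read at START_ADDR_DATA.
 */
#if 0
#include <stdint.h>

static uint64_t example_read_start_addr(uintptr_t core_data_base)
{
	/* Each field occupies an 8-byte slot at a fixed offset. */
	return *(volatile uint64_t *)(core_data_base + START_ADDR_DATA);
}
#endif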
/*
* Copyright 2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef LS_EL3_INTRPT_MGMT_H
#define LS_EL3_INTRPT_MGMT_H
#include <bl31/interrupt_mgmt.h>
#define MAX_INTR_EL3 128
/*
 * Register a handler for a given interrupt ID of type INTR_TYPE_EL3,
 * to be invoked from the EL3 GIC entry path.
 */
int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler);
void ls_el3_interrupt_config(void);
#endif /* LS_EL3_INTRPT_MGMT_H */
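/*
 * Illustrative sketch only: a BL31 component would typically call
 * ls_el3_interrupt_config() once during setup and then register a
 * per-ID handler with request_intr_type_el3(). The handler
 * example_el3_handler and the interrupt ID 32 are hypothetical.
 */
#if 0
static uint64_t example_el3_handler(uint32_t id, uint32_t flags,
				    void *handle, void *cookie)
{
	/* Acknowledge and handle the interrupt source here. */
	return 0U;
}

static void example_register(void)
{
	ls_el3_interrupt_config();

	if (request_intr_type_el3(32U, example_el3_handler) != 0) {
		/* Registration rejected, e.g. id >= MAX_INTR_EL3. */
	}
}
#endif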