Commit 9719e19a authored by Joanna Farley's avatar Joanna Farley Committed by TrustedFirmware Code Review
Browse files

Merge changes I500ddbe9,I9c10dac9,I53bfff85,I06f7594d,I24bff8d4, ... into integration

* changes:
  nxp lx2160a-aqds: new plat based on soc lx2160a
  NXP lx2160a-rdb: new plat based on SoC lx2160a
  nxp lx2162aqds: new plat based on soc lx2160a
  nxp: errata handling at soc level for lx2160a
  nxp: make file for loading additional ddr image
  nxp: adding support of soc lx2160a
  nxp: deflt hdr files for soc & their platforms
  nxp: platform files for bl2 and bl31 setup
  nxp: warm reset support to retain ddr content
  nxp: nv storage api on platforms
  nxp: supports two modes of trusted board boot
  nxp: fip-handler for additional fip_fuse.bin
  nxp: fip-handler for additional ddr-fip.bin
  nxp: image loader for loading fip image
  nxp: svp & sip smc handling
  nxp: psci platform functions used by lib/psci
  nxp: helper function used by plat & common code
  nxp: add data handler used by bl31
  nxp: adding the driver.mk file
  nxp-tool: for creating pbl file from bl2
  nxp: adding the smmu driver
  nxp: cot using nxp internal and mbedtls
  nxp:driver for crypto h/w accelerator caam
  nxp:add driver support for sd and emmc
  nxp:add qspi driver
  nxp: add flexspi driver support
  nxp: adding gic apis for nxp soc
  nxp: gpio driver support
  nxp: added csu driver
  nxp: driver pmu for nxp soc
  nxp: ddr driver enablement for nxp layerscape soc
  nxp: i2c driver support.
  NXP: Driver for NXP Security Monitor
  NXP: SFP driver support for NXP SoC
  NXP: Interconnect API based on ARM CCN-CCI driver
  NXP: TZC API to configure ddr region
  NXP: Timer API added to enable ARM generic timer
  nxp: add dcfg driver
  nxp:add console driver for nxp platform
  tools: add mechanism to allow platform specific image UUID
  tbbr-cot: conditional definition for the macro
  tbbr-cot: fix the issue of compiling time define
  cert_create: updated tool for platform defined certs, keys & extensions
  tbbr-tools: enable override TRUSTED_KEY_CERT
parents b59444ea f359a382
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_WARM_RST_H
#define PLAT_WARM_RST_H
#ifndef NXP_COINED_BB
/* Value written to the flash-resident warm-reset flag when the
 * battery-backed (coined) storage is not available.
 */
#define ERLY_WRM_RST_FLG_FLSH_UPDT 0
#endif
#ifndef __ASSEMBLER__
/* Fix: uint32_t below requires <stdint.h>; the header previously relied on
 * the includer having pulled it in already.
 */
#include <stdint.h>
#if defined(IMAGE_BL2)
/* Returns non-zero when the current boot is a warm boot, 0 otherwise. */
uint32_t is_warm_boot(void);
#endif
#if defined(IMAGE_BL31)
/* Prepares the SoC and then executes a warm reset; returns on failure. */
int prep_n_execute_warm_reset(void);
/* SoC-level warm-reset primitive (implemented in platform asm/C). */
int _soc_sys_warm_reset(void);
#endif
#endif /* __ASSEMBLER__ */
#endif /* PLAT_WARM_RST_H */
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
#-----------------------------------------------------------------------------
# Warm-reset support makefile fragment.
# Guard so the variables/defines below are added at most once, even if this
# fragment is included from multiple makefiles.
ifeq (${WARM_RST_ADDED},)
WARM_RST_ADDED := 1
# Warm boot needs the non-volatile "last execution" data service.
NXP_NV_SW_MAINT_LAST_EXEC_DATA := yes
# Expose NXP_WARM_BOOT as a C/asm preprocessor define.
$(eval $(call add_define,NXP_WARM_BOOT))
WARM_RST_INCLUDES += -I${PLAT_COMMON_PATH}/warm_reset
# SoC-specific warm-reset asm goes into BL31 only.
WARM_RST_BL31_SOURCES += ${PLAT_SOC_PATH}/$(ARCH)/${SOC}_warm_rst.S
# Common C part is shared by all BL stages that need warm reset.
WARM_RST_BL_COMM_SOURCES += ${PLAT_COMMON_PATH}/warm_reset/plat_warm_reset.c
endif
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
/* NOTE(review): the .section directive precedes the #includes; conventional
 * TF-A asm files include headers first — confirm none of the included
 * headers emit into a different section.
 */
.section .text, "ax"
#include <asm_macros.S>
#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>
#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>
/* Entry points exported to the platform PSCI / setup layers. */
.global soc_init_start
.global soc_init_percpu
.global soc_init_finish
.global _set_platform_security
.global _soc_set_start_addr
.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn
/* TZPC (TrustZone Protection Controller) register addresses. */
.equ TZPC_BASE, 0x02200000
.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C
/* Mask covering core-mask bits 6 and 7 (the cluster 3 cores). */
#define CLUSTER_3_CORES_MASK 0xC0
#define CLUSTER_3_IN_RESET 1
#define CLUSTER_3_NORMAL 0
/* cluster 3 handling no longer based on frequency, but rather on RCW[850],
 * which is bit 18 of RCWSR27
 */
#define CLUSTER_3_RCW_BIT 0x40000
/* retry count for clock-stop acks */
.equ CLOCK_RETRY_CNT, 800
/* disable prefetching in the A72 core */
#define CPUACTLR_DIS_LS_HW_PRE 0x100000000000000
#define CPUACTLR_DIS_L2_TLB_PRE 0x200000
/* Function starts the initialization tasks of the soc,
 * using secondary cores if they are available
 *
 * Called from C, saving the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 * in:
 * out:
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
 */
func soc_init_start
/* save callee-saved regs (plus x18/x30) as 16-byte pairs so sp
 * stays 16-byte aligned across the calls below
 */
stp x4, x5, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x8, x9, [sp, #-16]!
stp x10, x11, [sp, #-16]!
stp x12, x13, [sp, #-16]!
stp x18, x30, [sp, #-16]!
/* make sure the personality has been
 * established by releasing cores that
 * are marked "to-be-disabled" from reset
 */
bl release_disabled /* 0-9 */
/* init the task flags */
bl _init_task_flags /* 0-1 */
/* set SCRATCHRW7 to 0x0 */
ldr x0, =DCFG_SCRATCHRW7_OFFSET
mov x1, xzr
bl _write_reg_dcfg
/* common exit point (fall-through) */
1:
/* restore the aarch32/64 non-volatile registers */
ldp x18, x30, [sp], #16
ldp x12, x13, [sp], #16
ldp x10, x11, [sp], #16
ldp x8, x9, [sp], #16
ldp x6, x7, [sp], #16
ldp x4, x5, [sp], #16
ret
endfunc soc_init_start
/* Function performs any soc-specific initialization that is needed on
 * a per-core basis.
 * in: none
 * out: none
 * uses x0, x1, x2, x3 (x4 is saved/restored only for stack alignment)
 */
func soc_init_percpu
stp x4, x30, [sp, #-16]!
bl plat_my_core_mask
mov x2, x0 /* x2 = core mask */
/* Check if this core is marked for prefetch disable
 */
mov x0, #PREFETCH_DIS_OFFSET
bl _get_global_data /* 0-1 */
/* x0 = bitmap of cores with prefetch disabled */
tst x0, x2
b.eq 1f
bl _disable_ldstr_pfetch_A72 /* 0 */
1:
/* route the PMU timer base to this core's cluster */
mov x0, #NXP_PMU_ADDR
bl enable_timer_base_to_cluster
ldp x4, x30, [sp], #16
ret
endfunc soc_init_percpu
/* Function completes the initialization tasks of the soc.
 * There is currently no SoC-specific finalization work to do.
 * in: none
 * out: none
 * uses none
 */
func soc_init_finish
/* The previous stp/ldp of x4,x30 was a dead save/restore pair
 * (nothing executed between them and no registers are touched
 * here), so it has been removed.
 */
ret
endfunc soc_init_finish
/* Function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 * in: none
 * out: none
 * uses x0, x8 (x8 holds the saved link register across the calls)
 */
func _set_platform_security
mov x8, x30
#if (!SUPPRESS_TZC)
/* initialize the tzpc */
bl init_tzpc
#endif
#if (!SUPPRESS_SEC)
/* initialize secmon */
#ifdef NXP_SNVS_ENABLED
mov x0, #NXP_SNVS_ADDR
bl init_sec_mon
#endif
#endif
mov x30, x8
ret
endfunc _set_platform_security
/* Function writes a 64-bit address to bootlocptrh/l
 * in: x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
/* Get the 64-bit base address of the dcfg block */
ldr x2, =NXP_DCFG_ADDR
/* write the 32-bit BOOTLOCPTRL register (low half of x0) */
mov x1, x0
str w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]
/* write the 32-bit BOOTLOCPTRH register (high half of x0) */
lsr x1, x0, #32
str w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
ret
endfunc _soc_set_start_addr
/* Function releases a secondary core from reset
 * in: x0 = core_mask_lsb
 * out: none
 * uses: x0, x1, x2, x3
 */
func _soc_core_release
mov x3, x30
ldr x1, =NXP_SEC_REGFILE_ADDR
/* write to CORE_HOLD to tell
 * the bootrom that this core is
 * expected to run.
 */
str w0, [x1, #CORE_HOLD_OFFSET]
/* read-modify-write BRRL to release core */
mov x1, #NXP_RESET_ADDR
ldr w2, [x1, #BRR_OFFSET]
/* x0 = core mask */
orr w2, w2, w0
str w2, [x1, #BRR_OFFSET]
dsb sy
isb
/* send event to wake the released core out of wfe */
sev
isb
mov x30, x3
ret
endfunc _soc_core_release
/* Function determines if a core is disabled via COREDISABLEDSR
 * in: w0 = core_mask_lsb
 * out: w0 = 0, core not disabled
 * w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled
/* get base addr of dcfg block */
ldr x1, =NXP_DCFG_ADDR
/* read COREDISABLEDSR */
ldr w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]
/* test core bit; result (zero/non-zero) is the return value */
and w0, w1, w0
ret
endfunc _soc_ck_disabled
/* Part of CPU_ON
 * Function restarts a core shutdown via _soc_core_entr_off
 * (wakes it by firing SGI 15 at the target core)
 * in: x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 * x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_restart
mov x6, x30
mov x4, x0
/* pgm GICD_CTLR - enable secure grp0 */
mov x5, #NXP_GICD_ADDR
ldr w2, [x5, #GICD_CTLR_OFFSET]
orr w2, w2, #GICD_CTLR_EN_GRP_0
str w2, [x5, #GICD_CTLR_OFFSET]
dsb sy
isb
/* poll on RWP til write completes */
4:
ldr w2, [x5, #GICD_CTLR_OFFSET]
tst w2, #GICD_CTLR_RWP
b.ne 4b
/* x4 = core mask lsb
 * x5 = gicd base addr
 */
mov x0, x4
bl get_mpidr_value
/* x0 = mpidr of target core
 * x4 = core mask lsb of target core
 * x5 = gicd base addr
 */
/* generate target list bit from Aff0 of the target mpidr */
and x1, x0, #MPIDR_AFFINITY0_MASK
mov x2, #1
lsl x2, x2, x1
/* get the affinity1 field (cluster) into SGI register layout */
and x1, x0, #MPIDR_AFFINITY1_MASK
lsl x1, x1, #8
orr x2, x2, x1
/* insert the INTID for SGI15 */
orr x2, x2, #ICC_SGI0R_EL1_INTID
/* fire the SGI */
msr ICC_SGI0R_EL1, x2
dsb sy
isb
/* load '0' on success */
mov x0, xzr
mov x30, x6
ret
endfunc _soc_core_restart
/* Part of CPU_OFF
 * Function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
func _soc_core_prep_off
mov x8, x30 /* save link register */
mov x7, x0 /* x7 = core mask lsb */
mrs x1, CORTEX_A72_ECTLR_EL1
/* set smp and disable L2 snoops in cpuectlr */
orr x1, x1, #CPUECTLR_SMPEN_EN
orr x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
bic x1, x1, #CPUECTLR_INS_PREFETCH_MASK
bic x1, x1, #CPUECTLR_DAT_PREFETCH_MASK
/* set retention control in cpuectlr */
bic x1, x1, #CPUECTLR_TIMER_MASK
orr x1, x1, #CPUECTLR_TIMER_8TICKS
msr CORTEX_A72_ECTLR_EL1, x1
/* get redistributor rd base addr for this core */
mov x0, x7
bl get_gic_rd_base
mov x6, x0
/* get redistributor sgi base addr for this core */
mov x0, x7
bl get_gic_sgi_base
mov x5, x0
/* x5 = gicr sgi base addr
 * x6 = gicr rd base addr
 * x7 = core mask lsb
 */
/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
mov w3, #GICR_ICENABLER0_SGI15
str w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
/* poll on rwp bit in GICR_CTLR */
ldr w4, [x6, #GICR_CTLR_OFFSET]
tst w4, #GICR_CTLR_RWP
b.ne 2b
/* disable GRP1 interrupts at cpu interface */
msr ICC_IGRPEN1_EL3, xzr
/* disable GRP0 ints at cpu interface */
msr ICC_IGRPEN0_EL1, xzr
/* program the redistributor - poll on GICR_CTLR.RWP as needed */
/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
ldr w4, [x5, #GICR_IGROUPR0_OFFSET]
bic w4, w4, #GICR_IGROUPR0_SGI15
str w4, [x5, #GICR_IGROUPR0_OFFSET]
/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
ldr w3, [x5, #GICR_IGRPMODR0_OFFSET]
bic w3, w3, #GICR_IGRPMODR0_SGI15
str w3, [x5, #GICR_IGRPMODR0_OFFSET]
/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
ldr w4, [x5, #GICR_IPRIORITYR3_OFFSET]
bic w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
str w4, [x5, #GICR_IPRIORITYR3_OFFSET]
/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
mov w3, #GICR_ISENABLER0_SGI15
str w3, [x5, #GICR_ISENABLER0_OFFSET]
dsb sy
isb
3:
/* poll on rwp bit in GICR_CTLR */
ldr w4, [x6, #GICR_CTLR_OFFSET]
tst w4, #GICR_CTLR_RWP
b.ne 3b
/* quiesce the debug interfaces */
mrs x3, osdlr_el1
orr x3, x3, #OSDLR_EL1_DLK_LOCK
msr osdlr_el1, x3
isb
/* enable grp0 ints */
mov x3, #ICC_IGRPEN0_EL1_EN
msr ICC_IGRPEN0_EL1, x3
/* x5 = gicr sgi base addr
 * x6 = gicr rd base addr
 * x7 = core mask lsb
 */
/* clear any pending interrupts */
mvn w1, wzr
str w1, [x5, #GICR_ICPENDR0_OFFSET]
/* make sure system counter is enabled */
ldr x3, =NXP_TIMER_ADDR
ldr w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
tst w0, #SYS_COUNTER_CNTCR_EN
b.ne 4f
orr w0, w0, #SYS_COUNTER_CNTCR_EN
str w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
/* enable the core timer and mask timer interrupt */
mov x1, #CNTP_CTL_EL0_EN
orr x1, x1, #CNTP_CTL_EL0_IMASK
msr cntp_ctl_el0, x1
isb
mov x30, x8
ret
endfunc _soc_core_prep_off
/* Part of CPU_OFF:
 * Function performs the final steps to shutdown the core
 * (spins in wfi until woken by SGI 15 with state == CORE_WAKEUP)
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_entr_off
mov x5, x30
mov x4, x0
1:
/* enter low-power state by executing wfi */
wfi
/* see if SGI15 woke us up */
mrs x2, ICC_IAR0_EL1
mov x3, #ICC_IAR0_EL1_SGI15
cmp x2, x3
b.ne 2f
/* deactivate the interrupts. */
msr ICC_EOIR0_EL1, x2
2:
/* check if core is turned ON */
mov x0, x4
/* Fetched the core state in x0 */
bl _getCoreState
cmp x0, #CORE_WAKEUP
b.ne 1b
/* Reached here, exited the wfi */
mov x30, x5
ret
endfunc _soc_core_entr_off
/* Part of CPU_OFF:
 * Function starts the process of starting a core back up
 * (undoes the GIC/debug programming done in _soc_core_prep_off)
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_exit_off
mov x6, x30
mov x5, x0
/* disable forwarding of GRP0 ints at cpu interface */
msr ICC_IGRPEN0_EL1, xzr
/* get redistributor sgi base addr for this core */
mov x0, x5
bl get_gic_sgi_base
mov x4, x0
/* x4 = gicr sgi base addr
 * x5 = core mask
 */
/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
mov w1, #GICR_ICENABLER0_SGI15
str w1, [x4, #GICR_ICENABLER0_OFFSET]
/* get redistributor rd base addr for this core */
mov x0, x5
bl get_gic_rd_base
mov x4, x0
2:
/* poll on rwp bit in GICR_CTLR */
ldr w2, [x4, #GICR_CTLR_OFFSET]
tst w2, #GICR_CTLR_RWP
b.ne 2b
/* unlock the debug interfaces */
mrs x3, osdlr_el1
bic x3, x3, #OSDLR_EL1_DLK_LOCK
msr osdlr_el1, x3
isb
dsb sy
isb
mov x30, x6
ret
endfunc _soc_core_exit_off
/* Function requests a reset of the entire SOC
 * in: none
 * out: none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
func _soc_sys_reset
mov x6, x30
ldr x2, =NXP_RST_ADDR
/* clear the RST_REQ_MSK and SW_RST_REQ */
mov w0, #0x00000000
str w0, [x2, #RSTCNTL_OFFSET]
/* initiate the sw reset request */
mov w0, #SW_RST_REQ_INIT
str w0, [x2, #RSTCNTL_OFFSET]
/* In case this address range is mapped as cacheable,
 * flush the write out of the dcaches.
 */
add x2, x2, #RSTCNTL_OFFSET
dc cvac, x2
dsb st
isb
/* Function does not return: spin until the reset takes effect */
b .
endfunc _soc_sys_reset
/* Part of SYSTEM_OFF:
 * Function turns off the SoC clocks
 * Note: Function is not intended to return, and the only allowable
 * recovery is POR
 * in: none
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_sys_off
/* A-009810: LPM20 entry sequence might cause
 * spurious timeout reset request
 * workaround: MASK RESET REQ RPTOE
 */
ldr x0, =NXP_RESET_ADDR
ldr w1, =RSTRQMR_RPTOE_MASK
str w1, [x0, #RST_RSTRQMR1_OFFSET]
/* disable sec, QBman, spi and qspi */
ldr x2, =NXP_DCFG_ADDR
ldr x0, =DCFG_DEVDISR1_OFFSET
ldr w1, =DCFG_DEVDISR1_SEC
str w1, [x2, x0]
ldr x0, =DCFG_DEVDISR3_OFFSET
ldr w1, =DCFG_DEVDISR3_QBMAIN
str w1, [x2, x0]
ldr x0, =DCFG_DEVDISR4_OFFSET
ldr w1, =DCFG_DEVDISR4_SPI_QSPI
str w1, [x2, x0]
/* set TPMWAKEMR0 */
ldr x0, =TPMWAKEMR0_ADDR
mov w1, #0x1
str w1, [x0]
/* disable icache, dcache, mmu @ EL1 */
mov x1, #SCTLR_I_C_M_MASK
mrs x0, sctlr_el1
bic x0, x0, x1
msr sctlr_el1, x0
/* set SMP and the retention timer in cpuectlr.
 * Fix: the timer field is cleared in x0 (the value just read from
 * CORTEX_A72_ECTLR_EL1) before OR-ing in the 8-ticks setting; the
 * previous code cleared x1 (stale SCTLR mask) and discarded the
 * result, so the retention field was never cleared first.
 */
mrs x0, CORTEX_A72_ECTLR_EL1
bic x0, x0, #CPUECTLR_TIMER_MASK
orr x0, x0, #CPUECTLR_SMPEN_EN
orr x0, x0, #CPUECTLR_TIMER_8TICKS
msr CORTEX_A72_ECTLR_EL1, x0
isb
/* disable CCN snoop domain */
mov x1, #NXP_CCN_HN_F_0_ADDR
ldr x0, =CCN_HN_F_SNP_DMN_CTL_MASK
str x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
3:
ldr w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
cmp w2, #0x2
b.ne 3b
mov x3, #NXP_PMU_ADDR
/* wait for all cores to idle */
4:
ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
cmp w1, #PMU_IDLE_CORE_MASK
b.ne 4b
mov w1, #PMU_IDLE_CLUSTER_MASK
str w1, [x3, #PMU_CLAINACTSETR_OFFSET]
1:
ldr w1, [x3, #PMU_PCPW20SR_OFFSET]
cmp w1, #PMU_IDLE_CORE_MASK
b.ne 1b
/* flush the cluster L2 caches and wait for completion */
mov w1, #PMU_FLUSH_CLUSTER_MASK
str w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]
2:
ldr w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
cmp w1, #PMU_FLUSH_CLUSTER_MASK
b.ne 2b
mov w1, #PMU_FLUSH_CLUSTER_MASK
str w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]
mov w1, #PMU_FLUSH_CLUSTER_MASK
str w1, [x3, #PMU_CLSINACTSETR_OFFSET]
/* mask DAIF in the saved program status of EL1/EL2 */
mov x2, #DAIF_SET_MASK
mrs x1, spsr_el1
orr x1, x1, x2
msr spsr_el1, x1
mrs x1, spsr_el2
orr x1, x1, x2
msr spsr_el2, x1
/* force the debug interface to be quiescent */
mrs x0, osdlr_el1
orr x0, x0, #0x1
msr osdlr_el1, x0
/* invalidate all TLB entries at all 3 exception levels */
tlbi alle1
tlbi alle2
tlbi alle3
/* x3 = pmu base addr */
/* request lpm20 */
ldr x0, =PMU_POWMGTCSR_OFFSET
ldr w1, =PMU_POWMGTCSR_VAL
str w1, [x3, x0]
/* NOTE(review): the b.eq below relies on condition flags set well
 * before the wfe; confirm the intended loop condition — a plain
 * "b 5b" may be what is meant, since this code must not fall out.
 */
5:
wfe
b.eq 5b
endfunc _soc_sys_off
/* Part of CPU_SUSPEND
 * Function puts the calling core into standby state
 * in: x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_stdby
/* ensure all prior memory accesses complete before entering wfi */
dsb sy
isb
wfi
ret
endfunc _soc_core_entr_stdby
/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_core_prep_stdby
/* clear CORTEX_A72_ECTLR_EL1[2:0] (retention control off) */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_TIMER_MASK
msr CORTEX_A72_ECTLR_EL1, x1
ret
endfunc _soc_core_prep_stdby
/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * (none required on this SoC)
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby
ret
endfunc _soc_core_exit_stdby
/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_core_prep_pwrdn
/* make sure system counter is enabled */
ldr x2, =NXP_TIMER_ADDR
ldr w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
tst w0, #SYS_COUNTER_CNTCR_EN
b.ne 1f
orr w0, w0, #SYS_COUNTER_CNTCR_EN
str w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
/* enable dynamic retention control (CPUECTLR[2:0])
 * set the SMPEN bit (CPUECTLR[6])
 */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_RET_MASK
orr x1, x1, #CPUECTLR_TIMER_8TICKS
orr x1, x1, #CPUECTLR_SMPEN_EN
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_core_prep_pwrdn
/* Part of CPU_SUSPEND
 * Function puts the calling core into a power-down state
 * in: x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_pwrdn
/* X0 = core mask lsb */
/* drain memory accesses, then enter low-power state */
dsb sy
isb
wfi
ret
endfunc _soc_core_entr_pwrdn
/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * (none required on this SoC)
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_pwrdn
ret
endfunc _soc_core_exit_pwrdn
/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * (cluster level; same programming as the core-level variant)
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_clstr_prep_stdby
/* clear CORTEX_A72_ECTLR_EL1[2:0] (retention control off) */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_TIMER_MASK
msr CORTEX_A72_ECTLR_EL1, x1
ret
endfunc _soc_clstr_prep_stdby
/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * (none required at cluster level)
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby
ret
endfunc _soc_clstr_exit_stdby
/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * (cluster level; same programming as the core-level variant)
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_clstr_prep_pwrdn
/* make sure system counter is enabled */
ldr x2, =NXP_TIMER_ADDR
ldr w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
tst w0, #SYS_COUNTER_CNTCR_EN
b.ne 1f
orr w0, w0, #SYS_COUNTER_CNTCR_EN
str w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:
/* enable dynamic retention control (CPUECTLR[2:0])
 * set the SMPEN bit (CPUECTLR[6])
 */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_RET_MASK
orr x1, x1, #CPUECTLR_TIMER_8TICKS
orr x1, x1, #CPUECTLR_SMPEN_EN
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_clstr_prep_pwrdn
/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * (none required at cluster level)
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_pwrdn
ret
endfunc _soc_clstr_exit_pwrdn
/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * (system level; same programming as the core-level variant)
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_stdby
/* clear CORTEX_A72_ECTLR_EL1[2:0] (retention control off) */
mrs x1, CORTEX_A72_ECTLR_EL1
bic x1, x1, #CPUECTLR_TIMER_MASK
msr CORTEX_A72_ECTLR_EL1, x1
ret
endfunc _soc_sys_prep_stdby
/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * (none required at system level)
 * in: x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby
ret
endfunc _soc_sys_exit_stdby
/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_pwrdn
mrs x1, CORTEX_A72_ECTLR_EL1
/* make sure the smp bit is set */
orr x1, x1, #CPUECTLR_SMPEN_MASK
/* set the retention control */
orr x1, x1, #CPUECTLR_RET_8CLK
/* disable tablewalk prefetch */
orr x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_sys_prep_pwrdn
/* Part of CPU_SUSPEND
 * Function puts the calling core, and potentially the soc, into a
 * low-power state
 * in: x0 = core mask lsb
 * out: x0 = 0, success
 * x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
 * x15, x16, x17, x18, x19, x20, x21, x28
 * NOTE(review): x18 is the AAPCS64 platform register; its use here as
 * scratch assumes no platform ABI claim on it — confirm for this port.
 */
func _soc_sys_pwrdn_wfi
mov x28, x30 /* save link register */
/* disable cluster snooping in the CCN-508 */
ldr x1, =NXP_CCN_HN_F_0_ADDR
ldr x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
mov x6, #CCN_HNF_NODE_COUNT
1:
str x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
sub x6, x6, #1
add x1, x1, #CCN_HNF_OFFSET
cbnz x6, 1b
/* x0 = core mask
 * x7 = hnf sdcr
 */
ldr x1, =NXP_PMU_CCSR_ADDR
ldr x2, =NXP_PMU_DCSR_ADDR
/* enable the stop-request-override */
mov x3, #PMU_POWMGTDCR0_OFFSET
mov x4, #POWMGTDCR_STP_OV_EN
str w4, [x2, x3]
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x7 = hnf sdcr
 */
/* disable prefetching in the A72 core (skip if already disabled) */
mrs x8, CORTEX_A72_CPUACTLR_EL1
tst x8, #CPUACTLR_DIS_LS_HW_PRE
b.ne 2f
dsb sy
isb
/* disable data prefetch */
orr x16, x8, #CPUACTLR_DIS_LS_HW_PRE
/* disable tlb prefetch */
orr x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
msr CORTEX_A72_CPUACTLR_EL1, x16
isb
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x7 = hnf sdcr
 * x8 = cpuactlr
 */
2:
/* save hnf-sdcr and cpuactlr to stack */
stp x7, x8, [sp, #-16]!
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 */
/* save the IPSTPCRn registers to stack */
mov x15, #PMU_IPSTPCR0_OFFSET
ldr w9, [x1, x15]
mov x16, #PMU_IPSTPCR1_OFFSET
ldr w10, [x1, x16]
mov x17, #PMU_IPSTPCR2_OFFSET
ldr w11, [x1, x17]
mov x18, #PMU_IPSTPCR3_OFFSET
ldr w12, [x1, x18]
mov x19, #PMU_IPSTPCR4_OFFSET
ldr w13, [x1, x19]
mov x20, #PMU_IPSTPCR5_OFFSET
ldr w14, [x1, x20]
stp x9, x10, [sp, #-16]!
stp x11, x12, [sp, #-16]!
stp x13, x14, [sp, #-16]!
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x15 = PMU_IPSTPCR0_OFFSET
 * x16 = PMU_IPSTPCR1_OFFSET
 * x17 = PMU_IPSTPCR2_OFFSET
 * x18 = PMU_IPSTPCR3_OFFSET
 * x19 = PMU_IPSTPCR4_OFFSET
 * x20 = PMU_IPSTPCR5_OFFSET
 */
/* load the full clock mask for IPSTPCR0 */
ldr x3, =DEVDISR1_MASK
/* get the exclusions */
mov x21, #PMU_IPPDEXPCR0_OFFSET
ldr w4, [x1, x21]
/* apply the exclusions to the mask */
bic w7, w3, w4
/* stop the clocks in IPSTPCR0 */
str w7, [x1, x15]
/* use same procedure for IPSTPCR1-IPSTPCR5 */
/* stop the clocks in IPSTPCR1 */
ldr x5, =DEVDISR2_MASK
mov x21, #PMU_IPPDEXPCR1_OFFSET
ldr w6, [x1, x21]
bic w8, w5, w6
str w8, [x1, x16]
/* stop the clocks in IPSTPCR2 */
ldr x3, =DEVDISR3_MASK
mov x21, #PMU_IPPDEXPCR2_OFFSET
ldr w4, [x1, x21]
bic w9, w3, w4
str w9, [x1, x17]
/* stop the clocks in IPSTPCR3 */
ldr x5, =DEVDISR4_MASK
mov x21, #PMU_IPPDEXPCR3_OFFSET
ldr w6, [x1, x21]
bic w10, w5, w6
str w10, [x1, x18]
/* stop the clocks in IPSTPCR4
 * - exclude the ddr clocks as we are currently executing
 * out of *some* memory, might be ddr
 * - exclude the OCRAM clk so that we retain any code/data in
 * OCRAM
 * - may need to exclude the debug clock if we are testing
 */
ldr x3, =DEVDISR5_MASK
mov w6, #DEVDISR5_MASK_ALL_MEM
bic w3, w3, w6
mov w5, #POLICY_DEBUG_ENABLE
cbz w5, 3f
mov w6, #DEVDISR5_MASK_DBG
bic w3, w3, w6
3:
mov x21, #PMU_IPPDEXPCR4_OFFSET
ldr w4, [x1, x21]
bic w11, w3, w4
str w11, [x1, x19]
/* stop the clocks in IPSTPCR5 */
ldr x5, =DEVDISR6_MASK
mov x21, #PMU_IPPDEXPCR5_OFFSET
ldr w6, [x1, x21]
bic w12, w5, w6
str w12, [x1, x20]
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x7 = IPSTPCR0
 * x8 = IPSTPCR1
 * x9 = IPSTPCR2
 * x10 = IPSTPCR3
 * x11 = IPSTPCR4
 * x12 = IPSTPCR5
 */
/* each ack poll below is bounded by CLOCK_RETRY_CNT iterations;
 * on timeout we simply proceed
 */
/* poll until the clocks are stopped in IPSTPACKSR0 */
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR0_OFFSET
4:
ldr w5, [x1, x21]
cmp w5, w7
b.eq 5f
sub w4, w4, #1
cbnz w4, 4b
/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR1_OFFSET
6:
ldr w5, [x1, x21]
cmp w5, w8
b.eq 7f
sub w4, w4, #1
cbnz w4, 6b
/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR2_OFFSET
8:
ldr w5, [x1, x21]
cmp w5, w9
b.eq 9f
sub w4, w4, #1
cbnz w4, 8b
/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR3_OFFSET
10:
ldr w5, [x1, x21]
cmp w5, w10
b.eq 11f
sub w4, w4, #1
cbnz w4, 10b
/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR4_OFFSET
12:
ldr w5, [x1, x21]
cmp w5, w11
b.eq 13f
sub w4, w4, #1
cbnz w4, 12b
/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
mov w4, #CLOCK_RETRY_CNT
mov x21, #PMU_IPSTPACKSR5_OFFSET
14:
ldr w5, [x1, x21]
cmp w5, w12
b.eq 15f
sub w4, w4, #1
cbnz w4, 14b
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x7 = IPSTPCR0
 * x8 = IPSTPCR1
 * x9 = IPSTPCR2
 * x10 = IPSTPCR3
 * x11 = IPSTPCR4
 * x12 = IPSTPCR5
 */
15:
mov x3, #NXP_DCFG_ADDR
/* save the devdisr registers to stack */
ldr w13, [x3, #DCFG_DEVDISR1_OFFSET]
ldr w14, [x3, #DCFG_DEVDISR2_OFFSET]
ldr w15, [x3, #DCFG_DEVDISR3_OFFSET]
ldr w16, [x3, #DCFG_DEVDISR4_OFFSET]
ldr w17, [x3, #DCFG_DEVDISR5_OFFSET]
ldr w18, [x3, #DCFG_DEVDISR6_OFFSET]
stp x13, x14, [sp, #-16]!
stp x15, x16, [sp, #-16]!
stp x17, x18, [sp, #-16]!
/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
str w7, [x3, #DCFG_DEVDISR1_OFFSET]
/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
str w8, [x3, #DCFG_DEVDISR2_OFFSET]
/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
str w9, [x3, #DCFG_DEVDISR3_OFFSET]
/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
str w10, [x3, #DCFG_DEVDISR4_OFFSET]
/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
str w11, [x3, #DCFG_DEVDISR5_OFFSET]
/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
str w12, [x3, #DCFG_DEVDISR6_OFFSET]
/* setup register values for the cache-only sequence */
mov x4, #NXP_DDR_ADDR
mov x5, #NXP_DDR2_ADDR
mov x6, x11
mov x7, x17
ldr x12, =PMU_CLAINACTSETR_OFFSET
ldr x13, =PMU_CLSINACTSETR_OFFSET
ldr x14, =PMU_CLAINACTCLRR_OFFSET
ldr x15, =PMU_CLSINACTCLRR_OFFSET
/* x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x3 = NXP_DCFG_ADDR
 * x4 = NXP_DDR_ADDR
 * x5 = NXP_DDR2_ADDR
 * w6 = IPSTPCR4
 * w7 = DEVDISR5
 * x12 = PMU_CLAINACTSETR_OFFSET
 * x13 = PMU_CLSINACTSETR_OFFSET
 * x14 = PMU_CLAINACTCLRR_OFFSET
 * x15 = PMU_CLSINACTCLRR_OFFSET
 */
/* lock the debug interface unless debug policy is enabled */
mov x8, #POLICY_DEBUG_ENABLE
cbnz x8, 29f
/* force the debug interface to be quiescent */
mrs x9, OSDLR_EL1
orr x9, x9, #0x1
msr OSDLR_EL1, x9
/* enter the cache-only sequence */
29:
bl final_pwrdown
/* when we are here, the core has come out of wfi and the
 * ddr is back up
 */
mov x8, #POLICY_DEBUG_ENABLE
cbnz x8, 30f
/* restart the debug interface */
mrs x9, OSDLR_EL1
mov x10, #1
bic x9, x9, x10
msr OSDLR_EL1, x9
/* get saved DEVDISR regs off stack */
30:
ldp x17, x18, [sp], #16
ldp x15, x16, [sp], #16
ldp x13, x14, [sp], #16
/* restore DEVDISR regs (reverse order of power-down) */
str w18, [x3, #DCFG_DEVDISR6_OFFSET]
str w17, [x3, #DCFG_DEVDISR5_OFFSET]
str w16, [x3, #DCFG_DEVDISR4_OFFSET]
str w15, [x3, #DCFG_DEVDISR3_OFFSET]
str w14, [x3, #DCFG_DEVDISR2_OFFSET]
str w13, [x3, #DCFG_DEVDISR1_OFFSET]
isb
/* get saved IPSTPCRn regs off stack */
ldp x13, x14, [sp], #16
ldp x11, x12, [sp], #16
ldp x9, x10, [sp], #16
/* restore IPSTPCRn regs */
mov x15, #PMU_IPSTPCR5_OFFSET
str w14, [x1, x15]
mov x16, #PMU_IPSTPCR4_OFFSET
str w13, [x1, x16]
mov x17, #PMU_IPSTPCR3_OFFSET
str w12, [x1, x17]
mov x18, #PMU_IPSTPCR2_OFFSET
str w11, [x1, x18]
mov x19, #PMU_IPSTPCR1_OFFSET
str w10, [x1, x19]
mov x20, #PMU_IPSTPCR0_OFFSET
str w9, [x1, x20]
isb
/* poll on IPSTPACKCRn regs til IP clocks are restarted
 * (bounded by CLOCK_RETRY_CNT per register, as above)
 */
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR5_OFFSET
16:
ldr w5, [x1, x15]
and w5, w5, w14
cbz w5, 17f
sub w4, w4, #1
cbnz w4, 16b
17:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR4_OFFSET
18:
ldr w5, [x1, x15]
and w5, w5, w13
cbz w5, 19f
sub w4, w4, #1
cbnz w4, 18b
19:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR3_OFFSET
20:
ldr w5, [x1, x15]
and w5, w5, w12
cbz w5, 21f
sub w4, w4, #1
cbnz w4, 20b
21:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR2_OFFSET
22:
ldr w5, [x1, x15]
and w5, w5, w11
cbz w5, 23f
sub w4, w4, #1
cbnz w4, 22b
23:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR1_OFFSET
24:
ldr w5, [x1, x15]
and w5, w5, w10
cbz w5, 25f
sub w4, w4, #1
cbnz w4, 24b
25:
mov w4, #CLOCK_RETRY_CNT
mov x15, #PMU_IPSTPACKSR0_OFFSET
26:
ldr w5, [x1, x15]
and w5, w5, w9
cbz w5, 27f
sub w4, w4, #1
cbnz w4, 26b
27:
/* disable the stop-request-override */
mov x8, #PMU_POWMGTDCR0_OFFSET
mov w9, #POWMGTDCR_STP_OV_EN
str w9, [x2, x8]
isb
/* get hnf-sdcr and cpuactlr off stack */
ldp x7, x8, [sp], #16
/* restore cpuactlr */
msr CORTEX_A72_CPUACTLR_EL1, x8
isb
/* restore snooping in the hnf nodes */
ldr x9, =NXP_CCN_HN_F_0_ADDR
mov x6, #CCN_HNF_NODE_COUNT
28:
str x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
sub x6, x6, #1
add x9, x9, #CCN_HNF_OFFSET
cbnz x6, 28b
isb
mov x30, x28
ret
endfunc _soc_sys_pwrdn_wfi
/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down
 * (undoes the cpuectlr programming done in _soc_sys_prep_pwrdn)
 * in: x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_sys_exit_pwrdn
mrs x1, CORTEX_A72_ECTLR_EL1
/* make sure the smp bit is set */
orr x1, x1, #CPUECTLR_SMPEN_MASK
/* clr the retention control */
mov x2, #CPUECTLR_RET_8CLK
bic x1, x1, x2
/* enable tablewalk prefetch */
mov x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
bic x1, x1, x2
msr CORTEX_A72_ECTLR_EL1, x1
isb
ret
endfunc _soc_sys_exit_pwrdn
/* Function will pwrdown ddr and the final core - it will do this
 * by loading itself into the icache and then executing from there
 * Technique: the first pass (x0 = 0) jumps through the touch_line_N
 * labels so every cache line of this function is fetched into the
 * icache; the second pass (x0 = 1) executes the lines for real, so
 * no instruction fetch touches ddr while ddr is in self-refresh.
 * in:
 * x0 = core mask
 * x1 = NXP_PMU_CCSR_ADDR
 * x2 = NXP_PMU_DCSR_ADDR
 * x3 = NXP_DCFG_ADDR
 * x4 = NXP_DDR_ADDR
 * x5 = NXP_DDR2_ADDR
 * w6 = IPSTPCR4
 * w7 = DEVDISR5
 * x12 = PMU_CLAINACTSETR_OFFSET
 * x13 = PMU_CLSINACTSETR_OFFSET
 * x14 = PMU_CLAINACTCLRR_OFFSET
 * x15 = PMU_CLSINACTCLRR_OFFSET
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16,
 * x17, x18
 */
/* 4Kb aligned */
.align 12
func final_pwrdown
/* x0 = 0 selects the icache-warming pass */
mov x0, xzr
b touch_line_0
start_line_0:
mov x0, #1
/* put ddr controller 1 into self-refresh */
ldr w8, [x4, #DDR_CFG_2_OFFSET]
orr w8, w8, #CFG_2_FORCE_REFRESH
str w8, [x4, #DDR_CFG_2_OFFSET]
/* put ddr controller 2 into self-refresh */
ldr w8, [x5, #DDR_CFG_2_OFFSET]
orr w8, w8, #CFG_2_FORCE_REFRESH
str w8, [x5, #DDR_CFG_2_OFFSET]
/* stop the clocks in both ddr controllers */
mov w10, #DEVDISR5_MASK_DDR
mov x16, #PMU_IPSTPCR4_OFFSET
orr w9, w6, w10
str w9, [x1, x16]
isb
mov x17, #PMU_IPSTPACKSR4_OFFSET
touch_line_0:
cbz x0, touch_line_1
start_line_1:
/* poll IPSTPACKSR4 until
 * ddr controller clocks are stopped.
 */
1:
ldr w8, [x1, x17]
and w8, w8, w10
cmp w8, w10
b.ne 1b
/* shut down power to the ddr controllers */
orr w9, w7, #DEVDISR5_MASK_DDR
str w9, [x3, #DCFG_DEVDISR5_OFFSET]
/* disable cluster acp ports */
mov w8, #CLAINACT_DISABLE_ACP
str w8, [x1, x12]
/* disable skyros ports */
mov w9, #CLSINACT_DISABLE_SKY
str w9, [x1, x13]
isb
touch_line_1:
cbz x0, touch_line_2
start_line_2:
isb
3:
wfi
/* if we are here then we are awake
 * - bring this device back up
 */
/* enable skyros ports */
mov w9, #CLSINACT_DISABLE_SKY
str w9, [x1, x15]
/* enable acp ports */
mov w8, #CLAINACT_DISABLE_ACP
str w8, [x1, x14]
isb
/* bring up the ddr controllers */
str w7, [x3, #DCFG_DEVDISR5_OFFSET]
isb
str w6, [x1, x16]
isb
nop
touch_line_2:
cbz x0, touch_line_3
start_line_3:
/* poll IPSTPACKSR4 until
 * ddr controller clocks are running
 */
mov w10, #DEVDISR5_MASK_DDR
2:
ldr w8, [x1, x17]
and w8, w8, w10
cbnz w8, 2b
/* take ddr controller 2 out of self-refresh */
mov w8, #CFG_2_FORCE_REFRESH
ldr w9, [x5, #DDR_CFG_2_OFFSET]
bic w9, w9, w8
str w9, [x5, #DDR_CFG_2_OFFSET]
/* take ddr controller 1 out of self-refresh */
ldr w9, [x4, #DDR_CFG_2_OFFSET]
bic w9, w9, w8
str w9, [x4, #DDR_CFG_2_OFFSET]
isb
/* nops pad this cache line to keep the touch layout intact */
nop
nop
nop
touch_line_3:
cbz x0, start_line_0
/* execute here after ddr is back up */
ret
endfunc final_pwrdown
/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
* to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
* are to be held in reset
* in: none
* out: x0 = #CLUSTER_3_NORMAL, cluster 3 treated normal
* x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
* uses x0, x1, x2
*/
func cluster3InReset
	/* Read RCW_SR27 and pick the return value with a conditional
	 * select instead of a branch: the RCW bit set means the cluster 3
	 * cores must be held in reset.
	 */
	mov	x1, #NXP_DCFG_ADDR
	ldr	w2, [x1, #RCW_SR27_OFFSET]
	mov	x0, #CLUSTER_3_NORMAL
	mov	x1, #CLUSTER_3_IN_RESET
	tst	w2, #CLUSTER_3_RCW_BIT
	csel	x0, x1, x0, ne
	ret
endfunc cluster3InReset
/* Function checks to see if cores which are to be disabled have been
* released from reset - if not, it releases them
* Note: there may be special handling of cluster 3 cores depending upon the
* sys clk frequency
* in: none
* out: none
* uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
*/
/* Walk every core; any core marked disabled in COREDISABLEDSR that is
 * still held in reset (BRR bit clear) is released and recorded as
 * CORE_DISABLED in the per-core data area. Cluster 3 cores may instead
 * be kept in reset (per cluster3InReset) and are then marked disabled
 * in COREDISR/COREDISABLEDSR so the rest of the stack ignores them.
 */
func release_disabled
/* save the link register - this function makes bl calls */
mov x9, x30
/* check if we need to keep cluster 3 cores in reset */
bl cluster3InReset /* 0-2 */
mov x8, x0
/* x8 = cluster 3 handling */
/* read COREDISABLESR */
mov x0, #NXP_DCFG_ADDR
ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
cmp x8, #CLUSTER_3_IN_RESET
b.ne 4f
/* the cluster 3 cores are to be held in reset, so remove
 * them from the disable mask
 */
bic x4, x4, #CLUSTER_3_CORES_MASK
4:
/* get the number of cpus on this device */
mov x6, #PLATFORM_CORE_COUNT
mov x0, #NXP_RESET_ADDR
ldr w5, [x0, #BRR_OFFSET]
/* load the core mask for the first core */
mov x7, #1
/* x4 = COREDISABLESR
 * x5 = BRR
 * x6 = loop count
 * x7 = core mask bit
 */
2:
/* check if the core is to be disabled */
tst x4, x7
b.eq 1f
/* see if disabled cores have already been released from reset */
tst x5, x7
b.ne 5f
/* if core has not been released, then release it (0-3) */
mov x0, x7
bl _soc_core_release
/* record the core state in the data area (0-3) */
mov x0, x7
mov x1, #CORE_STATE_DATA
mov x2, #CORE_DISABLED
bl _setCoreData
1:
/* see if this is a cluster 3 core */
mov x3, #CLUSTER_3_CORES_MASK
tst x3, x7
b.eq 5f
/* this is a cluster 3 core - see if it needs to be held in reset */
cmp x8, #CLUSTER_3_IN_RESET
b.ne 5f
/* record the core state as disabled in the data area (0-3) */
mov x0, x7
mov x1, #CORE_STATE_DATA
mov x2, #CORE_DISABLED
bl _setCoreData
5:
/* decrement the counter */
subs x6, x6, #1
b.le 3f
/* shift the core mask to the next core */
lsl x7, x7, #1
/* continue */
b 2b
3:
cmp x8, #CLUSTER_3_IN_RESET
b.ne 6f
/* we need to hold the cluster 3 cores in reset,
 * so mark them in the COREDISR and COREDISABLEDSR registers as
 * "disabled", and the rest of the sw stack will leave them alone
 * thinking that they have been disabled
 */
mov x0, #NXP_DCFG_ADDR
ldr w1, [x0, #DCFG_COREDISR_OFFSET]
orr w1, w1, #CLUSTER_3_CORES_MASK
str w1, [x0, #DCFG_COREDISR_OFFSET]
ldr w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
orr w2, w2, #CLUSTER_3_CORES_MASK
str w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
dsb sy
isb
#if (PSCI_TEST)
/* x0 = NXP_DCFG_ADDR : read COREDISABLESR */
ldr w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
/* read COREDISR */
ldr w3, [x0, #DCFG_COREDISR_OFFSET]
#endif
6:
mov x30, x9
ret
endfunc release_disabled
/* Function sets up the TrustZone Protection Controller (TZPC)
 * in: none
 * out: none
 * uses x0, x1
 */
func init_tzpc
	/* Open all TZPC-protected decode regions to the Non-Secure
	 * world by writing 0xFF to bits [7:0] of each DECPROT "set"
	 * register. The 0xFF pattern is loaded once and reused.
	 */
	mov	w0, #0xFF
	ldr	x1, =TZPCDECPROT_0_SET_BASE
	str	w0, [x1]
	ldr	x1, =TZPCDECPROT_1_SET_BASE
	str	w0, [x1]
	ldr	x1, =TZPCDECPROT_2_SET_BASE
	str	w0, [x1]
	/* Secure RAM region size register: writing zero means "no
	 * secure region", i.e. the entire SRAM is Non-Secure.
	 */
	ldr	x1, =TZPC_BASE
	str	wzr, [x1]
	ret
endfunc init_tzpc
/* write a register in the DCFG block
* in: x0 = offset
* in: w1 = value to write
* uses x0, x1, x2
*/
/* Store the 32-bit value in w1 to the DCFG register at byte offset x0. */
func _write_reg_dcfg
ldr x2, =NXP_DCFG_ADDR
str w1, [x2, x0]
ret
endfunc _write_reg_dcfg
/* read a register in the DCFG block
* in: x0 = offset
* out: w0 = value read
* uses x0, x1, x2
*/
func _read_reg_dcfg
	/* Load the DCFG block base, then read the 32-bit register at
	 * byte offset x0 straight into the return register (x1 stays a
	 * documented scratch register; callers may not rely on it).
	 */
	ldr	x2, =NXP_DCFG_ADDR
	ldr	w0, [x2, x0]
	ret
endfunc _read_reg_dcfg
/* Function returns an mpidr value for a core, given a core_mask_lsb
* in: x0 = core mask lsb
* out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
* uses x0, x1
*/
/* Convert a single-bit core mask to an MPIDR-style affinity value.
 * The mapping implies two cores per cluster: aff0 = core# & 1,
 * aff1 (bits [15:8]) = core# >> 1.
 */
func get_mpidr_value
/* convert a core mask to an SoC core number: core# = 31 - clz(mask) */
clz w0, w0
mov w1, #31
sub w0, w1, w0
/* get the mpidr core number from the SoC core number:
 * w1 = affinity0 = low bit of the SoC core number
 */
mov w1, wzr
tst x0, #1
b.eq 1f
orr w1, w1, #1
1:
/* extract the cluster number (core# / 2) into affinity1, bits [15:8] */
lsr w0, w0, #1
orr w0, w1, w0, lsl #8
ret
endfunc get_mpidr_value
/* Function returns the redistributor base address for the core specified
* in x1
* in: x0 - core mask lsb of specified core
* out: x0 = redistributor rd base address for specified core
* uses x0, x1, x2
*/
func get_gic_rd_base
	/* SoC core number = bit position of the core mask
	 * = 0x20 - clz(mask) - 1
	 */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1
	/* rd base = GICR base + core number * per-core frame stride,
	 * computed with a single multiply-add instead of a loop.
	 */
	ldr	x0, =NXP_GICR_ADDR
	mov	x1, #GIC_RD_OFFSET
	madd	x0, x2, x1, x0
	ret
endfunc get_gic_rd_base
/* Function returns the redistributor base address for the core specified
* in x1
* in: x0 - core mask lsb of specified core
* out: x0 = redistributor sgi base address for specified core
* uses x0, x1, x2
*/
func get_gic_sgi_base
	/* SoC core number = bit position of the core mask
	 * = 0x20 - clz(mask) - 1
	 */
	clz	w1, w0
	mov	w2, #0x20
	sub	w2, w2, w1
	sub	w2, w2, #1
	/* sgi base = GICR SGI base + core number * per-core frame
	 * stride, computed with a single multiply-add instead of a loop.
	 */
	ldr	x0, =NXP_GICR_SGI_ADDR
	mov	x1, #GIC_SGI_OFFSET
	madd	x0, x2, x1, x0
	ret
endfunc get_gic_sgi_base
/* Function writes a register in the RESET block
* in: x0 = offset
* in: w1 = value to write
* uses x0, x1, x2
*/
/* Store the 32-bit value in w1 to the RESET-block register at byte offset x0. */
func _write_reg_reset
ldr x2, =NXP_RESET_ADDR
str w1, [x2, x0]
ret
endfunc _write_reg_reset
/* Function reads a register in the RESET block
* in: x0 = offset
* out: w0 = value read
* uses x0, x1
*/
/* Return in w0 the 32-bit RESET-block register at byte offset x0. */
func _read_reg_reset
ldr x1, =NXP_RESET_ADDR
ldr w0, [x1, x0]
ret
endfunc _read_reg_reset
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <arch.h>
#include <asm_macros.S>
#include <platform_def.h>
.globl plat_secondary_cold_boot_setup
.globl plat_is_my_cpu_primary
.globl plat_reset_handler
.globl platform_mem_init
/* Stub: nothing to initialize for the second memory region here.
 * NOTE(review): this symbol is not in the .globl list above - confirm
 * whether local-only linkage is intended.
 */
func platform_mem1_init
ret
endfunc platform_mem1_init
/* Mandatory porting hook; a no-op on this platform. */
func platform_mem_init
ret
endfunc platform_mem_init
/* Hook called from plat_reset_handler for platform-level erratum
 * workarounds; currently empty.
 */
func apply_platform_errata
ret
endfunc apply_platform_errata
/* Early reset hook: applies platform errata and, in BL31 builds where
 * POLICY_SMMU_PAGESZ_64K is non-zero, programs the 64K SMMU page size.
 * x29 is used as scratch to preserve the link register across the
 * bl calls.
 */
func plat_reset_handler
mov x29, x30
bl apply_platform_errata
#if defined(IMAGE_BL31)
ldr x0, =POLICY_SMMU_PAGESZ_64K
cbz x0, 1f
/* Set the SMMU page size in the sACR register */
bl _set_smmu_pagesz_64
#endif
/* label kept outside the #if so both configurations assemble */
1:
mov x30, x29
ret
endfunc plat_reset_handler
/* void plat_secondary_cold_boot_setup (void);
*
* This function performs any platform specific actions
* needed for a secondary cpu after a cold reset e.g
* mark the cpu's presence, mechanism to place it in a
* holding pen etc.
*/
/* Secondary cores never take the cold-boot path on lx2160a, so
 * reaching this function is unexpected: spin forever.
 */
func plat_secondary_cold_boot_setup
/* lx2160a does not do cold boot for secondary CPU */
cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
/* unsigned int plat_is_my_cpu_primary (void);
*
* Find out whether the current cpu is the primary
* cpu.
*/
func plat_is_my_cpu_primary
	/* The primary core is the one whose cluster and cpu affinity
	 * fields are both zero; return w0 = 1 if primary, else 0.
	 * tst performs the same masked comparison as and+cmp.
	 */
	mrs	x0, mpidr_el1
	tst	x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
	cset	w0, eq
	ret
endfunc plat_is_my_cpu_primary
/*
* Copyright 2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
.section .text, "ax"
#include <asm_macros.S>
#ifndef NXP_COINED_BB
#include <flash_info.h>
#include <fspi.h>
#endif
#include <regs.h>
#ifdef NXP_COINED_BB
#include <snvs.h>
#endif
#include <plat_warm_rst.h>
#include <platform_def.h>
#define SDRAM_CFG 0x110
#define SDRAM_CFG_2 0x114
#define SDRAM_MD_CNTL 0x120
#define SDRAM_INTERVAL 0x124
#define TIMING_CFG_10 0x258
#define DEBUG_2 0xF04
#define DEBUG_26 0xF64
#define DDR_DSR2 0xB24
#define DDR_CNTRLR_2 0x2
#define COUNT_100 1000
.globl _soc_sys_warm_reset
.align 12
/* Put the DDR controller(s) into self-refresh, record warm reset as
 * the reset cause (in an SNVS LP GPR bit or via a FlexSPI flash write,
 * depending on build options), then trigger a software reset.
 * Structured as cache-line sized chunks: a first pass with x3 == 0
 * hops from touch_lineN to touch_lineN+1 so every line of this code is
 * brought into the I-cache before the second pass (x3 == 1) executes
 * it - presumably so no instruction fetch hits DDR while it is in
 * self-refresh (TODO confirm against the SoC manual).
 */
func _soc_sys_warm_reset
mov x3, xzr
b touch_line0
start_line0:
mov x3, #1
/* halt traffic on each DDR controller and wait for it to go idle */
mov x2, #NUM_OF_DDRC
ldr x1, =NXP_DDR_ADDR
1:
ldr w0, [x1, #SDRAM_CFG]
orr w0, w0, #SDRAM_CFG_MEM_HLT
str w0, [x1, #SDRAM_CFG]
2:
ldr w0, [x1, #DEBUG_2]
and w0, w0, #DDR_DBG_2_MEM_IDLE
cbz w0, 2b
ldr w0, [x1, #DEBUG_26]
orr w0, w0, #DDR_DEBUG_26_BIT_12
orr w0, w0, #DDR_DEBUG_26_BIT_13
orr w0, w0, #DDR_DEBUG_26_BIT_14
touch_line0:
cbz x3, touch_line1
orr w0, w0, #DDR_DEBUG_26_BIT_15
orr w0, w0, #DDR_DEBUG_26_BIT_16
str w0, [x1, #DEBUG_26]
/* force the controller into self-refresh */
ldr w0, [x1, #SDRAM_CFG_2]
orr w0, w0, #SDRAM_CFG2_FRC_SR
str w0, [x1, #SDRAM_CFG_2]
/* write-1-to-clear PHY_INIT_CMPLT, then wait for it to read 0 */
3:
ldr w0, [x1, #DDR_DSR2]
orr w0, w0, #DDR_DSR_2_PHY_INIT_CMPLT
str w0, [x1, #DDR_DSR2]
ldr w0, [x1, #DDR_DSR2]
and w0, w0, #DDR_DSR_2_PHY_INIT_CMPLT
cbnz w0, 3b
/* clear the refresh interval */
ldr w0, [x1, #SDRAM_INTERVAL]
and w0, w0, #SDRAM_INTERVAL_REFINT_CLEAR
str w0, [x1, #SDRAM_INTERVAL]
touch_line1:
cbz x3, touch_line2
ldr w0, [x1, #SDRAM_MD_CNTL]
orr w0, w0, #MD_CNTL_CKE(1)
orr w0, w0, #MD_CNTL_MD_EN
str w0, [x1, #SDRAM_MD_CNTL]
ldr w0, [x1, #TIMING_CFG_10]
orr w0, w0, #DDR_TIMING_CFG_10_T_STAB
str w0, [x1, #TIMING_CFG_10]
/* release forced self-refresh and wait for PHY init complete */
ldr w0, [x1, #SDRAM_CFG_2]
and w0, w0, #SDRAM_CFG2_FRC_SR_CLEAR
str w0, [x1, #SDRAM_CFG_2]
4:
ldr w0, [x1, #DDR_DSR2]
and w0, w0, #DDR_DSR_2_PHY_INIT_CMPLT
cbz w0, 4b
nop
touch_line2:
cbz x3, touch_line3
ldr w0, [x1, #DEBUG_26]
orr w0, w0, #DDR_DEBUG_26_BIT_25
and w0, w0, #DDR_DEBUG_26_BIT_24_CLEAR
str w0, [x1, #DEBUG_26]
/* repeat the sequence once for the second DDR controller */
cmp x2, #DDR_CNTRLR_2
b.ne 5f
ldr x1, =NXP_DDR2_ADDR
mov x2, xzr
b 1b
5:
/* crude delay loop (COUNT_100 is defined above as 1000 iterations,
 * despite its name)
 */
mov x5, xzr
6:
add x5, x5, #1
cmp x5, #COUNT_100
b.ne 6b
nop
touch_line3:
cbz x3, touch_line4
#ifdef NXP_COINED_BB
ldr x1, =NXP_SNVS_ADDR
ldr w0, [x1, #NXP_APP_DATA_LP_GPR_OFFSET]
/* When warm boot is enabled, bit zero of SNVS LP GPR register 0
 * is used to save the status of warm-reset as the boot cause.
 */
orr w0, w0, #(1 << NXP_LPGPR_ZEROTH_BIT)
/* write back */
str w0, [x1, #NXP_APP_DATA_LP_GPR_OFFSET]
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* touch_line4 is defined in both branches of this #ifdef/#elif;
 * only one branch is ever assembled, so the label is unique.
 */
touch_line4:
cbz x3, touch_line6
#elif !(ERLY_WRM_RST_FLG_FLSH_UPDT)
/* trigger the queued FlexSPI IP command and wait for completion */
ldr x1, =NXP_FLEXSPI_ADDR
ldr w0, [x1, #FSPI_IPCMD]
orr w0, w0, #FSPI_IPCMD_TRG_MASK
str w0, [x1, #FSPI_IPCMD]
7:
ldr w0, [x1, #FSPI_INTR]
and w0, w0, #FSPI_INTR_IPCMDDONE_MASK
cmp w0, #0
b.eq 7b
/* clear the TX FIFO and the command-done interrupt */
ldr w0, [x1, #FSPI_IPTXFCR]
orr w0, w0, #FSPI_IPTXFCR_CLR
str w0, [x1, #FSPI_IPTXFCR]
ldr w0, [x1, #FSPI_INTR]
orr w0, w0, #FSPI_INTR_IPCMDDONE_MASK
str w0, [x1, #FSPI_INTR]
nop
touch_line4:
cbz x3, touch_line5
/* The flexspi driver has an api is_flash_busy().
 * The implementation of that api will not fit in one cache
 * line, so nop-cycles are used instead to simulate the wait
 * time for flash write completion.
 *
 * Note: this wait time varies from flash to flash.
 */
mov x0, #FLASH_WR_COMP_WAIT_BY_NOP_COUNT
8:
sub x0, x0, #1
nop
cmp x0, #0
b.ne 8b
nop
nop
nop
nop
nop
nop
nop
nop
nop
touch_line5:
cbz x3, touch_line6
#endif
ldr x2, =NXP_RST_ADDR
/* clear the RST_REQ_MSK and SW_RST_REQ */
mov w0, #0x00000000
str w0, [x2, #RSTCNTL_OFFSET]
/* initiate the sw reset request */
mov w0, #SW_RST_REQ_INIT
str w0, [x2, #RSTCNTL_OFFSET]
/* In case this address range is mapped as cacheable,
 * flush the write out of the dcaches.
 */
add x2, x2, #RSTCNTL_OFFSET
dc cvac, x2
dsb st
isb
/* Function does not return */
b .
nop
nop
nop
nop
nop
nop
nop
touch_line6:
cbz x3, start_line0
endfunc _soc_sys_warm_reset
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Packaging of the DDR PHY training firmware into a separate FIP image
# (the fip_ddr target). Each *_1D/*_2D IMEM/DMEM variable defaults to the
# corresponding binary under DDR_PHY_BIN_PATH unless the caller set it.
DDR_PHY_BIN_PATH ?= ./ddr-phy-binary/lx2160a
ifeq (${DDR_IMEM_UDIMM_1D},)
DDR_IMEM_UDIMM_1D := ${DDR_PHY_BIN_PATH}/ddr4_pmu_train_imem.bin
endif
ifeq (${DDR_IMEM_UDIMM_2D},)
DDR_IMEM_UDIMM_2D := ${DDR_PHY_BIN_PATH}/ddr4_2d_pmu_train_imem.bin
endif
ifeq (${DDR_DMEM_UDIMM_1D},)
DDR_DMEM_UDIMM_1D := ${DDR_PHY_BIN_PATH}/ddr4_pmu_train_dmem.bin
endif
ifeq (${DDR_DMEM_UDIMM_2D},)
DDR_DMEM_UDIMM_2D := ${DDR_PHY_BIN_PATH}/ddr4_2d_pmu_train_dmem.bin
endif
ifeq (${DDR_IMEM_RDIMM_1D},)
DDR_IMEM_RDIMM_1D := ${DDR_PHY_BIN_PATH}/ddr4_rdimm_pmu_train_imem.bin
endif
ifeq (${DDR_IMEM_RDIMM_2D},)
DDR_IMEM_RDIMM_2D := ${DDR_PHY_BIN_PATH}/ddr4_rdimm2d_pmu_train_imem.bin
endif
ifeq (${DDR_DMEM_RDIMM_1D},)
DDR_DMEM_RDIMM_1D := ${DDR_PHY_BIN_PATH}/ddr4_rdimm_pmu_train_dmem.bin
endif
ifeq (${DDR_DMEM_RDIMM_2D},)
DDR_DMEM_RDIMM_2D := ${DDR_PHY_BIN_PATH}/ddr4_rdimm2d_pmu_train_dmem.bin
endif
$(shell mkdir -p '${BUILD_PLAT}')
# Secure builds get a distinct output name for the DDR FIP.
ifeq (${DDR_FIP_NAME},)
ifeq (${TRUSTED_BOARD_BOOT},1)
DDR_FIP_NAME := ddr_fip_sec.bin
else
DDR_FIP_NAME := ddr_fip.bin
endif
endif
# Without trusted board boot the raw firmware binaries are packed
# directly; with TBB the ddr_sb.mk / ddr_tbbr.mk includes below add the
# signed images / certificates instead.
ifneq (${TRUSTED_BOARD_BOOT},1)
DDR_FIP_ARGS += --ddr-immem-udimm-1d ${DDR_IMEM_UDIMM_1D} \
--ddr-immem-udimm-2d ${DDR_IMEM_UDIMM_2D} \
--ddr-dmmem-udimm-1d ${DDR_DMEM_UDIMM_1D} \
--ddr-dmmem-udimm-2d ${DDR_DMEM_UDIMM_2D} \
--ddr-immem-rdimm-1d ${DDR_IMEM_RDIMM_1D} \
--ddr-immem-rdimm-2d ${DDR_IMEM_RDIMM_2D} \
--ddr-dmmem-rdimm-1d ${DDR_DMEM_RDIMM_1D} \
--ddr-dmmem-rdimm-2d ${DDR_DMEM_RDIMM_2D}
endif
ifeq (${TRUSTED_BOARD_BOOT},1)
ifeq (${MBEDTLS_DIR},)
include plat/nxp/soc-lx2160a/ddr_sb.mk
else
include plat/nxp/soc-lx2160a/ddr_tbbr.mk
# Variables for use with Certificate Generation Tool
CRTTOOLPATH ?= tools/cert_create
CRTTOOL ?= ${CRTTOOLPATH}/cert_create${BIN_EXT}
ifneq (${GENERATE_COT},0)
ddr_certificates: ${DDR_CRT_DEPS} ${CRTTOOL}
	${Q}${CRTTOOL} ${DDR_CRT_ARGS}
	@${ECHO_BLANK_LINE}
	@echo "Built $@ successfully"
	@echo "DDR certificates can be found in ${BUILD_PLAT}"
	@${ECHO_BLANK_LINE}
endif
endif
endif
# Variables for use with Firmware Image Package
FIPTOOLPATH ?= tools/fiptool
FIPTOOL ?= ${FIPTOOLPATH}/fiptool${BIN_EXT}
${BUILD_PLAT}/${DDR_FIP_NAME}: ${DDR_FIP_DEPS} ${FIPTOOL}
	$(eval ${CHECK_DDR_FIP_CMD})
	${Q}${FIPTOOL} create ${DDR_FIP_ARGS} $@
	${Q}${FIPTOOL} info $@
	@${ECHO_BLANK_LINE}
	@echo "Built $@ successfully"
	@${ECHO_BLANK_LINE}
fip_ddr: ${BUILD_PLAT}/${DDR_FIP_NAME}
#
# Copyright 2021 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Secure-boot (CSF header) flavour of the DDR FIP: each firmware image
# gets a CSF header prepended (image appended at offset CSF_HDR_SZ) and
# the resulting *.sb files are packed into the DDR FIP.
ifneq (${TRUSTED_BOARD_BOOT},0)
ifeq (${GENERATE_COT},0)
DDR_FIP_ARGS += --ddr-immem-udimm-1d ${DDR_IMEM_UDIMM_1D}.sb \
--ddr-immem-udimm-2d ${DDR_IMEM_UDIMM_2D}.sb \
--ddr-dmmem-udimm-1d ${DDR_DMEM_UDIMM_1D}.sb \
--ddr-dmmem-udimm-2d ${DDR_DMEM_UDIMM_2D}.sb \
--ddr-immem-rdimm-1d ${DDR_IMEM_RDIMM_1D}.sb \
--ddr-immem-rdimm-2d ${DDR_IMEM_RDIMM_2D}.sb \
--ddr-dmmem-rdimm-1d ${DDR_DMEM_RDIMM_1D}.sb \
--ddr-dmmem-rdimm-2d ${DDR_DMEM_RDIMM_2D}.sb
endif
UDIMM_DEPS = ${DDR_IMEM_UDIMM_1D}.sb ${DDR_IMEM_UDIMM_2D}.sb ${DDR_DMEM_UDIMM_1D}.sb ${DDR_DMEM_UDIMM_2D}.sb
RDIMM_DEPS = ${DDR_IMEM_RDIMM_1D}.sb ${DDR_IMEM_RDIMM_2D}.sb ${DDR_DMEM_RDIMM_1D}.sb ${DDR_DMEM_RDIMM_2D}.sb
DDR_FIP_DEPS += ${UDIMM_DEPS}
DDR_FIP_DEPS += ${RDIMM_DEPS}
# Max Size of CSF header (CSF_HDR_SZ = 0x3000).
# Image will be appended at this offset of the header.
# Path to CST directory is required to generate the CSF header,
# and prepend it to image before fip image gets generated
ifeq (${CST_DIR},)
$(error Error: CST_DIR not set)
endif
ifeq (${DDR_INPUT_FILE},)
DDR_INPUT_FILE:= drivers/nxp/auth/csf_hdr_parser/${CSF_FILE}
endif
# Pattern rule: produce a CSF-signed image <file>.sb from <file>.
%.sb: %
	@echo " Generating CSF Header for $@ $<"
	$(CST_DIR)/create_hdr_esbc --in $< --out $@ --app_off ${CSF_HDR_SZ} \
		--app $< ${DDR_INPUT_FILE}
endif
#
# Copyright 2021 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# This file defines the keys and certificates that must be created to establish
# a Chain of Trust for the DDR FW. These definitions include the
# command line options passed to the cert_create and fiptool commands for DDR FW.
# A DDR FW key is used for signing the DDR Firmware. The DDR key is authenticated
# by the Trusted World Key. Two content certificates are created:
# For DDR RDIMM Images [ signed by DDR FW Key]
# For DDR UDIMM Images [ signed by DDR FW Key]
#
# Expected environment:
#
# BUILD_PLAT: output directory
#
# Build options added by this file:
#
# KEY_ALG
# KEY_SIZE
# TRUSTED_WORLD_KEY
# NON_TRUSTED_WORLD_KEY
#
# Copy the tbbr.mk from PLAT_TOOL_PATH/cert_create_helper
# to the ${PLAT_DIR}. So that cert_create is enabled
# to create certificates for DDR
$(shell cp ${PLAT_TOOL_PATH}/cert_create_helper/cert_create_tbbr.mk ${PLAT_DIR})
# Certificate generation tool default parameters
DDR_FW_CERT := ${BUILD_PLAT}/ddr_fw_key_cert.crt
# Default non-volatile counter values (overridable by the platform)
TFW_NVCTR_VAL ?= 0
NTFW_NVCTR_VAL ?= 0
# Pass the non-volatile counter to the cert_create tool.
# NOTE(review): NTFW_NVCTR_VAL is defined above but never forwarded -
# confirm whether --ntfw-nvctr is required for the DDR CoT.
$(eval $(call CERT_ADD_CMD_OPT,${TFW_NVCTR_VAL},--tfw-nvctr,DDR_))
$(shell mkdir -p '${BUILD_PLAT}')
ifeq (${DDR_KEY},)
DDR_KEY=${BUILD_PLAT}/ddr.pem
endif
ifeq (${TRUSTED_KEY_CERT},)
$(info Generating: Trusted key certificate as part of DDR cert creation)
TRUSTED_KEY_CERT := ${BUILD_PLAT}/trusted_key.crt
# Added both to the main FIP payload list and (DDR_ prefix) to the DDR FIP.
$(eval $(call TOOL_ADD_PAYLOAD,${TRUSTED_KEY_CERT},--trusted-key-cert,))
$(eval $(call TOOL_ADD_PAYLOAD,${TRUSTED_KEY_CERT},--trusted-key-cert,,DDR_))
else
$(info Using: Trusted key certificate as part of DDR cert creation)
DDR_FIP_ARGS += --trusted-key-cert ${TRUSTED_KEY_CERT}
endif
# Add the keys to the cert_create command line options (private keys are NOT
# packed in the FIP). Developers can use their own keys by specifying the proper
# build option in the command line when building the Trusted Firmware
$(if ${KEY_ALG},$(eval $(call CERT_ADD_CMD_OPT,${KEY_ALG},--key-alg,DDR_)))
$(if ${KEY_SIZE},$(eval $(call CERT_ADD_CMD_OPT,${KEY_SIZE},--key-size,DDR_)))
$(if ${HASH_ALG},$(eval $(call CERT_ADD_CMD_OPT,${HASH_ALG},--hash-alg,DDR_)))
$(if ${ROT_KEY},$(eval $(call CERT_ADD_CMD_OPT,${ROT_KEY},--rot-key,DDR_)))
$(if ${TRUSTED_WORLD_KEY},$(eval $(call CERT_ADD_CMD_OPT,${TRUSTED_WORLD_KEY},--trusted-world-key,DDR_)))
# Fix: no space before the DDR_ prefix argument - $(call) preserves
# argument whitespace, so " DDR_" would not match the other calls.
$(if ${NON_TRUSTED_WORLD_KEY},$(eval $(call CERT_ADD_CMD_OPT,${NON_TRUSTED_WORLD_KEY},--non-trusted-world-key,DDR_)))
# Add the DDR CoT (key cert + img cert)
$(if ${DDR_KEY},$(eval $(call CERT_ADD_CMD_OPT,${DDR_KEY},--ddr-fw-key,DDR_)))
$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/ddr_fw_key.crt,--ddr-fw-key-cert,,DDR_))
$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/ddr_udimm_fw_content.crt,--ddr-udimm-fw-cert,,DDR_))
$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/ddr_rdimm_fw_content.crt,--ddr-rdimm-fw-cert,,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_IMEM_UDIMM_1D,--ddr-immem-udimm-1d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_IMEM_UDIMM_2D,--ddr-immem-udimm-2d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_DMEM_UDIMM_1D,--ddr-dmmem-udimm-1d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_DMEM_UDIMM_2D,--ddr-dmmem-udimm-2d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_IMEM_RDIMM_1D,--ddr-immem-rdimm-1d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_IMEM_RDIMM_2D,--ddr-immem-rdimm-2d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_DMEM_RDIMM_1D,--ddr-dmmem-rdimm-1d,DDR_))
$(eval $(call TOOL_ADD_IMG,DDR_DMEM_RDIMM_2D,--ddr-dmmem-rdimm-2d,DDR_))
DDR_FIP_DEPS += ddr_certificates
# Process TBB related flags
ifneq (${GENERATE_COT},0)
# Common cert_create options
ifneq (${CREATE_KEYS},0)
$(eval DDR_CRT_ARGS += -n)
ifneq (${SAVE_KEYS},0)
$(eval DDR_CRT_ARGS += -k)
endif
endif
endif
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <mmio.h>
#ifdef ERRATA_SOC_A050426
/*
 * Workaround for SoC erratum A-050426: with BIST access enabled, write
 * an alternating 0x55555555 pattern into a fixed list of internal
 * memory locations (WRIOP, EDMA, PEX1-6, QDMA, lnx1/lnx2 blocks), then
 * restore the original BIST control register values.
 * The addresses and loop counts come from the erratum description;
 * they are opaque hardware locations - do not "simplify" them.
 */
void erratum_a050426(void)
{
	uint32_t i, val3, val4;

	/* Enable BIST to access Internal memory locations */
	val3 = mmio_read_32(0x700117E60);
	mmio_write_32(0x700117E60, (val3 | 0x80000001));
	val4 = mmio_read_32(0x700117E90);
	mmio_write_32(0x700117E90, (val4 & 0xFFDFFFFF));
	/* wriop Internal Memory.*/
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x706312000 + (i * 4), 0x55555555);
		mmio_write_32(0x706312400 + (i * 4), 0x55555555);
		mmio_write_32(0x706312800 + (i * 4), 0x55555555);
		mmio_write_32(0x706314000 + (i * 4), 0x55555555);
		mmio_write_32(0x706314400 + (i * 4), 0x55555555);
		mmio_write_32(0x706314800 + (i * 4), 0x55555555);
		mmio_write_32(0x706314c00 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x706316000 + (i * 4), 0x55555555);
		mmio_write_32(0x706320000 + (i * 4), 0x55555555);
		mmio_write_32(0x706320400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 2U; i++) {
		mmio_write_32(0x70640a000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x706518000 + (i * 4), 0x55555555);
		mmio_write_32(0x706519000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x706522000 + (i * 4), 0x55555555);
		mmio_write_32(0x706522800 + (i * 4), 0x55555555);
		mmio_write_32(0x706523000 + (i * 4), 0x55555555);
		mmio_write_32(0x706523800 + (i * 4), 0x55555555);
		mmio_write_32(0x706524000 + (i * 4), 0x55555555);
		mmio_write_32(0x706524800 + (i * 4), 0x55555555);
		mmio_write_32(0x706608000 + (i * 4), 0x55555555);
		mmio_write_32(0x706608800 + (i * 4), 0x55555555);
		mmio_write_32(0x706609000 + (i * 4), 0x55555555);
		mmio_write_32(0x706609800 + (i * 4), 0x55555555);
		mmio_write_32(0x70660a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70660a800 + (i * 4), 0x55555555);
		mmio_write_32(0x70660b000 + (i * 4), 0x55555555);
		mmio_write_32(0x70660b800 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70660c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70660c800 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 2U; i++) {
		mmio_write_32(0x706718000 + (i * 4), 0x55555555);
		mmio_write_32(0x706718800 + (i * 4), 0x55555555);
	}
	/* NOTE(review): this write is outside any loop, so it uses the
	 * stale value i == 2 from the loop above (single write at
	 * base + 8). Confirm against the erratum whether a loop was
	 * intended here.
	 */
	mmio_write_32(0x706b0a000 + (i * 4), 0x55555555);
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x706b0e000 + (i * 4), 0x55555555);
		mmio_write_32(0x706b0e800 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 2U; i++) {
		mmio_write_32(0x706b10000 + (i * 4), 0x55555555);
		mmio_write_32(0x706b10400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x706b14000 + (i * 4), 0x55555555);
		mmio_write_32(0x706b14800 + (i * 4), 0x55555555);
		mmio_write_32(0x706b15000 + (i * 4), 0x55555555);
		mmio_write_32(0x706b15800 + (i * 4), 0x55555555);
	}
	/* NOTE(review): also outside any loop - uses stale i == 4 from
	 * the loop above (single write at base + 16). Confirm intent.
	 */
	mmio_write_32(0x706e12000 + (i * 4), 0x55555555);
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x706e14000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e14800 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 2U; i++) {
		mmio_write_32(0x706e16000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e16400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x706e1a000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1a800 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1b000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1b800 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1c000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1c800 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1e000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1e800 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1f000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e1f800 + (i * 4), 0x55555555);
		mmio_write_32(0x706e20000 + (i * 4), 0x55555555);
		mmio_write_32(0x706e20800 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x707108000 + (i * 4), 0x55555555);
		mmio_write_32(0x707109000 + (i * 4), 0x55555555);
		mmio_write_32(0x70710a000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 2U; i++) {
		mmio_write_32(0x70711c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70711c800 + (i * 4), 0x55555555);
		mmio_write_32(0x70711d000 + (i * 4), 0x55555555);
		mmio_write_32(0x70711d800 + (i * 4), 0x55555555);
		mmio_write_32(0x70711e000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x707120000 + (i * 4), 0x55555555);
		mmio_write_32(0x707121000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x707122000 + (i * 4), 0x55555555);
		mmio_write_32(0x70725a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70725b000 + (i * 4), 0x55555555);
		mmio_write_32(0x70725c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70725e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70725e400 + (i * 4), 0x55555555);
		mmio_write_32(0x70725e800 + (i * 4), 0x55555555);
		mmio_write_32(0x70725ec00 + (i * 4), 0x55555555);
		mmio_write_32(0x70725f000 + (i * 4), 0x55555555);
		mmio_write_32(0x70725f400 + (i * 4), 0x55555555);
		mmio_write_32(0x707340000 + (i * 4), 0x55555555);
		mmio_write_32(0x707346000 + (i * 4), 0x55555555);
		mmio_write_32(0x707484000 + (i * 4), 0x55555555);
		mmio_write_32(0x70748a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70748b000 + (i * 4), 0x55555555);
		mmio_write_32(0x70748c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70748d000 + (i * 4), 0x55555555);
	}
	/* EDMA Internal Memory.*/
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70a208000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a208800 + (i * 4), 0x55555555);
		mmio_write_32(0x70a209000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a209800 + (i * 4), 0x55555555);
	}
	/* PEX1 Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70a508000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70a520000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a528000 + (i * 4), 0x55555555);
	}
	/* PEX2 Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70a608000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70a620000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a628000 + (i * 4), 0x55555555);
	}
	/* PEX3 Internal Memory.*/
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70a708000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a728000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a730000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a738000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a748000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a758000 + (i * 4), 0x55555555);
	}
	/* PEX4 Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70a808000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70a820000 + (i * 4), 0x55555555);
		mmio_write_32(0x70a828000 + (i * 4), 0x55555555);
	}
	/* PEX5 Internal Memory.*/
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70aa08000 + (i * 4), 0x55555555);
		mmio_write_32(0x70aa28000 + (i * 4), 0x55555555);
		mmio_write_32(0x70aa30000 + (i * 4), 0x55555555);
		mmio_write_32(0x70aa38000 + (i * 4), 0x55555555);
		mmio_write_32(0x70aa48000 + (i * 4), 0x55555555);
		mmio_write_32(0x70aa58000 + (i * 4), 0x55555555);
	}
	/* PEX6 Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70ab08000 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70ab20000 + (i * 4), 0x55555555);
		mmio_write_32(0x70ab28000 + (i * 4), 0x55555555);
	}
	/* QDMA Internal Memory.*/
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70b008000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b00c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b010000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b014000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b018000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b018400 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01a400 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01d000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01e800 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01f000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b01f800 + (i * 4), 0x55555555);
		mmio_write_32(0x70b020000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b020400 + (i * 4), 0x55555555);
		mmio_write_32(0x70b020800 + (i * 4), 0x55555555);
		mmio_write_32(0x70b020c00 + (i * 4), 0x55555555);
		mmio_write_32(0x70b022000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b022400 + (i * 4), 0x55555555);
		mmio_write_32(0x70b024000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b024800 + (i * 4), 0x55555555);
		mmio_write_32(0x70b025000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b025800 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 4U; i++) {
		mmio_write_32(0x70b026000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b026200 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70b028000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b028800 + (i * 4), 0x55555555);
		mmio_write_32(0x70b029000 + (i * 4), 0x55555555);
		mmio_write_32(0x70b029800 + (i * 4), 0x55555555);
	}
	/* lnx1_e1000#0 Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c00a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00a200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00a400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00a600 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00a800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00aa00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00ac00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00ae00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00b000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00b200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00b400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00b600 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00b800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00ba00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00bc00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00be00 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c00c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00c400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00c800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00cc00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00d000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00d400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00d800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00dc00 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c00e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c00f000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012600 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012a00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012c00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c012e00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013600 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013a00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013c00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c013e00 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c014000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c014400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c014800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c014c00 + (i * 4), 0x55555555);
		mmio_write_32(0x70c015000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c015400 + (i * 4), 0x55555555);
		mmio_write_32(0x70c015800 + (i * 4), 0x55555555);
		mmio_write_32(0x70c015c00 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c016000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c017000 + (i * 4), 0x55555555);
	}
	/* lnx1_xfi Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c108000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c108200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c10a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c10a400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c10c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c10c400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c10e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c10e200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c110000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c110400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c112000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c112400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c114000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c114200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c116000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c116400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c118000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c118400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c11a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c11a200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c11c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c11c400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c11e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c11e400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c120000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c120200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c122000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c122400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c124000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c124400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c126000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c126200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c128000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c128400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c12a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c12a400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c12c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c12c200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c12e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c12e400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c130000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c130400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c132000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c132200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c134000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c134400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c136000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c136400 + (i * 4), 0x55555555);
	}
	/* lnx2_xfi Internal Memory.*/
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c308000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c308200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c30a000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c30a400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c30c000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c30c400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 3U; i++) {
		mmio_write_32(0x70c30e000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c30e200 + (i * 4), 0x55555555);
		mmio_write_32(0x70c310000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c310400 + (i * 4), 0x55555555);
	}
	for (i = 0U; i < 5U; i++) {
		mmio_write_32(0x70c312000 + (i * 4), 0x55555555);
		mmio_write_32(0x70c312400 + (i * 4), 0x55555555);
	}
	/* Disable BIST */
	mmio_write_32(0x700117E60, val3);
	mmio_write_32(0x700117E90, val4);
}
#endif
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#

# Platform Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
# applied.

# Flag to apply erratum A-050426 workaround during reset.
ERRATA_SOC_A050426 ?= 0

# Process ERRATA_SOC_A050426 flag: request the common errata source and
# export the macro to the C code.
ifeq (${ERRATA_SOC_A050426}, 1)
INCL_SOC_ERRATA_SOURCES := yes
$(eval $(call add_define,ERRATA_SOC_A050426))
endif

# Compile the SoC-level errata handlers only when at least one erratum
# flag above is enabled.
ifeq (${INCL_SOC_ERRATA_SOURCES},yes)
BL2_SOURCES += ${PLAT_SOC_PATH}/erratas_soc.c
endif
/*
 * Copyright 2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef ERRATA_H
#define ERRATA_H

/*
 * SoC erratum A-050426 workaround entry point (implemented in
 * erratas_soc.c); declared only when the build enables the flag via
 * ERRATA_SOC_A050426 in the errata makefile.
 */
#ifdef ERRATA_SOC_A050426
void erratum_a050426(void);
#endif

#endif /* ERRATA_H */
/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#ifndef _SOC_H
#define _SOC_H

/* Chassis specific defines - common across SoC's of a particular platform */
#include <dcfg_lsch3.h>
#include <soc_default_base_addr.h>
#include <soc_default_helper_macros.h>

/* DDR regions exposed by this SoC. */
#define NUM_DRAM_REGIONS		3
#define NXP_DRAM0_ADDR			0x80000000
#define NXP_DRAM0_MAX_SIZE		0x80000000	/* 2 GB */

#define NXP_DRAM1_ADDR			0x2080000000
#define NXP_DRAM1_MAX_SIZE		0x1F80000000	/* 126 G */

#define NXP_DRAM2_ADDR			0x6000000000
#define NXP_DRAM2_MAX_SIZE		0x2000000000	/* 128G */

/*DRAM0 Size defined in platform_def.h */
#define NXP_DRAM0_SIZE			PLAT_DEF_DRAM0_SIZE

#define DDR_PLL_FIX
#define NXP_DDR_PHY1_ADDR		0x01400000
#define NXP_DDR_PHY2_ADDR		0x01600000

#if defined(IMAGE_BL31)
#define LS_SYS_TIMCTL_BASE		0x2890000
#ifdef LS_SYS_TIMCTL_BASE
#define PLAT_LS_NSTIMER_FRAME_ID	0
#define LS_CONFIG_CNTACR		1
#endif
#endif

/* Start: Macros used by soc.c: get_boot_dev */
#define PORSR1_RCW_MASK			0x07800000
#define PORSR1_RCW_SHIFT		23

#define SDHC1_VAL			0x8
#define SDHC2_VAL			0x9
#define I2C1_VAL			0xa
#define FLEXSPI_NAND2K_VAL		0xc
#define FLEXSPI_NAND4K_VAL		0xd
#define FLEXSPI_NOR			0xf
/* End: Macros used by soc.c: get_boot_dev */

/* bits */
/* SVR Definition */
#define SVR_LX2160A			0x04
#define SVR_LX2120A			0x14
#define SVR_LX2080A			0x05

/* Number of cores in platform */
/* Used by common code for array initialization */
#define NUMBER_OF_CLUSTERS		8
#define CORES_PER_CLUSTER		2
/*
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. "2 * PLATFORM_CORE_COUNT").
 */
#define PLATFORM_CORE_COUNT		(NUMBER_OF_CLUSTERS * CORES_PER_CLUSTER)

/*
 * Required LS standard platform porting definitions
 * for CCN-508
 */
#define PLAT_CLUSTER_TO_CCN_ID_MAP	11, 15, 27, 31, 12, 28, 16, 0
#define PLAT_6CLUSTER_TO_CCN_ID_MAP	11, 15, 27, 31, 12, 28

/* Defines required for using XLAT tables from ARM common code */
#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 40)
#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 40)

/* Clock Divisors */
#define NXP_PLATFORM_CLK_DIVIDER	2
#define NXP_UART_CLK_DIVIDER		4

/* Start: Macros used by lx2160a.S */
#define MPIDR_AFFINITY0_MASK		0x00FF
#define MPIDR_AFFINITY1_MASK		0xFF00
#define CPUECTLR_DISABLE_TWALK_PREFETCH	0x4000000000
#define CPUECTLR_INS_PREFETCH_MASK	0x1800000000
#define CPUECTLR_DAT_PREFETCH_MASK	0x0300000000
#define CPUECTLR_RET_8CLK		0x2
#define OSDLR_EL1_DLK_LOCK		0x1
#define CNTP_CTL_EL0_EN			0x1
#define CNTP_CTL_EL0_IMASK		0x2
/* set to 0 if the clusters are not symmetrical */
#define SYMMETRICAL_CLUSTERS		1
/* End: Macros used by lx2160a.S */

/* Start: Macros used by lib/psci files */
#define SYSTEM_PWR_DOMAINS		1
#define PLAT_NUM_PWR_DOMAINS		(PLATFORM_CORE_COUNT + \
					 NUMBER_OF_CLUSTERS  + \
					 SYSTEM_PWR_DOMAINS)

/* Power state coordination occurs at the system level */
#define PLAT_MAX_PWR_LVL		MPIDR_AFFLVL2

/* define retention state */
#define PLAT_MAX_RET_STATE		(PSCI_LOCAL_STATE_RUN + 1)

/* define power-down state */
#define PLAT_MAX_OFF_STATE		(PLAT_MAX_RET_STATE + 1)
/* End: Macros used by lib/psci files */

/* Some data must be aligned on the biggest cache line size in the platform.
 * This is known only to the platform as it might have a combination of
 * integrated and external caches.
 *
 * CACHE_WRITEBACK_GRANULE is defined in soc.def
 *
 * One cache line needed for bakery locks on ARM platforms
 */
#define PLAT_PERCPU_BAKERY_LOCK_SIZE	(1 * CACHE_WRITEBACK_GRANULE)

/* Flash flag values default to DEFAULT_SET_VALUE unless overridden. */
#ifndef WDOG_RESET_FLAG
#define WDOG_RESET_FLAG			DEFAULT_SET_VALUE
#endif

#ifndef WARM_BOOT_SUCCESS
#define WARM_BOOT_SUCCESS		DEFAULT_SET_VALUE
#endif

#ifndef __ASSEMBLER__
/* SoC setup helpers implemented in soc.c; prototypes only for C code. */
void set_base_freq_CNTFID0(void);
void soc_init_start(void);
void soc_init_finish(void);
void soc_init_percpu(void);
void _soc_set_start_addr(unsigned long addr);
void _set_platform_security(void);
#endif

#endif /* _SOC_H */
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <common/debug.h>
#include <ddr.h>
#include <lib/utils.h>
#include <load_img.h>
#include "plat_common.h"
#include <platform_def.h>
#ifdef CONFIG_STATIC_DDR
/*
 * Fixed DDR controller register file for 3200 MT/s operation: two
 * chip selects in use, each bounded at 0x03FF.
 * NOTE(review): values presumably derived from the controller
 * reference manual for this board's topology - confirm against it.
 */
const struct ddr_cfg_regs static_3200 = {
	.cs[0].bnds = U(0x03FF),
	.cs[1].bnds = U(0x03FF),
	.cs[0].config = U(0x80050422),
	.cs[1].config = U(0x80000422),
	.cs[2].bnds = U(0x00),		/* CS2/CS3 unused */
	.cs[3].bnds = U(0x00),
	.cs[2].config = U(0x00),
	.cs[3].config = U(0x00),
	.timing_cfg[0] = U(0xFFAA0018),
	.timing_cfg[1] = U(0x646A8844),
	.timing_cfg[2] = U(0x00058022),
	.timing_cfg[3] = U(0x13622100),
	.timing_cfg[4] = U(0x02),
	.timing_cfg[5] = U(0x07401400),
	.timing_cfg[7] = U(0x3BB00000),
	.timing_cfg[8] = U(0x0944AC00),
	.sdram_cfg[0] = U(0x65044008),
	.sdram_cfg[1] = U(0x00401011),
	.sdram_cfg[2] = U(0x00),
	.sdram_mode[0] = U(0x06010C50),
	.sdram_mode[1] = U(0x00280400),
	.sdram_mode[2] = U(0x00),
	.sdram_mode[3] = U(0x00),
	.sdram_mode[4] = U(0x00),
	.sdram_mode[5] = U(0x00),
	.sdram_mode[6] = U(0x00),
	.sdram_mode[7] = U(0x00),
	.sdram_mode[8] = U(0x0500),
	.sdram_mode[9] = U(0x10240000),
	.sdram_mode[10] = U(0x00),
	.sdram_mode[11] = U(0x00),
	.sdram_mode[12] = U(0x00),
	.sdram_mode[13] = U(0x00),
	.sdram_mode[14] = U(0x00),
	.sdram_mode[15] = U(0x00),
	.md_cntl = U(0x00),
	.interval = U(0x30C00000),
	.data_init = U(0xDEADBEEF),	/* ECC init data pattern */
	.init_addr = U(0x00),
	.zq_cntl = U(0x8A090705),
	.sdram_rcw[0] = U(0x00),	/* no RDIMM control words (UDIMM) */
	.sdram_rcw[1] = U(0x00),
	.sdram_rcw[2] = U(0x00),
	.sdram_rcw[3] = U(0x00),
	.sdram_rcw[4] = U(0x00),
	.sdram_rcw[5] = U(0x00),
	.err_disable = U(0x00),
	.err_int_en = U(0x00),
};
/*
 * Fixed DDR controller register file for 2900 MT/s operation.
 * Differs from static_3200 only in the timing_cfg and refresh
 * interval values.
 */
const struct ddr_cfg_regs static_2900 = {
	.cs[0].bnds = U(0x03FF),
	.cs[1].bnds = U(0x03FF),
	.cs[0].config = U(0x80050422),
	.cs[1].config = U(0x80000422),
	.cs[2].bnds = U(0x00),		/* CS2/CS3 unused */
	.cs[3].bnds = U(0x00),
	.cs[2].config = U(0x00),
	.cs[3].config = U(0x00),
	.timing_cfg[0] = U(0xFF990018),
	.timing_cfg[1] = U(0x4F4A4844),
	.timing_cfg[2] = U(0x0005601F),
	.timing_cfg[3] = U(0x125F2100),
	.timing_cfg[4] = U(0x02),
	.timing_cfg[5] = U(0x07401400),
	.timing_cfg[7] = U(0x3AA00000),
	.timing_cfg[8] = U(0x09449B00),
	.sdram_cfg[0] = U(0x65044008),
	.sdram_cfg[1] = U(0x00401011),
	.sdram_cfg[2] = U(0x00),
	.sdram_mode[0] = U(0x06010C50),
	.sdram_mode[1] = U(0x00280400),
	.sdram_mode[2] = U(0x00),
	.sdram_mode[3] = U(0x00),
	.sdram_mode[4] = U(0x00),
	.sdram_mode[5] = U(0x00),
	.sdram_mode[6] = U(0x00),
	.sdram_mode[7] = U(0x00),
	.sdram_mode[8] = U(0x0500),
	.sdram_mode[9] = U(0x10240000),
	.sdram_mode[10] = U(0x00),
	.sdram_mode[11] = U(0x00),
	.sdram_mode[12] = U(0x00),
	.sdram_mode[13] = U(0x00),
	.sdram_mode[14] = U(0x00),
	.sdram_mode[15] = U(0x00),
	.md_cntl = U(0x00),
	.interval = U(0x2C2E0000),
	.data_init = U(0xDEADBEEF),	/* ECC init data pattern */
	.init_addr = U(0x00),
	.zq_cntl = U(0x8A090705),
	.sdram_rcw[0] = U(0x00),	/* no RDIMM control words (UDIMM) */
	.sdram_rcw[1] = U(0x00),
	.sdram_rcw[2] = U(0x00),
	.sdram_rcw[3] = U(0x00),
	.sdram_rcw[4] = U(0x00),
	.sdram_rcw[5] = U(0x00),
	.err_disable = U(0x00),
	.err_int_en = U(0x00),
};
/*
 * Fixed DDR controller register file for 2600 MT/s operation.
 */
const struct ddr_cfg_regs static_2600 = {
	.cs[0].bnds = U(0x03FF),
	.cs[1].bnds = U(0x03FF),
	.cs[0].config = U(0x80050422),
	.cs[1].config = U(0x80000422),
	.cs[2].bnds = U(0x00),		/* CS2/CS3 unused */
	.cs[3].bnds = U(0x00),
	.cs[2].config = U(0x00),
	.cs[3].config = U(0x00),
	.timing_cfg[0] = U(0xFF880018),
	.timing_cfg[1] = U(0x2A24F444),
	.timing_cfg[2] = U(0x007141DC),
	.timing_cfg[3] = U(0x125B2100),
	.timing_cfg[4] = U(0x02),
	.timing_cfg[5] = U(0x06401400),
	.timing_cfg[7] = U(0x28800000),
	.timing_cfg[8] = U(0x07338A00),
	.sdram_cfg[0] = U(0x65044008),
	.sdram_cfg[1] = U(0x00401011),
	.sdram_cfg[2] = U(0x00),
	.sdram_mode[0] = U(0x06010A70),
	.sdram_mode[1] = U(0x00200400),
	.sdram_mode[2] = U(0x00),
	.sdram_mode[3] = U(0x00),
	.sdram_mode[4] = U(0x00),
	.sdram_mode[5] = U(0x00),
	.sdram_mode[6] = U(0x00),
	.sdram_mode[7] = U(0x00),
	.sdram_mode[8] = U(0x0500),
	.sdram_mode[9] = U(0x0C240000),
	.sdram_mode[10] = U(0x00),
	.sdram_mode[11] = U(0x00),
	.sdram_mode[12] = U(0x00),
	.sdram_mode[13] = U(0x00),
	.sdram_mode[14] = U(0x00),
	.sdram_mode[15] = U(0x00),
	.md_cntl = U(0x00),
	.interval = U(0x279C0000),
	.data_init = U(0xDEADBEEF),	/* ECC init data pattern */
	.init_addr = U(0x00),
	.zq_cntl = U(0x8A090705),
	.sdram_rcw[0] = U(0x00),	/* no RDIMM control words (UDIMM) */
	.sdram_rcw[1] = U(0x00),
	.sdram_rcw[2] = U(0x00),
	.sdram_rcw[3] = U(0x00),
	.sdram_rcw[4] = U(0x00),
	.sdram_rcw[5] = U(0x00),
	.err_disable = U(0x00),
	.err_int_en = U(0x00),
};
/*
 * Fixed DIMM description matching the static register files above:
 * unbuffered, dual-rank, 64-bit data bus + 8-bit ECC, x8 devices,
 * address mirroring enabled.
 */
const struct dimm_params static_dimm = {
	.rdimm = U(0),			/* UDIMM, not registered */
	.primary_sdram_width = U(64),
	.ec_sdram_width = U(8),		/* ECC strip */
	.n_ranks = U(2),
	.device_width = U(8),
	.mirrored_dimm = U(1),
};
/* Sample code using two UDIMM MT18ASF1G72AZ-2G6B1, on each DDR controller */
/*
 * Program the static (2900 MT/s) register set instead of deriving one
 * from SPD, then run board option and PHY configuration.
 * Returns the fixed total DRAM size (0x400000000 = 16 GB).
 */
unsigned long long board_static_ddr(struct ddr_info *priv)
{
	(void)memcpy(&priv->ddr_reg, &static_2900, sizeof(static_2900));
	(void)memcpy(&priv->dimm, &static_dimm, sizeof(static_dimm));
	/* Both chip selects of DIMM slot 0 populated. */
	priv->conf.cs_on_dimm[0] = 0x3;
	ddr_board_options(priv);
	compute_ddr_phy(priv);

	return ULL(0x400000000);
}
#elif defined(CONFIG_DDR_NODIMM)
/*
 * Sample code to bypass reading SPD. This is a sample, not recommended
 * for boards with slots. DDR model number: UDIMM MT18ASF1G72AZ-2G6B1.
 *
 * All *_ps fields are timings in picoseconds, taken from the DIMM
 * datasheet in place of SPD contents.
 */
const struct dimm_params ddr_raw_timing = {
	.n_ranks = U(2),
	.rank_density = U(4294967296u),		/* 4 GB per rank */
	.capacity = U(8589934592u),		/* 8 GB per DIMM */
	.primary_sdram_width = U(64),
	.ec_sdram_width = U(8),			/* ECC strip */
	.device_width = U(8),
	.die_density = U(0x4),
	.rdimm = U(0),				/* UDIMM */
	.mirrored_dimm = U(1),
	.n_row_addr = U(15),
	.n_col_addr = U(10),
	.bank_addr_bits = U(0),
	.bank_group_bits = U(2),
	.edc_config = U(2),
	.burst_lengths_bitmask = U(0x0c),
	.tckmin_x_ps = 750,
	.tckmax_ps = 1600,
	.caslat_x = U(0x00FFFC00),		/* supported CAS latencies */
	.taa_ps = 13750,
	.trcd_ps = 13750,
	.trp_ps = 13750,
	.tras_ps = 32000,
	.trc_ps = 457500,
	.twr_ps = 15000,
	.trfc1_ps = 260000,
	.trfc2_ps = 160000,
	.trfc4_ps = 110000,
	.tfaw_ps = 21000,
	.trrds_ps = 3000,
	.trrdl_ps = 4900,
	.tccdl_ps = 5000,
	.refresh_rate_ps = U(7800000),
};
/*
 * Supply fixed DIMM parameters (no SPD read) for the on-board
 * UDIMM MT18ASF1G72AZ-2G6B1 and mark DIMM slot 0 as in use.
 * Returns the valid-DIMM mask (0x5).
 */
int ddr_get_ddr_params(struct dimm_params *pdimm,
		struct ddr_conf *conf)
{
	static const char dimm_model[] = "Fixed DDR on board";

	conf->dimm_in_use[0] = 1;	/* Modify accordingly */
	memcpy(pdimm, &ddr_raw_timing, sizeof(struct dimm_params));
	/* Copies without the trailing NUL; mpart is presumably a
	 * fixed-width field zeroed by the memcpy above - TODO confirm. */
	memcpy(pdimm->mpart, dimm_model, sizeof(dimm_model) - 1);

	/* valid DIMM mask, change accordingly, together with dimm_on_ctlr. */
	return 0x5;
}
#endif /* CONFIG_DDR_NODIMM */
/*
 * Board-level DDR tuning hook invoked by the NXP DDR driver.
 *
 * Programs controller options (Vref, ODT, turnaround timings) based on
 * how the second DIMM slot is populated (conf->cs_on_dimm[1]).
 * Always returns 0.
 */
int ddr_board_options(struct ddr_info *priv)
{
	struct memctl_opt *opts = &priv->opt;
	const struct ddr_conf *cfg = &priv->conf;

	opts->vref_dimm = U(0x24);		/* range 1, 83.4% */
	opts->rtt_override = 0;
	opts->rtt_park = U(240);
	opts->otf_burst_chop_en = 0;
	opts->burst_length = U(DDR_BL8);
	opts->trwt_override = U(1);
	opts->bstopre = U(0);			/* auto precharge */
	opts->addr_hash = 1;

	/* Set ODT impedance on PHY side */
	if ((cfg->cs_on_dimm[1] == 0xc) || (cfg->cs_on_dimm[1] == 0x4)) {
		/* Two slots populated: dual rank (0xc), or single rank
		 * (0x4, not valid for interleaving). */
		opts->trwt = U(0xf);
		opts->twrt = U(0x7);
		opts->trrt = U(0x7);
		opts->twwt = U(0x7);
		opts->vref_phy = U(0x6B);	/* 83.6% */
		opts->odt = U(60);
		opts->phy_tx_impedance = U(28);
	} else {
		/* One slot used (0), or any other population. */
		opts->trwt = U(0x3);
		opts->twrt = U(0x3);
		opts->trrt = U(0x3);
		opts->twwt = U(0x3);
		opts->vref_phy = U(0x60);	/* 75% */
		opts->odt = U(48);
		opts->phy_tx_impedance = U(28);
	}

	return 0;
}
/*
 * Initialize the DDR controller(s) via the NXP DDR driver and return
 * the total DRAM size in bytes (negative value on failure).
 *
 * wrm_bt_flg (NXP_WARM_BOOT builds only): non-zero when resuming from
 * a warm reset with DDR contents to be retained.
 */
#ifdef NXP_WARM_BOOT
long long init_ddr(uint32_t wrm_bt_flg)
#else
long long init_ddr(void)
#endif
{
	/* SPD EEPROM I2C addresses for up to four DIMM slots. */
	int spd_addr[] = {0x51U, 0x52U, 0x53U, 0x54U};
	struct ddr_info info;
	struct sysinfo sys;
	long long dram_size;

	zeromem(&sys, sizeof(sys));
	if (get_clocks(&sys) == 1) {
		ERROR("System clocks are not set.\n");
		panic();
	}
	debug("platform clock %lu\n", sys.freq_platform);
	debug("DDR PLL1 %lu\n", sys.freq_ddr_pll0);
	debug("DDR PLL2 %lu\n", sys.freq_ddr_pll1);

	zeromem(&info, sizeof(info));

	/* Set two DDRC. Unused DDRC will be removed automatically. */
	info.num_ctlrs = NUM_OF_DDRC;
	info.spd_addr = spd_addr;
	info.ddr[0] = (void *)NXP_DDR_ADDR;
	info.ddr[1] = (void *)NXP_DDR2_ADDR;
	info.phy[0] = (void *)NXP_DDR_PHY1_ADDR;
	info.phy[1] = (void *)NXP_DDR_PHY2_ADDR;
	info.clk = get_ddr_freq(&sys, 0);
	info.img_loadr = load_img;
	info.phy_gen2_fw_img_buf = PHY_GEN2_FW_IMAGE_BUFFER;
	/* Fall back to the second DDR PLL if the first reads zero. */
	if (info.clk == 0) {
		info.clk = get_ddr_freq(&sys, 1);
	}
	info.dimm_on_ctlr = DDRC_NUM_DIMM;

#ifdef NXP_WARM_BOOT
	/*
	 * Select the boot flag directly from the warm-reset indication
	 * (the previous code redundantly pre-assigned DDR_COLD_BOOT
	 * twice before this decision).
	 */
	info.warm_boot_flag = (wrm_bt_flg != 0U) ? DDR_WARM_BOOT :
						   DDR_COLD_BOOT;
#else
	info.warm_boot_flag = DDR_WRM_BOOT_NT_SUPPORTED;
#endif

	dram_size = dram_init(&info
#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
			, NXP_CCN_HN_F_0_ADDR
#endif
	);

	if (dram_size < 0) {
		ERROR("DDR init failed.\n");
	}

	return dram_size;
}
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_DEF_H
#define PLAT_DEF_H

#include <arch.h>
#include <cortex_a72.h>
/* Required without TBBR.
 * To include the defines for DDR PHY
 * Images.
 */
#include <tbbr_img_def.h>

#include <policy.h>
#include <soc.h>

#if defined(IMAGE_BL31)
/* Generic-timer frame configuration (same values as soc.h; the
 * identical redefinition is permitted by the C standard). */
#define LS_SYS_TIMCTL_BASE		0x2890000
#define PLAT_LS_NSTIMER_FRAME_ID	0
#define LS_CONFIG_CNTACR		1
#endif

/* Board input clocks, in Hz. */
#define NXP_SYSCLK_FREQ			100000000
#define NXP_DDRCLK_FREQ			100000000

/* UART related definition */
#define NXP_CONSOLE_ADDR		NXP_UART_ADDR
#define NXP_CONSOLE_BAUDRATE		115200

/* Size of cacheable stacks */
#if defined(IMAGE_BL2)
#if defined(TRUSTED_BOARD_BOOT)
/* TBB needs a larger stack for crypto/X.509 processing. */
#define PLATFORM_STACK_SIZE		0x2000
#else
#define PLATFORM_STACK_SIZE		0x1000
#endif
#elif defined(IMAGE_BL31)
#define PLATFORM_STACK_SIZE		0x1000
#endif

/* SD block buffer, carved from the top of OCRAM. */
#define NXP_SD_BLOCK_BUF_SIZE		(0x8000)
#define NXP_SD_BLOCK_BUF_ADDR		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
					 - NXP_SD_BLOCK_BUF_SIZE)

/* BL2 must not grow into the SD buffer when booting from SD. */
#ifdef SD_BOOT
#define BL2_LIMIT			(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
					 - NXP_SD_BLOCK_BUF_SIZE)
#else
#define BL2_LIMIT			(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE)
#endif

/* IO defines as needed by IO driver framework */
#define MAX_IO_DEVICES			4
#define MAX_IO_BLOCK_DEVICES		1
#define MAX_IO_HANDLES			4

/* Staging buffer in OCRAM for the DDR PHY training firmware. */
#define PHY_GEN2_FW_IMAGE_BUFFER	(NXP_OCRAM_ADDR + CSF_HDR_SZ)

/*
 * FIP image defines - Offset at which FIP Image would be present
 * Image would include Bl31 , Bl33 and Bl32 (optional)
 */
#ifdef POLICY_FUSE_PROVISION
#define MAX_FIP_DEVICES			3
#endif

#ifndef MAX_FIP_DEVICES
#define MAX_FIP_DEVICES			2
#endif

/*
 * ID of the secure physical generic timer interrupt used by the BL32.
 */
#define BL32_IRQ_SEC_PHY_TIMER		29

#define BL31_WDOG_SEC			89

#define BL31_NS_WDOG_WS1		108

/*
 * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3
 * terminology. On a GICv2 system or mode, the lists will be merged and treated
 * as Group 0 interrupts.
 */
#define PLAT_LS_G1S_IRQ_PROPS(grp)	\
	INTR_PROP_DESC(BL32_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_EDGE)

/* SGI 15 and Secure watchdog interrupts assigned to Group 0 */
#define NXP_IRQ_SEC_SGI_7		15
#define PLAT_LS_G0_IRQ_PROPS(grp)	\
	INTR_PROP_DESC(BL31_WDOG_SEC, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_EDGE), \
	INTR_PROP_DESC(BL31_NS_WDOG_WS1, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_EDGE), \
	INTR_PROP_DESC(NXP_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_LEVEL)

#endif /* PLAT_DEF_H */
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <plat_common.h>
#pragma weak board_enable_povdd
#pragma weak board_disable_povdd
/*
 * Weak default: report whether the board asserted POVDD (needed for
 * fuse provisioning). Reflects the CONFIG_POVDD_ENABLE build switch.
 */
bool board_enable_povdd(void)
{
	bool povdd_on = false;

#ifdef CONFIG_POVDD_ENABLE
	povdd_on = true;
#endif

	return povdd_on;
}
/*
 * Weak default: report whether the board de-asserted POVDD again.
 * Reflects the CONFIG_POVDD_ENABLE build switch.
 */
bool board_disable_povdd(void)
{
	bool povdd_off = false;

#ifdef CONFIG_POVDD_ENABLE
	povdd_off = true;
#endif

	return povdd_off;
}
#
# Copyright 2018-2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#

# board-specific build parameters
BOOT_MODE	?= flexspi_nor
BOARD		?= lx2160aqds
POVDD_ENABLE	:= no
NXP_COINED_BB	:= no

# DDR Compilation Configs
NUM_OF_DDRC	:= 1
DDRC_NUM_DIMM	:= 1
DDRC_NUM_CS	:= 2
DDR_ECC_EN	:= yes
#enable address decoding feature
DDR_ADDR_DEC	:= yes
APPLY_MAX_CDD	:= yes

# DDR Errata
ERRATA_DDR_A011396	:= 1
ERRATA_DDR_A050450	:= 1

# On-Board Flash Details
FLASH_TYPE	:= MT35XU512A
XSPI_FLASH_SZ	:= 0x10000000
NXP_XSPI_NOR_UNIT_SIZE		:= 0x20000
BL2_BIN_XSPI_NOR_END_ADDRESS	:= 0x100000
# CONFIG_FSPI_ERASE_4K is required to erase 4K sector sizes. This
# config is enabled for future use cases.
FSPI_ERASE_4K	:= 0

# Platform specific features.
WARM_BOOT	:= yes

# Adding platform specific defines
$(eval $(call add_define_val,BOARD,'"${BOARD}"'))

ifeq (${POVDD_ENABLE},yes)
$(eval $(call add_define,CONFIG_POVDD_ENABLE))
endif

ifneq (${FLASH_TYPE},)
$(eval $(call add_define,CONFIG_${FLASH_TYPE}))
endif

ifneq (${XSPI_FLASH_SZ},)
$(eval $(call add_define_val,NXP_FLEXSPI_FLASH_SIZE,${XSPI_FLASH_SZ}))
endif

ifneq (${FSPI_ERASE_4K},)
$(eval $(call add_define_val,CONFIG_FSPI_ERASE_4K,${FSPI_ERASE_4K}))
endif

ifneq (${NUM_OF_DDRC},)
$(eval $(call add_define_val,NUM_OF_DDRC,${NUM_OF_DDRC}))
endif

ifneq (${DDRC_NUM_DIMM},)
$(eval $(call add_define_val,DDRC_NUM_DIMM,${DDRC_NUM_DIMM}))
endif

ifneq (${DDRC_NUM_CS},)
$(eval $(call add_define_val,DDRC_NUM_CS,${DDRC_NUM_CS}))
endif

ifeq (${DDR_ADDR_DEC},yes)
$(eval $(call add_define,CONFIG_DDR_ADDR_DEC))
endif

ifeq (${DDR_ECC_EN},yes)
$(eval $(call add_define,CONFIG_DDR_ECC_EN))
endif

# Platform can control the base address for non-volatile storage.
#$(eval $(call add_define_val,NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}'))

# Warm boot stores the DDR PHY training results in the last flash
# sector below the end of the BL2 region.
ifeq (${WARM_BOOT},yes)
$(eval $(call add_define_val,PHY_TRAINING_REGS_ON_FLASH,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - ${NXP_XSPI_NOR_UNIT_SIZE}'))
endif

# Adding Platform files build files
BL2_SOURCES	+= ${BOARD_PATH}/ddr_init.c\
		${BOARD_PATH}/platform.c

# Adding SoC build info
include plat/nxp/soc-lx2160a/soc.mk
/*
* Copyright 2018-2020 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLATFORM_DEF_H
#define PLATFORM_DEF_H

/* Aggregate the board/SoC-specific and the NXP default platform
 * definitions into the single header expected by common TF-A code. */
#include "plat_def.h"
#include "plat_default_def.h"

#endif /* PLATFORM_DEF_H */
/*
* Copyright 2018-2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef POLICY_H
#define POLICY_H

/* Following defines affect the PLATFORM SECURITY POLICY */

/* set this to 0x0 if the platform is not using/responding to ECC errors
 * set this to 0x1 if ECC is being used (we have to do some init)
 */
#define POLICY_USING_ECC		0x0

/* Set this to 0x0 to leave the default SMMU page size in sACR
 * Set this to 0x1 to change the SMMU page size to 64K
 */
#define POLICY_SMMU_PAGESZ_64K		0x1

/*
 * POLICY_PERF_WRIOP = 0 : No Performance enhancement for WRIOP RN-I
 * POLICY_PERF_WRIOP = 1 : Performance enhancement for WRIOP RN-I = 7
 * POLICY_PERF_WRIOP = 2 : Performance enhancement for WRIOP RN-I = 23
 */
#define POLICY_PERF_WRIOP		0

/*
 * set this to '1' if the debug clocks need to remain enabled during
 * system entry to low-power (LPM20) - this should only be necessary
 * for testing and NEVER set for normal production
 */
#define POLICY_DEBUG_ENABLE		0

#endif /* POLICY_H */
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <common/debug.h>
#include <ddr.h>
#include <lib/utils.h>
#include <load_img.h>
#include "plat_common.h"
#include <platform_def.h>
#ifdef CONFIG_STATIC_DDR
/*
 * Fixed DDR controller register file for 1600 MT/s operation: two
 * chip selects, each bounded at 0x3FF. Unlisted fields default to 0.
 */
const struct ddr_cfg_regs static_1600 = {
	.cs[0].config = U(0xA8050322),
	.cs[1].config = U(0x80000322),
	.cs[0].bnds = U(0x3FF),
	.cs[1].bnds = U(0x3FF),
	.sdram_cfg[0] = U(0xE5044000),
	.sdram_cfg[1] = U(0x401011),
	.timing_cfg[0] = U(0xFF550018),
	.timing_cfg[1] = U(0xBAB48C42),
	.timing_cfg[2] = U(0x48C111),
	.timing_cfg[3] = U(0x10C1000),
	.timing_cfg[4] = U(0x2),
	.timing_cfg[5] = U(0x3401400),
	.timing_cfg[7] = U(0x13300000),
	.timing_cfg[8] = U(0x2114600),
	.sdram_mode[0] = U(0x6010210),
	.sdram_mode[8] = U(0x500),
	.sdram_mode[9] = U(0x4240000),
	.interval = U(0x18600000),
	.data_init = U(0xDEADBEEF),	/* ECC init data pattern */
	.zq_cntl = U(0x8A090705),
};
/*
 * Fixed DIMM description matching static_1600: unbuffered, dual-rank,
 * 64-bit data bus + 8-bit ECC, x8 devices, address mirroring enabled.
 */
const struct dimm_params static_dimm = {
	.rdimm = U(0),			/* UDIMM, not registered */
	.primary_sdram_width = U(64),
	.ec_sdram_width = U(8),		/* ECC strip */
	.n_ranks = U(2),
	.device_width = U(8),
	.mirrored_dimm = U(1),
};
/* Sample code using two UDIMM MT18ASF1G72AZ-2G6B1, on each DDR controller */
/*
 * Program the static (1600 MT/s) register set instead of deriving one
 * from SPD, then run board option and PHY configuration.
 * Returns the fixed total DRAM size (0x400000000 = 16 GB).
 */
unsigned long long board_static_ddr(struct ddr_info *priv)
{
	memcpy(&priv->ddr_reg, &static_1600, sizeof(static_1600));
	memcpy(&priv->dimm, &static_dimm, sizeof(static_dimm));
	/* Both chip selects of DIMM slot 0 populated. */
	priv->conf.cs_on_dimm[0] = 0x3;
	ddr_board_options(priv);
	compute_ddr_phy(priv);

	return ULL(0x400000000);
}
#elif defined(CONFIG_DDR_NODIMM)
/*
 * Sample code to bypass reading SPD. This is a sample, not recommended
 * for boards with slots. DDR model number: UDIMM MT18ASF1G72AZ-2G6B1.
 *
 * All *_ps fields are timings in picoseconds, taken from the DIMM
 * datasheet in place of SPD contents.
 */
const struct dimm_params ddr_raw_timing = {
	.n_ranks = U(2),
	.rank_density = U(4294967296u),		/* 4 GB per rank */
	.capacity = U(8589934592u),		/* 8 GB per DIMM */
	.primary_sdram_width = U(64),
	.ec_sdram_width = U(8),			/* ECC strip */
	.device_width = U(8),
	.die_density = U(0x4),
	.rdimm = U(0),				/* UDIMM */
	.mirrored_dimm = U(1),
	.n_row_addr = U(15),
	.n_col_addr = U(10),
	.bank_addr_bits = U(0),
	.bank_group_bits = U(2),
	.edc_config = U(2),
	.burst_lengths_bitmask = U(0x0c),
	.tckmin_x_ps = 750,
	.tckmax_ps = 1600,
	.caslat_x = U(0x00FFFC00),		/* supported CAS latencies */
	.taa_ps = 13750,
	.trcd_ps = 13750,
	.trp_ps = 13750,
	.tras_ps = 32000,
	.trc_ps = 457500,
	.twr_ps = 15000,
	.trfc1_ps = 260000,
	.trfc2_ps = 160000,
	.trfc4_ps = 110000,
	.tfaw_ps = 21000,
	.trrds_ps = 3000,
	.trrdl_ps = 4900,
	.tccdl_ps = 5000,
	.refresh_rate_ps = U(7800000),
};
/*
 * Supply fixed DIMM parameters (no SPD read) for the on-board
 * UDIMM MT18ASF1G72AZ-2G6B1 and mark DIMM slot 0 as in use.
 * Returns the valid-DIMM mask (0x5).
 */
int ddr_get_ddr_params(struct dimm_params *pdimm,
		struct ddr_conf *conf)
{
	static const char dimm_model[] = "Fixed DDR on board";

	conf->dimm_in_use[0] = 1;	/* Modify accordingly */
	memcpy(pdimm, &ddr_raw_timing, sizeof(struct dimm_params));
	/* Copies without the trailing NUL; mpart is presumably a
	 * fixed-width field zeroed by the memcpy above - TODO confirm. */
	memcpy(pdimm->mpart, dimm_model, sizeof(dimm_model) - 1);

	/* valid DIMM mask, change accordingly, together with dimm_on_ctlr. */
	return 0x5;
}
#endif /* CONFIG_DDR_NODIMM */
/*
 * Board-level DDR tuning hook invoked by the NXP DDR driver.
 *
 * Programs controller options (Vref, ODT, turnaround timings) based on
 * how the second DIMM slot is populated (conf->cs_on_dimm[1]).
 * Always returns 0.
 */
int ddr_board_options(struct ddr_info *priv)
{
	struct memctl_opt *opts = &priv->opt;
	const struct ddr_conf *cfg = &priv->conf;

	opts->vref_dimm = U(0x24);		/* range 1, 83.4% */
	opts->rtt_override = 0;
	opts->rtt_park = U(240);
	opts->otf_burst_chop_en = 0;
	opts->burst_length = U(DDR_BL8);
	opts->trwt_override = U(1);
	opts->bstopre = U(0);			/* auto precharge */
	opts->addr_hash = 1;

	/* Set ODT impedance on PHY side */
	if ((cfg->cs_on_dimm[1] == 0xc) || (cfg->cs_on_dimm[1] == 0x4)) {
		/* Two slots populated: dual rank (0xc), or single rank
		 * (0x4, not valid for interleaving). */
		opts->trwt = U(0xf);
		opts->twrt = U(0x7);
		opts->trrt = U(0x7);
		opts->twwt = U(0x7);
		opts->vref_phy = U(0x6B);	/* 83.6% */
		opts->odt = U(60);
		opts->phy_tx_impedance = U(28);
	} else {
		/* One slot used (0), or any other population. */
		opts->trwt = U(0x3);
		opts->twrt = U(0x3);
		opts->trrt = U(0x3);
		opts->twwt = U(0x3);
		opts->vref_phy = U(0x60);	/* 75% */
		opts->odt = U(48);
		opts->phy_tx_impedance = U(28);
	}

	return 0;
}
/*
 * Initialize the DDR controller(s) via the NXP DDR driver and return
 * the total DRAM size in bytes (negative value on failure).
 * Warm boot (DDR content retention) is not supported on this board.
 */
long long init_ddr(void)
{
	/* SPD EEPROM I2C addresses for up to four DIMM slots. */
	int spd_addr[] = { 0x51, 0x52, 0x53, 0x54 };
	struct ddr_info info;
	struct sysinfo sys;
	long long dram_size;

	zeromem(&sys, sizeof(sys));
	if (get_clocks(&sys) != 0) {
		ERROR("System clocks are not set\n");
		panic();
	}
	debug("platform clock %lu\n", sys.freq_platform);
	debug("DDR PLL1 %lu\n", sys.freq_ddr_pll0);
	debug("DDR PLL2 %lu\n", sys.freq_ddr_pll1);

	zeromem(&info, sizeof(info));
	/* Set two DDRC. Unused DDRC will be removed automatically. */
	info.num_ctlrs = NUM_OF_DDRC;
	info.spd_addr = spd_addr;
	info.ddr[0] = (void *)NXP_DDR_ADDR;
	info.ddr[1] = (void *)NXP_DDR2_ADDR;
	info.phy[0] = (void *)NXP_DDR_PHY1_ADDR;
	info.phy[1] = (void *)NXP_DDR_PHY2_ADDR;
	info.clk = get_ddr_freq(&sys, 0);
	info.img_loadr = load_img;
	info.phy_gen2_fw_img_buf = PHY_GEN2_FW_IMAGE_BUFFER;
	/* Fall back to the second DDR PLL if the first reads zero. */
	if (info.clk == 0) {
		info.clk = get_ddr_freq(&sys, 1);
	}
	info.dimm_on_ctlr = DDRC_NUM_DIMM;
	info.warm_boot_flag = DDR_WRM_BOOT_NT_SUPPORTED;

	dram_size = dram_init(&info
#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
			, NXP_CCN_HN_F_0_ADDR
#endif
			);

	if (dram_size < 0) {
		ERROR("DDR init failed.\n");
	}

	return dram_size;
}
/*
* Copyright 2021 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef PLAT_DEF_H
#define PLAT_DEF_H

#include <arch.h>
#include <cortex_a72.h>
/* Required without TBBR.
 * To include the defines for DDR PHY
 * Images.
 */
#include <tbbr_img_def.h>

#include <policy.h>
#include <soc.h>

#if defined(IMAGE_BL31)
/* Generic-timer frame configuration (same values as soc.h; the
 * identical redefinition is permitted by the C standard). */
#define LS_SYS_TIMCTL_BASE		0x2890000
#define PLAT_LS_NSTIMER_FRAME_ID	0
#define LS_CONFIG_CNTACR		1
#endif

/* Board input clocks, in Hz. */
#define NXP_SYSCLK_FREQ			100000000
#define NXP_DDRCLK_FREQ			100000000

/* UART related definition */
#define NXP_CONSOLE_ADDR		NXP_UART_ADDR
#define NXP_CONSOLE_BAUDRATE		115200

/* Size of cacheable stacks */
#if defined(IMAGE_BL2)
#if defined(TRUSTED_BOARD_BOOT)
/* TBB needs a larger stack for crypto/X.509 processing. */
#define PLATFORM_STACK_SIZE		0x2000
#else
#define PLATFORM_STACK_SIZE		0x1000
#endif
#elif defined(IMAGE_BL31)
#define PLATFORM_STACK_SIZE		0x1000
#endif

/* SD block buffer, carved from the top of OCRAM. */
#define NXP_SD_BLOCK_BUF_SIZE		(0x8000)
#define NXP_SD_BLOCK_BUF_ADDR		(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
					 - NXP_SD_BLOCK_BUF_SIZE)

/* BL2 must not grow into the SD buffer when booting from SD. */
#ifdef SD_BOOT
#define BL2_LIMIT			(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE \
					 - NXP_SD_BLOCK_BUF_SIZE)
#else
#define BL2_LIMIT			(NXP_OCRAM_ADDR + NXP_OCRAM_SIZE)
#endif

/* IO defines as needed by IO driver framework */
#define MAX_IO_DEVICES			4
#define MAX_IO_BLOCK_DEVICES		1
#define MAX_IO_HANDLES			4

/* Staging buffer in OCRAM for the DDR PHY training firmware. */
#define PHY_GEN2_FW_IMAGE_BUFFER	(NXP_OCRAM_ADDR + CSF_HDR_SZ)

/*
 * FIP image defines - Offset at which FIP Image would be present
 * Image would include Bl31 , Bl33 and Bl32 (optional)
 */
#ifdef POLICY_FUSE_PROVISION
#define MAX_FIP_DEVICES			3
#endif

#ifndef MAX_FIP_DEVICES
#define MAX_FIP_DEVICES			2
#endif

/*
 * ID of the secure physical generic timer interrupt used by the BL32.
 */
#define BL32_IRQ_SEC_PHY_TIMER		29

#define BL31_WDOG_SEC			89

#define BL31_NS_WDOG_WS1		108

/*
 * Define properties of Group 1 Secure and Group 0 interrupts as per GICv3
 * terminology. On a GICv2 system or mode, the lists will be merged and treated
 * as Group 0 interrupts.
 */
#define PLAT_LS_G1S_IRQ_PROPS(grp)	\
	INTR_PROP_DESC(BL32_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_EDGE)

/* SGI 15 and Secure watchdog interrupts assigned to Group 0 */
#define NXP_IRQ_SEC_SGI_7		15
#define PLAT_LS_G0_IRQ_PROPS(grp)	\
	INTR_PROP_DESC(BL31_WDOG_SEC, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_EDGE), \
	INTR_PROP_DESC(BL31_NS_WDOG_WS1, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_EDGE), \
	INTR_PROP_DESC(NXP_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
			GIC_INTR_CFG_LEVEL)

#endif /* PLAT_DEF_H */
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment