Commit 926cd70a authored by Manish Pandey, committed by TrustedFirmware Code Review

Merge changes from topic "brcm_initial_support" into integration

* changes:
  doc: brcm: Add documentation file for brcm stingray platform
  drivers: Add SPI Nor flash support
  drivers: Add iproc spi driver
  drivers: Add emmc driver for Broadcom platforms
  Add BL31 support for Broadcom stingray platform
  Add BL2 support for Broadcom stingray platform
  Add bl31 support common across Broadcom platforms
  Add bl2 setup code common across Broadcom platforms
  drivers: Add support to retrieve plat_toc_flags
parents 33f1dd9c fd1017b1
/*
* Copyright (c) 2015-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PLATFORM_DEF_H
#define PLATFORM_DEF_H
#include <arch.h>
#include <common/tbbr/tbbr_img_def.h>
#include <plat/common/common_def.h>
#include <brcm_def.h>
#include "sr_def.h"
#include <cmn_plat_def.h>
/*
* Most platform porting definitions are provided by the included headers
*/
#define PLAT_BRCM_SCP_TZC_DRAM1_SIZE ULL(0x0)
/*
* Required by standard platform porting definitions
*/
#define PLATFORM_CLUSTER0_CORE_COUNT 2
#define PLATFORM_CLUSTER1_CORE_COUNT 2
#define PLATFORM_CLUSTER2_CORE_COUNT 2
#define PLATFORM_CLUSTER3_CORE_COUNT 2
#define BRCM_SYSTEM_COUNT 1
#define BRCM_CLUSTER_COUNT 4
#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER0_CORE_COUNT + \
PLATFORM_CLUSTER1_CORE_COUNT + \
PLATFORM_CLUSTER2_CORE_COUNT + \
PLATFORM_CLUSTER3_CORE_COUNT)
#define PLAT_NUM_PWR_DOMAINS (BRCM_SYSTEM_COUNT + \
BRCM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
/* TBD-STINGRAY */
#define CACHE_WRITEBACK_SHIFT 6
/*
* Some data must be aligned on the biggest cache line size in the platform.
* This is known only to the platform as it might have a combination of
* integrated and external caches.
*/
#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
/* TBD-STINGRAY */
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
#define BL1_PLATFORM_STACK_SIZE 0x3300
#define BL2_PLATFORM_STACK_SIZE 0xc000
#define BL11_PLATFORM_STACK_SIZE 0x2b00
#define DEFAULT_PLATFORM_STACK_SIZE 0x400
#if IMAGE_BL1
# define PLATFORM_STACK_SIZE BL1_PLATFORM_STACK_SIZE
#else
#if IMAGE_BL2
#ifdef USE_BL1_RW
# define PLATFORM_STACK_SIZE BL2_PLATFORM_STACK_SIZE
#else
# define PLATFORM_STACK_SIZE BL1_PLATFORM_STACK_SIZE
#endif
#else
#if IMAGE_BL11
# define PLATFORM_STACK_SIZE BL11_PLATFORM_STACK_SIZE
#else
# define PLATFORM_STACK_SIZE DEFAULT_PLATFORM_STACK_SIZE
#endif
#endif
#endif
#define PLAT_BRCM_TRUSTED_SRAM_BASE 0x66D00000
#define PLAT_BRCM_TRUSTED_SRAM_SIZE 0x00040000
#ifdef RUN_BL1_FROM_QSPI /* BL1 XIP from QSPI */
# define PLAT_BRCM_TRUSTED_ROM_BASE QSPI_BASE_ADDR
#elif RUN_BL1_FROM_NAND /* BL1 XIP from NAND */
# define PLAT_BRCM_TRUSTED_ROM_BASE NAND_BASE_ADDR
#else /* BL1 executed in ROM */
# define PLAT_BRCM_TRUSTED_ROM_BASE ROM_BASE_ADDR
#endif
#define PLAT_BRCM_TRUSTED_ROM_SIZE 0x00040000
/*******************************************************************************
* BL1 specific defines.
******************************************************************************/
#define BL1_RO_BASE PLAT_BRCM_TRUSTED_ROM_BASE
#define BL1_RO_LIMIT (PLAT_BRCM_TRUSTED_ROM_BASE \
+ PLAT_BRCM_TRUSTED_ROM_SIZE)
/*
* Put BL1 RW at the beginning of the Trusted SRAM.
*/
#define BL1_RW_BASE (BRCM_BL_RAM_BASE)
#define BL1_RW_LIMIT (BL1_RW_BASE + 0x12000)
#define BL11_RW_BASE BL1_RW_LIMIT
#define BL11_RW_LIMIT (PLAT_BRCM_TRUSTED_SRAM_BASE + \
PLAT_BRCM_TRUSTED_SRAM_SIZE)
/*******************************************************************************
* BL2 specific defines.
******************************************************************************/
#if RUN_BL2_FROM_QSPI /* BL2 XIP from QSPI */
#define BL2_BASE QSPI_BASE_ADDR
#define BL2_LIMIT (BL2_BASE + 0x40000)
#define BL2_RW_BASE BL1_RW_LIMIT
#define BL2_RW_LIMIT (PLAT_BRCM_TRUSTED_SRAM_BASE + \
PLAT_BRCM_TRUSTED_SRAM_SIZE)
#elif RUN_BL2_FROM_NAND /* BL2 XIP from NAND */
#define BL2_BASE NAND_BASE_ADDR
#define BL2_LIMIT (BL2_BASE + 0x40000)
#define BL2_RW_BASE BL1_RW_LIMIT
#define BL2_RW_LIMIT (PLAT_BRCM_TRUSTED_SRAM_BASE + \
PLAT_BRCM_TRUSTED_SRAM_SIZE)
#else
#define BL2_BASE (BL1_RW_LIMIT + PAGE_SIZE)
#define BL2_LIMIT (BRCM_BL_RAM_BASE + BRCM_BL_RAM_SIZE)
#endif
/*
* BL1 persistent area in internal SRAM
* This area will grow as more features are added to BL1
*/
#define BL1_PERSISTENT_DATA_SIZE 0x2000
/* To reduce BL2 runtime footprint, we can re-use some BL1_RW area */
#define BL1_RW_RECLAIM_BASE (PLAT_BRCM_TRUSTED_SRAM_BASE + \
BL1_PERSISTENT_DATA_SIZE)
/*******************************************************************************
* BL3-1 specific defines.
******************************************************************************/
/* Max Size of BL31 (in DRAM) */
#define PLAT_BRCM_MAX_BL31_SIZE 0x30000
#ifdef USE_DDR
#define BL31_BASE BRCM_AP_TZC_DRAM1_BASE
#define BL31_LIMIT (BRCM_AP_TZC_DRAM1_BASE + \
PLAT_BRCM_MAX_BL31_SIZE)
#else
/* Put BL3-1 at the end of external on-board SRAM connected as NOR flash */
#define BL31_BASE (NOR_BASE_ADDR + NOR_SIZE - \
PLAT_BRCM_MAX_BL31_SIZE)
#define BL31_LIMIT (NOR_BASE_ADDR + NOR_SIZE)
#endif
#define SECURE_DDR_END_ADDRESS BL31_LIMIT
#ifdef NEED_SCP_BL2
#define SCP_BL2_BASE BL31_BASE
#define PLAT_MAX_SCP_BL2_SIZE 0x9000
#define PLAT_SCP_COM_SHARED_MEM_BASE (CRMU_SHARED_SRAM_BASE)
/* dummy define */
#define PLAT_BRCM_MHU_BASE 0x0
#endif
#define SECONDARY_CPU_SPIN_BASE_ADDR BRCM_SHARED_RAM_BASE
/* Generic system timer counter frequency */
#ifndef SYSCNT_FREQ
#define SYSCNT_FREQ (125 * 1000 * 1000)
#endif
/*
* Enable the BL32 definitions only when OP-TEE OS is selected as the secure
* payload (BL32).
*/
#ifdef SPD_opteed
/*
* Reserved Memory Map : SHMEM & TZDRAM.
*
* +-------------------+ 0x8D000000
* |     SHMEM (NS)    | 16MB
* +--------+----------+ 0x8E000000
* |        |TEE_RAM(S)| 4MB
* + TZDRAM +----------+ 0x8E400000
* |        | TA_RAM(S)| 12MB
* +--------+----------+ 0x8F000000
* |  BL31 Binary (S)  | 192KB
* +-------------------+ 0x8F030000
*/
#define BL32_VA_SIZE (4 * 1024 * 1024)
#define BL32_BASE (0x8E000000)
#define BL32_LIMIT (BL32_BASE + BL32_VA_SIZE)
#define TSP_SEC_MEM_BASE BL32_BASE
#define TSP_SEC_MEM_SIZE BL32_VA_SIZE
#endif
#ifdef SPD_opteed
#define SECURE_DDR_BASE_ADDRESS BL32_BASE
#else
#define SECURE_DDR_BASE_ADDRESS BL31_BASE
#endif
/*******************************************************************************
* Platform specific page table and MMU setup constants
******************************************************************************/
#define MAX_XLAT_TABLES 7
#define PLAT_BRCM_MMAP_ENTRIES 10
#define MAX_MMAP_REGIONS (PLAT_BRCM_MMAP_ENTRIES + \
BRCM_BL_REGIONS)
#ifdef USE_DDR
#ifdef BL33_OVERRIDE_LOAD_ADDR
#define PLAT_BRCM_NS_IMAGE_OFFSET BL33_OVERRIDE_LOAD_ADDR
#else
/*
* BL3-3 image starting offset.
* Putting start of DRAM as of now.
*/
#define PLAT_BRCM_NS_IMAGE_OFFSET 0x80000000
#endif /* BL33_OVERRIDE_LOAD_ADDR */
#else
/*
* BL3-3 image starting offset.
* Putting start of external on-board SRAM as of now.
*/
#define PLAT_BRCM_NS_IMAGE_OFFSET NOR_BASE_ADDR
#endif /* USE_DDR */
/******************************************************************************
* Required platform porting definitions common to all BRCM platforms
*****************************************************************************/
#define MAX_IO_DEVICES 5
#define MAX_IO_HANDLES 6
#define PRIMARY_CPU 0
/* GIC Parameter */
#define PLAT_BRCM_GICD_BASE GIC500_BASE
#define PLAT_BRCM_GICR_BASE (GIC500_BASE + 0x200000)
/* Define secure interrupt as per Group here */
#define PLAT_BRCM_G1S_IRQ_PROPS(grp) \
INTR_PROP_DESC(BRCM_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, (grp), \
GIC_INTR_CFG_EDGE), \
INTR_PROP_DESC(BRCM_IRQ_SEC_SPI_0, GIC_HIGHEST_SEC_PRIORITY, (grp), \
GIC_INTR_CFG_EDGE)
#define PLAT_BRCM_G0_IRQ_PROPS(grp) \
INTR_PROP_DESC(BRCM_IRQ_SEC_SGI_0, PLAT_SDEI_NORMAL_PRI, (grp), \
GIC_INTR_CFG_EDGE)
/*
* CCN 502 related constants.
*/
#define PLAT_BRCM_CLUSTER_COUNT 4 /* Number of RN-F Masters */
#define PLAT_BRCM_CLUSTER_TO_CCN_ID_MAP CLUSTER0_NODE_ID, CLUSTER1_NODE_ID, CLUSTER2_NODE_ID, CLUSTER3_NODE_ID
#define CCN_SIZE 0x1000000
#define CLUSTER0_NODE_ID 1
#define CLUSTER1_NODE_ID 7
#define CLUSTER2_NODE_ID 9
#define CLUSTER3_NODE_ID 15
#endif
/*
* Copyright (c) 2016-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PLATFORM_SOTP_H
#define PLATFORM_SOTP_H
#define SOTP_DEVICE_SECURE_CFG0_ROW 17
#define SOTP_DEVICE_SECURE_CFG1_ROW 18
#define SOTP_DEVICE_SECURE_CFG2_ROW 19
#define SOTP_DEVICE_SECURE_CFG3_ROW 20
#define SOTP_BRCM_SOFTWARE_CFG0_ROW 21
#define SOTP_BRCM_SOFTWARE_CFG1_ROW 22
#define SOTP_BRCM_SOFTWARE_CFG2_ROW 23
#define SOTP_BRCM_SOFTWARE_CFG3_ROW 24
#define SOTP_CUSTOMER_ID_CFG0_ROW 25
#define SOTP_CUSTOMER_ID_CFG1_ROW 26
#define SOTP_CUSTOMER_ID_CFG2_ROW 27
#define SOTP_CUSTOMER_ID_CFG3_ROW 28
#define SOTP_CUSTOMER_DEV_CFG0_ROW 29
#define SOTP_CUSTOMER_DEV_CFG1_ROW 30
#define SOTP_CUSTOMER_DEV_CFG2_ROW 31
#define SOTP_CUSTOMER_DEV_CFG3_ROW 32
#define SOTP_DAUTH_ROW 33
#define SOTP_K_HMAC_ROW 45
#define SOTP_K_AES_ROW 57
#define SOTP_NVCOUNTER_ROW 69
#define SOTP_BRCM_CFG_ECC_ERROR_MASK 0x100000
#define SOTP_DAUTH_ECC_ERROR_MASK 0x800000
#define SOTP_K_HMAC_ECC_ERROR_MASK 0x1000000
#define SOTP_K_AES_ECC_ERROR_MASK 0x2000000
#endif
/*
* Copyright (c) 2017-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SCP_CMD_H
#define SCP_CMD_H
#include <stdint.h>
typedef struct {
int cmd;
int completed;
int ret;
} crmu_response_t;
#define SCP_CMD_MASK 0xffff
#define SCP_CMD_DEFAULT_TIMEOUT_US 1000
#define SCP_CMD_SCP_BOOT_TIMEOUT_US 5000
int scp_send_cmd(uint32_t cmd, uint32_t param, uint32_t timeout);
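/*
* Illustrative usage (not part of this change): callers pass one of the
* timeout values above, e.g.
* ret = scp_send_cmd(cmd, param, SCP_CMD_SCP_BOOT_TIMEOUT_US);
* when waiting for the SCP to come up after boot.
*/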
#endif
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SCP_UTILS_H
#define SCP_UTILS_H
#include <common/bl_common.h>
#include <lib/mmio.h>
#include <m0_cfg.h>
int plat_bcm_bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info);
bool is_crmu_alive(void);
bool bcm_scp_issue_sys_reset(void);
#define SCP_READ_CFG(cfg) mmio_read_32(CRMU_CFG_BASE + \
offsetof(M0CFG, cfg))
#define SCP_WRITE_CFG(cfg, value) mmio_write_32(CRMU_CFG_BASE + \
offsetof(M0CFG, cfg), value)
#define SCP_READ_CFG16(cfg) mmio_read_16(CRMU_CFG_BASE + \
offsetof(M0CFG, cfg))
#define SCP_WRITE_CFG16(cfg, value) mmio_write_16(CRMU_CFG_BASE + \
offsetof(M0CFG, cfg), value)
#define SCP_READ_CFG8(cfg) mmio_read_8(CRMU_CFG_BASE + \
offsetof(M0CFG, cfg))
#define SCP_WRITE_CFG8(cfg, value) mmio_write_8(CRMU_CFG_BASE + \
offsetof(M0CFG, cfg), value)
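/*
* Illustrative usage (the field name below is hypothetical; real fields come
* from the M0CFG layout in m0_cfg.h):
* uint32_t val = SCP_READ_CFG(some_field);
* SCP_WRITE_CFG(some_field, val | BIT(0));
* i.e. each macro resolves a structure member of M0CFG to an MMIO access at
* CRMU_CFG_BASE + offsetof(M0CFG, member).
*/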
#endif
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SDIO_H
#define SDIO_H
#include <stdbool.h>
#define SR_IPROC_SDIO0_CFG_BASE 0x689006e4
#define SR_IPROC_SDIO0_SID_BASE 0x68900b00
#define SR_IPROC_SDIO0_PAD_BASE 0x68a4017c
#define SR_IPROC_SDIO0_IOCTRL_BASE 0x68e02408
#define SR_IPROC_SDIO1_CFG_BASE 0x68900734
#define SR_IPROC_SDIO1_SID_BASE 0x68900b08
#define SR_IPROC_SDIO1_PAD_BASE 0x68a401b4
#define SR_IPROC_SDIO1_IOCTRL_BASE 0x68e03408
#define NS3Z_IPROC_SDIO0_CFG_BASE 0x68a20540
#define NS3Z_IPROC_SDIO0_SID_BASE 0x68900b00
#define NS3Z_IPROC_SDIO0_TP_OUT_SEL 0x68a20308
#define NS3Z_IPROC_SDIO0_PAD_BASE 0x68a20500
#define NS3Z_IPROC_SDIO0_IOCTRL_BASE 0x68e02408
#define PHY_BYPASS BIT(14)
#define LEGACY_EN BIT(31)
#define PHY_DISABLE (LEGACY_EN | PHY_BYPASS)
#define NS3Z_IPROC_SDIO1_CFG_BASE 0x68a30540
#define NS3Z_IPROC_SDIO1_SID_BASE 0x68900b08
#define NS3Z_IPROC_SDIO1_PAD_BASE 0x68a30500
#define NS3Z_IPROC_SDIO1_IOCTRL_BASE 0x68e03408
#define ICFG_SDIO_CAP0 0x10
#define ICFG_SDIO_CAP1 0x14
#define ICFG_SDIO_STRAPSTATUS_0 0x0
#define ICFG_SDIO_STRAPSTATUS_1 0x4
#define ICFG_SDIO_STRAPSTATUS_2 0x8
#define ICFG_SDIO_STRAPSTATUS_3 0xc
#define ICFG_SDIO_STRAPSTATUS_4 0x18
#define ICFG_SDIO_SID_ARADDR 0x0
#define ICFG_SDIO_SID_AWADDR 0x4
#define ICFG_SDIOx_CAP0__SLOT_TYPE_MASK 0x3
#define ICFG_SDIOx_CAP0__SLOT_TYPE_SHIFT 27
#define ICFG_SDIOx_CAP0__INT_MODE_SHIFT 26
#define ICFG_SDIOx_CAP0__SYS_BUS_64BIT_SHIFT 25
#define ICFG_SDIOx_CAP0__VOLTAGE_1P8V_SHIFT 24
#define ICFG_SDIOx_CAP0__VOLTAGE_3P0V_SHIFT 23
#define ICFG_SDIOx_CAP0__VOLTAGE_3P3V_SHIFT 22
#define ICFG_SDIOx_CAP0__SUSPEND_RESUME_SHIFT 21
#define ICFG_SDIOx_CAP0__SDMA_SHIFT 20
#define ICFG_SDIOx_CAP0__HIGH_SPEED_SHIFT 19
#define ICFG_SDIOx_CAP0__ADMA2_SHIFT 18
#define ICFG_SDIOx_CAP0__EXTENDED_MEDIA_SHIFT 17
#define ICFG_SDIOx_CAP0__MAX_BLOCK_LEN_MASK 0x3
#define ICFG_SDIOx_CAP0__MAX_BLOCK_LEN_SHIFT 15
#define ICFG_SDIOx_CAP0__BASE_CLK_FREQ_MASK 0xff
#define ICFG_SDIOx_CAP0__BASE_CLK_FREQ_SHIFT 7
#define ICFG_SDIOx_CAP0__TIMEOUT_UNIT_SHIFT 6
#define ICFG_SDIOx_CAP0__TIMEOUT_CLK_FREQ_MASK 0x3f
#define ICFG_SDIOx_CAP0__TIMEOUT_CLK_FREQ_SHIFT 0
#define ICFG_SDIOx_CAP1__SPI_BLOCK_MODE_SHIFT 22
#define ICFG_SDIOx_CAP1__SPI_MODE_SHIFT 21
#define ICFG_SDIOx_CAP1__CLK_MULT_MASK 0xff
#define ICFG_SDIOx_CAP1__CLK_MULT_SHIFT 13
#define ICFG_SDIOx_CAP1__RETUNING_MODE_MASK 0x3
#define ICFG_SDIOx_CAP1__RETUNING_MODE_SHIFT 11
#define ICFG_SDIOx_CAP1__TUNE_SDR50_SHIFT 10
#define ICFG_SDIOx_CAP1__TIME_RETUNE_MASK 0xf
#define ICFG_SDIOx_CAP1__TIME_RETUNE_SHIFT 6
#define ICFG_SDIOx_CAP1__DRIVER_D_SHIFT 5
#define ICFG_SDIOx_CAP1__DRIVER_C_SHIFT 4
#define ICFG_SDIOx_CAP1__DRIVER_A_SHIFT 3
#define ICFG_SDIOx_CAP1__DDR50_SHIFT 2
#define ICFG_SDIOx_CAP1__SDR104_SHIFT 1
#define ICFG_SDIOx_CAP1__SDR50_SHIFT 0
#ifdef USE_DDR
#define SDIO_DMA 1
#else
#define SDIO_DMA 0
#endif
#define SDIO0_CAP0_CFG \
(0x1 << ICFG_SDIOx_CAP0__SLOT_TYPE_SHIFT) \
| (0x0 << ICFG_SDIOx_CAP0__INT_MODE_SHIFT) \
| (0x0 << ICFG_SDIOx_CAP0__SYS_BUS_64BIT_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__VOLTAGE_1P8V_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__VOLTAGE_3P0V_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__VOLTAGE_3P3V_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__SUSPEND_RESUME_SHIFT) \
| (SDIO_DMA << ICFG_SDIOx_CAP0__SDMA_SHIFT) \
| (SDIO_DMA << ICFG_SDIOx_CAP0__ADMA2_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__HIGH_SPEED_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__EXTENDED_MEDIA_SHIFT) \
| (0x2 << ICFG_SDIOx_CAP0__MAX_BLOCK_LEN_SHIFT) \
| (0xc8 << ICFG_SDIOx_CAP0__BASE_CLK_FREQ_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__TIMEOUT_UNIT_SHIFT) \
| (0x30 << ICFG_SDIOx_CAP0__TIMEOUT_CLK_FREQ_SHIFT)
#define SDIO0_CAP1_CFG \
(0x1 << ICFG_SDIOx_CAP1__SPI_BLOCK_MODE_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__SPI_MODE_SHIFT)\
| (0x0 << ICFG_SDIOx_CAP1__CLK_MULT_SHIFT)\
| (0x2 << ICFG_SDIOx_CAP1__RETUNING_MODE_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__TUNE_SDR50_SHIFT)\
| (0x0 << ICFG_SDIOx_CAP1__DRIVER_D_SHIFT)\
| (0x0 << ICFG_SDIOx_CAP1__DRIVER_C_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__DRIVER_A_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__DDR50_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__SDR104_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__SDR50_SHIFT)
#define SDIO1_CAP0_CFG \
(0x0 << ICFG_SDIOx_CAP0__SLOT_TYPE_SHIFT) \
| (0x0 << ICFG_SDIOx_CAP0__INT_MODE_SHIFT) \
| (0x0 << ICFG_SDIOx_CAP0__SYS_BUS_64BIT_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__VOLTAGE_1P8V_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__VOLTAGE_3P0V_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__VOLTAGE_3P3V_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__SUSPEND_RESUME_SHIFT) \
| (SDIO_DMA << ICFG_SDIOx_CAP0__SDMA_SHIFT) \
| (SDIO_DMA << ICFG_SDIOx_CAP0__ADMA2_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__HIGH_SPEED_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__EXTENDED_MEDIA_SHIFT) \
| (0x2 << ICFG_SDIOx_CAP0__MAX_BLOCK_LEN_SHIFT) \
| (0xc8 << ICFG_SDIOx_CAP0__BASE_CLK_FREQ_SHIFT) \
| (0x1 << ICFG_SDIOx_CAP0__TIMEOUT_UNIT_SHIFT) \
| (0x30 << ICFG_SDIOx_CAP0__TIMEOUT_CLK_FREQ_SHIFT)
#define SDIO1_CAP1_CFG \
(0x1 << ICFG_SDIOx_CAP1__SPI_BLOCK_MODE_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__SPI_MODE_SHIFT)\
| (0x0 << ICFG_SDIOx_CAP1__CLK_MULT_SHIFT)\
| (0x2 << ICFG_SDIOx_CAP1__RETUNING_MODE_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__TUNE_SDR50_SHIFT)\
| (0x0 << ICFG_SDIOx_CAP1__DRIVER_D_SHIFT)\
| (0x0 << ICFG_SDIOx_CAP1__DRIVER_C_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__DRIVER_A_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__DDR50_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__SDR104_SHIFT)\
| (0x1 << ICFG_SDIOx_CAP1__SDR50_SHIFT)
#define PAD_SDIO_CLK 0x4
#define PAD_SDIO_DATA0 0x8
#define PAD_SDIO_DATA1 0xc
#define PAD_SDIO_DATA2 0x10
#define PAD_SDIO_DATA3 0x14
#define PAD_SDIO_DATA4 0x18
#define PAD_SDIO_DATA5 0x1c
#define PAD_SDIO_DATA6 0x20
#define PAD_SDIO_DATA7 0x24
#define PAD_SDIO_CMD 0x28
/* 12mA drive strength */
#define PAD_SDIO_SELX (0x5 << 1)
#define PAD_SDIO_SRC (1 << 0)
#define PAD_SDIO_MASK (0xF << 0)
#define PAD_SDIO_VALUE (PAD_SDIO_SELX | PAD_SDIO_SRC)
/*
* SDIO_PRESETVAL0
*
* Each 13-bit field consists of:
* drivestrength - 12:11
* clkgensel - b10
* sdkclkfreqsel - 9:0
* Field Bit(s) Description
* ============================================================
* SDR25_PRESET 25:13 Preset Value for SDR25
* SDR50_PRESET 12:0 Preset Value for SDR50
*/
#define SDIO_PRESETVAL0 0x01005001
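/*
* Worked decode of the value above, using the field layout just described:
* 0x01005001 -> SDR25_PRESET (bits 25:13) = 0x802 and SDR50_PRESET
* (bits 12:0) = 0x1001, i.e. SDR25 uses drivestrength 1 / sdkclkfreqsel 2
* and SDR50 uses drivestrength 2 / sdkclkfreqsel 1, with clkgensel 0 in
* both cases.
*/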
/*
* SDIO_PRESETVAL1
*
* Each 13-bit field consists of:
* drivestrength - 12:11
* clkgensel - b10
* sdkclkfreqsel - 9:0
* Field Bit(s) Description
* ============================================================
* SDR104_PRESET 25:13 Preset Value for SDR104
* SDR12_PRESET 12:0 Preset Value for SDR12
*/
#define SDIO_PRESETVAL1 0x03000004
/*
* SDIO_PRESETVAL2
*
* Each 13-bit field consists of:
* drivestrength - 12:11
* clkgensel - b10
* sdkclkfreqsel - 9:0
* Field Bit(s) Description
* ============================================================
* HIGH_SPEED_PRESET 25:13 Preset Value for High Speed
* INIT_PRESET 12:0 Preset Value for Initialization
*/
#define SDIO_PRESETVAL2 0x010040FA
/*
* SDIO_PRESETVAL3
*
* Each 13-bit field consists of:
* drivestrength - 12:11
* clkgensel - b10
* sdkclkfreqsel - 9:0
* Field Bit(s) Description
* ============================================================
* DDR50_PRESET 25:13 Preset Value for DDR50
* DEFAULT_PRESET 12:0 Preset Value for Default Speed
*/
#define SDIO_PRESETVAL3 0x01004004
/*
* SDIO_PRESETVAL4
*
* Field Bit(s) Description
* ============================================================
* FORCE_USE_IP_TUNE_CLK 30 Force use IP clock
* TUNING_COUNT 29:24 Tuning count
* OVERRIDE_1P8V 23:16
* OVERRIDE_3P3V 15:8
* OVERRIDE_3P0V 7:0
*/
#define SDIO_PRESETVAL4 0x20010101
#define SDIO_SID_SHIFT 5
typedef struct {
uintptr_t cfg_base;
uintptr_t sid_base;
uintptr_t io_ctrl_base;
uintptr_t pad_base;
} SDIO_CFG;
void brcm_stingray_sdio_init(void);
#endif /* SDIO_H */
/*
* Copyright (c) 2016-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SR_DEF_H
#define SR_DEF_H
#ifndef __ASSEMBLER__
#include <lib/mmio.h>
#endif
#include <common/interrupt_props.h>
#include <drivers/arm/gic_common.h>
#include <crmu_def.h>
/* Special value used to verify platform parameters from BL2 to BL3-1 */
#define BRCM_BL31_PLAT_PARAM_VAL ULL(0x0f1e2d3c4b5a6978)
#define MHB_BASE_ADDR 0x60000000
#define PLAT_BRCM_CCN_BASE 0x61000000
#define CORESIGHT_BASE_ADDR 0x62000000
#define SMMU_BASE 0x64000000
/* Memory map entries */
/* Peripheral blocks are grouped into larger MMU regions */
/* Covers MHB, CCN, CoreSight, GIC, MMU, APB, CRMU */
#define PERIPH0_BASE MHB_BASE_ADDR
#define PERIPH0_SIZE 0x06d00000
#define PERIPH1_BASE 0x66d80000
#define PERIPH1_SIZE 0x00f80000
#define HSLS_BASE_ADDR 0x68900000
#define HSLS_SIZE 0x04500000
#define GIC500_BASE 0x63c00000
#define GIC500_SIZE 0x400000
/*******************************************************************************
* CCN related constants
******************************************************************************/
#define OLY_MN_REGISTERS_NODE0_SECURE_ACCESS (PLAT_BRCM_CCN_BASE + 0x0)
#define OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL (PLAT_BRCM_CCN_BASE + 0x880500)
/* Used for acceleration of coherent ordered writes */
#define OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL_WUO BIT(4)
/* Wait for completion of requests at RN-I */
#define OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL_WFC BIT(3)
/*
* Forces all reads from the RN-I to be sent with the request order bit set,
* which ensures ordered allocation of read data buffers in the RN-I
*/
#define OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL_RQO BIT(5)
#define OLY_RNI3PDVM_REGISTERS_NODE14_AUX_CTL (PLAT_BRCM_CCN_BASE + 0x8e0500)
/* Wait for completion of requests at RN-I */
#define OLY_RNI3PDVM_REGISTERS_NODE14_AUX_CTL_WFC BIT(3)
#define OLY_HNI_REGISTERS_NODE0_POS_CONTROL (PLAT_BRCM_CCN_BASE + 0x80000)
#define POS_CONTROL_HNI_POS_EN BIT(0)
#define OLY_HNI_REGISTERS_NODE0_PCIERC_RNI_NODEID_LIST \
(PLAT_BRCM_CCN_BASE + 0x80008)
/* PAXB and PAXC connected to 8th Node */
#define SR_RNI_PCIE_CONNECTED BIT(8)
/* PAXB connected to 6th Node */
#define SRP_RNI_PCIE_CONNECTED BIT(6)
#define OLY_HNI_REGISTERS_NODE0_SA_AUX_CTL (PLAT_BRCM_CCN_BASE + 0x80500)
#define SA_AUX_CTL_POS_EARLY_WR_COMP_EN BIT(5)
#define SA_AUX_CTL_SER_DEVNE_WR BIT(9)
/*******************************************************************************
* Coresight related constants
******************************************************************************/
#define CORESIGHT_BASE_ADDR 0x62000000
#define IHOST0_BASE 0x66000000
#define IHOST_ADDR_SPACE 0x2000
/*******************************************************************************
* SCR related constants
******************************************************************************/
#define SCR_BASE 0x6600a000
#define SCR_ARCACHE_OFFSET 4
#define SCR_ARCACHE_MASK (0x3 << SCR_ARCACHE_OFFSET)
#define SCR_AWCACHE_OFFSET 6
#define SCR_AWCACHE_MASK (0x3 << SCR_AWCACHE_OFFSET)
#define SCR_AXCACHE_CONFIG_MASK (SCR_ARCACHE_MASK | SCR_AWCACHE_MASK)
#define SCR_TBUX_AXCACHE_CONFIG ((0x1 << SCR_AWCACHE_OFFSET) | \
(0x1 << SCR_ARCACHE_OFFSET))
#define SCR_REGS_SCR_SOFT_RESET (SCR_BASE + 0x1c)
#define SCR_REGS_GIC_SOFT_RESET BIT(0)
#define SCR_GPV_BASE 0x66100000
#define SCR_NOC_SECURITY0 (SCR_GPV_BASE + 0x08)
#define SCR_NOC_DDR_REGISTER_ACCESS (SCR_GPV_BASE + 0x30)
/*******************************************************************************
* MEMC and DDR related constants
******************************************************************************/
#define DDR0_CONTROL_ROOT 0x66200000
#define EMEM_SS_CFG_0_ROOT 0x66202000
#define EMEM_SYS_IF_0_ROOT 0x66204000
#define DDR_PHY0_ROOT 0x66240000
#define DDR1_CONTROL_ROOT 0x66280000
#define EMEM_SS_CFG_1_ROOT 0x66282000
#define EMEM_SYS_IF_1_ROOT 0x66284000
#define DDR_PHY1_ROOT 0x662c0000
#define DDR2_CONTROL_ROOT 0x66300000
#define EMEM_SS_CFG_2_ROOT 0x66302000
#define EMEM_SYS_IF_2_ROOT 0x66304000
#define DDR_PHY2_ROOT 0x66340000
/*******************************************************************************
* TZC400 related constants
******************************************************************************/
#define TZC_400_BASE 0x66d84000
/*******************************************************************************
* FS4 related constants
******************************************************************************/
#define FS4_SRAM_IDM_IO_CONTROL_DIRECT 0x66d8a408
#define FS4_CRYPTO_IDM_IO_CONTROL_DIRECT 0x66d8e408
#define FS4_CRYPTO_IDM_RESET_CONTROL 0x66d8e800
#define FS4_CRYPTO_BASE 0x67000000
#define FS4_CRYPTO_DME_BASE (FS4_CRYPTO_BASE + 0x280000)
#define FS4_RAID_IDM_IO_CONTROL_DIRECT 0x66d8f408
#define FS4_RAID_IDM_IO_STATUS 0x66d8f500
#define FS4_RAID_IDM_RESET_CONTROL 0x66d8f800
#define FS4_RAID_BASE 0x67400000
#define FS4_RAID_DME_BASE (FS4_RAID_BASE + 0x280000)
#define FS4_CRYPTO_GPV_BASE 0x67300000
#define FS4_RAID_GPV_BASE 0x67700000
#define FS6_PKI_BASE 0x67400000
#define FS6_PKI_DME_BASE 0x66D90000
#define TZC400_FS_SRAM_ROOT 0x66d84000
#define GATE_KEEPER_OFFSET 0x8
#define REGION_ATTRIBUTES_0_OFFSET 0x110
#define REGION_ID_ACCESS_0_OFFSET 0x114
#define NIC400_FS_NOC_ROOT 0x66e00000
#define NIC400_FS_NOC_SECURITY2_OFFSET 0x10
#define NIC400_FS_NOC_SECURITY4_OFFSET 0x18
#define NIC400_FS_NOC_SECURITY7_OFFSET 0x24
/*******************************************************************************
* SATA PHY related constants
******************************************************************************/
#define SATA_BASE 0x67d00000
/*******************************************************************************
* USB related constants
******************************************************************************/
#define USB_BASE 0x68500000
#define USB_SIZE 0x00400000
#define XHC_BASE (USB_BASE + 0x11000)
#define MAX_USB_PORTS 3
/*******************************************************************************
* HSLS related constants
******************************************************************************/
#define IPROC_ROOT 0x68900000
#define HSLS_ICFG_REGS_BASE IPROC_ROOT
#define HSLS_IDM_REGS_BASE 0x68e00000
#define HSLS_MODE_SEL_CONTROL 0x68a40000
#define HSLS_TZPC_BASE 0x68b40000
#define HSLS_GPV_BASE 0x6cd00000
/*******************************************************************************
* Chip ID related constants
******************************************************************************/
#define ICFG_CHIP_ID HSLS_ICFG_REGS_BASE
#define CHIP_ID_SR 0xd730
#define CHIP_ID_NS3Z 0xe56d
#define CHIP_ID_MASK 0xf000
#define ICFG_CHIP_REVISION_ID (HSLS_ICFG_REGS_BASE + 0x4)
#define PLAT_CHIP_ID_GET (mmio_read_32(ICFG_CHIP_ID))
#define PLAT_CHIP_REV_GET (mmio_read_32(ICFG_CHIP_REVISION_ID))
/*******************************************************************************
* Timers related constants
******************************************************************************/
/* ChipcommonG_tim0_TIM_TIMER1Load 0x68930000 */
#define SP804_TIMER0_BASE 0x68930000
#define SP804_TIMER1_BASE 0x68940000
#define SP804_TIMER0_TIMER_VAL_REG_OFFSET 0x4
#define SP804_TIMER0_CLKMULT 2
#define SP804_TIMER0_CLKDIV 25
/*******************************************************************************
* GPIO related constants
******************************************************************************/
#define IPROC_GPIO_NS_BASE 0x689d0000
#define IPROC_GPIO_S_BASE 0x68b00000
#define IPROC_GPIO_NR 151
#define GPIO_S_CNTRL_REG 0x68b60000
/*******************************************************************************
* I2C SMBUS related constants
******************************************************************************/
#define SMBUS0_REGS_BASE 0x689b0000
#define SMBUS1_REGS_BASE 0x689e0000
/*******************************************************************************
* UART related constants
******************************************************************************/
#define ChipcommonG_UART0_UART_RBR_THR_DLL 0x68a00000
#define ChipcommonG_UART1_UART_RBR_THR_DLL 0x68a10000
#define ChipcommonG_UART2_UART_RBR_THR_DLL 0x68a20000
#define ChipcommonG_UART3_UART_RBR_THR_DLL 0x68a30000
#define UART0_BASE_ADDR ChipcommonG_UART0_UART_RBR_THR_DLL
#define UART1_BASE_ADDR ChipcommonG_UART1_UART_RBR_THR_DLL
#define UART2_BASE_ADDR ChipcommonG_UART2_UART_RBR_THR_DLL
#define UART3_BASE_ADDR ChipcommonG_UART3_UART_RBR_THR_DLL
#define UART_SPR_OFFSET 0x1c /* Scratch Pad Register */
#define LOG_LEVEL_REGISTER CRMU_SPARE_REG_3
#define GET_LOG_LEVEL() (mmio_read_32(LOG_LEVEL_REGISTER))
#define SET_LOG_LEVEL(x) (mmio_write_32(LOG_LEVEL_REGISTER, x))
#define IO_RETRY_REGISTER CRMU_SPARE_REG_4
#define DWC_UART_REFCLK (25 * 1000 * 1000)
#define DWC_UART_REFCLK_DIV 16
/* Baud rate in emulation will vary based on setting of 25MHz SCLK */
#define DWC_UART_BAUDRATE 115200
#define BRCM_CRASH_CONSOLE_BASE UART1_BASE_ADDR
#define BRCM_CRASH_CONSOLE_REFCLK DWC_UART_REFCLK
#define BRCM_CRASH_CONSOLE_BAUDRATE DWC_UART_BAUDRATE
#ifdef BOARD_CONSOLE_UART
#define PLAT_BRCM_BOOT_UART_BASE BOARD_CONSOLE_UART
#else
#define PLAT_BRCM_BOOT_UART_BASE UART1_BASE_ADDR
#endif
#define CONSOLE_UART_ID ((PLAT_BRCM_BOOT_UART_BASE >> 16) & 0x3)
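/*
* Worked example: with the default PLAT_BRCM_BOOT_UART_BASE of UART1
* (0x68a10000), CONSOLE_UART_ID evaluates to (0x68a1 & 0x3) = 1.
*/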
#define PLAT_BRCM_BOOT_UART_CLK_IN_HZ DWC_UART_REFCLK
#define BRCM_CONSOLE_BAUDRATE DWC_UART_BAUDRATE
#define PLAT_BRCM_BL31_RUN_UART_BASE PLAT_BRCM_BOOT_UART_BASE
#define PLAT_BRCM_BL31_RUN_UART_CLK_IN_HZ PLAT_BRCM_BOOT_UART_CLK_IN_HZ
/*******************************************************************************
* IOMUX related constants
******************************************************************************/
#define HSLS_IOPAD_BASE HSLS_MODE_SEL_CONTROL
#define MODE_SEL_CONTROL_FSEL_MASK 0x7
#define MODE_SEL_CONTROL_FSEL_MODE0 0x0
#define MODE_SEL_CONTROL_FSEL_MODE1 0x1
#define MODE_SEL_CONTROL_FSEL_MODE2 0x2
#define MODE_SEL_CONTROL_FSEL_MODE3 0x3
#define MODE_SEL_CONTROL_FSEL_DEBUG 0x4
#define IPROC_IOPAD_MODE_BASE (HSLS_MODE_SEL_CONTROL + 0x29c)
#define UART0_SIN_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x4a8)
#define UART0_SOUT_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x4ac)
#define UART1_SIN_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3b8)
#define UART1_SOUT_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3bc)
#define UARTx_SIN_MODE_SEL_CONTROL_FSEL 0
#define UARTx_SOUT_MODE_SEL_CONTROL_FSEL 0
/*******************************************************************************
* PKA constants
******************************************************************************/
#define ICFG_PKA_MEM_PWR_CTRL (HSLS_ICFG_REGS_BASE + 0xac0)
#define ICFG_PKA_MEM_PWR_CTRL__POWERONIN BIT(0)
#define ICFG_PKA_MEM_PWR_CTRL__POWEROKIN BIT(1)
#define ICFG_PKA_MEM_PWR_CTRL__ARRPOWERONIN BIT(2)
#define ICFG_PKA_MEM_PWR_CTRL__ARRPOWEROKIN BIT(3)
#define ICFG_PKA_MEM_PWR_CTRL__POWERONOUT BIT(4)
#define ICFG_PKA_MEM_PWR_CTRL__POWEROKOUT BIT(5)
#define ICFG_PKA_MEM_PWR_CTRL__ARRPOWERONOUT BIT(6)
#define ICFG_PKA_MEM_PWR_CTRL__ARRPOWEROKOUT BIT(7)
#define ICFG_PKA_MEM_PWR_CTRL__ISO BIT(8)
/*******************************************************************************
* Trusted Watchdog constants
******************************************************************************/
#define ARM_SP805_TWDG_BASE 0x68b30000
#define ARM_SP805_TWDG_CLK_HZ ((25 * 1000 * 1000) / 2)
/*
* The TBBR document specifies a watchdog timeout of 256 seconds. SP805
* asserts reset after two consecutive countdowns (2 x 128 = 256 sec)
*/
#define ARM_TWDG_TIMEOUT_SEC 128
#define ARM_TWDG_LOAD_VAL (ARM_SP805_TWDG_CLK_HZ * \
ARM_TWDG_TIMEOUT_SEC)
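/*
* With the 12.5MHz watchdog clock above, this load value works out to
* 12500000 * 128 = 1600000000 ticks, which still fits in the SP805's
* 32-bit load register.
*/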
/*******************************************************************************
* SOTP related constants
******************************************************************************/
#define SOTP_REGS_OTP_BASE 0x68b50000
#define SOTP_CHIP_CTRL (SOTP_REGS_OTP_BASE + 0x4c)
#define SOTP_CLEAR_SYSCTRL_ALL_MASTER_NS 0
/*******************************************************************************
* DMAC/PL330 related constants
******************************************************************************/
#define DMAC_M0_IDM_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x408)
#define BOOT_MANAGER_NS BIT(25)
#define DMAC_M0_IDM_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0x800)
#define ICFG_DMAC_CONFIG_0 (HSLS_ICFG_REGS_BASE + 0x190)
#define ICFG_DMAC_CONFIG_1 (HSLS_ICFG_REGS_BASE + 0x194)
#define ICFG_DMAC_CONFIG_2 (HSLS_ICFG_REGS_BASE + 0x198)
#define BOOT_PERIPHERAL_NS 0xffffffff
#define ICFG_DMAC_CONFIG_3 (HSLS_ICFG_REGS_BASE + 0x19c)
#define BOOT_IRQ_NS 0x0000ffff
#define ICFG_DMAC_SID_ARADDR_CONTROL (HSLS_ICFG_REGS_BASE + 0xaf0)
#define ICFG_DMAC_SID_AWADDR_CONTROL (HSLS_ICFG_REGS_BASE + 0xaf4)
#define ICFG_DMAC_MEM_PWR_CTRL__POWERONIN BIT(0)
#define ICFG_DMAC_MEM_PWR_CTRL__POWEROKIN BIT(1)
#define ICFG_DMAC_MEM_PWR_CTRL__ARRPOWERONIN BIT(2)
#define ICFG_DMAC_MEM_PWR_CTRL__ARRPOWEROKIN BIT(3)
#define ICFG_DMAC_MEM_PWR_CTRL__POWERONOUT BIT(4)
#define ICFG_DMAC_MEM_PWR_CTRL__POWEROKOUT BIT(5)
#define ICFG_DMAC_MEM_PWR_CTRL__ARRPOWERONOUT BIT(6)
#define ICFG_DMAC_MEM_PWR_CTRL__ARRPOWEROKOUT BIT(7)
#define ICFG_DMAC_MEM_PWR_CTRL__ISO BIT(8)
#define ICFG_DMAC_MEM_PWR_CTRL (HSLS_ICFG_REGS_BASE + 0xadc)
/*******************************************************************************
* PNOR related constants
******************************************************************************/
#define PNOR_ICFG_BASE (HSLS_ICFG_REGS_BASE + 0x780)
#define PNOR_ICFG_CS_0 PNOR_ICFG_BASE
#define PNOR_ICFG_CS_1 (PNOR_ICFG_BASE + 0x4)
#define PNOR_ICFG_CS_2 (PNOR_ICFG_BASE + 0x8)
#define PNOR_ICFG_CS_x_MASK0_MASK 0xff
#define PNOR_ICFG_CS_x_MASK0_SHIFT 8
#define PNOR_ICFG_CS_x_MATCH0_MASK 0xff
#define PNOR_ICFG_CS_x_MATCH0_SHIFT 0
#define PNOR_IDM_BASE (HSLS_IDM_REGS_BASE + 0xb000)
#define PNOR_IDM_IO_CONTROL_DIRECT (PNOR_IDM_BASE + 0x408)
#define PNOR_IDM_IO_RESET_CONTROL (PNOR_IDM_BASE + 0x800)
#define PNOR_REG_BASE 0x68c50000
#define PNOR_REG_DIRECT_CMD (PNOR_REG_BASE + 0x010)
#define PNOR_REG_SET_CYCLES (PNOR_REG_BASE + 0x014)
#define PNOR_REG_SET_OPMODE (PNOR_REG_BASE + 0x018)
#define PNOR_REG_REFRESH_0 (PNOR_REG_BASE + 0x020)
#define PNOR_REG_PERIPH_ID0 (PNOR_REG_BASE + 0xfe0)
#define PNOR_REG_PERIPH_ID1 (PNOR_REG_BASE + 0xfe4)
#define PNOR_REG_PERIPH_ID2 (PNOR_REG_BASE + 0xfe8)
#define PNOR_REG_PERIPH_ID3 (PNOR_REG_BASE + 0xfec)
#define PNOR_REG_PERIPH_IDx_MASK 0xff
/*******************************************************************************
* NAND related constants
******************************************************************************/
#define NAND_FLASH_REVISION 0x68c60000
#define NAND_IDM_IDM_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0xa408)
#define NAND_IDM_IDM_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0xa800)
/*******************************************************************************
* eMMC related constants
******************************************************************************/
#define PLAT_SD_MAX_READ_LENGTH 0x400
#define SDIO0_EMMCSDXC_SYSADDR 0x68cf1000
#define SDIO_IDM0_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x2408)
#define SDIO_IDM1_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x3408)
#define SDIO_IDM0_IDM_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0x2800)
#define ICFG_SDIO0_BASE (HSLS_ICFG_REGS_BASE + 0x6e4)
#define ICFG_SDIO1_BASE (HSLS_ICFG_REGS_BASE + 0x734)
#define ICFG_SDIO0_CAP0 (ICFG_SDIO0_BASE + 0x10)
#define ICFG_SDIO0_CAP1 (ICFG_SDIO0_BASE + 0x14)
#define ICFG_SDIO0_SID (HSLS_ICFG_REGS_BASE + 0xb00)
#define ICFG_SDIO1_SID (HSLS_ICFG_REGS_BASE + 0xb08)
/*******************************************************************************
* Bootstrap related constants
******************************************************************************/
#define ROM_S0_IDM_IO_STATUS (HSLS_IDM_REGS_BASE + 0x9500)
/*******************************************************************************
* ROM related constants
******************************************************************************/
#define ROM_BASE_ADDR 0x6ce00000
#define ROM_VERSION_STRING_ADDR (ROM_BASE_ADDR + 0x28000)
#define ROM_BUILD_MESSAGE_ADDR (ROM_BASE_ADDR + 0x28018)
/*******************************************************************************
* Boot source peripheral related constants
******************************************************************************/
#define QSPI_CTRL_BASE_ADDR 0x68c70000
#define QSPI_BASE_ADDR 0x70000000
#define QSPI_SIZE 0x08000000
#define NOR_BASE_ADDR 0x74000000
#define NOR_SIZE 0x04000000
#define NAND_BASE_ADDR 0x78000000
#define NAND_SIZE 0x08000000
#define QSPI_IDM_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0xc800)
#define APBR_IDM_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0xe800)
#define APBS_IDM_IDM_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0xf800)
#define APBX_IDM_IDM_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x10408)
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_CLK_ENABLE 0
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_WDOG_SCLK_SEL 2
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM0_SCLK_SEL 4
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM1_SCLK_SEL 6
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM2_SCLK_SEL 8
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM3_SCLK_SEL 10
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM4_SCLK_SEL 12
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM5_SCLK_SEL 13
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM6_SCLK_SEL 14
#define APBX_IDM_IDM_IO_CONTROL_DIRECT_TIM7_SCLK_SEL 15
#define APBY_IDM_IDM_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x11408)
#define APBY_IDM_IDM_IO_CONTROL_DIRECT_CLK_ENABLE 0
#define APBY_IDM_IDM_IO_CONTROL_DIRECT_UART0_SCLK_SEL 2
#define APBY_IDM_IDM_IO_CONTROL_DIRECT_UART1_SCLK_SEL 4
#define APBY_IDM_IDM_IO_CONTROL_DIRECT_UART2_SCLK_SEL 6
#define APBY_IDM_IDM_IO_CONTROL_DIRECT_UART3_SCLK_SEL 8
#define APBZ_IDM_IDM_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x12408)
#define APBZ_IDM_IDM_IO_CONTROL_DIRECT_CLK_ENABLE 0
#define APBZ_IDM_IDM_IO_CONTROL_DIRECT_WDOG_SCLK_SEL 2
/*******************************************************************************
* Stingray memory map related constants
******************************************************************************/
/* Shared memory carved from the end of Trusted SRAM (currently unused, size 0) */
#define BRCM_SHARED_RAM_SIZE 0x0
#define BRCM_SHARED_RAM_BASE (PLAT_BRCM_TRUSTED_SRAM_BASE + \
PLAT_BRCM_TRUSTED_SRAM_SIZE - \
BRCM_SHARED_RAM_SIZE)
/* Reserve 4 KB to store error logs in BL2 */
#define BCM_ELOG_BL2_SIZE 0x00001000
#define BCM_ELOG_BL2_BASE BL1_RW_LIMIT
/* The remaining Trusted SRAM is used to load the BL images */
#define BRCM_BL_RAM_BASE (PLAT_BRCM_TRUSTED_SRAM_BASE)
#define BRCM_BL_RAM_SIZE (PLAT_BRCM_TRUSTED_SRAM_SIZE - \
BRCM_SHARED_RAM_SIZE)
/* DDR Address where TMON temperature values are written */
#define TMON_SHARED_DDR_ADDRESS 0x8f100000
/* Reserve 4 kB to pass data to BL33 */
#define BL33_SHARED_DDR_BASE 0x8f102000
#define BL33_SHARED_DDR_SIZE 0x1000
/* Default AP error logging base addr */
#ifndef ELOG_AP_UART_LOG_BASE
#define ELOG_AP_UART_LOG_BASE 0x8f110000
#endif
/* Reserve 16 KB to store error logs in BL31 */
#define BCM_ELOG_BL31_BASE ELOG_AP_UART_LOG_BASE
#define BCM_ELOG_BL31_SIZE 0x4000
/*******************************************************************************
* Non-secure DDR Map
******************************************************************************/
#define BRCM_DRAM1_BASE ULL(0x80000000)
#define BRCM_DRAM1_SIZE ULL(0x10000000)
#define BRCM_DRAM2_BASE ULL(0x880000000)
#define BRCM_DRAM2_SIZE ULL(0x780000000)
#define BRCM_DRAM3_BASE ULL(0x8800000000)
#define BRCM_DRAM3_SIZE ULL(0x7800000000)
#define BRCM_SHARED_DRAM_BASE BL33_SHARED_DDR_BASE
#define BRCM_SHARED_DRAM_SIZE BL33_SHARED_DDR_SIZE
#define BRCM_EXT_SRAM_BASE ULL(0x74000000)
#define BRCM_EXT_SRAM_SIZE ULL(0x4000000)
/* Priority levels for platforms */
#define PLAT_RAS_PRI 0x10
#define PLAT_SDEI_CRITICAL_PRI 0x60
#define PLAT_SDEI_NORMAL_PRI 0x70
/* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3 */
#define BRCM_IRQ_SEC_SGI_0 14
#define BRCM_IRQ_SEC_SGI_1 15
/* RTC periodic interrupt */
#define BRCM_IRQ_SEC_SPI_0 49
/*
* Macros for local power states in SR platforms encoded by State-ID field
* within the power-state parameter.
*/
/* Local power state for power domains in Run state. */
#define PLAT_LOCAL_STATE_RUN 0
/* Local power state for retention. Valid only for CPU power domains */
#define PLAT_LOCAL_STATE_RET 1
/*
* Local power state for OFF/power-down. Valid for CPU and cluster power
* domains.
*/
#define PLAT_LOCAL_STATE_OFF 2
/*
* This macro defines the deepest retention state possible. A higher state
* id will represent an invalid or a power down state.
*/
#define PLAT_MAX_RET_STATE PLAT_LOCAL_STATE_RET
/*
* This macro defines the deepest power down states possible. Any state ID
* higher than this is invalid.
*/
#define PLAT_MAX_OFF_STATE PLAT_LOCAL_STATE_OFF
/* ChiMP-related constants */
#define NITRO_TZPC_TZPCDECPROT0clr 0x60c01808
#define NITRO_TZPC_TZPCDECPROT0clr__DECPROT0_chimp_m_clr_R 1
#define NIC400_NITRO_CHIMP_S_IDM_IO_CONTROL_DIRECT 0x60e00408
#define CHIMP_INDIRECT_ADDR_MASK 0x3fffff
#define CHIMP_INDIRECT_BASE 0x60800000
#define CHIMP_REG_ECO_RESERVED 0x3042400
#define CHIMP_FLASH_ACCESS_DONE_BIT 2
/* Indicates that FRU table programming completed successfully */
#define CHIMP_FRU_PROG_DONE_BIT 9
#define CHIMP_REG_CTRL_BPE_MODE_REG 0x0
#define CHIMP_REG_CTRL_BPE_STAT_REG 0x4
#define CHIMP_REG_CTRL_FSTBOOT_PTR_REG 0x8
#define CHIMP_REG_CHIMP_REG_CTRL_BPE_MODE_REG__cm3_rst_L 1
#define CHIMP_REG_CHIMP_REG_CTRL_BPE_MODE_REG__cm3_rst_R 1
#define CHIMP_REG_CTRL_BASE 0x3040000
#define CHIMP_FAST_BOOT_MODE_BIT 2
#define CHIMP_REG_CHIMP_APE_SCPAD 0x3300000
#define CHIMP_REG_CHIMP_SCPAD 0x3100000
/* Chimp health status offset in scratch pad ram */
#define CHIMP_HEALTH_STATUS_OFFSET 0x8
/*
* If not in NIC mode then FASTBOOT can be enabled.
* "Not in NIC mode" means that FORCE_FASTBOOT is set
* and a valid (1 or 2) fastboot type is specified.
*
* The following fastboot types are supported:
* 0 = No fastboot. Boots Nitro/ChiMP and lets ROM loader
* initialize ChiMP from NVRAM (QSPI).
*
* 1 = Jump in place (need a flat image)
* This is intended to speedup Nitro FW boot on Palladium,
* can be used with a real chip as well.
* 2 = Jump normally with decompression
* Modus operandi for a real chip. Works also on Palladium
* Note: image decompressing takes time on Palladium.
* 3 = No fastboot support. No ChiMP bringup
* (use only for AP debug or for ChiMP's deferred setup).
*/
#define CHIMP_FASTBOOT_JUMP_DECOMPRESS 2
#define CHIMP_FASTBOOT_JUMP_IN_PLACE 1
#define CHIMP_FASTBOOT_NITRO_RESET 0
/*
* Definitions for a non-Nitro access
* to QSPI PAD after the handshake
*/
#define QSPI_HOLD_N_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3e8)
#define QSPI_WP_N_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3ec)
#define QSPI_SCK_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3f0)
#define QSPI_CS_N_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3f4)
#define QSPI_MOSI_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3f8)
#define QSPI_MISO_MODE_SEL_CONTROL (HSLS_MODE_SEL_CONTROL + 0x3fc)
/*******************************************************************************
* Stream IDs for different blocks of SR
* block_id for different blocks is as follows:
* PCIE : 0x0
* PAXC : 0x1
* FS4 : 0x2
* Rest of the masters (includes MHB via RNI): 0x3
******************************************************************************/
#define SR_SID_VAL(block_id, subblock_id, device_num) ((block_id << 13) | \
(subblock_id << 11) | \
(device_num))
#define CRMU_STREAM_ID SR_SID_VAL(0x3, 0x0, 0x7)
#define CRMU_SID_SHIFT 5
#define DMAC_STREAM_ID SR_SID_VAL(0x3, 0x0, 0x0)
#define DMAC_SID_SHIFT 5
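/*
* For reference, with the encoding above CRMU_STREAM_ID evaluates to
* (0x3 << 13) | (0x0 << 11) | 0x7 = 0x6007 and DMAC_STREAM_ID to 0x6000;
* the *_SID_SHIFT values are presumably applied when these IDs are
* programmed into the corresponding SID registers.
*/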
/* DDR SHMOO Values defines */
#define IDRAM_SHMOO_VALUES_ADDR CRMU_IDRAM_BASE_ADDR
#define DDR_SHMOO_VALUES_ADDR 0x8f103000
#define SHMOO_SIZE_PER_CHANNEL 0x1000
#endif /* SR_DEF_H */
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SR_UTILS_H
#define SR_UTILS_H
#include <lib/mmio.h>
#include <chip_id.h>
#include <cmn_plat_util.h>
#include <sr_def.h>
static inline void brcm_stingray_set_qspi_mux(int enable_ap)
{
mmio_write_32(QSPI_HOLD_N_MODE_SEL_CONTROL, enable_ap);
mmio_write_32(QSPI_WP_N_MODE_SEL_CONTROL, enable_ap);
mmio_write_32(QSPI_SCK_MODE_SEL_CONTROL, enable_ap);
mmio_write_32(QSPI_CS_N_MODE_SEL_CONTROL, enable_ap);
mmio_write_32(QSPI_MOSI_MODE_SEL_CONTROL, enable_ap);
mmio_write_32(QSPI_MISO_MODE_SEL_CONTROL, enable_ap);
}
static inline void brcm_stingray_set_straps(uint32_t boot_source)
{
/* Enable software strap override */
mmio_setbits_32(CDRU_CHIP_STRAP_CTRL,
BIT(CDRU_CHIP_STRAP_CTRL__SOFTWARE_OVERRIDE));
/* set straps to the next boot source */
mmio_clrsetbits_32(CDRU_CHIP_STRAP_DATA,
BOOT_SOURCE_MASK,
boot_source);
/* Disable software strap override */
mmio_clrbits_32(CDRU_CHIP_STRAP_CTRL,
BIT(CDRU_CHIP_STRAP_CTRL__SOFTWARE_OVERRIDE));
}
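/*
* Illustrative usage: a caller preparing a reboot from QSPI would call
* brcm_stingray_set_straps(BOOT_SOURCE_QSPI), i.e. the same boot-source
* value that boot_source_get() is compared against elsewhere in this patch,
* so that the next reset samples the overridden strap value.
*/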
#endif
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SWREG_H
#define SWREG_H
/* default voltage if no valid OTP */
#define VDDC_CORE_DEF_VOLT 910000 /* 0.91v */
#define IHOST_DEF_VOLT 940000 /* 0.94v */
#define B0_VDDC_CORE_DEF_VOLT 950000 /* 0.95v */
#define B0_IHOST_DEF_VOLT 950000 /* 0.95v */
#define B0_DDR_VDDC_DEF_VOLT 1000000 /* 1v */
#define SWREG_IHOST1_DIS 4
#define SWREG_IHOST1_REG_RESETB 5
#define SWREG_IHOST1_PMU_STABLE 2
enum sw_reg {
DDR_VDDC = 1,
IHOST03,
IHOST12,
IHOST_ARRAY,
DDRIO_SLAVE,
VDDC_CORE,
VDDC1,
DDRIO_MASTER
};
int set_swreg(enum sw_reg reg_id, uint32_t micro_volts);
int swreg_firmware_update(void);
#endif
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TIMER_SYNC_H
#define TIMER_SYNC_H
void brcm_timer_sync_init(void);
#endif
#
# Copyright (c) 2019-2020, Broadcom
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Set the toc_flags to 1 for 100% speed operation
# Set the toc_flags to 2 for 50% speed operation
# Set the toc_flags to 3 for 25% speed operation
# Set toc_flags bit 3 to indicate that the FIP should be ignored in UEFI copy mode
PLAT_TOC_FLAGS := 0x0
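# Example (illustrative only): a build that should run at 50% speed and skip
# the FIP in UEFI copy mode could override this on the make command line,
# e.g. "make PLAT=stingray PLAT_TOC_FLAGS=0xa ..." (0xa = speed value 2 with
# bit 3 set).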
# Set the IHOST_PLL_FREQ to,
# 1 for full speed
# 2 for 50% speed
# 3 for 25% speed
# 0 for bypass
$(eval $(call add_define_val,IHOST_PLL_FREQ,1))
# Enable workaround for ERRATA_A72_859971
ERRATA_A72_859971 := 1
# Cache Coherency Interconnect Driver needed
DRIVER_CC_ENABLE := 1
$(eval $(call add_define,DRIVER_CC_ENABLE))
# Set to 1 to enable eMMC erase support
INCLUDE_EMMC_DRIVER_ERASE_CODE := 0
ifeq (${INCLUDE_EMMC_DRIVER_ERASE_CODE},1)
$(eval $(call add_define,INCLUDE_EMMC_DRIVER_ERASE_CODE))
endif
# BL31 is in DRAM
ARM_BL31_IN_DRAM := 1
ifneq (${USE_EMULATOR},yes)
STINGRAY_EMULATION_SETUP := 0
ifeq (${FASTBOOT_TYPE},)
override FASTBOOT_TYPE := 0
endif
USE_PAXB := yes
USE_PAXC := yes
USE_CHIMP := yes
endif
USE_CRMU_SRAM := yes
# Disable FS4 clocks - they can be re-enabled by Linux when needed
FS4_DISABLE_CLOCK := yes
# Enable error logging by default for Stingray
BCM_ELOG := yes
# Enable FRU support by default for Stingray
ifeq (${USE_FRU},)
USE_FRU := no
endif
# Use single cluster
ifeq (${USE_SINGLE_CLUSTER},yes)
$(info Using Single Cluster)
$(eval $(call add_define,USE_SINGLE_CLUSTER))
endif
# Use DDR
ifeq (${USE_DDR},yes)
$(info Using DDR)
$(eval $(call add_define,USE_DDR))
endif
ifeq (${BOARD_CFG},)
BOARD_CFG := bcm958742k
endif
# Use PAXB
ifeq (${USE_PAXB},yes)
$(info Using PAXB)
$(eval $(call add_define,USE_PAXB))
endif
# Use FS4
ifeq (${USE_FS4},yes)
$(info Using FS4)
$(eval $(call add_define,USE_FS4))
endif
# Use FS6
ifeq (${USE_FS6},yes)
$(info Using FS6)
$(eval $(call add_define,USE_FS6))
endif
# Disable FS4 clock
ifeq (${FS4_DISABLE_CLOCK},yes)
$(info Using FS4_DISABLE_CLOCK)
$(eval $(call add_define,FS4_DISABLE_CLOCK))
endif
ifneq (${NCSI_IO_DRIVE_STRENGTH_MA},)
$(info Using NCSI_IO_DRIVE_STRENGTH_MA)
$(eval $(call add_define,NCSI_IO_DRIVE_STRENGTH_MA))
endif
# Use NAND
ifeq (${USE_NAND},$(filter yes, ${USE_NAND}))
$(info Using NAND)
$(eval $(call add_define,USE_NAND))
endif
# Enable Broadcom error logging support
ifeq (${BCM_ELOG},yes)
$(info Using BCM_ELOG)
$(eval $(call add_define,BCM_ELOG))
endif
# BL31 build for standalone mode
ifeq (${STANDALONE_BL31},yes)
RESET_TO_BL31 := 1
$(info Using RESET_TO_BL31)
endif
# BL31 force full frequency for all CPUs
ifeq (${BL31_FORCE_CPU_FULL_FREQ},yes)
$(info Using BL31_FORCE_CPU_FULL_FREQ)
$(eval $(call add_define,BL31_FORCE_CPU_FULL_FREQ))
endif
# Enable non-secure accesses to CCN registers
ifeq (${BL31_CCN_NONSECURE},yes)
$(info Using BL31_CCN_NONSECURE)
$(eval $(call add_define,BL31_CCN_NONSECURE))
endif
# Use ChiMP
ifeq (${USE_CHIMP},yes)
$(info Using ChiMP)
$(eval $(call add_define,USE_CHIMP))
endif
# Use PAXC
ifeq (${USE_PAXC},yes)
$(info Using PAXC)
$(eval $(call add_define,USE_PAXC))
ifeq (${CHIMPFW_USE_SIDELOAD},yes)
$(info Using ChiMP FW sideload)
$(eval $(call add_define,CHIMPFW_USE_SIDELOAD))
endif
$(eval $(call add_define,FASTBOOT_TYPE))
$(eval $(call add_define,CHIMP_FB1_ENTRY))
endif
ifeq (${DEFAULT_SWREG_CONFIG}, 1)
$(eval $(call add_define,DEFAULT_SWREG_CONFIG))
endif
ifeq (${CHIMP_ALWAYS_NEEDS_QSPI},yes)
$(eval $(call add_define,CHIMP_ALWAYS_NEEDS_QSPI))
endif
# For testing purposes, use memsys stubs. Remove once memsys is fully tested.
USE_MEMSYS_STUBS := yes
# By default, use the BL1_RW area
ifneq (${BL2_USE_BL1_RW},no)
$(eval $(call add_define,USE_BL1_RW))
endif
# Default soft reset is L3
$(eval $(call add_define,CONFIG_SOFT_RESET_L3))
# Enable Chip OTP driver
DRIVER_OCOTP_ENABLE := 1
ifneq (${WARMBOOT_DDR_S3_SUPPORT},)
DRIVER_SPI_ENABLE := 1
endif
include plat/brcm/board/common/board_common.mk
SOC_DIR := brcm/board/stingray
PLAT_INCLUDES += -Iplat/${SOC_DIR}/include/ \
-Iinclude/plat/brcm/common/ \
-Iplat/brcm/common/
PLAT_BL_COMMON_SOURCES += lib/cpus/aarch64/cortex_a72.S \
plat/${SOC_DIR}/aarch64/plat_helpers.S \
drivers/ti/uart/aarch64/16550_console.S \
plat/${SOC_DIR}/src/tz_sec.c \
drivers/arm/tzc/tzc400.c \
plat/${SOC_DIR}/driver/plat_emmc.c \
plat/${SOC_DIR}/src/topology.c
ifeq (${USE_CHIMP},yes)
PLAT_BL_COMMON_SOURCES += drivers/brcm/chimp.c
endif
BL2_SOURCES += plat/${SOC_DIR}/driver/ihost_pll_config.c \
plat/${SOC_DIR}/src/bl2_setup.c \
plat/${SOC_DIR}/driver/swreg.c
ifeq (${USE_DDR},yes)
PLAT_INCLUDES += -Iplat/${SOC_DIR}/driver/ddr/soc/include
else
PLAT_INCLUDES += -Iplat/${SOC_DIR}/driver/ext_sram_init
BL2_SOURCES += plat/${SOC_DIR}/driver/ext_sram_init/ext_sram_init.c
endif
# Include GICv3 driver files
include drivers/arm/gic/v3/gicv3.mk
BRCM_GIC_SOURCES := ${GICV3_SOURCES} \
plat/common/plat_gicv3.c \
plat/brcm/common/brcm_gicv3.c
BL31_SOURCES += \
drivers/arm/ccn/ccn.c \
plat/brcm/board/common/timer_sync.c \
plat/brcm/common/brcm_ccn.c \
plat/common/plat_psci_common.c \
plat/${SOC_DIR}/driver/ihost_pll_config.c \
plat/${SOC_DIR}/src/bl31_setup.c \
plat/${SOC_DIR}/src/fsx.c \
plat/${SOC_DIR}/src/iommu.c \
plat/${SOC_DIR}/src/sdio.c \
${BRCM_GIC_SOURCES}
ifneq (${NCSI_IO_DRIVE_STRENGTH_MA},)
BL31_SOURCES += plat/${SOC_DIR}/src/ncsi.c
endif
ifeq (${USE_PAXB},yes)
BL31_SOURCES += plat/${SOC_DIR}/src/paxb.c
BL31_SOURCES += plat/${SOC_DIR}/src/sr_paxb_phy.c
endif
ifeq (${USE_PAXC},yes)
BL31_SOURCES += plat/${SOC_DIR}/src/paxc.c
endif
ifdef SCP_BL2
PLAT_INCLUDES += -Iplat/brcm/common/
BL2_SOURCES += plat/brcm/common/brcm_mhu.c \
plat/brcm/common/brcm_scpi.c \
plat/${SOC_DIR}/src/scp_utils.c \
plat/${SOC_DIR}/src/scp_cmd.c \
drivers/brcm/scp.c
BL31_SOURCES += plat/brcm/common/brcm_mhu.c \
plat/brcm/common/brcm_scpi.c \
plat/${SOC_DIR}/src/brcm_pm_ops.c
else
BL31_SOURCES += plat/${SOC_DIR}/src/ihost_pm.c \
plat/${SOC_DIR}/src/pm.c
endif
ifeq (${ELOG_SUPPORT},1)
ifeq (${ELOG_STORE_MEDIA},DDR)
BL2_SOURCES += plat/brcm/board/common/bcm_elog_ddr.c
endif
endif
ifeq (${BL31_BOOT_PRELOADED_SCP}, 1)
ifdef SCP_BL2
SCP_CFG_DIR=$(dir ${SCP_BL2})
PLAT_INCLUDES += -I${SCP_CFG_DIR}
endif
PLAT_INCLUDES += -Iplat/brcm/common/
# By default use OPTEE Assigned memory
PRELOADED_SCP_BASE ?= 0x8E000000
PRELOADED_SCP_SIZE ?= 0x10000
$(eval $(call add_define,PRELOADED_SCP_BASE))
$(eval $(call add_define,PRELOADED_SCP_SIZE))
$(eval $(call add_define,BL31_BOOT_PRELOADED_SCP))
BL31_SOURCES += plat/${SOC_DIR}/src/scp_utils.c \
plat/${SOC_DIR}/src/scp_cmd.c \
drivers/brcm/scp.c
endif
# Do not execute the startup code on warm reset.
PROGRAMMABLE_RESET_ADDRESS := 1
# Nitro FW, config and crash log use secure DDR memory
# In addition to the above, Nitro master and slave accesses are also secure
ifneq ($(NITRO_SECURE_ACCESS),)
$(eval $(call add_define,NITRO_SECURE_ACCESS))
$(eval $(call add_define,DDR_NITRO_SECURE_REGION_START))
$(eval $(call add_define,DDR_NITRO_SECURE_REGION_END))
endif
/*
* Copyright (c) 2016-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/arm/sp805.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <chimp.h>
#include <chip_id.h>
#include <cmn_plat_util.h>
#include <dmu.h>
#include <emmc_api.h>
#include <fru.h>
#ifdef USE_GPIO
#include <drivers/gpio.h>
#include <iproc_gpio.h>
#endif
#include <platform_def.h>
#include <sotp.h>
#include <swreg.h>
#include <sr_utils.h>
#ifdef USE_DDR
#include <ddr_init.h>
#else
#include <ext_sram_init.h>
#endif
#if DRIVER_OCOTP_ENABLE
#include <ocotp.h>
#endif
#include "board_info.h"
#define WORD_SIZE 8
#define SWREG_AVS_OTP_OFFSET (13 * WORD_SIZE) /* 13th row byte offset */
#define AON_GPIO_OTP_OFFSET (28 * WORD_SIZE) /* 28th row byte offset */
#define BYTES_TO_READ 8
/* OTP voltage step definitions */
#define MVOLT_STEP_MAX 0x18 /* 1v */
#define MVOLT_PER_STEP 10 /* 10mv (0.01v) per step */
#define MVOLT_BASE 760 /* 0.76v */
#define STEP_TO_UVOLTS(step) \
((MVOLT_BASE + (MVOLT_PER_STEP * (step))) * 1000)
#define GET_BITS(first, last, data) \
((data >> first) & ((1 << (last - first + 1)) - 1))
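/*
* Worked example with the constants above: the maximum step 0x18 (24)
* gives STEP_TO_UVOLTS(0x18) = (760 + 10 * 24) * 1000 = 1000000 micro-volts,
* i.e. the 1v ceiling noted next to MVOLT_STEP_MAX.
*/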
/*
* SW-REG OTP encoding:
*
* SWREG_bits[11:0] = OTP 13th row 12 bits[55:44]
* SWREG_bits[11:10] - Valid Bits (0x2 - valid, if not 0x2 - Invalid)
* SWREG_bits[9:5] - iHost03, iHost12
* SWREG_bits[4:0] - Core VDDC
*/
#define SWREG_OTP_BITS_START 12 /* 44th bit in MSB 32-bits */
#define SWREG_OTP_BITS_END 23 /* 55th bit in MSB 32-bits */
#define SWREG_VDDC_FIELD_START 0
#define SWREG_VDDC_FIELD_END 4
#define SWREG_IHOST_FIELD_START 5
#define SWREG_IHOST_FIELD_END 9
#define SWREG_VALID_BIT_START 10
#define SWREG_VALID_BIT_END 11
#define SWREG_VALID_BITS 0x2
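/*
* Putting the pieces together (see read_avs_otp_bits() below): the 12 AVS
* bits are pulled from the upper OTP word with
* avs_bits = GET_BITS(SWREG_OTP_BITS_START, SWREG_OTP_BITS_END, buf[1]);
* and are treated as valid only when
* GET_BITS(SWREG_VALID_BIT_START, SWREG_VALID_BIT_END, avs_bits) equals
* SWREG_VALID_BITS.
*/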
/*
* Row 13 bit 56 is currently programmed as '1'. It is not otherwise used,
* so the plan is to flip this bit to '0' for the B1 revision. SW can thus
* leverage this bit to identify the Bx chip and program the sw-regulators
* accordingly.
*/
#define SPARE_BIT 24
#define IS_SR_B0(data) (((data) >> SPARE_BIT) & 0x1)
#if DRIVER_OCOTP_ENABLE
static struct otpc_map otp_stingray_map = {
.otpc_row_size = 2,
.data_r_offset = {0x10, 0x5c},
.data_w_offset = {0x2c, 0x64},
.word_size = 8,
.stride = 8,
};
#endif
void plat_bcm_bl2_early_platform_setup(void)
{
/* Select UART0 for AP via mux setting */
if (PLAT_BRCM_BOOT_UART_BASE == UART0_BASE_ADDR) {
mmio_write_32(UART0_SIN_MODE_SEL_CONTROL, 1);
mmio_write_32(UART0_SOUT_MODE_SEL_CONTROL, 1);
}
}
#ifdef USE_NAND
static void brcm_stingray_nand_init(void)
{
unsigned int val;
unsigned int nand_idm_reset_control = 0x68e0a800;
VERBOSE(" stingray nand init start.\n");
/* Reset NAND */
VERBOSE(" - reset nand\n");
val = mmio_read_32((uintptr_t)(nand_idm_reset_control + 0x0));
mmio_write_32((uintptr_t)(nand_idm_reset_control + 0x0), val | 0x1);
udelay(500);
val = mmio_read_32((uintptr_t)(nand_idm_reset_control + 0x0));
mmio_write_32((uintptr_t)(nand_idm_reset_control + 0x0), val & ~0x1);
udelay(500);
VERBOSE(" stingray nand init done.\n");
}
#endif
#if defined(USE_PAXB) || defined(USE_PAXC) || defined(USE_SATA)
#define PCIE_RESCAL_CFG_0 0x40000130
#define PCIE_CFG_RESCAL_RSTB_R (1 << 16)
#define PCIE_CFG_RESCAL_PWRDNB_R (1 << 8)
#define PCIE_RESCAL_STATUS_0 0x4000014c
#define PCIE_STAT_PON_VALID_R (1 << 0)
#define PCIE_RESCAL_OUTPUT_STATUS 0x40000154
#define CDRU_PCIE_RESET_N_R (1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R)
#ifdef EMULATION_SETUP
static void brcm_stingray_pcie_reset(void)
{
}
#else
static void brcm_stingray_pcie_reset(void)
{
unsigned int data;
int try;
if (bcm_chimp_is_nic_mode()) {
INFO("NIC mode detected; PCIe reset/rescal not executed\n");
return;
}
mmio_clrbits_32(CDRU_MISC_RESET_CONTROL, CDRU_PCIE_RESET_N_R);
mmio_setbits_32(CDRU_MISC_RESET_CONTROL, CDRU_PCIE_RESET_N_R);
/* Release reset */
mmio_setbits_32(PCIE_RESCAL_CFG_0, PCIE_CFG_RESCAL_RSTB_R);
mdelay(1);
/* Power UP */
mmio_setbits_32(PCIE_RESCAL_CFG_0,
(PCIE_CFG_RESCAL_RSTB_R | PCIE_CFG_RESCAL_PWRDNB_R));
try = 1000;
do {
udelay(1);
data = mmio_read_32(PCIE_RESCAL_STATUS_0);
try--;
} while ((data & PCIE_STAT_PON_VALID_R) == 0x0 && (try > 0));
if (try <= 0)
ERROR("PCIE_RESCAL_STATUS_0: 0x%x\n", data);
VERBOSE("PCIE_SATA_RESCAL_STATUS_0 0x%x.\n",
mmio_read_32(PCIE_RESCAL_STATUS_0));
VERBOSE("PCIE_SATA_RESCAL_OUTPUT_STATUS 0x%x.\n",
mmio_read_32(PCIE_RESCAL_OUTPUT_STATUS));
INFO("PCIE SATA Rescal Init done\n");
}
#endif /* EMULATION_SETUP */
#endif /* USE_PAXB || USE_PAXC || USE_SATA */
#ifdef USE_PAXC
void brcm_stingray_chimp_check_and_fastboot(void)
{
int fastboot_init_result;
if (bcm_chimp_is_nic_mode())
/* Do not wait here */
return;
#if WARMBOOT_DDR_S3_SUPPORT
/*
* Currently DDR shmoo parameters and QSPI boot source are
* tied. DDR shmoo parameters are stored in QSPI, which is
* used for warmboot.
* Do not reset nitro for warmboot
*/
if (is_warmboot() && (boot_source_get() == BOOT_SOURCE_QSPI))
return;
#endif /* WARMBOOT_DDR_S3_SUPPORT */
/*
* Not in NIC mode,
* initiate fastboot (if enabled)
*/
if (FASTBOOT_TYPE == CHIMP_FASTBOOT_NITRO_RESET) {
VERBOSE("Bring up Nitro/ChiMP\n");
if (boot_source_get() == BOOT_SOURCE_QSPI)
WARN("Nitro boots from QSPI when AP has booted from QSPI.\n");
brcm_stingray_set_qspi_mux(0);
VERBOSE("Nitro controls the QSPI\n");
}
fastboot_init_result = bcm_chimp_initiate_fastboot(FASTBOOT_TYPE);
if (fastboot_init_result && boot_source_get() != BOOT_SOURCE_QSPI)
ERROR("Nitro init error %d. Status: 0x%x; bpe_mod reg: 0x%x\n"
"fastboot register: 0x%x; handshake register 0x%x\n",
fastboot_init_result,
bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG),
bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_MODE_REG),
bcm_chimp_read_ctrl(CHIMP_REG_CTRL_FSTBOOT_PTR_REG),
bcm_chimp_read(CHIMP_REG_ECO_RESERVED));
/*
* An L1 reset (a CRMU watchdog kick, for example) does not clear the
* Nitro scratchpad RAM.
* For Nitro resets: Clear the Nitro health status memory.
*/
bcm_chimp_write((CHIMP_REG_CHIMP_SCPAD + CHIMP_HEALTH_STATUS_OFFSET),
0);
}
#endif
void set_ihost_vddc_swreg(uint32_t ihost_uvolts, uint32_t vddc_uvolts)
{
NOTICE("ihost_uvolts: %duv, vddc_uvolts: %duv\n",
ihost_uvolts, vddc_uvolts);
set_swreg(VDDC_CORE, vddc_uvolts);
set_swreg(IHOST03, ihost_uvolts);
set_swreg(IHOST12, ihost_uvolts);
}
/*
* Reads the SWREG AVS OTP bits (13th row) with ECC enabled and returns the
* voltages defined in OTP if a valid OTP entry is found
*/
void read_avs_otp_bits(uint32_t *ihost_uvolts, uint32_t *vddc_uvolts)
{
uint32_t offset = SWREG_AVS_OTP_OFFSET;
uint32_t ihost_step, vddc_step;
uint32_t avs_bits;
uint32_t buf[2];
if (bcm_otpc_read(offset, &buf[0], BYTES_TO_READ, 1) == -1)
return;
VERBOSE("AVS OTP %d ROW: 0x%x.0x%x\n",
offset/WORD_SIZE, buf[1], buf[0]);
/* get voltage readings from AVS OTP bits */
avs_bits = GET_BITS(SWREG_OTP_BITS_START,
SWREG_OTP_BITS_END,
buf[1]);
/* check for valid otp bits */
if (GET_BITS(SWREG_VALID_BIT_START, SWREG_VALID_BIT_END, avs_bits) !=
SWREG_VALID_BITS) {
WARN("Invalid AVS OTP bits at %d row\n", offset/WORD_SIZE);
return;
}
/* get ihost and vddc step value */
vddc_step = GET_BITS(SWREG_VDDC_FIELD_START,
SWREG_VDDC_FIELD_END,
avs_bits);
ihost_step = GET_BITS(SWREG_IHOST_FIELD_START,
SWREG_IHOST_FIELD_END,
avs_bits);
if ((ihost_step > MVOLT_STEP_MAX) || (vddc_step > MVOLT_STEP_MAX)) {
WARN("OTP entry invalid\n");
return;
}
/* get voltage in micro-volts */
*ihost_uvolts = STEP_TO_UVOLTS(ihost_step);
*vddc_uvolts = STEP_TO_UVOLTS(vddc_step);
}
/*
* This API reads the OTP bits and programs the internal swregs - ihost12,
* ihost03, vddc_core and ddr_core - depending on the chip revision. The
* external swregs are programmed from the CRMU.
*
* For the A2 chip:
* Read OTP row 20, bit 50. This bit is set for an A2 chip. Once an A2 chip
* is found, read AVS OTP row 13, 12 bits [55:44]; if valid OTP bits are
* found, set ihost and vddc according to the AVS OTP bits, else set them
* to 0.94v and 0.91v respectively. Also update the firmware after setting
* the voltage.
*
* For the B0 chip:
* Read OTP row 13, bit 56. This bit is set for a B0 chip. Once a B0 chip
* is found, set ihost and vddc to 0.95v and ddr_core to 1v. No AVS OTP
* bits are used to get the ihost/vddc voltages.
*
* For the B1 chip:
* Read AVS OTP row 13, 12 bits [55:44]; if valid OTP bits are found, set
* ihost and vddc according to the AVS OTP bits, else set them to 0.94v
* and 0.91v respectively.
*/
void set_swreg_based_on_otp(void)
{
/* default voltage if no valid OTP */
uint32_t vddc_uvolts = VDDC_CORE_DEF_VOLT;
uint32_t ihost_uvolts = IHOST_DEF_VOLT;
uint32_t ddrc_uvolts;
uint32_t offset;
uint32_t buf[2];
offset = SWREG_AVS_OTP_OFFSET;
if (bcm_otpc_read(offset, &buf[0], BYTES_TO_READ, 1) == -1)
return;
VERBOSE("OTP %d ROW: 0x%x.0x%x\n",
offset/WORD_SIZE, buf[1], buf[0]);
if (IS_SR_B0(buf[1])) {
/* don't read AVS OTP for B0 */
ihost_uvolts = B0_IHOST_DEF_VOLT;
vddc_uvolts = B0_VDDC_CORE_DEF_VOLT;
ddrc_uvolts = B0_DDR_VDDC_DEF_VOLT;
} else {
read_avs_otp_bits(&ihost_uvolts, &vddc_uvolts);
}
#if (IHOST_REG_TYPE == IHOST_REG_INTEGRATED) && \
(VDDC_REG_TYPE == VDDC_REG_INTEGRATED)
/* enable IHOST12 cluster before changing voltage */
NOTICE("Switching on the Regulator idx: %u\n",
SWREG_IHOST1_DIS);
mmio_clrsetbits_32(CRMU_SWREG_CTRL_ADDR,
BIT(SWREG_IHOST1_DIS),
BIT(SWREG_IHOST1_REG_RESETB));
/* wait for the regulator supply to become stable */
while (!(mmio_read_32(CRMU_SWREG_STATUS_ADDR) &
(1 << SWREG_IHOST1_PMU_STABLE)))
;
INFO("Regulator supply got stable\n");
#ifndef DEFAULT_SWREG_CONFIG
swreg_firmware_update();
#endif
set_ihost_vddc_swreg(ihost_uvolts, vddc_uvolts);
#endif
if (IS_SR_B0(buf[1])) {
NOTICE("ddrc_uvolts: %duv\n", ddrc_uvolts);
set_swreg(DDR_VDDC, ddrc_uvolts);
}
}
#ifdef USE_DDR
static struct ddr_info ddr_info;
#endif
#ifdef USE_FRU
static struct fru_area_info fru_area[FRU_MAX_NR_AREAS];
static struct fru_board_info board_info;
static struct fru_time fru_tm;
static uint8_t fru_tbl[BCM_MAX_FRU_LEN];
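/*
 * Probe the eMMC boot partitions for a FRU table and, when a valid table
 * is found, populate ddr_info and board_info from it.
 */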
static void board_detect_fru(void)
{
uint32_t i, result;
int ret = -1;
result = bcm_emmc_init(false);
if (!result) {
ERROR("eMMC init failed\n");
return;
}
/* go through eMMC boot partitions looking for FRU table */
for (i = EMMC_BOOT_PARTITION1; i <= EMMC_BOOT_PARTITION2; i++) {
result = emmc_partition_select(i);
if (!result) {
ERROR("Switching to eMMC part %u failed\n", i);
return;
}
result = emmc_read(BCM_FRU_TBL_OFFSET, (uintptr_t)fru_tbl,
BCM_MAX_FRU_LEN, BCM_MAX_FRU_LEN);
if (!result) {
ERROR("Failed to read from eMMC part %u\n", i);
return;
}
/*
* Run sanity check and checksum to make sure valid FRU table
* is detected
*/
ret = fru_validate(fru_tbl, fru_area);
if (ret < 0) {
WARN("FRU table not found in eMMC part %u\n", i);
continue;
}
/* parse DDR information from FRU table */
ret = fru_parse_ddr(fru_tbl, &fru_area[FRU_AREA_INTERNAL],
&ddr_info);
if (ret < 0) {
WARN("No FRU DDR info found in eMMC part %u\n", i);
continue;
}
/* parse board information from FRU table */
ret = fru_parse_board(fru_tbl, &fru_area[FRU_AREA_BOARD_INFO],
&board_info);
if (ret < 0) {
WARN("No FRU board info found in eMMC part %u\n", i);
continue;
}
/* if we reach here, valid FRU table is parsed */
break;
}
if (ret < 0) {
WARN("FRU table missing for this board\n");
return;
}
for (i = 0; i < BCM_MAX_NR_DDR; i++) {
INFO("DDR channel index: %d\n", ddr_info.mcb[i].idx);
INFO("DDR size %u GB\n", ddr_info.mcb[i].size_mb / 1024);
INFO("DDR ref ID by SW (Not MCB Ref ID) 0x%x\n",
ddr_info.mcb[i].ref_id);
}
fru_format_time(board_info.mfg_date, &fru_tm);
INFO("**** FRU board information ****\n");
INFO("Language 0x%x\n", board_info.lang);
INFO("Manufacturing Date %u.%02u.%02u, %02u:%02u\n",
fru_tm.year, fru_tm.month, fru_tm.day,
fru_tm.hour, fru_tm.min);
INFO("Manufacturing Date(Raw) 0x%x\n", board_info.mfg_date);
INFO("Manufacturer %s\n", board_info.manufacturer);
INFO("Product Name %s\n", board_info.product_name);
INFO("Serial number %s\n", board_info.serial_number);
INFO("Part number %s\n", board_info.part_number);
INFO("File ID %s\n", board_info.file_id);
}
#endif /* USE_FRU */
#ifdef USE_GPIO
#define INVALID_GPIO 0xffff
static const int gpio_cfg_bitmap[MAX_NR_GPIOS] = {
#ifdef BRD_DETECT_GPIO_BIT0
BRD_DETECT_GPIO_BIT0,
#else
INVALID_GPIO,
#endif
#ifdef BRD_DETECT_GPIO_BIT1
BRD_DETECT_GPIO_BIT1,
#else
INVALID_GPIO,
#endif
#ifdef BRD_DETECT_GPIO_BIT2
BRD_DETECT_GPIO_BIT2,
#else
INVALID_GPIO,
#endif
#ifdef BRD_DETECT_GPIO_BIT3
BRD_DETECT_GPIO_BIT3,
#else
INVALID_GPIO,
#endif
};
static uint8_t gpio_bitmap;
/*
* Use an odd number to avoid potential conflict with public GPIO level
* defines
*/
#define GPIO_STATE_FLOAT 15
/*
* If GPIO_SUPPORT_FLOAT_DETECTION is disabled, simply return GPIO level
*
* If GPIO_SUPPORT_FLOAT_DETECTION is enabled, add additional test for possible
* pin floating (unconnected) scenario. This support assumes that an externally
* applied pull up / pull down has a stronger pull than the internal pull
* up / pull down.
*/
static uint8_t gpio_get_state(int gpio)
{
uint8_t val;
/* set direction to GPIO input */
gpio_set_direction(gpio, GPIO_DIR_IN);
#ifndef GPIO_SUPPORT_FLOAT_DETECTION
if (gpio_get_value(gpio) == GPIO_LEVEL_HIGH)
val = GPIO_LEVEL_HIGH;
else
val = GPIO_LEVEL_LOW;
return val;
#else
/*
* Enable internal pull down. If GPIO level is still high, there must
* be an external pull up
*/
gpio_set_pull(gpio, GPIO_PULL_DOWN);
if (gpio_get_value(gpio) == GPIO_LEVEL_HIGH) {
val = GPIO_LEVEL_HIGH;
goto exit;
}
/*
* Enable internal pull up. If GPIO level is still low, there must
* be an external pull down
*/
gpio_set_pull(gpio, GPIO_PULL_UP);
if (gpio_get_value(gpio) == GPIO_LEVEL_LOW) {
val = GPIO_LEVEL_LOW;
goto exit;
}
/* if we reach here, the pin must not be connected */
val = GPIO_STATE_FLOAT;
exit:
/* make sure the internal pull is disabled */
if (gpio_get_pull(gpio) != GPIO_PULL_NONE)
gpio_set_pull(gpio, GPIO_PULL_NONE);
return val;
#endif
}
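/*
 * Build the board-detection bitmap from the configured GPIOs and load the
 * matching DDR configuration from gpio_ddr_info. A floating pin forces the
 * special MAX_GPIO_BITMAP_VAL entry.
 */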
static void board_detect_gpio(void)
{
unsigned int i, val;
int gpio;
iproc_gpio_init(IPROC_GPIO_S_BASE, IPROC_GPIO_NR,
IPROC_IOPAD_MODE_BASE, HSLS_IOPAD_BASE);
gpio_bitmap = 0;
for (i = 0; i < MAX_NR_GPIOS; i++) {
if (gpio_cfg_bitmap[i] == INVALID_GPIO)
continue;
/*
* Construct the bitmap based on GPIO value. Floating pin
* detection is a special case. As soon as a floating pin is
* detected, a special value of MAX_GPIO_BITMAP_VAL is
* assigned and we break out of the loop immediately
*/
gpio = gpio_cfg_bitmap[i];
val = gpio_get_state(gpio);
if (val == GPIO_STATE_FLOAT) {
gpio_bitmap = MAX_GPIO_BITMAP_VAL;
break;
}
if (val == GPIO_LEVEL_HIGH)
gpio_bitmap |= BIT(i);
}
memcpy(&ddr_info, &gpio_ddr_info[gpio_bitmap], sizeof(ddr_info));
INFO("Board detection GPIO bitmap = 0x%x\n", gpio_bitmap);
}
#endif /* USE_GPIO */
static void bcm_board_detect(void)
{
#ifdef DDR_LEGACY_MCB_SUPPORTED
/* Loading default DDR info */
memcpy(&ddr_info, &default_ddr_info, sizeof(ddr_info));
#endif
#ifdef USE_FRU
board_detect_fru();
#endif
#ifdef USE_GPIO
board_detect_gpio();
#endif
}
static void dump_persistent_regs(void)
{
NOTICE("pr0: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG0));
NOTICE("pr1: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG1));
NOTICE("pr2: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG2));
NOTICE("pr3: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG3));
NOTICE("pr4: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG4));
NOTICE("pr5: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG5));
NOTICE("pr6: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG6));
NOTICE("pr7: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG7));
NOTICE("pr8: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG8));
NOTICE("pr9: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG9));
NOTICE("pr10: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG10));
NOTICE("pr11: %x\n", mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG11));
}
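/*
 * BL2 platform architecture setup: manage the secure watchdog, start event
 * logging, release the IO pads, program the swregs from OTP, detect the
 * board, bring up DDR (or external SRAM) and reset the peripherals needed
 * by later boot stages.
 */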
void plat_bcm_bl2_plat_arch_setup(void)
{
if (chip_get_rev_id_major() == CHIP_REV_MAJOR_AX) {
if (!(sotp_mem_read(SOTP_ATF_CFG_ROW_ID, SOTP_ROW_NO_ECC) &
SOTP_ATF_WATCHDOG_ENABLE_MASK)) {
/*
* Stop sp805 watchdog timer immediately.
* It might have been set up by the MCU patch earlier for
* the eMMC workaround.
*
* Note the watchdog timer started in CRMU has a very
* short timeout and needs to be stopped immediately.
* Down below we restart it with a much longer timeout
* for BL2 and BL31
*/
sp805_stop(ARM_SP805_TWDG_BASE);
}
}
#if !BRCM_DISABLE_TRUSTED_WDOG
/*
* Start the secure watchdog for BL2 and BL31.
* Note that a UART download can take a long time,
* so do not enable the watchdog for UART download,
* as this boot source is not the standard modus operandi.
*/
if (boot_source_get() != BOOT_SOURCE_UART)
sp805_start(ARM_SP805_TWDG_BASE, ARM_TWDG_LOAD_VAL);
#endif
#ifdef BCM_ELOG
/* Ensure logging is started out fresh in BL2. */
mmio_write_32(BCM_ELOG_BL2_BASE, 0);
#endif
/*
* In BL2, since we have very limited space to store logs, we only
* save logs that are >= the WARNING level.
*/
bcm_elog_init((void *)BCM_ELOG_BL2_BASE, BCM_ELOG_BL2_SIZE,
LOG_LEVEL_WARNING);
dump_persistent_regs();
/* Read CRMU mailbox 0 */
NOTICE("RESET (reported by CRMU): 0x%x\n",
mmio_read_32(CRMU_READ_MAIL_BOX0));
/*
* All non-boot-source PADs are in forced input-mode at
* reset, so clear the force on non-boot-source PADs using the
* CDRU register.
*/
mmio_clrbits_32((uintptr_t)CDRU_CHIP_IO_PAD_CONTROL,
(1 << CDRU_CHIP_IO_PAD_CONTROL__CDRU_IOMUX_FORCE_PAD_IN_R));
#if DRIVER_OCOTP_ENABLE
bcm_otpc_init(&otp_stingray_map);
#endif
set_swreg_based_on_otp();
#if IHOST_PLL_FREQ != 0
bcm_set_ihost_pll_freq(0x0, IHOST_PLL_FREQ);
#endif
#ifdef INCLUDE_EMMC_DRIVER_ERASE_CODE
/* The erasable unit of the eMMC is the "Erase Group".
* The erase group is measured in write blocks, which are the
* basic writable units of the device.
* The size of the erase group is a device-specific parameter.
*/
emmc_erase(EMMC_ERASE_START_BLOCK, EMMC_ERASE_BLOCK_COUNT,
EMMC_ERASE_PARTITION);
#endif
bcm_board_detect();
#ifdef DRIVER_EMMC_ENABLE
/* Initialize the card, if it is not */
if (bcm_emmc_init(true) < 0)
WARN("eMMC Card Initialization Failed!!!\n");
#endif
#if BL2_TEST_I2C
i2c_test();
#endif
#ifdef USE_DDR
ddr_initialize(&ddr_info);
ddr_secure_region_config(SECURE_DDR_BASE_ADDRESS,
SECURE_DDR_END_ADDRESS);
#ifdef NITRO_SECURE_ACCESS
ddr_secure_region_config(DDR_NITRO_SECURE_REGION_START,
DDR_NITRO_SECURE_REGION_END);
#endif
#else
ext_sram_init();
#endif
#if BL2_TEST_MEM
ddr_test();
#endif
#ifdef USE_NAND
brcm_stingray_nand_init();
#endif
#if defined(USE_PAXB) || defined(USE_PAXC) || defined(USE_SATA)
brcm_stingray_pcie_reset();
#endif
#ifdef USE_PAXC
if (boot_source_get() != BOOT_SOURCE_QSPI)
brcm_stingray_chimp_check_and_fastboot();
#endif
#if ((!CLEAN_DDR || MMU_DISABLED))
/*
* Now DDR has been initialized. We want to copy all the logs in SRAM
* into DDR so we will have much more space to store the logs in the
* next boot stage
*/
bcm_elog_copy_log((void *)BCM_ELOG_BL31_BASE,
MIN(BCM_ELOG_BL2_SIZE, BCM_ELOG_BL31_SIZE)
);
/*
* We are not yet at the end of BL2, but we can stop logging here so we
* do not need to add 'bcm_elog_exit' to the standard BL2 code. The
* benefit of capturing BL2 logs after this point is minimal in a
* production system.
* NOTE: BL2 logging must be exited before going on to set up the
* page tables.
*/
bcm_elog_exit();
#endif
}
/*
* Copyright (c) 2015 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cortex_a72.h>
#include <drivers/arm/sp805.h>
#include <drivers/console.h>
#include <drivers/delay_timer.h>
#include <drivers/ti/uart/uart_16550.h>
#include <lib/mmio.h>
#include <lib/utils_def.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <bl33_info.h>
#include <chimp.h>
#include <cmn_plat_util.h>
#include <dmu.h>
#include <fsx.h>
#include <iommu.h>
#include <ncsi.h>
#include <paxb.h>
#include <paxc.h>
#include <platform_def.h>
#include <sdio.h>
#include <sr_utils.h>
#include <timer_sync.h>
/*******************************************************************************
* Perform any BL3-1 platform setup common to ARM standard platforms
******************************************************************************/
static void brcm_stingray_gain_qspi_control(void)
{
if (boot_source_get() != BOOT_SOURCE_QSPI) {
if (bcm_chimp_is_nic_mode() &&
(!bcm_chimp_handshake_done())) {
/*
* Last chance to wait for ChiMP firmware to report
* "I am done" before grabbing the QSPI
*/
WARN("ChiMP still not booted\n");
#ifndef CHIMP_ALWAYS_NEEDS_QSPI
WARN("ChiMP is given the last chance to boot (%d s)\n",
CHIMP_HANDSHAKE_TIMEOUT_MS / 1000);
if (!bcm_chimp_wait_handshake()) {
ERROR("ChiMP failed to boot\n");
} else {
INFO("ChiMP booted successfully\n");
}
#endif
}
#ifndef CHIMP_ALWAYS_NEEDS_QSPI
INFO("AP grabs QSPI\n");
/*
* For QSPI boot, SBL/BL1 has already taken care of this.
* For other boot sources, the QSPI needs to be muxed to
* the AP for exclusive use.
*/
brcm_stingray_set_qspi_mux(1);
INFO("AP (bl31) gained control over QSPI\n");
#endif
}
}
static void brcm_stingray_dma_pl330_init(void)
{
unsigned int val;
VERBOSE("dma pl330 init start\n");
/* Set DMAC boot_manager_ns = 0x1 */
VERBOSE(" - configure boot security state\n");
mmio_setbits_32(DMAC_M0_IDM_IO_CONTROL_DIRECT, BOOT_MANAGER_NS);
/* Set boot_peripheral_ns[n:0] = 0xffffffff */
mmio_write_32(ICFG_DMAC_CONFIG_2, BOOT_PERIPHERAL_NS);
/* Set boot_irq_ns[n:0] = 0x0000ffff */
mmio_write_32(ICFG_DMAC_CONFIG_3, BOOT_IRQ_NS);
/* Set DMAC stream_id */
VERBOSE(" - configure stream_id = 0x6000\n");
val = (DMAC_STREAM_ID << DMAC_SID_SHIFT);
mmio_write_32(ICFG_DMAC_SID_ARADDR_CONTROL, val);
mmio_write_32(ICFG_DMAC_SID_AWADDR_CONTROL, val);
/* Reset DMAC */
VERBOSE(" - reset dma pl330\n");
mmio_setbits_32(DMAC_M0_IDM_RESET_CONTROL, 0x1);
udelay(500);
mmio_clrbits_32(DMAC_M0_IDM_RESET_CONTROL, 0x1);
udelay(500);
INFO("dma pl330 init done\n");
}
static void brcm_stingray_spi_pl022_init(uintptr_t idm_reset_control)
{
VERBOSE("spi pl022 init start\n");
/* Reset APB SPI bridge */
VERBOSE(" - reset apb spi bridge\n");
mmio_setbits_32(idm_reset_control, 0x1);
udelay(500);
mmio_clrbits_32(idm_reset_control, 0x1);
udelay(500);
INFO("spi pl022 init done\n");
}
#define CDRU_SATA_RESET_N \
BIT(CDRU_MISC_RESET_CONTROL__CDRU_SATA_RESET_N_R)
#define CDRU_MISC_CLK_SATA \
BIT(CDRU_MISC_CLK_ENABLE_CONTROL__CDRU_SATA_CLK_EN_R)
#define CCN_CONFIG_CLK_ENABLE (1 << 2)
#define MMU_CONFIG_CLK_ENABLE (0x3F << 16)
#define SATA_SATA_TOP_CTRL_BUS_CTRL (SATA_BASE + 0x2044)
#define DMA_BIT_CTRL_MASK 0x003
#define DMA_DESCR_ENDIAN_CTRL (DMA_BIT_CTRL_MASK << 0x002)
#define DMA_DATA_ENDIAN_CTRL (DMA_BIT_CTRL_MASK << 0x004)
#define SATA_PORT_SATA3_PCB_REG8 (SATA_BASE + 0x2320)
#define SATA_PORT_SATA3_PCB_REG11 (SATA_BASE + 0x232c)
#define SATA_PORT_SATA3_PCB_BLOCK_ADDR (SATA_BASE + 0x233c)
#define SATA3_AFE_TXRX_ACTRL 0x1d0
/* TXDriver swing setting is 800mV */
#define DFS_SWINGNOPE_VALUE (0x0 << 6)
#define DFS_SWINGNOPE_MASK (0x3 << 6)
#define DFS_SWINGPE_VALUE (0x1 << 4)
#define DFS_SWINGPE_MASK (0x3 << 4)
#define DFS_INJSTRENGTH_VALUE (0x0 << 4)
#define DFS_INJSTRENGTH_MASK (0x3 << 4)
#define DFS_INJEN (0x1 << 3)
#define SATA_CORE_MEM_CTRL (SATA_BASE + 0x3a08)
#define SATA_CORE_MEM_CTRL_ISO BIT(0)
#define SATA_CORE_MEM_CTRL_ARRPOWEROKIN BIT(1)
#define SATA_CORE_MEM_CTRL_ARRPOWERONIN BIT(2)
#define SATA_CORE_MEM_CTRL_POWEROKIN BIT(3)
#define SATA_CORE_MEM_CTRL_POWERONIN BIT(4)
#define SATA0_IDM_RESET_CONTROL (SATA_BASE + 0x500800)
#define SATA_APBT0_IDM_IO_CONTROL_DIRECT (SATA_BASE + 0x51a408)
#define IO_CONTROL_DIRECT_CLK_ENABLE BIT(0)
#define SATA_APBT0_IDM_RESET_CONTROL (SATA_BASE + 0x51a800)
#define IDM_RESET_CONTROL_RESET BIT(0)
#define NIC400_SATA_NOC_SECURITY1 0x6830000c
#define SATA_NOC_SECURITY1_FIELD 0xf
#define NIC400_SATA_NOC_SECURITY2 0x68300010
#define SATA_NOC_SECURITY2_FIELD 0xf
#define NIC400_SATA_NOC_SECURITY3 0x68300014
#define SATA_NOC_SECURITY3_FIELD 0x1
#define NIC400_SATA_NOC_SECURITY4 0x68300018
#define SATA_NOC_SECURITY4_FIELD 0x1
#define NIC400_SATA_NOC_SECURITY5 0x6830001c
#define SATA_NOC_SECURITY5_FIELD 0xf
#define NIC400_SATA_NOC_SECURITY6 0x68300020
#define SATA_NOC_SECURITY6_FIELD 0x1
#define NIC400_SATA_NOC_SECURITY7 0x68300024
#define SATA_NOC_SECURITY7_FIELD 0xf
#define NIC400_SATA_NOC_SECURITY8 0x68300028
#define SATA_NOC_SECURITY8_FIELD 0xf
#define NIC400_SATA_NOC_SECURITY9 0x6830002c
#define SATA_NOC_SECURITY9_FIELD 0x1
#define SATA_APBT_IDM_PORT_REG(port, reg) \
(((port/4) << 12) + reg)
#define SATA_IDM_PORT_REG(port, reg) ((port << 12) + reg)
#define SATA_PORT_REG(port, reg) \
(((port%4) << 16) + ((port/4) << 20) + reg)
#define MAX_SATA_PORTS 8
#define USE_SATA_PORTS 8
#ifdef USE_SATA
static const uint8_t sr_b0_sata_port[MAX_SATA_PORTS] = {
0, 1, 2, 3, 4, 5, 6, 7
};
static uint32_t brcm_stingray_get_sata_port(unsigned int port)
{
return sr_b0_sata_port[port];
}
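/*
 * Bring up the SATA subsystem: enable the SATA clock, toggle the CDRU
 * reset, release the per-port IDM resets, power up the core memories,
 * clear the DMA endianness controls and open the NIC-400 security filters.
 */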
static void brcm_stingray_sata_init(void)
{
unsigned int port = 0;
uint32_t sata_port;
mmio_setbits_32(CDRU_MISC_CLK_ENABLE_CONTROL,
CDRU_MISC_CLK_SATA);
mmio_clrbits_32(CDRU_MISC_RESET_CONTROL, CDRU_SATA_RESET_N);
mmio_setbits_32(CDRU_MISC_RESET_CONTROL, CDRU_SATA_RESET_N);
for (port = 0; port < USE_SATA_PORTS; port++) {
sata_port = brcm_stingray_get_sata_port(port);
mmio_write_32(SATA_APBT_IDM_PORT_REG(sata_port,
SATA_APBT0_IDM_RESET_CONTROL),
0x0);
mmio_setbits_32(SATA_APBT_IDM_PORT_REG(sata_port,
SATA_APBT0_IDM_IO_CONTROL_DIRECT),
IO_CONTROL_DIRECT_CLK_ENABLE);
mmio_write_32(SATA_IDM_PORT_REG(sata_port,
SATA0_IDM_RESET_CONTROL),
0x0);
mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
SATA_CORE_MEM_CTRL_ARRPOWERONIN);
mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
SATA_CORE_MEM_CTRL_ARRPOWEROKIN);
mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
SATA_CORE_MEM_CTRL_POWERONIN);
mmio_setbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
SATA_CORE_MEM_CTRL_POWEROKIN);
mmio_clrbits_32(SATA_PORT_REG(sata_port, SATA_CORE_MEM_CTRL),
SATA_CORE_MEM_CTRL_ISO);
mmio_clrbits_32(SATA_PORT_REG(sata_port,
SATA_SATA_TOP_CTRL_BUS_CTRL),
(DMA_DESCR_ENDIAN_CTRL | DMA_DATA_ENDIAN_CTRL));
}
mmio_setbits_32(NIC400_SATA_NOC_SECURITY1, SATA_NOC_SECURITY1_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY2, SATA_NOC_SECURITY2_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY3, SATA_NOC_SECURITY3_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY4, SATA_NOC_SECURITY4_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY5, SATA_NOC_SECURITY5_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY6, SATA_NOC_SECURITY6_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY7, SATA_NOC_SECURITY7_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY8, SATA_NOC_SECURITY8_FIELD);
mmio_setbits_32(NIC400_SATA_NOC_SECURITY9, SATA_NOC_SECURITY9_FIELD);
INFO("sata init done\n");
}
#else
static void poweroff_sata_pll(void)
{
/*
* SATA subsystem is clocked by LCPLL0 which is enabled by
* default by bootrom. Poweroff the PLL if SATA is not used
*/
/* enable isolation */
mmio_setbits_32(CRMU_AON_CTRL1,
BIT(CRMU_AON_CTRL1__LCPLL0_ISO_IN));
/* Power off the SATA PLL/LDO */
mmio_clrbits_32(CRMU_AON_CTRL1,
(BIT(CRMU_AON_CTRL1__LCPLL0_PWRON_LDO) |
BIT(CRMU_AON_CTRL1__LCPLL0_PWR_ON)));
}
#endif
#ifdef USE_AMAC
#ifdef EMULATION_SETUP
#define ICFG_AMAC_STRAP_CONFIG (HSLS_ICFG_REGS_BASE + 0xa5c)
#define ICFG_AMAC_STRAP_DLL_BYPASS (1 << 2)
#endif
#define ICFG_AMAC_MAC_CTRL_REG (HSLS_ICFG_REGS_BASE + 0xa6c)
#define ICFG_AMAC_MAC_FULL_DUPLEX (1 << 1)
#define ICFG_AMAC_RGMII_PHY_CONFIG (HSLS_ICFG_REGS_BASE + 0xa60)
#define ICFG_AMAC_SID_CONTROL (HSLS_ICFG_REGS_BASE + 0xb10)
#define ICFG_AMAC_SID_SHIFT 5
#define ICFG_AMAC_SID_AWADDR_OFFSET 0x0
#define ICFG_AMAC_SID_ARADDR_OFFSET 0x4
#define AMAC_RPHY_1000_DATARATE (1 << 20)
#define AMAC_RPHY_FULL_DUPLEX (1 << 5)
#define AMAC_RPHY_SPEED_OFFSET 2
#define AMAC_RPHY_SPEED_MASK (7 << AMAC_RPHY_SPEED_OFFSET)
#define AMAC_RPHY_1G_SPEED (2 << AMAC_RPHY_SPEED_OFFSET)
#define ICFG_AMAC_MEM_PWR_CTRL (HSLS_ICFG_REGS_BASE + 0xa68)
#define AMAC_ISO BIT(9)
#define AMAC_STDBY BIT(8)
#define AMAC_ARRPOWEROKIN BIT(7)
#define AMAC_ARRPOWERONIN BIT(6)
#define AMAC_POWEROKIN BIT(5)
#define AMAC_POWERONIN BIT(4)
#define AMAC_IDM0_IO_CONTROL_DIRECT (HSLS_IDM_REGS_BASE + 0x4408)
#define AMAC_IDM0_ARCACHE_OFFSET 16
#define AMAC_IDM0_AWCACHE_OFFSET 7
#define AMAC_IDM0_ARCACHE_MASK (0xF << AMAC_IDM0_ARCACHE_OFFSET)
#define AMAC_IDM0_AWCACHE_MASK (0xF << AMAC_IDM0_AWCACHE_OFFSET)
/* ARCACHE - AWCACHE is 0xB7 for write-back no allocate */
#define AMAC_IDM0_ARCACHE_VAL (0xb << AMAC_IDM0_ARCACHE_OFFSET)
#define AMAC_IDM0_AWCACHE_VAL (0x7 << AMAC_IDM0_AWCACHE_OFFSET)
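/*
 * Bring up the AMAC Ethernet block: program its stream ID, power up the
 * memories, release the APB reset, configure the RGMII PHY for 1 Gbps
 * full duplex and set write-back AXI cache attributes on the IDM port.
 */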
static void brcm_stingray_amac_init(void)
{
unsigned int val;
uintptr_t icfg_amac_sid = ICFG_AMAC_SID_CONTROL;
VERBOSE("amac init start\n");
val = SR_SID_VAL(0x3, 0x0, 0x4) << ICFG_AMAC_SID_SHIFT;
mmio_write_32(icfg_amac_sid + ICFG_AMAC_SID_AWADDR_OFFSET, val);
mmio_write_32(icfg_amac_sid + ICFG_AMAC_SID_ARADDR_OFFSET, val);
mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_ARRPOWEROKIN);
mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_ARRPOWERONIN);
mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_POWEROKIN);
mmio_setbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_POWERONIN);
mmio_clrbits_32(ICFG_AMAC_MEM_PWR_CTRL, AMAC_ISO);
mmio_write_32(APBR_IDM_RESET_CONTROL, 0x0);
mmio_clrsetbits_32(ICFG_AMAC_RGMII_PHY_CONFIG, AMAC_RPHY_SPEED_MASK,
AMAC_RPHY_1G_SPEED); /*1 Gbps line rate*/
/* 1000 datarate set */
mmio_setbits_32(ICFG_AMAC_RGMII_PHY_CONFIG, AMAC_RPHY_1000_DATARATE);
/* full duplex */
mmio_setbits_32(ICFG_AMAC_RGMII_PHY_CONFIG, AMAC_RPHY_FULL_DUPLEX);
#ifdef EMULATION_SETUP
/* DLL bypass */
mmio_setbits_32(ICFG_AMAC_STRAP_CONFIG, ICFG_AMAC_STRAP_DLL_BYPASS);
#endif
/* serdes full duplex */
mmio_setbits_32(ICFG_AMAC_MAC_CTRL_REG, ICFG_AMAC_MAC_FULL_DUPLEX);
mmio_clrsetbits_32(AMAC_IDM0_IO_CONTROL_DIRECT, AMAC_IDM0_ARCACHE_MASK,
AMAC_IDM0_ARCACHE_VAL);
mmio_clrsetbits_32(AMAC_IDM0_IO_CONTROL_DIRECT, AMAC_IDM0_AWCACHE_MASK,
AMAC_IDM0_AWCACHE_VAL);
INFO("amac init done\n");
}
#endif /* USE_AMAC */
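/*
 * Power up the PKA memory: walk the arrpoweron/arrpowerok/poweron/powerok
 * handshake and then remove the isolation.
 */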
static void brcm_stingray_pka_meminit(void)
{
uintptr_t icfg_mem_ctrl = ICFG_PKA_MEM_PWR_CTRL;
VERBOSE("pka meminit start\n");
VERBOSE(" - arrpoweron\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_PKA_MEM_PWR_CTRL__ARRPOWERONIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_PKA_MEM_PWR_CTRL__ARRPOWERONOUT))
;
VERBOSE(" - arrpowerok\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_PKA_MEM_PWR_CTRL__ARRPOWEROKIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_PKA_MEM_PWR_CTRL__ARRPOWEROKOUT))
;
VERBOSE(" - poweron\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_PKA_MEM_PWR_CTRL__POWERONIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_PKA_MEM_PWR_CTRL__POWERONOUT))
;
VERBOSE(" - powerok\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_PKA_MEM_PWR_CTRL__POWEROKIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_PKA_MEM_PWR_CTRL__POWEROKOUT))
;
/* Wait sometime */
mdelay(1);
VERBOSE(" - remove isolation\n");
mmio_clrbits_32(icfg_mem_ctrl, ICFG_PKA_MEM_PWR_CTRL__ISO);
INFO("pka meminit done\n");
}
static void brcm_stingray_smmu_init(void)
{
unsigned int val;
uintptr_t smmu_base = SMMU_BASE;
VERBOSE("smmu init start\n");
/* Configure SCR0 */
VERBOSE(" - configure scr0\n");
val = mmio_read_32(smmu_base + 0x0);
val |= (0x1 << 12);
mmio_write_32(smmu_base + 0x0, val);
/* Reserve context banks for secure masters */
arm_smmu_reserve_secure_cntxt();
/* Print configuration */
VERBOSE(" - scr0=0x%x scr1=0x%x scr2=0x%x\n",
mmio_read_32(smmu_base + 0x0),
mmio_read_32(smmu_base + 0x4),
mmio_read_32(smmu_base + 0x8));
VERBOSE(" - idr0=0x%x idr1=0x%x idr2=0x%x\n",
mmio_read_32(smmu_base + 0x20),
mmio_read_32(smmu_base + 0x24),
mmio_read_32(smmu_base + 0x28));
VERBOSE(" - idr3=0x%x idr4=0x%x idr5=0x%x\n",
mmio_read_32(smmu_base + 0x2c),
mmio_read_32(smmu_base + 0x30),
mmio_read_32(smmu_base + 0x34));
VERBOSE(" - idr6=0x%x idr7=0x%x\n",
mmio_read_32(smmu_base + 0x38),
mmio_read_32(smmu_base + 0x3c));
INFO("smmu init done\n");
}
static void brcm_stingray_dma_pl330_meminit(void)
{
uintptr_t icfg_mem_ctrl = ICFG_DMAC_MEM_PWR_CTRL;
VERBOSE("dmac meminit start\n");
VERBOSE(" - arrpoweron\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_DMAC_MEM_PWR_CTRL__ARRPOWERONIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_DMAC_MEM_PWR_CTRL__ARRPOWERONOUT))
;
VERBOSE(" - arrpowerok\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_DMAC_MEM_PWR_CTRL__ARRPOWEROKIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_DMAC_MEM_PWR_CTRL__ARRPOWEROKOUT))
;
VERBOSE(" - poweron\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_DMAC_MEM_PWR_CTRL__POWERONIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_DMAC_MEM_PWR_CTRL__POWERONOUT))
;
VERBOSE(" - powerok\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_DMAC_MEM_PWR_CTRL__POWEROKIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_DMAC_MEM_PWR_CTRL__POWEROKOUT))
;
/* Wait sometime */
mdelay(1);
VERBOSE(" - remove isolation\n");
mmio_clrbits_32(icfg_mem_ctrl, ICFG_DMAC_MEM_PWR_CTRL__ISO);
INFO("dmac meminit done\n");
}
/* Program the CRMU access ranges to allow non-secure access */
static void brcm_stingray_crmu_access_init(void)
{
/* Enable 0x6641c001 - 0x6641c701 for non secure access */
mmio_write_32(CRMU_CORE_ADDR_RANGE0_LOW, 0x6641c001);
mmio_write_32(CRMU_CORE_ADDR_RANGE0_LOW + 0x4, 0x6641c701);
/* Enable 0x6641d001 - 0x66424b01 for non secure access */
mmio_write_32(CRMU_CORE_ADDR_RANGE1_LOW, 0x6641d001);
mmio_write_32(CRMU_CORE_ADDR_RANGE1_LOW + 0x4, 0x66424b01);
/* Enable 0x66425001 - 0x66425f01 for non secure access */
mmio_write_32(CRMU_CORE_ADDR_RANGE2_LOW, 0x66425001);
mmio_write_32(CRMU_CORE_ADDR_RANGE2_LOW + 0x4, 0x66425f01);
INFO("crmu access init done\n");
}
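/*
 * Program the SCR TBU ports so that their AXI transactions are issued with
 * awdomain/ardomain set to 0x1; the GIC port is left with both domains
 * cleared.
 */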
static void brcm_stingray_scr_init(void)
{
unsigned int val;
uintptr_t scr_base = SCR_BASE;
unsigned int clr_mask = SCR_AXCACHE_CONFIG_MASK;
unsigned int set_mask = SCR_TBUX_AXCACHE_CONFIG;
VERBOSE("scr init start\n");
/* awdomain=0x1 and ardomain=0x1 */
mmio_clrsetbits_32(scr_base + 0x0, clr_mask, set_mask);
val = mmio_read_32(scr_base + 0x0);
VERBOSE(" - set tbu0_config=0x%x\n", val);
/* awdomain=0x1 and ardomain=0x1 */
mmio_clrsetbits_32(scr_base + 0x4, clr_mask, set_mask);
val = mmio_read_32(scr_base + 0x4);
VERBOSE(" - set tbu1_config=0x%x\n", val);
/* awdomain=0x1 and ardomain=0x1 */
mmio_clrsetbits_32(scr_base + 0x8, clr_mask, set_mask);
val = mmio_read_32(scr_base + 0x8);
VERBOSE(" - set tbu2_config=0x%x\n", val);
/* awdomain=0x1 and ardomain=0x1 */
mmio_clrsetbits_32(scr_base + 0xc, clr_mask, set_mask);
val = mmio_read_32(scr_base + 0xc);
VERBOSE(" - set tbu3_config=0x%x\n", val);
/* awdomain=0x1 and ardomain=0x1 */
mmio_clrsetbits_32(scr_base + 0x10, clr_mask, set_mask);
val = mmio_read_32(scr_base + 0x10);
VERBOSE(" - set tbu4_config=0x%x\n", val);
/* awdomain=0x0 and ardomain=0x0 */
mmio_clrbits_32(scr_base + 0x14, clr_mask);
val = mmio_read_32(scr_base + 0x14);
VERBOSE(" - set gic_config=0x%x\n", val);
INFO("scr init done\n");
}
static void brcm_stingray_hsls_tzpcprot_init(void)
{
unsigned int val;
uintptr_t tzpcdecprot_base = HSLS_TZPC_BASE;
VERBOSE("hsls tzpcprot init start\n");
/* Treat third-party masters as non-secured */
val = 0;
val |= BIT(6); /* SDIO1 */
val |= BIT(5); /* SDIO0 */
val |= BIT(0); /* AMAC */
mmio_write_32(tzpcdecprot_base + 0x810, val);
/* Print TZPC decode status registers */
VERBOSE(" - tzpcdecprot0=0x%x\n",
mmio_read_32(tzpcdecprot_base + 0x800));
VERBOSE(" - tzpcdecprot1=0x%x\n",
mmio_read_32(tzpcdecprot_base + 0x80c));
INFO("hsls tzpcprot init done\n");
}
#ifdef USE_I2S
#define ICFG_AUDIO_POWER_CTRL (HSLS_ICFG_REGS_BASE + 0xaa8)
#define ICFG_AUDIO_POWER_CTRL__POWERONIN BIT(0)
#define ICFG_AUDIO_POWER_CTRL__POWEROKIN BIT(1)
#define ICFG_AUDIO_POWER_CTRL__ARRPOWERONIN BIT(2)
#define ICFG_AUDIO_POWER_CTRL__ARRPOWEROKIN BIT(3)
#define ICFG_AUDIO_POWER_CTRL__POWERONOUT BIT(4)
#define ICFG_AUDIO_POWER_CTRL__POWEROKOUT BIT(5)
#define ICFG_AUDIO_POWER_CTRL__ARRPOWERONOUT BIT(6)
#define ICFG_AUDIO_POWER_CTRL__ARRPOWEROKOUT BIT(7)
#define ICFG_AUDIO_POWER_CTRL__ISO BIT(8)
#define ICFG_AUDIO_SID_CONTROL (HSLS_ICFG_REGS_BASE + 0xaf8)
#define ICFG_AUDIO_SID_SHIFT 5
#define ICFG_AUDIO_SID_AWADDR_OFFSET 0x0
#define ICFG_AUDIO_SID_ARADDR_OFFSET 0x4
#define I2S_RESET_CONTROL (HSLS_IDM_REGS_BASE + 0x1800)
#define I2S_IDM_IO_CONTROL (HSLS_IDM_REGS_BASE + 0x1408)
#define IO_CONTROL_CLK_ENABLE BIT(0)
#define I2S_IDM0_ARCACHE_OFFSET 16
#define I2S_IDM0_AWCACHE_OFFSET 20
#define I2S_IDM0_ARCACHE_MASK (0xF << I2S_IDM0_ARCACHE_OFFSET)
#define I2S_IDM0_AWCACHE_MASK (0xF << I2S_IDM0_AWCACHE_OFFSET)
/* ARCACHE - AWCACHE is 0x22 Normal Non-cacheable Non-bufferable. */
#define I2S_IDM0_ARCACHE_VAL (0x2 << I2S_IDM0_ARCACHE_OFFSET)
#define I2S_IDM0_AWCACHE_VAL (0x2 << I2S_IDM0_AWCACHE_OFFSET)
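/*
 * Bring up the audio (I2S) block: release its reset, set non-cacheable AXI
 * attributes, enable the clock, program the stream ID and walk the memory
 * power-up handshake before removing isolation.
 */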
static void brcm_stingray_audio_init(void)
{
unsigned int val;
uintptr_t icfg_mem_ctrl = ICFG_AUDIO_POWER_CTRL;
uintptr_t icfg_audio_sid = ICFG_AUDIO_SID_CONTROL;
mmio_write_32(I2S_RESET_CONTROL, 0x0);
mmio_clrsetbits_32(I2S_IDM_IO_CONTROL, I2S_IDM0_ARCACHE_MASK,
I2S_IDM0_ARCACHE_VAL);
mmio_clrsetbits_32(I2S_IDM_IO_CONTROL, I2S_IDM0_AWCACHE_MASK,
I2S_IDM0_AWCACHE_VAL);
mmio_setbits_32(I2S_IDM_IO_CONTROL, IO_CONTROL_CLK_ENABLE);
VERBOSE("audio meminit start\n");
VERBOSE(" - configure stream_id = 0x6001\n");
val = SR_SID_VAL(0x3, 0x0, 0x1) << ICFG_AUDIO_SID_SHIFT;
mmio_write_32(icfg_audio_sid + ICFG_AUDIO_SID_AWADDR_OFFSET, val);
mmio_write_32(icfg_audio_sid + ICFG_AUDIO_SID_ARADDR_OFFSET, val);
VERBOSE(" - arrpoweron\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_AUDIO_POWER_CTRL__ARRPOWERONIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_AUDIO_POWER_CTRL__ARRPOWERONOUT))
;
VERBOSE(" - arrpowerok\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_AUDIO_POWER_CTRL__ARRPOWEROKIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_AUDIO_POWER_CTRL__ARRPOWEROKOUT))
;
VERBOSE(" - poweron\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_AUDIO_POWER_CTRL__POWERONIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_AUDIO_POWER_CTRL__POWERONOUT))
;
VERBOSE(" - powerok\n");
mmio_setbits_32(icfg_mem_ctrl,
ICFG_AUDIO_POWER_CTRL__POWEROKIN);
while (!(mmio_read_32(icfg_mem_ctrl) &
ICFG_AUDIO_POWER_CTRL__POWEROKOUT))
;
/* Wait sometime */
mdelay(1);
VERBOSE(" - remove isolation\n");
mmio_clrbits_32(icfg_mem_ctrl, ICFG_AUDIO_POWER_CTRL__ISO);
INFO("audio meminit done\n");
}
#endif /* USE_I2S */
/*
* These defines do not match the regfile, but they are renamed in a way
* that makes them much more readable
*/
#define SCR_GPV_SMMU_NS (SCR_GPV_BASE + 0x28)
#define SCR_GPV_GIC500_NS (SCR_GPV_BASE + 0x34)
#define HSLS_GPV_NOR_S0_NS (HSLS_GPV_BASE + 0x14)
#define HSLS_GPV_IDM1_NS (HSLS_GPV_BASE + 0x18)
#define HSLS_GPV_IDM2_NS (HSLS_GPV_BASE + 0x1c)
#define HSLS_SDIO0_SLAVE_NS (HSLS_GPV_BASE + 0x20)
#define HSLS_SDIO1_SLAVE_NS (HSLS_GPV_BASE + 0x24)
#define HSLS_GPV_APBY_NS (HSLS_GPV_BASE + 0x2c)
#define HSLS_GPV_APBZ_NS (HSLS_GPV_BASE + 0x30)
#define HSLS_GPV_APBX_NS (HSLS_GPV_BASE + 0x34)
#define HSLS_GPV_APBS_NS (HSLS_GPV_BASE + 0x38)
#define HSLS_GPV_QSPI_S0_NS (HSLS_GPV_BASE + 0x68)
#define HSLS_GPV_APBR_NS (HSLS_GPV_BASE + 0x6c)
#define FS4_CRYPTO_GPV_RM_SLAVE_NS (FS4_CRYPTO_GPV_BASE + 0x8)
#define FS4_CRYPTO_GPV_APB_SWITCH_NS (FS4_CRYPTO_GPV_BASE + 0xc)
#define FS4_RAID_GPV_RM_SLAVE_NS (FS4_RAID_GPV_BASE + 0x8)
#define FS4_RAID_GPV_APB_SWITCH_NS (FS4_RAID_GPV_BASE + 0xc)
#define FS4_CRYPTO_IDM_NS (NIC400_FS_NOC_ROOT + 0x1c)
#define FS4_RAID_IDM_NS (NIC400_FS_NOC_ROOT + 0x28)
#define FS4_CRYPTO_RING_COUNT 32
#define FS4_CRYPTO_DME_COUNT 10
#define FS4_CRYPTO_AE_COUNT 10
#define FS4_CRYPTO_START_STREAM_ID 0x4000
#define FS4_CRYPTO_MSI_DEVICE_ID 0x4100
#define FS4_RAID_RING_COUNT 32
#define FS4_RAID_DME_COUNT 8
#define FS4_RAID_AE_COUNT 8
#define FS4_RAID_START_STREAM_ID 0x4200
#define FS4_RAID_MSI_DEVICE_ID 0x4300
#define FS6_PKI_AXI_SLAVE_NS \
(NIC400_FS_NOC_ROOT + NIC400_FS_NOC_SECURITY2_OFFSET)
#define FS6_PKI_AE_DME_APB_NS \
(NIC400_FS_NOC_ROOT + NIC400_FS_NOC_SECURITY7_OFFSET)
#define FS6_PKI_IDM_IO_CONTROL_DIRECT 0x0
#define FS6_PKI_IDM_RESET_CONTROL 0x0
#define FS6_PKI_RING_COUNT 32
#define FS6_PKI_DME_COUNT 1
#define FS6_PKI_AE_COUNT 4
#define FS6_PKI_START_STREAM_ID 0x4000
#define FS6_PKI_MSI_DEVICE_ID 0x4100
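/*
 * Open the listed GPV/NoC slave ports and IDMs for non-secure access.
 * FS4, CCN and DDR register access are only opened when the corresponding
 * build options are enabled.
 */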
static void brcm_stingray_security_init(void)
{
unsigned int val;
val = mmio_read_32(SCR_GPV_SMMU_NS);
val |= BIT(0); /* SMMU NS = 1 */
mmio_write_32(SCR_GPV_SMMU_NS, val);
val = mmio_read_32(SCR_GPV_GIC500_NS);
val |= BIT(0); /* GIC-500 NS = 1 */
mmio_write_32(SCR_GPV_GIC500_NS, val);
val = mmio_read_32(HSLS_GPV_NOR_S0_NS);
val |= BIT(0); /* NOR SLAVE NS = 1 */
mmio_write_32(HSLS_GPV_NOR_S0_NS, val);
val = mmio_read_32(HSLS_GPV_IDM1_NS);
val |= BIT(0); /* DMA IDM NS = 1 */
val |= BIT(1); /* I2S IDM NS = 1 */
val |= BIT(2); /* AMAC IDM NS = 1 */
val |= BIT(3); /* SDIO0 IDM NS = 1 */
val |= BIT(4); /* SDIO1 IDM NS = 1 */
val |= BIT(5); /* DS_3 IDM NS = 1 */
mmio_write_32(HSLS_GPV_IDM1_NS, val);
val = mmio_read_32(HSLS_GPV_IDM2_NS);
val |= BIT(2); /* QSPI IDM NS = 1 */
val |= BIT(1); /* NOR IDM NS = 1 */
val |= BIT(0); /* NAND IDM NS = 1 */
mmio_write_32(HSLS_GPV_IDM2_NS, val);
val = mmio_read_32(HSLS_GPV_APBY_NS);
val |= BIT(10); /* I2S NS = 1 */
val |= BIT(4); /* IOPAD NS = 1 */
val |= 0xf; /* UARTx NS = 1 */
mmio_write_32(HSLS_GPV_APBY_NS, val);
val = mmio_read_32(HSLS_GPV_APBZ_NS);
val |= BIT(2); /* RNG NS = 1 */
mmio_write_32(HSLS_GPV_APBZ_NS, val);
val = mmio_read_32(HSLS_GPV_APBS_NS);
val |= 0x3; /* SPIx NS = 1 */
mmio_write_32(HSLS_GPV_APBS_NS, val);
val = mmio_read_32(HSLS_GPV_APBR_NS);
val |= BIT(7); /* QSPI APB NS = 1 */
val |= BIT(6); /* NAND APB NS = 1 */
val |= BIT(5); /* NOR APB NS = 1 */
val |= BIT(4); /* AMAC APB NS = 1 */
val |= BIT(1); /* DMA S1 APB NS = 1 */
mmio_write_32(HSLS_GPV_APBR_NS, val);
val = mmio_read_32(HSLS_SDIO0_SLAVE_NS);
val |= BIT(0); /* SDIO0 NS = 1 */
mmio_write_32(HSLS_SDIO0_SLAVE_NS, val);
val = mmio_read_32(HSLS_SDIO1_SLAVE_NS);
val |= BIT(0); /* SDIO1 NS = 1 */
mmio_write_32(HSLS_SDIO1_SLAVE_NS, val);
val = mmio_read_32(HSLS_GPV_APBX_NS);
val |= BIT(14); /* SMBUS1 NS = 1 */
val |= BIT(13); /* GPIO NS = 1 */
val |= BIT(12); /* WDT NS = 1 */
val |= BIT(11); /* SMBUS0 NS = 1 */
val |= BIT(10); /* Timer7 NS = 1 */
val |= BIT(9); /* Timer6 NS = 1 */
val |= BIT(8); /* Timer5 NS = 1 */
val |= BIT(7); /* Timer4 NS = 1 */
val |= BIT(6); /* Timer3 NS = 1 */
val |= BIT(5); /* Timer2 NS = 1 */
val |= BIT(4); /* Timer1 NS = 1 */
val |= BIT(3); /* Timer0 NS = 1 */
val |= BIT(2); /* MDIO NS = 1 */
val |= BIT(1); /* PWM NS = 1 */
mmio_write_32(HSLS_GPV_APBX_NS, val);
val = mmio_read_32(HSLS_GPV_QSPI_S0_NS);
val |= BIT(0); /* QSPI NS = 1 */
mmio_write_32(HSLS_GPV_QSPI_S0_NS, val);
#ifdef USE_FS4
val = 0x1; /* FS4 Crypto rm_slave */
mmio_write_32(FS4_CRYPTO_GPV_RM_SLAVE_NS, val);
val = 0x1; /* FS4 Crypto apb_switch */
mmio_write_32(FS4_CRYPTO_GPV_APB_SWITCH_NS, val);
val = 0x1; /* FS4 Raid rm_slave */
mmio_write_32(FS4_RAID_GPV_RM_SLAVE_NS, val);
val = 0x1; /* FS4 Raid apb_switch */
mmio_write_32(FS4_RAID_GPV_APB_SWITCH_NS, val);
val = 0x1; /* FS4 Crypto IDM */
mmio_write_32(FS4_CRYPTO_IDM_NS, val);
val = 0x1; /* FS4 RAID IDM */
mmio_write_32(FS4_RAID_IDM_NS, val);
#endif
#ifdef BL31_CCN_NONSECURE
/* Enable non-secure access to CCN registers */
mmio_write_32(OLY_MN_REGISTERS_NODE0_SECURE_ACCESS, 0x1);
#endif
#ifdef DDR_CTRL_PHY_NONSECURE
mmio_write_32(SCR_NOC_DDR_REGISTER_ACCESS, 0x1);
#endif
paxc_mhb_ns_init();
/* unlock scr idm for non secure access */
mmio_write_32(SCR_NOC_SECURITY0, 0xffffffff);
INFO("security init done\r\n");
}
void brcm_gpio_pad_ns_init(void)
{
/* Configure all GPIO pads for non-secure world access */
mmio_write_32(GPIO_S_CNTRL_REG, 0xffffffff); /* 128-140 gpio pads */
mmio_write_32(GPIO_S_CNTRL_REG + 0x4, 0xffffffff); /* 96-127 gpio pad */
mmio_write_32(GPIO_S_CNTRL_REG + 0x8, 0xffffffff); /* 64-95 gpio pad */
mmio_write_32(GPIO_S_CNTRL_REG + 0xc, 0xffffffff); /* 32-63 gpio pad */
mmio_write_32(GPIO_S_CNTRL_REG + 0x10, 0xffffffff); /* 0-31 gpio pad */
}
#ifndef USE_DDR
static void brcm_stingray_sram_ns_init(void)
{
uintptr_t sram_root = TZC400_FS_SRAM_ROOT;
uintptr_t noc_root = NIC400_FS_NOC_ROOT;
mmio_write_32(sram_root + GATE_KEEPER_OFFSET, 1);
mmio_write_32(sram_root + REGION_ATTRIBUTES_0_OFFSET, 0xc0000000);
mmio_write_32(sram_root + REGION_ID_ACCESS_0_OFFSET, 0x00010001);
mmio_write_32(noc_root + NIC400_FS_NOC_SECURITY4_OFFSET, 0x1);
INFO(" stingray sram ns init done.\n");
}
#endif
static void ccn_pre_init(void)
{
/*
* Set WFC bit of RN-I nodes where FS4 is connected.
* This is required in order to wait for read/write request
* completion acknowledgment. Otherwise the FS4 Ring Manager
* gets stale data because of re-ordering of read/write
* requests at the CCN level.
*/
mmio_setbits_32(OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL,
OLY_RNI3PDVM_REGISTERS_NODE8_AUX_CTL_WFC);
}
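/*
 * Post-initialization CCN HN-I tuning: record the RN-I nodes with PCIe RCs
 * attached, set the SER_DEVNE_WR bit in the SA aux control and disable PoS
 * and early write completion on the HN-I node.
 */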
static void ccn_post_init(void)
{
mmio_setbits_32(OLY_HNI_REGISTERS_NODE0_PCIERC_RNI_NODEID_LIST,
SRP_RNI_PCIE_CONNECTED);
mmio_setbits_32(OLY_HNI_REGISTERS_NODE0_SA_AUX_CTL,
SA_AUX_CTL_SER_DEVNE_WR);
mmio_clrbits_32(OLY_HNI_REGISTERS_NODE0_POS_CONTROL,
POS_CONTROL_HNI_POS_EN);
mmio_clrbits_32(OLY_HNI_REGISTERS_NODE0_SA_AUX_CTL,
SA_AUX_CTL_POS_EARLY_WR_COMP_EN);
}
#ifndef BL31_BOOT_PRELOADED_SCP
static void crmu_init(void)
{
/*
* Configure CRMU for using SMMU
*/
/* Program CRMU Stream ID */
mmio_write_32(CRMU_MASTER_AXI_ARUSER_CONFIG,
(CRMU_STREAM_ID << CRMU_SID_SHIFT));
mmio_write_32(CRMU_MASTER_AXI_AWUSER_CONFIG,
(CRMU_STREAM_ID << CRMU_SID_SHIFT));
/* Create Identity mapping */
arm_smmu_create_identity_map(DOMAIN_CRMU);
/* Enable Client Port for Secure Masters */
arm_smmu_enable_secure_client_port();
}
#endif
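/*
 * Initialize the FSx offload engines. With USE_FS4, both the crypto and
 * RAID engines are initialized; USE_FS4 and USE_FS6 are mutually exclusive.
 */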
static void brcm_fsx_init(void)
{
#if defined(USE_FS4) && defined(USE_FS6)
#error "USE_FS4 and USE_FS6 should not be used together"
#endif
#ifdef USE_FS4
fsx_init(eFS4_CRYPTO, FS4_CRYPTO_RING_COUNT, FS4_CRYPTO_DME_COUNT,
FS4_CRYPTO_AE_COUNT, FS4_CRYPTO_START_STREAM_ID,
FS4_CRYPTO_MSI_DEVICE_ID, FS4_CRYPTO_IDM_IO_CONTROL_DIRECT,
FS4_CRYPTO_IDM_RESET_CONTROL, FS4_CRYPTO_BASE,
FS4_CRYPTO_DME_BASE);
fsx_init(eFS4_RAID, FS4_RAID_RING_COUNT, FS4_RAID_DME_COUNT,
FS4_RAID_AE_COUNT, FS4_RAID_START_STREAM_ID,
FS4_RAID_MSI_DEVICE_ID, FS4_RAID_IDM_IO_CONTROL_DIRECT,
FS4_RAID_IDM_RESET_CONTROL, FS4_RAID_BASE,
FS4_RAID_DME_BASE);
fsx_meminit("raid",
FS4_RAID_IDM_IO_CONTROL_DIRECT,
FS4_RAID_IDM_IO_STATUS);
#endif
}
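/*
 * Pass the chip ID and revision to BL33 through the shared DDR region.
 */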
static void bcm_bl33_pass_info(void)
{
struct bl33_info *info = (struct bl33_info *)BL33_SHARED_DDR_BASE;
if (sizeof(*info) > BL33_SHARED_DDR_SIZE)
WARN("bl33 shared area not reserved\n");
info->version = BL33_INFO_VERSION;
info->chip.chip_id = PLAT_CHIP_ID_GET;
info->chip.rev_id = PLAT_CHIP_REV_GET;
}
DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, CORTEX_A72_L2CTLR_EL1)
void plat_bcm_bl31_early_platform_setup(void *from_bl2,
bl_params_t *plat_params_from_bl2)
{
#ifdef BL31_BOOT_PRELOADED_SCP
image_info_t scp_image_info;
scp_image_info.image_base = PRELOADED_SCP_BASE;
scp_image_info.image_size = PRELOADED_SCP_SIZE;
bcm_bl2_plat_handle_scp_bl2(&scp_image_info);
#endif
/*
* In BL31, logs are saved to DDR and we have much larger space to
* store logs. We can now afford to save all logs >= the 'INFO' level
*/
bcm_elog_init((void *)BCM_ELOG_BL31_BASE, BCM_ELOG_BL31_SIZE,
LOG_LEVEL_INFO);
INFO("L2CTLR = 0x%lx\n", read_l2ctlr_el1());
brcm_timer_sync_init();
brcm_stingray_dma_pl330_init();
brcm_stingray_dma_pl330_meminit();
brcm_stingray_spi_pl022_init(APBS_IDM_IDM_RESET_CONTROL);
#ifdef USE_AMAC
brcm_stingray_amac_init();
#endif
brcm_stingray_sdio_init();
#ifdef NCSI_IO_DRIVE_STRENGTH_MA
brcm_stingray_ncsi_init();
#endif
#ifdef USE_USB
xhci_phy_init();
#endif
#ifdef USE_SATA
brcm_stingray_sata_init();
#else
poweroff_sata_pll();
#endif
ccn_pre_init();
brcm_fsx_init();
brcm_stingray_smmu_init();
brcm_stingray_pka_meminit();
brcm_stingray_crmu_access_init();
brcm_stingray_scr_init();
brcm_stingray_hsls_tzpcprot_init();
#ifdef USE_I2S
brcm_stingray_audio_init();
#endif
ccn_post_init();
paxb_init();
paxc_init();
#ifndef BL31_BOOT_PRELOADED_SCP
crmu_init();
#endif
/* Note: this should be the last thing, because the
* FS4 GPV registers only work after the FS4 block
* (i.e. crypto, raid, cop) is out of reset.
*/
brcm_stingray_security_init();
brcm_gpio_pad_ns_init();
#ifndef USE_DDR
brcm_stingray_sram_ns_init();
#endif
#ifdef BL31_FORCE_CPU_FULL_FREQ
bcm_set_ihost_pll_freq(0x0, PLL_FREQ_FULL);
#endif
brcm_stingray_gain_qspi_control();
#ifdef USE_PAXC
/*
* Check that the handshake has occurred and report ChiMP status.
* This is required. Otherwise (especially on Palladium)
* Linux might have reached the PCIe stage while
* ChiMP has not yet booted. Note that the nic_mode case has already
* been handled above.
*/
if ((boot_source_get() != BOOT_SOURCE_QSPI) &&
(!bcm_chimp_is_nic_mode()) &&
(!bcm_chimp_wait_handshake())
) {
/* Does ChiMP report an error ? */
uint32_t err;
err = bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG);
if ((err & CHIMP_ERROR_MASK) == 0)
/* ChiMP has not booted yet, but no error reported */
WARN("ChiMP not booted yet, but no error reported.\n");
}
#if DEBUG
if (boot_source_get() != BOOT_SOURCE_QSPI)
INFO("Current ChiMP Status: 0x%x; bpe_mod reg: 0x%x\n"
"fastboot register: 0x%x; handshake register 0x%x\n",
bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_STAT_REG),
bcm_chimp_read_ctrl(CHIMP_REG_CTRL_BPE_MODE_REG),
bcm_chimp_read_ctrl(CHIMP_REG_CTRL_FSTBOOT_PTR_REG),
bcm_chimp_read(CHIMP_REG_ECO_RESERVED));
#endif /* DEBUG */
#endif
#ifdef FS4_DISABLE_CLOCK
flush_dcache_range(
PLAT_BRCM_TRUSTED_SRAM_BASE,
PLAT_BRCM_TRUSTED_SRAM_SIZE);
fs4_disable_clocks(true, true, true);
#endif
/* pass information to BL33 through shared DDR region */
bcm_bl33_pass_info();
/*
* We are not yet at the end of BL31, but we can stop logging here so we
* do not need to add 'bcm_elog_exit' to the standard BL31 code. The
* benefit of capturing BL31 logs after this point is minimal in a
* production system.
*/
bcm_elog_exit();
#if !BRCM_DISABLE_TRUSTED_WDOG
/*
* The secure watchdog was started earlier in BL2; now it is time to
* stop it.
*/
sp805_stop(ARM_SP805_TWDG_BASE);
#endif
}
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/ccn.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
#include <brcm_scpi.h>
#include <chimp.h>
#include <cmn_plat_util.h>
#include <plat_brcm.h>
#include <platform_def.h>
#include <sr_utils.h>
#include "m0_cfg.h"
#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL2])
#define VENDOR_RST_TYPE_SHIFT 4
#if HW_ASSISTED_COHERENCY
/*
* On systems where participant CPUs are cache-coherent, we can use spinlocks
* instead of bakery locks.
*/
spinlock_t event_lock;
#define event_lock_get(_lock) spin_lock(&_lock)
#define event_lock_release(_lock) spin_unlock(&_lock)
#else
/*
* Use bakery locks for state coordination as not all participants are
* cache coherent now.
*/
DEFINE_BAKERY_LOCK(event_lock);
#define event_lock_get(_lock) bakery_lock_get(&_lock)
#define event_lock_release(_lock) bakery_lock_release(&_lock)
#endif
static int brcm_pwr_domain_on(u_register_t mpidr)
{
/*
* SCP takes care of powering up parent power domains so we
* only need to care about level 0
*/
scpi_set_brcm_power_state(mpidr, scpi_power_on, scpi_power_on,
scpi_power_on);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Handler called when a power level has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from. This handler would never be invoked with
* the system power domain uninitialized as either the primary would have taken
* care of it as part of cold boot or the first core awakened from system
* suspend would have already initialized it.
******************************************************************************/
static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
/* Assert that the system power domain need not be initialized */
assert(SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_RUN);
assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
/*
* Perform the common cluster specific operations i.e enable coherency
* if this cluster was off.
*/
if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF) {
INFO("Cluster #%lu entering to snoop/dvm domain\n", cluster_id);
ccn_enter_snoop_dvm_domain(1 << cluster_id);
}
/* Program the gic per-cpu distributor or re-distributor interface */
plat_brcm_gic_pcpu_init();
/* Enable the gic cpu interface */
plat_brcm_gic_cpuif_enable();
}
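/*
 * Clear the STANDBYWFI and STANDBYWFIL2 event bits for the calling core's
 * cluster in the CDRU, under the event lock, so no stale WFI status is
 * left behind.
 */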
static void brcm_power_down_common(void)
{
unsigned int standbywfil2, standbywfi;
uint64_t mpidr = read_mpidr_el1();
switch (MPIDR_AFFLVL1_VAL(mpidr)) {
case 0x0:
standbywfi = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFI;
standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFIL2;
break;
case 0x1:
standbywfi = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFI;
standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFIL2;
break;
case 0x2:
standbywfi = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFI;
standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFIL2;
break;
case 0x3:
standbywfi = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFI;
standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFIL2;
break;
default:
ERROR("Invalid cluster #%llx\n", MPIDR_AFFLVL1_VAL(mpidr));
return;
}
/* Clear the WFI status bit */
event_lock_get(event_lock);
mmio_setbits_32(CDRU_PROC_EVENT_CLEAR,
(1 << (standbywfi + MPIDR_AFFLVL0_VAL(mpidr))) |
(1 << standbywfil2));
event_lock_release(event_lock);
}
/*
* Helper function to inform the SCP of the power down state.
*/
static void brcm_scp_suspend(const psci_power_state_t *target_state)
{
uint32_t cluster_state = scpi_power_on;
uint32_t system_state = scpi_power_on;
/* Check if power down at system power domain level is requested */
if (SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
system_state = scpi_power_retention;
/* Check if Cluster is to be turned off */
if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
cluster_state = scpi_power_off;
/*
* Ask the SCP to power down the appropriate components depending upon
* their state.
*/
scpi_set_brcm_power_state(read_mpidr_el1(),
scpi_power_off,
cluster_state,
system_state);
}
/*
* Helper function to turn off a CPU power domain and its parent power domains
* if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
* call the suspend helper here.
*/
static void brcm_scp_off(const psci_power_state_t *target_state)
{
brcm_scp_suspend(target_state);
}
static void brcm_pwr_domain_off(const psci_power_state_t *target_state)
{
unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
/* Prevent interrupts from spuriously waking up this cpu */
plat_brcm_gic_cpuif_disable();
/* Turn redistributor off */
plat_brcm_gic_redistif_off();
/* If Cluster is to be turned off, disable coherency */
if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
ccn_exit_snoop_dvm_domain(1 << cluster_id);
brcm_power_down_common();
brcm_scp_off(target_state);
}
/*******************************************************************************
* Handler called when the CPU power domain is about to enter standby.
******************************************************************************/
static void brcm_cpu_standby(plat_local_state_t cpu_state)
{
unsigned int scr;
assert(cpu_state == PLAT_LOCAL_STATE_RET);
scr = read_scr_el3();
/*
* Enable the Non secure interrupt to wake the CPU.
* In GICv3 affinity routing mode, the non secure group1 interrupts use
* the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
* Enabling both the bits works for both GICv2 mode and GICv3 affinity
* routing mode.
*/
write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
isb();
dsb();
wfi();
/*
* Restore SCR to the original value; synchronisation of scr_el3 is
* done by the eret in el3_exit to save some execution cycles.
*/
write_scr_el3(scr);
}
/*
* Helper function to shutdown the system via SCPI.
*/
static void __dead2 brcm_scp_sys_shutdown(void)
{
/*
* Disable GIC CPU interface to prevent pending interrupt
* from waking up the AP from WFI.
*/
plat_brcm_gic_cpuif_disable();
/* Flush and invalidate data cache */
dcsw_op_all(DCCISW);
/* Bring the cluster out of the coherency domain as it is going to die */
plat_brcm_interconnect_exit_coherency();
brcm_power_down_common();
/* Send the power down request to the SCP */
scpi_sys_power_state(scpi_system_shutdown);
wfi();
ERROR("BRCM System Off: operation not handled.\n");
panic();
}
/*
* Helper function to reset the system
*/
static void __dead2 brcm_scp_sys_reset(unsigned int reset_type)
{
/*
* Disable GIC CPU interface to prevent pending interrupt
* from waking up the AP from WFI.
*/
plat_brcm_gic_cpuif_disable();
/* Flush and invalidate data cache */
dcsw_op_all(DCCISW);
/* Bring the cluster out of the coherency domain as it is going to die */
plat_brcm_interconnect_exit_coherency();
brcm_power_down_common();
/* Send the system reset request to the SCP
*
* As per the PSCI spec, the system power state can be
* 0 -> Shutdown
* 1 -> Reboot - board-level reset
* 2 -> Reset  - SoC-level reset
*
* The spec allocates 8 bits (2 nibbles) for this. One nibble is
* sufficient for sending the state, so we use the 2nd nibble for the
* vendor-defined reset type.
*/
scpi_sys_power_state((reset_type << VENDOR_RST_TYPE_SHIFT) |
scpi_system_reboot);
wfi();
ERROR("BRCM System Reset: operation not handled.\n");
panic();
}
static void __dead2 brcm_system_reset(void)
{
unsigned int reset_type;
if (bcm_chimp_is_nic_mode())
reset_type = SOFT_RESET_L3;
else
reset_type = SOFT_SYS_RESET_L1;
brcm_scp_sys_reset(reset_type);
}
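/*
 * SYSTEM_RESET2 handler: architectural resets are mapped to a warm (L3)
 * reset, while vendor resets program the requested boot source straps
 * before the SCP reset is issued.
 */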
static int brcm_system_reset2(int is_vendor, int reset_type,
u_register_t cookie)
{
if (!is_vendor) {
/* Architectural warm boot: only warm reset is supported */
reset_type = SOFT_RESET_L3;
} else {
uint32_t boot_source = (uint32_t)cookie;
boot_source &= BOOT_SOURCE_MASK;
brcm_stingray_set_straps(boot_source);
}
brcm_scp_sys_reset(reset_type);
/*
* brcm_scp_sys_reset cannot return (it is a __dead function),
* but brcm_system_reset2 has to return some value, even in
* this case.
*/
return 0;
}
static int brcm_validate_ns_entrypoint(uintptr_t entrypoint)
{
/*
* Check if the non secure entrypoint lies within the non
* secure DRAM.
*/
if ((entrypoint >= BRCM_NS_DRAM1_BASE) &&
(entrypoint < (BRCM_NS_DRAM1_BASE + BRCM_NS_DRAM1_SIZE)))
return PSCI_E_SUCCESS;
#ifndef AARCH32
if ((entrypoint >= BRCM_DRAM2_BASE) &&
(entrypoint < (BRCM_DRAM2_BASE + BRCM_DRAM2_SIZE)))
return PSCI_E_SUCCESS;
if ((entrypoint >= BRCM_DRAM3_BASE) &&
(entrypoint < (BRCM_DRAM3_BASE + BRCM_DRAM3_SIZE)))
return PSCI_E_SUCCESS;
#endif
return PSCI_E_INVALID_ADDRESS;
}
/*******************************************************************************
* ARM standard platform handler called to check the validity of the power state
* parameter.
******************************************************************************/
static int brcm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int pstate = psci_get_pstate_type(power_state);
int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
int i;
assert(req_state);
if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It is possible to enter standby only at power level 0.
* Ignore any other power level.
*/
if (pwr_lvl != MPIDR_AFFLVL0)
return PSCI_E_INVALID_PARAMS;
req_state->pwr_domain_state[MPIDR_AFFLVL0] =
PLAT_LOCAL_STATE_RET;
} else {
for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
req_state->pwr_domain_state[i] =
PLAT_LOCAL_STATE_OFF;
}
/*
* We expect the 'state id' to be zero.
*/
if (psci_get_pstate_id(power_state))
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
* platform will take care of registering the handlers with PSCI.
******************************************************************************/
plat_psci_ops_t plat_brcm_psci_pm_ops = {
.pwr_domain_on = brcm_pwr_domain_on,
.pwr_domain_on_finish = brcm_pwr_domain_on_finish,
.pwr_domain_off = brcm_pwr_domain_off,
.cpu_standby = brcm_cpu_standby,
.system_off = brcm_scp_sys_shutdown,
.system_reset = brcm_system_reset,
.system_reset2 = brcm_system_reset2,
.validate_ns_entrypoint = brcm_validate_ns_entrypoint,
.validate_power_state = brcm_validate_power_state,
};
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const struct plat_psci_ops **psci_ops)
{
*psci_ops = &plat_brcm_psci_pm_ops;
/* Setup mailbox with entry point. */
mmio_write_64(CRMU_CFG_BASE + offsetof(M0CFG, core_cfg.rvbar),
sec_entrypoint);
return 0;
}
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/debug.h>
#include <drivers/console.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <plat/common/common_def.h>
#include <fsx.h>
#include <platform_def.h>
#include <sr_utils.h>
#define FS4_IDM_IO_CONTROL_DIRECT__SRAM_CLK_EN 0
#define FS4_IDM_IO_CONTROL_DIRECT__MEM_POWERON 11
#define FS4_IDM_IO_CONTROL_DIRECT__MEM_POWEROK 12
#define FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWERON 13
#define FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWEROK 14
#define FS4_IDM_IO_CONTROL_DIRECT__MEM_ISO 15
#define FS4_IDM_IO_CONTROL_DIRECT__CLK_EN 31
#define FS4_IDM_IO_STATUS__MEM_POWERON 0
#define FS4_IDM_IO_STATUS__MEM_POWEROK 1
#define FS4_IDM_IO_STATUS__MEM_ARRPOWERON 2
#define FS4_IDM_IO_STATUS__MEM_ARRPOWEROK 3
#define FS4_IDM_IO_STATUS__MEM_ALLOK 0xf
#define FS4_IDM_RESET_CONTROL__RESET 0
#define FSX_RINGx_BASE(__b, __i) \
((__b) + (__i) * 0x10000)
#define FSX_RINGx_VERSION_NUMBER(__b, __i) \
(FSX_RINGx_BASE(__b, __i) + 0x0)
#define FSX_RINGx_MSI_DEV_ID(__b, __i) \
(FSX_RINGx_BASE(__b, __i) + 0x44)
#define FSX_COMM_RINGx_BASE(__b, __i) \
((__b) + 0x200000 + (__i) * 0x100)
#define FSX_COMM_RINGx_CONTROL(__b, __i) \
(FSX_COMM_RINGx_BASE(__b, __i) + 0x0)
#define FSX_COMM_RINGx_CONTROL__AXI_ID 8
#define FSX_COMM_RINGx_CONTROL__AXI_ID_MASK 0x1f
#define FSX_COMM_RINGx_CONTROL__PRIORITY 4
#define FSX_COMM_RINGx_CONTROL__PRIORITY_MASK 0x7
#define FSX_COMM_RINGx_CONTROL__AE_GROUP 0
#define FSX_COMM_RINGx_CONTROL__AE_GROUP_MASK 0x7
#define FSX_COMM_RINGx_MSI_DEV_ID(__b, __i) \
(FSX_COMM_RINGx_BASE(__b, __i) + 0x4)
#define FSX_AEx_BASE(__b, __i) \
((__b) + 0x202000 + (__i) * 0x100)
#define FSX_AEx_CONTROL_REGISTER(__b, __i) \
(FSX_AEx_BASE(__b, __i) + 0x0)
#define FSX_AEx_CONTROL_REGISTER__ACTIVE 4
#define FSX_AEx_CONTROL_REGISTER__GROUP_ID 0
#define FSX_AEx_CONTROL_REGISTER__GROUP_ID_MASK 0x7
#define FSX_COMM_RM_RING_SECURITY_SETTING 0x0
#define FSX_COMM_RM_SSID_CONTROL 0x4
#define FSX_COMM_RM_SSID_CONTROL__RING_BITS 5
#define FSX_COMM_RM_SSID_CONTROL__MASK 0x3ff
#define FSX_COMM_RM_CONTROL_REGISTER 0x8
#define FSX_COMM_RM_CONTROL_REGISTER__CONFIG_DONE 2
#define FSX_COMM_RM_CONTROL_REGISTER__AE_TIMEOUT 5
#define FSX_COMM_RM_CONTROL_REGISTER__AE_LOCKING 7
#define FSX_COMM_RM_TIMER_CONTROL_0 0xc
#define FSX_COMM_RM_TIMER_CONTROL_0__FAST 16
#define FSX_COMM_RM_TIMER_CONTROL_0__MEDIUM 0
#define FSX_COMM_RM_TIMER_CONTROL_1 0x10
#define FSX_COMM_RM_TIMER_CONTROL_1__SLOW 16
#define FSX_COMM_RM_TIMER_CONTROL_1__IDLE 0
#define FSX_COMM_RM_BURST_BD_THRESHOLD 0x14
#define FSX_COMM_RM_BURST_BD_THRESHOLD_LOW 0
#define FSX_COMM_RM_BURST_BD_THRESHOLD_HIGH 16
#define FSX_COMM_RM_BURST_LENGTH 0x18
#define FSX_COMM_RM_BURST_LENGTH__FOR_DDR_ADDR_GEN 16
#define FSX_COMM_RM_BURST_LENGTH__FOR_DDR_ADDR_GEN_MASK 0x1ff
#define FSX_COMM_RM_BURST_LENGTH__FOR_TOGGLE 0
#define FSX_COMM_RM_BURST_LENGTH__FOR_TOGGLE_MASK 0x1ff
#define FSX_COMM_RM_FIFO_THRESHOLD 0x1c
#define FSX_COMM_RM_FIFO_THRESHOLD__BD_FIFO_FULL 16
#define FSX_COMM_RM_FIFO_THRESHOLD__BD_FIFO_FULL_MASK 0x1ff
#define FSX_COMM_RM_FIFO_THRESHOLD__AE_FIFO_FULL 0
#define FSX_COMM_RM_FIFO_THRESHOLD__AE_FIFO_FULL_MASK 0x1f
#define FSX_COMM_RM_AE_TIMEOUT 0x24
#define FSX_COMM_RM_RING_FLUSH_TIMEOUT 0x2c
#define FSX_COMM_RM_MEMORY_CONFIGURATION 0x30
#define FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWERONIN 12
#define FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWEROKIN 13
#define FSX_COMM_RM_MEMORY_CONFIGURATION__POWERONIN 14
#define FSX_COMM_RM_MEMORY_CONFIGURATION__POWEROKIN 15
#define FSX_COMM_RM_AXI_CONTROL 0x34
#define FSX_COMM_RM_AXI_CONTROL__WRITE_CHANNEL_EN 28
#define FSX_COMM_RM_AXI_CONTROL__READ_CHANNEL_EN 24
#define FSX_COMM_RM_AXI_CONTROL__AWQOS 20
#define FSX_COMM_RM_AXI_CONTROL__ARQOS 16
#define FSX_COMM_RM_AXI_CONTROL__AWPROT 12
#define FSX_COMM_RM_AXI_CONTROL__ARPROT 8
#define FSX_COMM_RM_AXI_CONTROL__AWCACHE 4
#define FSX_COMM_RM_AXI_CONTROL__ARCACHE 0
#define FSX_COMM_RM_CONFIG_INTERRUPT_STATUS_CLEAR 0x48
#define FSX_COMM_RM_GROUP_PKT_EXTENSION_SUPPORT 0xc0
#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD 0xc8
#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MASK 0x1ff
#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MAX 16
#define FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MIN 0
#define FSX_COMM_RM_GROUP_RING_COUNT 0xcc
#define FSX_COMM_RM_MAIN_HW_INIT_DONE 0x12c
#define FSX_COMM_RM_MAIN_HW_INIT_DONE__MASK 0x1
#define FSX_DMEx_BASE(__b, __i) \
((__b) + (__i) * 0x1000)
#define FSX_DMEx_AXI_CONTROL(__b, __i) \
(FSX_DMEx_BASE(__b, __i) + 0x4)
#define FSX_DMEx_AXI_CONTROL__WRITE_CHANNEL_EN 28
#define FSX_DMEx_AXI_CONTROL__READ_CHANNEL_EN 24
#define FSX_DMEx_AXI_CONTROL__AWQOS 20
#define FSX_DMEx_AXI_CONTROL__ARQOS 16
#define FSX_DMEx_AXI_CONTROL__AWCACHE 4
#define FSX_DMEx_AXI_CONTROL__ARCACHE 0
#define FSX_DMEx_WR_FIFO_THRESHOLD(__b, __i) \
(FSX_DMEx_BASE(__b, __i) + 0xc)
#define FSX_DMEx_WR_FIFO_THRESHOLD__MASK 0x3ff
#define FSX_DMEx_WR_FIFO_THRESHOLD__MAX 10
#define FSX_DMEx_WR_FIFO_THRESHOLD__MIN 0
#define FSX_DMEx_RD_FIFO_THRESHOLD(__b, __i) \
(FSX_DMEx_BASE(__b, __i) + 0x14)
#define FSX_DMEx_RD_FIFO_THRESHOLD__MASK 0x3ff
#define FSX_DMEx_RD_FIFO_THRESHOLD__MAX 10
#define FSX_DMEx_RD_FIFO_THRESHOLD__MIN 0
#define FS6_SUB_TOP_BASE 0x66D8F800
#define FS6_PKI_DME_RESET 0x4
#define PKI_DME_RESET 1
char *fsx_type_names[] = {
"fs4-raid",
"fs4-crypto",
"fs6-pki",
};
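/*
 * Bring an FSx offload engine out of reset and program its Ring Manager
 * (RM): ring security, stream-ID base, timers, burst/FIFO thresholds,
 * memory and AXI configuration. The AEs are then activated and the DMEs
 * and rings are configured. For FS4 (raid/crypto) the engine clock and
 * IDM reset are handled here; for FS6 (pki) only the DME is taken out
 * of reset.
 */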
void fsx_init(eFSX_TYPE fsx_type,
unsigned int ring_count,
unsigned int dme_count,
unsigned int ae_count,
unsigned int start_stream_id,
unsigned int msi_dev_id,
uintptr_t idm_io_control_direct,
uintptr_t idm_reset_control,
uintptr_t base,
uintptr_t dme_base)
{
int try;
unsigned int i, v, data;
uintptr_t fs4_idm_io_control_direct = idm_io_control_direct;
uintptr_t fs4_idm_reset_control = idm_reset_control;
uintptr_t fsx_comm_rm = (base + 0x203000);
VERBOSE("fsx %s init start\n", fsx_type_names[fsx_type]);
if (fsx_type == eFS4_RAID || fsx_type == eFS4_CRYPTO) {
/* Enable FSx engine clock */
VERBOSE(" - enable fsx clock\n");
mmio_write_32(fs4_idm_io_control_direct,
(1U << FS4_IDM_IO_CONTROL_DIRECT__CLK_EN));
udelay(500);
/* Reset FSx engine */
VERBOSE(" - reset fsx\n");
v = mmio_read_32(fs4_idm_reset_control);
v |= (1 << FS4_IDM_RESET_CONTROL__RESET);
mmio_write_32(fs4_idm_reset_control, v);
udelay(500);
v = mmio_read_32(fs4_idm_reset_control);
v &= ~(1 << FS4_IDM_RESET_CONTROL__RESET);
mmio_write_32(fs4_idm_reset_control, v);
} else {
/*
 * By default the RM and AE are already out of reset, so only the
 * DME reset is handled here.
 */
v = mmio_read_32(FS6_SUB_TOP_BASE + FS6_PKI_DME_RESET);
v &= ~(PKI_DME_RESET);
mmio_write_32(FS6_SUB_TOP_BASE + FS6_PKI_DME_RESET, v);
}
/* Wait for HW-init done */
VERBOSE(" - wait for HW-init done\n");
try = 10000;
do {
udelay(1);
data = mmio_read_32(fsx_comm_rm +
FSX_COMM_RM_MAIN_HW_INIT_DONE);
try--;
} while (!(data & FSX_COMM_RM_MAIN_HW_INIT_DONE__MASK) && (try > 0));
if (try <= 0)
ERROR("fsx_comm_rm + 0x%x: 0x%x\n",
data, FSX_COMM_RM_MAIN_HW_INIT_DONE);
/* Make all rings non-secured */
VERBOSE(" - make all rings non-secured\n");
v = 0xffffffff;
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_RING_SECURITY_SETTING, v);
/* Set start stream-id for rings */
VERBOSE(" - set start stream-id for rings to 0x%x\n",
start_stream_id);
v = start_stream_id >> FSX_COMM_RM_SSID_CONTROL__RING_BITS;
v &= FSX_COMM_RM_SSID_CONTROL__MASK;
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_SSID_CONTROL, v);
/* Set timer configuration */
VERBOSE(" - set timer configuration\n");
v = 0x0271 << FSX_COMM_RM_TIMER_CONTROL_0__MEDIUM;
v |= (0x0138 << FSX_COMM_RM_TIMER_CONTROL_0__FAST);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_TIMER_CONTROL_0, v);
v = 0x09c4 << FSX_COMM_RM_TIMER_CONTROL_1__IDLE;
v |= (0x04e2 << FSX_COMM_RM_TIMER_CONTROL_1__SLOW);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_TIMER_CONTROL_1, v);
v = 0x0000f424;
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_RING_FLUSH_TIMEOUT, v);
/* Set burst length and fifo threshold */
VERBOSE(" - set burst length, fifo and bd threshold\n");
v = 0x0;
v |= (0x8 << FSX_COMM_RM_BURST_LENGTH__FOR_DDR_ADDR_GEN);
v |= (0x8 << FSX_COMM_RM_BURST_LENGTH__FOR_TOGGLE);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_BURST_LENGTH, v);
v = 0x0;
v |= (0x67 << FSX_COMM_RM_FIFO_THRESHOLD__BD_FIFO_FULL);
v |= (0x18 << FSX_COMM_RM_FIFO_THRESHOLD__AE_FIFO_FULL);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_FIFO_THRESHOLD, v);
v = 0x0;
v |= (0x8 << FSX_COMM_RM_BURST_BD_THRESHOLD_LOW);
v |= (0x8 << FSX_COMM_RM_BURST_BD_THRESHOLD_HIGH);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_BURST_BD_THRESHOLD, v);
/* Set memory configuration */
VERBOSE(" - set memory configuration\n");
v = 0x0;
v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__POWERONIN);
v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__POWEROKIN);
v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWERONIN);
v |= (1 << FSX_COMM_RM_MEMORY_CONFIGURATION__ARRPOWEROKIN);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_MEMORY_CONFIGURATION, v);
/* AXI configuration for RM */
v = 0;
v |= (0x1 << FSX_COMM_RM_AXI_CONTROL__WRITE_CHANNEL_EN);
v |= (0x1 << FSX_COMM_RM_AXI_CONTROL__READ_CHANNEL_EN);
v |= (0xe << FSX_COMM_RM_AXI_CONTROL__AWQOS);
v |= (0xa << FSX_COMM_RM_AXI_CONTROL__ARQOS);
v |= (0x2 << FSX_COMM_RM_AXI_CONTROL__AWPROT);
v |= (0x2 << FSX_COMM_RM_AXI_CONTROL__ARPROT);
v |= (0xf << FSX_COMM_RM_AXI_CONTROL__AWCACHE);
v |= (0xf << FSX_COMM_RM_AXI_CONTROL__ARCACHE);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_AXI_CONTROL, v);
VERBOSE(" - set AXI control = 0x%x\n",
mmio_read_32(fsx_comm_rm + FSX_COMM_RM_AXI_CONTROL));
v = 0x0;
v |= (0x10 << FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MAX);
v |= (0x10 << FSX_COMM_RM_AXI_READ_BURST_THRESHOLD__MIN);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_AXI_READ_BURST_THRESHOLD, v);
VERBOSE(" - set AXI read burst threshold = 0x%x\n",
mmio_read_32(fsx_comm_rm + FSX_COMM_RM_AXI_READ_BURST_THRESHOLD));
/*
 * Configure group ring count for all groups. By default we schedule
 * extended packets on all AEs/DMEs in a group.
 */
v = (dme_count & 0xf) << 0;
v |= (dme_count & 0xf) << 4;
v |= (dme_count & 0xf) << 8;
v |= (dme_count & 0xf) << 12;
v |= (dme_count & 0xf) << 16;
v |= (dme_count & 0xf) << 20;
v |= (dme_count & 0xf) << 24;
v |= (dme_count & 0xf) << 28;
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_GROUP_RING_COUNT, v);
/*
 * Due to a HW issue, spurious interrupts are generated. To work
 * around this, SW needs to clear the config status interrupts
 * before setting CONFIG_DONE.
 */
mmio_write_32(fsx_comm_rm +
FSX_COMM_RM_CONFIG_INTERRUPT_STATUS_CLEAR,
0xffffffff);
/* Configure RM control */
VERBOSE(" - configure RM control\n");
v = mmio_read_32(fsx_comm_rm + FSX_COMM_RM_CONTROL_REGISTER);
v |= (1 << FSX_COMM_RM_CONTROL_REGISTER__AE_LOCKING);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_CONTROL_REGISTER, v);
v |= (1 << FSX_COMM_RM_CONTROL_REGISTER__CONFIG_DONE);
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_CONTROL_REGISTER, v);
/* Configure AE timeout */
VERBOSE(" - configure AE timeout\n");
v = 0x00003fff;
mmio_write_32(fsx_comm_rm + FSX_COMM_RM_AE_TIMEOUT, v);
/* Initialize all AEs */
for (i = 0; i < ae_count; i++) {
VERBOSE(" - initialize AE%d\n", i);
v = (0x1 << FSX_AEx_CONTROL_REGISTER__ACTIVE);
mmio_write_32(FSX_AEx_CONTROL_REGISTER(base, i), v);
}
/* Initialize all DMEs */
for (i = 0; i < dme_count; i++) {
VERBOSE(" - initialize DME%d\n", i);
v = 0;
v |= (0x1 << FSX_DMEx_AXI_CONTROL__WRITE_CHANNEL_EN);
v |= (0x1 << FSX_DMEx_AXI_CONTROL__READ_CHANNEL_EN);
v |= (0xe << FSX_DMEx_AXI_CONTROL__AWQOS);
v |= (0xa << FSX_DMEx_AXI_CONTROL__ARQOS);
v |= (0xf << FSX_DMEx_AXI_CONTROL__AWCACHE);
v |= (0xf << FSX_DMEx_AXI_CONTROL__ARCACHE);
mmio_write_32(FSX_DMEx_AXI_CONTROL(dme_base, i), v);
VERBOSE(" -- AXI_CONTROL = 0x%x\n",
mmio_read_32(FSX_DMEx_AXI_CONTROL(dme_base, i)));
v = 0;
v |= (0x4 << FSX_DMEx_WR_FIFO_THRESHOLD__MIN);
v |= (0x4 << FSX_DMEx_WR_FIFO_THRESHOLD__MAX);
mmio_write_32(FSX_DMEx_WR_FIFO_THRESHOLD(dme_base, i), v);
VERBOSE(" -- WR_FIFO_THRESHOLD = 0x%x\n",
mmio_read_32(FSX_DMEx_WR_FIFO_THRESHOLD(dme_base, i)));
v = 0;
v |= (0x4 << FSX_DMEx_RD_FIFO_THRESHOLD__MIN);
v |= (0x4 << FSX_DMEx_RD_FIFO_THRESHOLD__MAX);
mmio_write_32(FSX_DMEx_RD_FIFO_THRESHOLD(dme_base, i), v);
VERBOSE(" -- RD_FIFO_THRESHOLD = 0x%x\n",
mmio_read_32(FSX_DMEx_RD_FIFO_THRESHOLD(dme_base, i)));
}
/* Configure ring axi id and msi device id */
for (i = 0; i < ring_count; i++) {
VERBOSE(" - ring%d version=0x%x\n", i,
mmio_read_32(FSX_RINGx_VERSION_NUMBER(base, i)));
mmio_write_32(FSX_COMM_RINGx_MSI_DEV_ID(base, i),
msi_dev_id);
v = 0;
v |= ((i & FSX_COMM_RINGx_CONTROL__AXI_ID_MASK) <<
FSX_COMM_RINGx_CONTROL__AXI_ID);
mmio_write_32(FSX_COMM_RINGx_CONTROL(base, i), v);
}
INFO("fsx %s init done\n", fsx_type_names[fsx_type]);
}
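/*
 * Power up the FSx memories through the IDM IO control register and poll
 * the corresponding status bits (array power-on/ok, then power-on/ok).
 * Memory isolation is removed only if all four power bits report OK.
 */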
void fsx_meminit(const char *name,
uintptr_t idm_io_control_direct,
uintptr_t idm_io_status)
{
int try;
unsigned int val;
VERBOSE("fsx %s meminit start\n", name);
VERBOSE(" - arrpoweron\n");
mmio_setbits_32(idm_io_control_direct,
BIT(FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWERON));
while (!(mmio_read_32(idm_io_status) &
BIT(FS4_IDM_IO_STATUS__MEM_ARRPOWERON)))
;
VERBOSE(" - arrpowerok\n");
mmio_setbits_32(idm_io_control_direct,
(1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_ARRPOWEROK));
while (!(mmio_read_32(idm_io_status) &
BIT(FS4_IDM_IO_STATUS__MEM_ARRPOWEROK)))
;
VERBOSE(" - poweron\n");
mmio_setbits_32(idm_io_control_direct,
(1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_POWERON));
while (!(mmio_read_32(idm_io_status) &
BIT(FS4_IDM_IO_STATUS__MEM_POWERON)))
;
VERBOSE(" - powerok\n");
mmio_setbits_32(idm_io_control_direct,
(1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_POWEROK));
while (!(mmio_read_32(idm_io_status) &
BIT(FS4_IDM_IO_STATUS__MEM_POWEROK)))
;
/* Final check on all power bits */
try = 10;
do {
val = mmio_read_32(idm_io_status);
if (val == FS4_IDM_IO_STATUS__MEM_ALLOK)
break;
/* Wait some time */
mdelay(1);
try--;
} while (try > 0);
/* Remove memory isolation if things are fine. */
if (try <= 0) {
INFO(" - powerup failed\n");
} else {
VERBOSE(" - remove isolation\n");
mmio_clrbits_32(idm_io_control_direct,
(1 << FS4_IDM_IO_CONTROL_DIRECT__MEM_ISO));
VERBOSE(" - powerup done\n");
}
INFO("fsx %s meminit done\n", name);
}
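/*
 * Gate the FS4 SRAM, crypto and raid clocks individually; when all three
 * are disabled, the root DME and FS4 clocks are gated as well.
 */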
void fs4_disable_clocks(bool disable_sram,
bool disable_crypto,
bool disable_raid)
{
VERBOSE("fs4 disable clocks start\n");
if (disable_sram) {
VERBOSE(" - disable sram clock\n");
mmio_clrbits_32(FS4_SRAM_IDM_IO_CONTROL_DIRECT,
(1 << FS4_IDM_IO_CONTROL_DIRECT__SRAM_CLK_EN));
}
if (disable_crypto) {
VERBOSE(" - disable crypto clock\n");
mmio_setbits_32(CDRU_GENPLL5_CONTROL1,
CDRU_GENPLL5_CONTROL1__CHNL1_CRYPTO_AE_CLK);
}
if (disable_raid) {
VERBOSE(" - disable raid clock\n");
mmio_setbits_32(CDRU_GENPLL5_CONTROL1,
CDRU_GENPLL5_CONTROL1__CHNL2_RAID_AE_CLK);
}
if (disable_sram && disable_crypto && disable_raid) {
VERBOSE(" - disable root clock\n");
mmio_setbits_32(CDRU_GENPLL5_CONTROL1,
CDRU_GENPLL5_CONTROL1__CHNL0_DME_CLK);
mmio_setbits_32(CDRU_GENPLL2_CONTROL1,
CDRU_GENPLL2_CONTROL1__CHNL6_FS4_CLK);
}
INFO("fs4 disable clocks done\n");
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <dmu.h>
#include <ihost_pm.h>
#include <platform_def.h>
#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST1 2
#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST2 1
#define CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST3 0
#define CDRU_MISC_RESET_CONTROL__CDRU_IH1_RESET 9
#define CDRU_MISC_RESET_CONTROL__CDRU_IH2_RESET 8
#define CDRU_MISC_RESET_CONTROL__CDRU_IH3_RESET 7
#define A72_CRM_SOFTRESETN_0 0x480
#define A72_CRM_SOFTRESETN_1 0x484
#define A72_CRM_DOMAIN_4_CONTROL 0x810
#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_DFT 3
#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_MEM 6
#define A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_I_O 0
#define A72_CRM_SUBSYSTEM_MEMORY_CONTROL_3 0xB4C
#define MEMORY_PDA_HI_SHIFT 0x0
#define A72_CRM_PLL_PWR_ON 0x70
#define A72_CRM_PLL_PWR_ON__PLL0_ISO_PLLOUT 4
#define A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO 1
#define A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL 0
#define A72_CRM_SUBSYSTEM_MEMORY_CONTROL_2 0xB48
#define A72_CRM_PLL_INTERRUPT_STATUS 0x8c
#define A72_CRM_PLL_INTERRUPT_STATUS__PLL0_LOCK_LOST_STATUS 8
#define A72_CRM_PLL_INTERRUPT_STATUS__PLL0_LOCK_STATUS 9
#define A72_CRM_INTERRUPT_ENABLE 0x4
#define A72_CRM_INTERRUPT_ENABLE__PLL0_INT_ENABLE 4
#define A72_CRM_PLL_INTERRUPT_ENABLE 0x88
#define A72_CRM_PLL_INTERRUPT_ENABLE__PLL0_LOCK_STATUS_INT_ENB 9
#define A72_CRM_PLL_INTERRUPT_ENABLE__PLL0_LOCK_LOST_STATUS_INT_ENB 8
#define A72_CRM_PLL0_CFG0_CTRL 0x120
#define A72_CRM_PLL0_CFG1_CTRL 0x124
#define A72_CRM_PLL0_CFG2_CTRL 0x128
#define A72_CRM_PLL0_CFG3_CTRL 0x12C
#define A72_CRM_CORE_CONFIG_DBGCTRL__DBGROMADDRV 0
#define A72_CRM_CORE_CONFIG_DBGCTRL 0xD50
#define A72_CRM_CORE_CONFIG_DBGROM_LO 0xD54
#define A72_CRM_CORE_CONFIG_DBGROM_HI 0xD58
#define A72_CRM_SUBSYSTEM_CONFIG_1__DBGL1RSTDISABLE 2
#define A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN 0
#define A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN 1
#define A72_CRM_AXI_CLK_DESC 0x304
#define A72_CRM_ACP_CLK_DESC 0x308
#define A72_CRM_ATB_CLK_DESC 0x30C
#define A72_CRM_PCLKDBG_DESC 0x310
#define A72_CRM_CLOCK_MODE_CONTROL 0x40
#define A72_CRM_CLOCK_MODE_CONTROL__CLK_CHANGE_TRIGGER 0
#define A72_CRM_CLOCK_CONTROL_0 0x200
#define A72_CRM_CLOCK_CONTROL_0__ARM_HW_SW_ENABLE_SEL 0
#define A72_CRM_CLOCK_CONTROL_0__AXI_HW_SW_ENABLE_SEL 2
#define A72_CRM_CLOCK_CONTROL_0__ACP_HW_SW_ENABLE_SEL 4
#define A72_CRM_CLOCK_CONTROL_0__ATB_HW_SW_ENABLE_SEL 6
#define A72_CRM_CLOCK_CONTROL_0__PCLKDBG_HW_SW_ENA_SEL 8
#define A72_CRM_CLOCK_CONTROL_1 0x204
#define A72_CRM_CLOCK_CONTROL_1__TMON_HW_SW_ENABLE_SEL 6
#define A72_CRM_CLOCK_CONTROL_1__APB_HW_SW_ENABLE_SEL 8
#define A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN 0
#define A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN 1
#define A72_CRM_SOFTRESETN_0__AXI_SOFTRESETN 9
#define A72_CRM_SOFTRESETN_0__ACP_SOFTRESETN 10
#define A72_CRM_SOFTRESETN_0__ATB_SOFTRESETN 11
#define A72_CRM_SOFTRESETN_0__PCLKDBG_SOFTRESETN 12
#define A72_CRM_SOFTRESETN_0__TMON_SOFTRESETN 15
#define A72_CRM_SOFTRESETN_0__L2_SOFTRESETN 3
#define A72_CRM_SOFTRESETN_1__APB_SOFTRESETN 8
/* core related regs */
#define A72_CRM_DOMAIN_0_CONTROL 0x800
#define A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_MEM 0x6
#define A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_I_O 0x0
#define A72_CRM_DOMAIN_1_CONTROL 0x804
#define A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_MEM 0x6
#define A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_I_O 0x0
#define A72_CRM_CORE_CONFIG_RVBA0_LO 0xD10
#define A72_CRM_CORE_CONFIG_RVBA0_MID 0xD14
#define A72_CRM_CORE_CONFIG_RVBA0_HI 0xD18
#define A72_CRM_CORE_CONFIG_RVBA1_LO 0xD20
#define A72_CRM_CORE_CONFIG_RVBA1_MID 0xD24
#define A72_CRM_CORE_CONFIG_RVBA1_HI 0xD28
#define A72_CRM_SUBSYSTEM_CONFIG_0 0xC80
#define A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT 4
#define A72_CRM_SOFTRESETN_0__COREPOR0_SOFTRESETN 4
#define A72_CRM_SOFTRESETN_0__COREPOR1_SOFTRESETN 5
#define A72_CRM_SOFTRESETN_1__CORE0_SOFTRESETN 0
#define A72_CRM_SOFTRESETN_1__DEBUG0_SOFTRESETN 4
#define A72_CRM_SOFTRESETN_1__CORE1_SOFTRESETN 1
#define A72_CRM_SOFTRESETN_1__DEBUG1_SOFTRESETN 5
#define SPROC_MEMORY_BISR 0
static int cluster_power_status[PLAT_BRCM_CLUSTER_COUNT] = {CLUSTER_POWER_ON,
CLUSTER_POWER_OFF,
CLUSTER_POWER_OFF,
CLUSTER_POWER_OFF};
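/*
 * Power up a secondary A72 cluster (ihost): release the CDRU reset,
 * remove I/O and memory isolation, power up and configure PLL0 using the
 * frequency selection read from cluster 0, program the bus clock
 * dividers, release the bus/periph/L2 soft resets and enable the
 * satellite timer. Cluster 0 is assumed to be already powered on.
 */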
void ihost_power_on_cluster(u_register_t mpidr)
{
uint32_t rst, d2xs;
uint32_t cluster_id;
uint32_t ihost_base;
#if SPROC_MEMORY_BISR
uint32_t bisr, cnt;
#endif
uint32_t cluster0_freq_sel;
cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
if (cluster_power_status[cluster_id] == CLUSTER_POWER_ON)
return;
cluster_power_status[cluster_id] = CLUSTER_POWER_ON;
INFO("enabling Cluster #%u\n", cluster_id);
switch (cluster_id) {
case 1:
rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH1_RESET);
d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST1);
#if SPROC_MEMORY_BISR
bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST1;
#endif
break;
case 2:
rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH2_RESET);
d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST2);
#if SPROC_MEMORY_BISR
bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST2;
#endif
break;
case 3:
rst = (1 << CDRU_MISC_RESET_CONTROL__CDRU_IH3_RESET);
d2xs = (1 << CDRU_CCN_REGISTER_CONTROL_1__D2XS_PD_IHOST3);
#if SPROC_MEMORY_BISR
bisr = CRMU_BISR_PDG_MASK__CRMU_BISR_IHOST3;
#endif
break;
default:
ERROR("Invalid cluster :%u\n", cluster_id);
return;
}
/* Releasing ihost resets */
mmio_setbits_32(CDRU_MISC_RESET_CONTROL, rst);
/* calculate cluster/ihost base address */
ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
/* Remove Cluster IO isolation */
mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_4_CONTROL,
(1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_I_O),
(1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_DFT) |
(1 << A72_CRM_DOMAIN_4_CONTROL__DOMAIN_4_ISO_MEM));
/*
 * The BISR sequence requires that all cores of the cluster have
 * their I/O isolation removed, so do the same here.
 */
/* Remove core0 memory IO isolations */
mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_0_CONTROL,
(1 << A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_I_O),
(1 << A72_CRM_DOMAIN_0_CONTROL__DOMAIN_0_ISO_MEM));
/* Remove core1 memory IO isolations */
mmio_clrsetbits_32(ihost_base + A72_CRM_DOMAIN_1_CONTROL,
(1 << A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_I_O),
(1 << A72_CRM_DOMAIN_1_CONTROL__DOMAIN_1_ISO_MEM));
#if SPROC_MEMORY_BISR
mmio_setbits_32(CRMU_BISR_PDG_MASK, (1 << bisr));
if (!(mmio_read_32(CDRU_CHIP_STRAP_DATA_LSW) &
(1 << CDRU_CHIP_STRAP_DATA_LSW__BISR_BYPASS_MODE))) {
/* BISR completion would take max 2 usec */
cnt = 0;
while (cnt < 2) {
udelay(1);
if (mmio_read_32(CRMU_CHIP_OTPC_STATUS) &
(1 << CRMU_CHIP_OTPC_STATUS__OTP_BISR_LOAD_DONE))
break;
cnt++;
}
}
/* If BISR did not complete, this needs to be checked with the ASIC team. */
if (((mmio_read_32(CRMU_CHIP_OTPC_STATUS)) &
(1 << CRMU_CHIP_OTPC_STATUS__OTP_BISR_LOAD_DONE)) == 0) {
WARN("BISR did not completed and need to be addressed\n");
}
#endif
/* PLL power up. The supply is already on; turn on the PLL LDO/PWR. */
mmio_write_32(ihost_base + A72_CRM_PLL_PWR_ON,
(1 << A72_CRM_PLL_PWR_ON__PLL0_ISO_PLLOUT) |
(1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO) |
(1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL));
/* 1 us in spec; doubling it to be safe */
udelay(2);
/* Remove PLL output ISO */
mmio_write_32(ihost_base + A72_CRM_PLL_PWR_ON,
(1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_LDO) |
(1 << A72_CRM_PLL_PWR_ON__PLL0_PWRON_PLL));
/*
* PLL0 Configuration Control Register
* these 4 registers drive the i_pll_ctrl[63:0] input of pll
* (16b per register).
* the values are derived from the spec (sections 8 and 10).
*/
mmio_write_32(ihost_base + A72_CRM_PLL0_CFG0_CTRL, 0x00000000);
mmio_write_32(ihost_base + A72_CRM_PLL0_CFG1_CTRL, 0x00008400);
mmio_write_32(ihost_base + A72_CRM_PLL0_CFG2_CTRL, 0x00000001);
mmio_write_32(ihost_base + A72_CRM_PLL0_CFG3_CTRL, 0x00000000);
/* Read the freq_sel from cluster 0, which is up already */
cluster0_freq_sel = bcm_get_ihost_pll_freq(0);
bcm_set_ihost_pll_freq(cluster_id, cluster0_freq_sel);
udelay(1);
/* Release clock source reset */
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
(1 << A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN));
udelay(1);
/*
 * Integer division for clocks (divider value = n + 1).
 * These are the divisors of the ARM PLL clock frequency.
 */
mmio_write_32(ihost_base + A72_CRM_AXI_CLK_DESC, 0x00000001);
mmio_write_32(ihost_base + A72_CRM_ACP_CLK_DESC, 0x00000001);
mmio_write_32(ihost_base + A72_CRM_ATB_CLK_DESC, 0x00000004);
mmio_write_32(ihost_base + A72_CRM_PCLKDBG_DESC, 0x0000000b);
/*
 * Clock change trigger - must be set for the clock source change
 * to take effect.
 */
mmio_setbits_32(ihost_base + A72_CRM_CLOCK_MODE_CONTROL,
(1 << A72_CRM_CLOCK_MODE_CONTROL__CLK_CHANGE_TRIGGER));
/* turn on functional clocks */
mmio_setbits_32(ihost_base + A72_CRM_CLOCK_CONTROL_0,
(3 << A72_CRM_CLOCK_CONTROL_0__ARM_HW_SW_ENABLE_SEL) |
(3 << A72_CRM_CLOCK_CONTROL_0__AXI_HW_SW_ENABLE_SEL) |
(3 << A72_CRM_CLOCK_CONTROL_0__ACP_HW_SW_ENABLE_SEL) |
(3 << A72_CRM_CLOCK_CONTROL_0__ATB_HW_SW_ENABLE_SEL) |
(3 << A72_CRM_CLOCK_CONTROL_0__PCLKDBG_HW_SW_ENA_SEL));
mmio_setbits_32(ihost_base + A72_CRM_CLOCK_CONTROL_1,
(3 << A72_CRM_CLOCK_CONTROL_1__TMON_HW_SW_ENABLE_SEL) |
(3 << A72_CRM_CLOCK_CONTROL_1__APB_HW_SW_ENABLE_SEL));
/* Program D2XS Power Down Registers */
mmio_setbits_32(CDRU_CCN_REGISTER_CONTROL_1, d2xs);
/* Program Core Config Debug ROM Address Registers */
/* mark valid for Debug ROM base address */
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGCTRL,
(1 << A72_CRM_CORE_CONFIG_DBGCTRL__DBGROMADDRV));
/* Program LO and HI addresses of the CoreSight debug ROM */
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGROM_LO,
(CORESIGHT_BASE_ADDR >> 12) & 0xffff);
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_DBGROM_HI,
(CORESIGHT_BASE_ADDR >> 28) & 0xffff);
/*
* Release soft resets of different components.
* Order: Bus clocks --> PERIPH --> L2 --> cores
*/
/* Bus clocks soft resets */
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
(1 << A72_CRM_SOFTRESETN_0__CRYSTAL26_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_0__CRM_PLL0_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_0__AXI_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_0__ACP_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_0__ATB_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_0__PCLKDBG_SOFTRESETN));
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_1,
(1 << A72_CRM_SOFTRESETN_1__APB_SOFTRESETN));
/* Periph component softreset */
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
(1 << A72_CRM_SOFTRESETN_0__TMON_SOFTRESETN));
/* L2 softreset */
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
(1 << A72_CRM_SOFTRESETN_0__L2_SOFTRESETN));
/* Enable and program Satellite timer */
ihost_enable_satellite_timer(cluster_id);
}
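/*
 * Program the reset vector (RVBAR) for the requested core of a cluster,
 * flag the core to the debug logic and release its power-on and
 * core/debug soft resets.
 */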
void ihost_power_on_secondary_core(u_register_t mpidr, uint64_t rvbar)
{
uint32_t ihost_base;
uint32_t coreid = MPIDR_AFFLVL0_VAL(mpidr);
uint32_t cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
ihost_base = IHOST0_BASE + cluster_id * IHOST_ADDR_SPACE;
INFO("programming core #%u\n", coreid);
if (coreid) {
/* program the entry point for core1 */
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_LO,
rvbar & 0xFFFF);
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_MID,
(rvbar >> 16) & 0xFFFF);
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA1_HI,
(rvbar >> 32) & 0xFFFF);
} else {
/* program the entry point for core0 */
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_LO,
rvbar & 0xFFFF);
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_MID,
(rvbar >> 16) & 0xFFFF);
mmio_write_32(ihost_base + A72_CRM_CORE_CONFIG_RVBA0_HI,
(rvbar >> 32) & 0xFFFF);
}
/* Tell debug logic which processor is up */
mmio_setbits_32(ihost_base + A72_CRM_SUBSYSTEM_CONFIG_0,
(coreid ?
(2 << A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT) :
(1 << A72_CRM_SUBSYSTEM_CONFIG_0__DBGPWRDUP_CFG_SHIFT)));
/* releasing soft resets for IHOST core */
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_0,
(coreid ?
(1 << A72_CRM_SOFTRESETN_0__COREPOR1_SOFTRESETN) :
(1 << A72_CRM_SOFTRESETN_0__COREPOR0_SOFTRESETN)));
mmio_setbits_32(ihost_base + A72_CRM_SOFTRESETN_1,
(coreid ?
((1 << A72_CRM_SOFTRESETN_1__CORE1_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_1__DEBUG1_SOFTRESETN)) :
((1 << A72_CRM_SOFTRESETN_1__CORE0_SOFTRESETN) |
(1 << A72_CRM_SOFTRESETN_1__DEBUG0_SOFTRESETN))));
}
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <iommu.h>
#include <platform_def.h>
#define SMMU_BASE 0x64000000
#define ARM_SMMU_MAX_NUM_CNTXT_BANK 64
#define SMMU_CTX_BANK_IDX_SECURE_CRMU 63
#define ARM_SMMU_NUM_SECURE_MASTER 1
#define ARM_SMMU_NSNUMCBO (ARM_SMMU_MAX_NUM_CNTXT_BANK - \
ARM_SMMU_NUM_SECURE_MASTER)
#define ARM_SMMU_NSNUMSMRGO (ARM_SMMU_MAX_NUM_CNTXT_BANK - \
ARM_SMMU_NUM_SECURE_MASTER)
/* Reserved Banks. */
#define SMMU_CTX_BANK_IDX (SMMU_CTX_BANK_IDX_SECURE_CRMU - \
ARM_SMMU_NUM_SECURE_MASTER)
#define NUM_OF_SMRS 1
#define STG1_WITH_STG2_BYPASS 1
#define ARM_LPAE_PGTBL_PHYS_CRMU 0x880000000
#define ARM_LPAE_PGTBL_PHYS 0x880200000
#define ARM_LPAE_PGTBL_PTE_CNT 512
#define ARM_LPAE_PTE_L1_BLOCK_SIZE 0x40000000
#define ARM_LPAE_PTE_L1_ADDR_MASK 0x0000FFFFC0000000UL
#define ARM_LPAE_PTE_TABLE 0x2UL
#define ARM_LPAE_PTE_VALID 0x1UL
#define ARM_LPAE_PTE_ATTRINDX 2
#define ARM_LPAE_PTE_NS 5
#define ARM_LPAE_PTE_AP 6
#define ARM_LPAE_PTE_AP_EL1_RW 0x0
#define ARM_LPAE_PTE_AP_EL0_RW 0x1
#define ARM_LPAE_PTE_SH 8
#define ARM_LPAE_PTE_SH_NON 0x0
#define ARM_LPAE_PTE_SH_OUTER 0x2
#define ARM_LPAE_PTE_SH_INNER 0x3
#define ARM_LPAE_PTE_AF 10
#define ARM_SMMU_RES_SIZE 0x80000
#define ARM_LPAE_PTE_NSTABLE 0x8000000000000000UL
#define ARM_LPAE_PTE_L1_INDEX_SHIFT 30
#define ARM_LPAE_PTE_L1_INDEX_MASK 0x1ff
#define ARM_LPAE_PTE_L0_INDEX_SHIFT 39
#define ARM_LPAE_PTE_L0_INDEX_MASK 0x1ff
#define ARM_LPAE_PTE_TABLE_MASK ~(0xfffUL)
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
#define sCR0_VMIDPNE (1 << 11)
#define sCR0_PTM (1 << 12)
#define sCR0_FB (1 << 13)
#define sCR0_VMID16EN (1 << 31)
#define sCR0_BSU_SHIFT 14
#define sCR0_BSU_MASK 0x3
#define ARM_SMMU_SMMU_SCR1 0x4
#define SCR1_NSNUMCBO_MASK 0xFF
#define SCR1_NSNUMCBO_SHIFT 0x0
#define SCR1_NSNUMSMRGO_MASK 0xFF00
#define SCR1_NSNUMSMRGO_SHIFT 0x8
/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
#define ARM_SMMU_GR0_ID1 0x24
#define ARM_SMMU_GR0_ID2 0x28
#define ARM_SMMU_GR0_ID3 0x2c
#define ARM_SMMU_GR0_ID4 0x30
#define ARM_SMMU_GR0_ID5 0x34
#define ARM_SMMU_GR0_ID6 0x38
#define ARM_SMMU_GR0_ID7 0x3c
#define ARM_SMMU_GR0_sGFSR 0x48
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
#define ID1_PAGESIZE (1U << 31)
#define ID1_NUMPAGENDXB_SHIFT 28
#define ID1_NUMPAGENDXB_MASK 7
#define ID1_NUMS2CB_SHIFT 16
#define ID1_NUMS2CB_MASK 0xff
#define ID1_NUMCB_SHIFT 0
#define ID1_NUMCB_MASK 0xff
/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + (n << 2))
#define SMR_VALID (1U << 31)
#define SMR_MASK_SHIFT 16
#define SMR_ID_SHIFT 0
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + (n << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + (n << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
#define CBA2R_VMID_SHIFT 16
#define CBA2R_VMID_MASK 0xffff
#define ARM_SMMU_GR1_CBAR(n) (0x0 + (n << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
#define CBAR_S1_BPSHCFG_SHIFT 8
#define CBAR_S1_BPSHCFG_MASK 3
#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
#define CBAR_TYPE_SHIFT 16
#define CBAR_TYPE_MASK 0x3
#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_ACTLR 0x4
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_CONTEXTIDR 0x34
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)
/* ARM LPAE configuration. */
/**************************************************************/
/* Register bits */
#define ARM_32_LPAE_TCR_EAE (1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
#define ARM_LPAE_TCR_EPD1 (1 << 23)
#define ARM_LPAE_TCR_TG0_4K (0 << 14)
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
#define ARM_LPAE_TCR_SH0_SHIFT 12
#define ARM_LPAE_TCR_SH0_MASK 0x3
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_ORGN0_SHIFT 10
#define ARM_LPAE_TCR_IRGN0_SHIFT 8
#define ARM_LPAE_TCR_RGN_MASK 0x3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
#define ARM_LPAE_TCR_RGN_WT 2
#define ARM_LPAE_TCR_RGN_WB 3
#define ARM_LPAE_TCR_SL0_SHIFT 6
#define ARM_LPAE_TCR_SL0_MASK 0x3
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
#define ARM_LPAE_TCR_SZ_MASK 0xf
#define ARM_LPAE_TCR_PS_SHIFT 16
#define ARM_LPAE_TCR_PS_MASK 0x7
#define ARM_LPAE_TCR_IPS_SHIFT 32
#define ARM_LPAE_TCR_IPS_MASK 0x7
#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
#define ARM_LPAE_MAIR_ATTR_NC 0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
#define TTBRn_ASID_SHIFT 48
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS (1 << 4)
#define TTBCR_T0SZ(ia_bits) (64 - (ia_bits))
#define S2CR_PRIVCFG_SHIFT 24
#define S2CR_PRIVCFG_MASK 0x3
/**************************************************************/
uint16_t paxc_stream_ids[] = { 0x2000 };
uint16_t paxc_stream_ids_mask[] = { 0x1fff };
uint16_t crmu_stream_ids[] = { CRMU_STREAM_ID };
uint16_t crmu_stream_ids_mask[] = { 0x0 };
enum arm_smmu_s2cr_type {
S2CR_TYPE_TRANS,
S2CR_TYPE_BYPASS,
S2CR_TYPE_FAULT,
};
enum arm_smmu_s2cr_privcfg {
S2CR_PRIVCFG_DEFAULT,
S2CR_PRIVCFG_DIPAN,
S2CR_PRIVCFG_UNPRIV,
S2CR_PRIVCFG_PRIV,
};
struct arm_smmu_smr {
uint16_t mask;
uint16_t id;
uint32_t valid;
};
struct arm_smmu_s2cr {
int count;
enum arm_smmu_s2cr_type type;
enum arm_smmu_s2cr_privcfg privcfg;
uint8_t cbndx;
};
struct arm_smmu_cfg {
uint8_t cbndx;
uint8_t irptndx;
uint32_t cbar;
};
struct arm_smmu_device {
uint8_t *base;
uint32_t streams;
unsigned long size;
unsigned long pgshift;
unsigned long va_size;
unsigned long ipa_size;
unsigned long pa_size;
struct arm_smmu_smr smr[NUM_OF_SMRS];
struct arm_smmu_s2cr s2cr[NUM_OF_SMRS];
struct arm_smmu_cfg cfg[NUM_OF_SMRS];
uint16_t *stream_ids;
uint16_t *stream_ids_mask;
};
void arm_smmu_enable_secure_client_port(void)
{
uintptr_t smmu_base = SMMU_BASE;
mmio_clrbits_32(smmu_base, sCR0_CLIENTPD);
}
void arm_smmu_reserve_secure_cntxt(void)
{
uintptr_t smmu_base = SMMU_BASE;
mmio_clrsetbits_32(smmu_base + ARM_SMMU_SMMU_SCR1,
(SCR1_NSNUMSMRGO_MASK | SCR1_NSNUMCBO_MASK),
((ARM_SMMU_NSNUMCBO << SCR1_NSNUMCBO_SHIFT) |
(ARM_SMMU_NSNUMSMRGO << SCR1_NSNUMSMRGO_SHIFT)));
}
static void arm_smmu_smr_cfg(struct arm_smmu_device *smmu, uint32_t index)
{
uint32_t idx = smmu->cfg[index].cbndx;
struct arm_smmu_smr *smr = &smmu->smr[index];
uint32_t reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
if (smr->valid)
reg |= SMR_VALID;
mmio_write_32((uintptr_t) (ARM_SMMU_GR0(smmu) +
ARM_SMMU_GR0_SMR(idx)), reg);
}
static void arm_smmu_s2cr_cfg(struct arm_smmu_device *smmu, uint32_t index)
{
uint32_t idx = smmu->cfg[index].cbndx;
struct arm_smmu_s2cr *s2cr = &smmu->s2cr[index];
uint32_t reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
(s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
(s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
mmio_write_32((uintptr_t) (ARM_SMMU_GR0(smmu) +
ARM_SMMU_GR0_S2CR(idx)), reg);
}
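/*
 * Build a two-level (L0/L1) LPAE identity page table for the given
 * domain using 1 GB block descriptors at L1. Addresses below 0x80000000
 * are mapped as device memory and everything above as cacheable memory;
 * non-secure attributes are applied for the PAXC domain.
 */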
static void smmu_set_pgtbl(struct arm_smmu_device *smmu,
enum iommu_domain dom,
uint64_t *pg_table_base)
{
int i, l0_index, l1_index;
uint64_t addr, *pte, *l0_base, *l1_base;
uint64_t addr_space_limit;
if (dom == PCIE_PAXC) {
addr_space_limit = 0xffffffffff;
} else if (dom == DOMAIN_CRMU) {
addr_space_limit = 0xffffffff;
} else {
ERROR("dom is not supported\n");
return;
}
l0_base = pg_table_base;
/* clear L0 descriptors. */
for (i = 0; i < ARM_LPAE_PGTBL_PTE_CNT; i++)
l0_base[i] = 0x0;
addr = 0x0;
while (addr < addr_space_limit) {
/* find L0 pte */
l0_index = ((addr >> ARM_LPAE_PTE_L0_INDEX_SHIFT) &
ARM_LPAE_PTE_L0_INDEX_MASK);
l1_base = l0_base + ((l0_index + 1) * ARM_LPAE_PGTBL_PTE_CNT);
/* setup L0 pte if required */
pte = l0_base + l0_index;
if (*pte == 0x0) {
*pte |= ((uint64_t)l1_base & ARM_LPAE_PTE_TABLE_MASK);
if (dom == PCIE_PAXC)
*pte |= ARM_LPAE_PTE_NSTABLE;
*pte |= ARM_LPAE_PTE_TABLE;
*pte |= ARM_LPAE_PTE_VALID;
}
/* find L1 pte */
l1_index = ((addr >> ARM_LPAE_PTE_L1_INDEX_SHIFT) &
ARM_LPAE_PTE_L1_INDEX_MASK);
pte = l1_base + l1_index;
/* setup L1 pte */
*pte = 0x0;
*pte |= (addr & ARM_LPAE_PTE_L1_ADDR_MASK);
if (addr < 0x80000000) {
*pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV <<
ARM_LPAE_PTE_ATTRINDX);
if (dom == PCIE_PAXC)
*pte |= (1 << ARM_LPAE_PTE_NS);
} else {
*pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE <<
ARM_LPAE_PTE_ATTRINDX);
*pte |= (1 << ARM_LPAE_PTE_NS);
}
*pte |= (ARM_LPAE_PTE_AP_EL0_RW << ARM_LPAE_PTE_AP);
*pte |= (ARM_LPAE_PTE_SH_INNER << ARM_LPAE_PTE_SH);
*pte |= (1 << ARM_LPAE_PTE_AF);
*pte |= ARM_LPAE_PTE_VALID;
addr += ARM_LPAE_PTE_L1_BLOCK_SIZE;
}
}
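/*
 * Configure one SMMU context bank as a stage-1 identity map (stage-2
 * bypass) for the given domain: program the SMR/S2CR stream match,
 * CBA2R/CBAR, TTBCR, TTBR0/1, MAIR and SCTLR, then populate the page
 * table with smmu_set_pgtbl(). The PAXC domain uses the reserved
 * context bank and the CRMU uses the dedicated secure bank.
 */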
void arm_smmu_create_identity_map(enum iommu_domain dom)
{
struct arm_smmu_device iommu;
struct arm_smmu_device *smmu = &iommu;
uint32_t reg, reg2;
unsigned long long reg64;
uint32_t idx;
uint16_t asid;
unsigned int context_bank_index;
unsigned long long pg_table_base;
smmu->base = (uint8_t *) SMMU_BASE;
reg = mmio_read_32((uintptr_t) (ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_ID1));
smmu->pgshift = (reg & ID1_PAGESIZE) ? 16 : 12;
smmu->size = ARM_SMMU_RES_SIZE;
smmu->stream_ids = NULL;
switch (dom) {
case PCIE_PAXC:
smmu->stream_ids = &paxc_stream_ids[0];
smmu->stream_ids_mask = &paxc_stream_ids_mask[0];
smmu->streams = ARRAY_SIZE(paxc_stream_ids);
context_bank_index = SMMU_CTX_BANK_IDX;
pg_table_base = ARM_LPAE_PGTBL_PHYS;
break;
case DOMAIN_CRMU:
smmu->stream_ids = &crmu_stream_ids[0];
smmu->stream_ids_mask = &crmu_stream_ids_mask[0];
smmu->streams = ARRAY_SIZE(crmu_stream_ids);
context_bank_index = SMMU_CTX_BANK_IDX_SECURE_CRMU;
pg_table_base = ARM_LPAE_PGTBL_PHYS_CRMU;
break;
default:
ERROR("domain not supported\n");
return;
}
if (smmu->streams > NUM_OF_SMRS) {
INFO("can not support more than %d sids\n", NUM_OF_SMRS);
return;
}
/* set up iommu dev. */
for (idx = 0; idx < smmu->streams; idx++) {
/* S2CR. */
smmu->s2cr[idx].type = S2CR_TYPE_TRANS;
smmu->s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
smmu->s2cr[idx].cbndx = context_bank_index;
smmu->cfg[idx].cbndx = context_bank_index;
smmu->cfg[idx].cbar = STG1_WITH_STG2_BYPASS << CBAR_TYPE_SHIFT;
arm_smmu_s2cr_cfg(smmu, idx);
/* SMR. */
smmu->smr[idx].mask = smmu->stream_ids_mask[idx];
smmu->smr[idx].id = smmu->stream_ids[idx];
smmu->smr[idx].valid = 1;
arm_smmu_smr_cfg(smmu, idx);
/* CBA2R. 64-bit Translation */
mmio_write_32((uintptr_t) (ARM_SMMU_GR1(smmu) +
ARM_SMMU_GR1_CBA2R(smmu->cfg[idx].cbndx)),
0x1);
/* CBAR.*/
reg = smmu->cfg[idx].cbar;
reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
mmio_write_32((uintptr_t) (ARM_SMMU_GR1(smmu) +
ARM_SMMU_GR1_CBAR(smmu->cfg[idx].cbndx)),
reg);
/* TTBCR. */
reg64 = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
reg64 |= ARM_LPAE_TCR_TG0_4K;
reg64 |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
/* ias 40 bits.*/
reg64 |= TTBCR_T0SZ(40) << ARM_LPAE_TCR_T0SZ_SHIFT;
/* Disable speculative walks through TTBR1 */
reg64 |= ARM_LPAE_TCR_EPD1;
reg = (uint32_t) reg64;
reg2 = (uint32_t) (reg64 >> 32);
reg2 |= TTBCR2_SEP_UPSTREAM;
reg2 |= TTBCR2_AS;
mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_TTBCR2), reg2);
mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_TTBCR), reg);
/* TTBR0. */
asid = smmu->cfg[idx].cbndx;
reg64 = pg_table_base;
reg64 |= (unsigned long long) asid << TTBRn_ASID_SHIFT;
mmio_write_64((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_TTBR0), reg64);
/* TTBR1. */
reg64 = 0;
reg64 |= (unsigned long long) asid << TTBRn_ASID_SHIFT;
mmio_write_64((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_TTBR1), reg64);
/* MAIR. */
reg = (ARM_LPAE_MAIR_ATTR_NC <<
ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
(ARM_LPAE_MAIR_ATTR_WBRWA <<
ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_LPAE_MAIR_ATTR_DEVICE <<
ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_S1_MAIR0), reg);
/* MAIR1. */
reg = 0;
mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_S1_MAIR1), reg);
/* SCTLR. */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
/* stage 1.*/
reg |= SCTLR_S1_ASIDPNE;
mmio_write_32((uintptr_t) (ARM_SMMU_CB_BASE(smmu) +
ARM_SMMU_CB(smmu, smmu->cfg[idx].cbndx) +
ARM_SMMU_CB_SCTLR), reg);
}
smmu_set_pgtbl(smmu, dom, (uint64_t *)pg_table_base);
}
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdbool.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <ncsi.h>
#include <sr_def.h>
#include <sr_utils.h>
static const char *const io_drives[] = {
"2mA", "4mA", "6mA", "8mA",
"10mA", "12mA", "14mA", "16mA"
};
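/*
 * Program the drive strength of the Nitro NCSI IO pads. The strength is
 * selected at build time through NCSI_IO_DRIVE_STRENGTH_MA and applied
 * to every NCSI IO pad control register.
 */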
void brcm_stingray_ncsi_init(void)
{
unsigned int i = 0;
unsigned int selx = 0;
#if NCSI_IO_DRIVE_STRENGTH_MA == 2
selx = 0x0;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 4
selx = 0x1;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 6
selx = 0x2;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 8
selx = 0x3;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 10
selx = 0x4;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 12
selx = 0x5;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 14
selx = 0x6;
#elif NCSI_IO_DRIVE_STRENGTH_MA == 16
selx = 0x7;
#else
ERROR("Unsupported NCSI_IO_DRIVE_STRENGTH_MA. Please check it.\n");
return;
#endif
INFO("ncsi io drives: %s\n", io_drives[selx]);
for (i = 0; i < NITRO_NCSI_IOPAD_CONTROL_NUM; i++) {
mmio_clrsetbits_32((NITRO_NCSI_IOPAD_CONTROL_BASE + (i * 4)),
PAD_SELX_MASK, PAD_SELX_VALUE(selx));
}
INFO("ncsi init done\n");
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <stdbool.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <paxb.h>
#include <sr_def.h>
#include <sr_utils.h>
#define PCIE_CORE_PWR_ARR_POWERON 0x8
#define PCIE_CORE_PWR_ARR_POWEROK 0x4
#define PCIE_CORE_PWR_POWERON 0x2
#define PCIE_CORE_PWR_POWEROK 0x1
#define PCIE_CORE_USER_CFG (PCIE_CORE_BASE + 0x38)
#define PCIE_PAXB_SMMU_SID_CFG (PCIE_CORE_BASE + 0x60)
#ifdef SID_B8_D1_F1
#define PAXB_SMMU_SID_CFG_BUS_WIDTH (0x8 << 8)
#define PAXB_SMMU_SID_CFG_DEV_WIDTH (0x1 << 12)
#define PAXB_SMMU_SID_CFG_FUN_WIDTH (0x1 << 16)
#else
#define PAXB_SMMU_SID_CFG_BUS_WIDTH (0x2 << 8)
#define PAXB_SMMU_SID_CFG_DEV_WIDTH (0x5 << 12)
#define PAXB_SMMU_SID_CFG_FUN_WIDTH (0x3 << 16)
#endif
#define PAXB_APB_TIMEOUT_COUNT_OFFSET 0x034
/* allow up to 5 ms for each power switch to stabilize */
#define PCIE_CORE_PWR_TIMEOUT_MS 5
/* wait 1 microsecond for PCIe core soft reset */
#define PCIE_CORE_SOFT_RST_DELAY_US 1
/*
* List of PAXB APB registers
*/
#define PAXB_BASE 0x48000000
#define PAXB_BASE_OFFSET 0x4000
#define PAXB_OFFSET(core) (PAXB_BASE + \
(core) * PAXB_BASE_OFFSET)
#define PAXB_CLK_CTRL_OFFSET 0x000
#define PAXB_EP_PERST_SRC_SEL_MASK (1 << 2)
#define PAXB_EP_MODE_PERST_MASK (1 << 1)
#define PAXB_RC_PCIE_RST_OUT_MASK (1 << 0)
#define PAXB_MAX_IMAP_WINDOWS 8
#define PAXB_IMAP_REG_WIDTH 8
#define PAXB_IMAP0_REG_WIDTH 4
#define PAXB_AXUSER_REG_WIDTH 4
#define PAXB_CFG_IND_ADDR_OFFSET 0x120
#define PAXB_CFG_IND_DATA_OFFSET 0x124
#define PAXB_CFG_IND_ADDR_MASK 0x1ffc
#define PAXB_CFG_CFG_TYPE_MASK 0x1
#define PAXB_EP_CFG_ADDR_OFFSET 0x1f8
#define PAXB_EP_CFG_DATA_OFFSET 0x1fc
#define PAXB_EP_CFG_ADDR_MASK 0xffc
#define PAXB_EP_CFG_TYPE_MASK 0x1
#define PAXB_0_DEFAULT_IMAP 0xed0
#define DEFAULT_ADDR_INVALID BIT(0)
#define PAXB_0_DEFAULT_IMAP_AXUSER 0xed8
#define PAXB_0_DEFAULT_IMAP_AXCACHE 0xedc
#define IMAP_AXCACHE 0xff
#define OARR_VALID BIT(0)
#define IMAP_VALID BIT(0)
#define PAXB_IMAP0_BASE_OFFSET 0xc00
#define PAXB_IARR0_BASE_OFFSET 0xd00
#define PAXB_IMAP0_OFFSET(idx) (PAXB_IMAP0_BASE_OFFSET + \
(idx) * PAXB_IMAP0_REG_WIDTH)
#define PAXB_IMAP0_WINDOW_SIZE 0x1000
#define PAXB_IMAP2_OFFSET 0xcc0
#define PAXB_IMAP0_REGS_TYPE_OFFSET 0xcd0
#define PAXB_IARR2_LOWER_OFFSET 0xd10
#define PAXB_IMAP3_BASE_OFFSET 0xe08
#define PAXB_IMAP3_OFFSET(idx) (PAXB_IMAP3_BASE_OFFSET + \
(idx) * PAXB_IMAP_REG_WIDTH)
#define PAXB_IMAP3_0_AXUSER_B_OFFSET 0xe48
#define PAXB_IMAP3_0_AXUSER_OFFSET(idx) (PAXB_IMAP3_0_AXUSER_B_OFFSET + \
(idx) * PAXB_AXUSER_REG_WIDTH)
#define PAXB_IMAP4_BASE_OFFSET 0xe70
#define PAXB_IMAP4_OFFSET(idx) (PAXB_IMAP4_BASE_OFFSET + \
(idx) * PAXB_IMAP_REG_WIDTH)
#define PAXB_IMAP4_0_AXUSER_B_OFFSET 0xeb0
#define PAXB_IMAP4_0_AXUSER_OFFSET(idx) (PAXB_IMAP4_0_AXUSER_B_OFFSET + \
(idx) * PAXB_AXUSER_REG_WIDTH)
#define PAXB_CFG_LINK_STATUS_OFFSET 0xf0c
#define PAXB_CFG_PHYLINKUP_MASK (1 << 3)
#define PAXB_CFG_DL_ACTIVE_MASK (1 << 2)
#define PAXB_IMAP0_0_AXUSER_OFFSET 0xf60
#define PAXB_IMAP2_AXUSER_OFFSET 0xfe0
/* cacheable write-back, allocate on both reads and writes */
#define IMAP_ARCACHE 0x0f0
#define IMAP_AWCACHE 0xf00
/* normal access, nonsecure access, and data access */
/* AWQOS:0xe and ARQOS:0xa */
/* AWPROT:0x2 and ARPROT:0x1 */
#define IMAP_AXUSER 0x002e002a
/*
* List of NIC security and PIPEMUX related registers
*/
#define SR_PCIE_NIC_SECURITY_BASE 0x58100000
#define NS3Z_PCIE_NIC_SECURITY_BASE 0x48100000
#define GITS_TRANSLATER 0x63c30000
#define VENDOR_ID 0x14e4
#define CFG_RC_DEV_ID 0x434
#define CFG_RC_DEV_SUBID 0x438
#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
#define PCI_CLASS_BRIDGE_MASK 0xffff00
#define PCI_CLASS_BRIDGE_SHIFT 8
#define PCI_CLASS_BRIDGE_PCI 0x0604
/*
* List of PAXB RC configuration space registers
*/
/* first capability list entry */
#define PCI_CAPABILITY_LIST_OFFSET 0x34
#define PCI_CAPABILITY_SPEED_OFFSET 0xc
#define PCI_EP_CAPABILITY_OFFSET 0x10
#define CFG_RC_LINK_STATUS_CTRL_2 0x0dc
#define CFG_RC_LINK_SPEED_SHIFT 0
#define CFG_RC_LINK_SPEED_MASK (0xf << CFG_RC_LINK_SPEED_SHIFT)
#define CFG_RC_DEVICE_CAP 0x4d4
#define CFG_RC_DEVICE_CAP_MPS_SHIFT 0
#define CFG_RC_DEVICE_CAP_MPS_MASK (0x7 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
/* MPS 256 bytes */
#define CFG_RC_DEVICE_CAP_MPS_256B (0x1 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
/* MPS 512 bytes */
#define CFG_RC_DEVICE_CAP_MPS_512B (0x2 << CFG_RC_DEVICE_CAP_MPS_SHIFT)
#define CFG_RC_TL_FCIMM_NP_LIMIT 0xa10
#define CFG_RC_TL_FCIMM_NP_VAL 0x01500000
#define CFG_RC_TL_FCIMM_P_LIMIT 0xa14
#define CFG_RC_TL_FCIMM_P_VAL 0x03408080
#define CFG_RC_LINK_CAP 0x4dc
#define CFG_RC_LINK_CAP_SPEED_SHIFT 0
#define CFG_RC_LINK_CAP_SPEED_MASK (0xf << CFG_RC_LINK_CAP_SPEED_SHIFT)
#define CFG_RC_LINK_CAP_WIDTH_SHIFT 4
#define CFG_RC_LINK_CAP_WIDTH_MASK (0x1f << CFG_RC_LINK_CAP_WIDTH_SHIFT)
#define CFG_LINK_CAP_RC 0x4f0
#define CFG_RC_DL_ACTIVE_SHIFT 0
#define CFG_RC_DL_ACTIVE_MASK (0x1 << CFG_RC_DL_ACTIVE_SHIFT)
#define CFG_RC_SLOT_CLK_SHIFT 1
#define CFG_RC_SLOT_CLK_MASK (0x1 << CFG_RC_SLOT_CLK_SHIFT)
#define CFG_ROOT_CAP_RC 0x4f8
#define CFG_ROOT_CAP_LTR_SHIFT 1
#define CFG_ROOT_CAP_LTR_MASK (0x1 << CFG_ROOT_CAP_LTR_SHIFT)
#define CFG_RC_CLKREQ_ENABLED 0x4fc
#define CFG_RC_CLKREQ_ENABLED_SHIFT 0
#define CFG_RC_CLKREQ_ENABLED_MASK (0x1 << CFG_RC_CLKREQ_ENABLED_SHIFT)
#define CFG_RC_COEFF_ADDR 0x638
#define CFG_RC_TL_CTRL_0 0x800
#define RC_MEM_DW_CHK_MASK 0x03fe
#define CFG_RC_PDL_CTRL_4 0x1010
#define NPH_FC_INIT_SHIFT 24
#define NPH_FC_INIT_MASK (U(0xff) << NPH_FC_INIT_SHIFT)
#define PD_FC_INIT_SHIFT 12
#define PD_FC_INIT_MASK (0xffff << PD_FC_INIT_SHIFT)
#define CFG_RC_PDL_CTRL_5 0x1014
#define PH_INIT_SHIFT 0
#define PH_INIT_MASK (0xff << PH_INIT_SHIFT)
#define DL_STATUS_OFFSET 0x1048
#define PHYLINKUP BIT(13)
#define PH_INIT 0x10
#define PD_FC_INIT 0x100
#define NPH_FC_INIT 0x8
#define SRP_PH_INIT 0x7F
#define SRP_PD_FC_INIT 0x200
#define SRP_NPH_FC_INIT 0x7F
#define CFG_ADDR_BUS_NUM_SHIFT 20
#define CFG_ADDR_DEV_NUM_SHIFT 15
#define CFG_ADDR_FUNC_NUM_SHIFT 12
#define CFG_ADDR_REG_NUM_SHIFT 2
#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
#define DL_LINK_UP_TIMEOUT_MS 1000
#define CFG_RETRY_STATUS 0xffff0001
#define CRS_TIMEOUT_MS 5000
/* create EP config data to write */
#define DEF_BUS_NO 1 /* default bus 1 */
#define DEF_SLOT_NO 0 /* default slot 0 */
#define DEF_FN_NO 0 /* default fn 0 */
#define EP_CONFIG_VAL(bus_no, slot, fn, where) \
(((bus_no) << CFG_ADDR_BUS_NUM_SHIFT) | \
((slot) << CFG_ADDR_DEV_NUM_SHIFT) | \
((fn) << CFG_ADDR_FUNC_NUM_SHIFT) | \
((where) & CFG_ADDR_REG_NUM_MASK) | \
(1 & CFG_ADDR_CFG_TYPE_MASK))
/* PAXB security offset */
#define PAXB_SECURITY_IDM_OFFSET 0x1c
#define PAXB_SECURITY_APB_OFFSET 0x24
#define PAXB_SECURITY_ECAM_OFFSET 0x3c
#define paxb_get_config(type) paxb_get_##type##_config()
static unsigned int paxb_sec_reg_offset[] = {
0x0c, /* PAXB0 AXI */
0x10, /* PAXB1 AXI */
0x14, /* PAXB2 AXI */
0x18, /* PAXB3 AXI */
0x20, /* PAXB4 AXI */
0x28, /* PAXB5 AXI */
0x2c, /* PAXB6 AXI */
0x30, /* PAXB7 AXI */
0x24, /* PAXB APB */
};
const paxb_cfg *paxb;
/*
* Given a PIPEMUX strap and PCIe core index, this function returns 1 if a
* PCIe core needs to be enabled
*/
int pcie_core_needs_enable(unsigned int core_idx)
{
if (paxb->core_needs_enable)
return paxb->core_needs_enable(core_idx);
return 0;
}
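/*
 * Write the default TX coefficient value (0x7) into each coefficient
 * nibble of the RC coefficient registers, one register per lane pair.
 */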
static void pcie_set_default_tx_coeff(uint32_t core_idx, uint32_t link_width)
{
unsigned int lanes = 0;
uint32_t data, addr;
addr = CFG_RC_COEFF_ADDR;
for (lanes = 0; lanes < link_width; lanes = lanes + 2) {
data = paxb_rc_cfg_read(core_idx, addr);
data &= 0xf0f0f0f0;
data |= (7 & 0xf);
data |= (7 & 0xf) << 8;
data |= (7 & 0xf) << 16;
data |= (7 & 0xf) << 24;
paxb_rc_cfg_write(core_idx, addr, data);
addr += 4;
}
}
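/*
 * For each enabled PCIe core, advertise the link width and speed derived
 * from the PIPEMUX configuration in the RC link capability and link
 * status/control registers, then program the default TX coefficients.
 */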
static int paxb_rc_link_init(void)
{
uint32_t val, link_speed;
unsigned int link_width;
uint32_t core_idx;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
link_width = paxb->get_link_width(core_idx);
if (!link_width) {
ERROR("Unsupported PIPEMUX\n");
return -EOPNOTSUPP;
}
link_speed = paxb->get_link_speed();
/* program RC's link cap reg to advertise proper link width */
val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
val &= ~CFG_RC_LINK_CAP_WIDTH_MASK;
val |= (link_width << CFG_RC_LINK_CAP_WIDTH_SHIFT);
paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);
/* program RC's link cap reg to advertise proper link speed */
val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_CAP);
val &= ~CFG_RC_LINK_CAP_SPEED_MASK;
val |= link_speed << CFG_RC_LINK_CAP_SPEED_SHIFT;
paxb_rc_cfg_write(core_idx, CFG_RC_LINK_CAP, val);
/* also need to program RC's link status control register */
val = paxb_rc_cfg_read(core_idx, CFG_RC_LINK_STATUS_CTRL_2);
val &= ~(CFG_RC_LINK_SPEED_MASK);
val |= link_speed << CFG_RC_LINK_SPEED_SHIFT;
paxb_rc_cfg_write(core_idx, CFG_RC_LINK_STATUS_CTRL_2, val);
#ifdef WAR_PLX_PRESET_PARITY_FAIL
/*
 * WAR to avoid a crash with a PLX switch in GEN3: during PRESET the
 * PLX switch does not fix parity, so parity matching is disabled.
 */
val = paxb_rc_cfg_read(core_idx, CFG_RC_REG_PHY_CTL_10);
val &= ~(PHY_CTL_10_GEN3_MATCH_PARITY);
paxb_rc_cfg_write(core_idx, CFG_RC_REG_PHY_CTL_10, val);
#endif
pcie_set_default_tx_coeff(core_idx, link_width);
}
return 0;
}
#ifdef PAXB_LINKUP
static void paxb_perst_ctrl(unsigned int core_idx, bool assert)
{
uint32_t clk_ctrl = PAXB_OFFSET(core_idx) + PAXB_CLK_CTRL_OFFSET;
if (assert) {
mmio_clrbits_32(clk_ctrl, PAXB_EP_PERST_SRC_SEL_MASK |
PAXB_EP_MODE_PERST_MASK |
PAXB_RC_PCIE_RST_OUT_MASK);
udelay(250);
} else {
mmio_setbits_32(clk_ctrl, PAXB_RC_PCIE_RST_OUT_MASK);
mdelay(100);
}
}
static void paxb_start_link_up(void)
{
unsigned int core_idx;
uint32_t val, timeout;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
/* toggle PERST */
paxb_perst_ctrl(core_idx, true);
paxb_perst_ctrl(core_idx, false);
timeout = DL_LINK_UP_TIMEOUT_MS;
/* wait for Link up */
do {
val = mmio_read_32(PAXB_OFFSET(core_idx) +
PAXB_CFG_LINK_STATUS_OFFSET);
if (val & PAXB_CFG_DL_ACTIVE_MASK)
break;
mdelay(1);
} while (--timeout);
if (!timeout)
ERROR("PAXB core %u link is down\n", core_idx);
}
}
#endif
static void pcie_core_soft_reset(unsigned int core_idx)
{
uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
uintptr_t ctrl = (uintptr_t)(PCIE_CORE_SOFT_RST_CFG_BASE + offset);
/* Put PCIe core in soft reset */
mmio_clrbits_32(ctrl, PCIE_CORE_SOFT_RST);
/* Wait for 1 us before pulling PCIe core out of soft reset */
udelay(PCIE_CORE_SOFT_RST_DELAY_US);
mmio_setbits_32(ctrl, PCIE_CORE_SOFT_RST);
}
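/*
 * Enable one power switch by setting 'mask' in the control register and
 * poll the status register until the same bits are set, or time out
 * after PCIE_CORE_PWR_TIMEOUT_MS milliseconds.
 */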
static int pcie_core_pwron_switch(uintptr_t ctrl, uintptr_t status,
uint32_t mask)
{
uint32_t val;
unsigned int timeout = PCIE_CORE_PWR_TIMEOUT_MS;
/* enable switch */
mmio_setbits_32(ctrl, mask);
/* now wait for it to stabilize */
do {
val = mmio_read_32(status);
if ((val & mask) == mask)
return 0;
mdelay(1);
} while (--timeout);
return -EIO;
}
static int pcie_core_pwr_seq(uintptr_t ctrl, uintptr_t status)
{
int ret;
/*
* Enable the switch with the following sequence:
* 1. Array weak switch output switch
* 2. Array strong switch
* 3. Weak switch output acknowledge
* 4. Strong switch output acknowledge
*/
ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWERON);
if (ret)
return ret;
ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_ARR_POWEROK);
if (ret)
return ret;
ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWERON);
if (ret)
return ret;
ret = pcie_core_pwron_switch(ctrl, status, PCIE_CORE_PWR_POWEROK);
if (ret)
return ret;
return 0;
}
/*
 * This function enables PCIe core and PAXB memory buffer power, and then
 * removes the PCIe core from isolation
 */
static int pcie_core_pwr_init(unsigned int core_idx)
{
int ret;
uint32_t offset = core_idx * PCIE_CORE_PWR_OFFSET;
uintptr_t ctrl, status;
/* enable mem power to PCIe core */
ctrl = (uintptr_t)(PCIE_CORE_MEM_PWR_BASE + offset);
status = (uintptr_t)(PCIE_CORE_MEM_PWR_STATUS_BASE + offset);
ret = pcie_core_pwr_seq(ctrl, status);
if (ret) {
ERROR("PCIe core mem power failed\n");
return ret;
}
/* now enable mem power to PAXB wrapper */
ctrl = (uintptr_t)(PCIE_PAXB_MEM_PWR_BASE + offset);
status = (uintptr_t)(PCIE_PAXB_MEM_PWR_STATUS_BASE + offset);
ret = pcie_core_pwr_seq(ctrl, status);
if (ret) {
ERROR("PAXB mem power failed\n");
return ret;
}
/* now remove power isolation */
ctrl = (uintptr_t)(PCIE_CORE_ISO_CFG_BASE + offset);
mmio_clrbits_32(ctrl, PCIE_CORE_ISO | PCIE_CORE_MEM_ISO);
return 0;
}
static void pcie_ss_reset(void)
{
mmio_setbits_32(CDRU_MISC_RESET_CONTROL,
1 << CDRU_MISC_RESET_CONTROL__CDRU_PCIE_RESET_N_R);
}
/*
 * This function reads the PIPEMUX strap, figures out all the PCIe cores
 * that need to be enabled, and enables the memory power for those cores
 */
static int pcie_cores_init(void)
{
int ret = 0;
uint32_t core_idx;
if (paxb->pipemux_init) {
ret = paxb->pipemux_init();
if (ret)
return ret;
}
/* bring PCIe subsystem out of reset */
pcie_ss_reset();
/* power up all PCIe cores that will be used as RC */
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
ret = pcie_core_pwr_init(core_idx);
if (ret) {
ERROR("PCIe core %u power up failed\n", core_idx);
return ret;
}
pcie_core_soft_reset(core_idx);
VERBOSE("PCIe core %u is powered up\n", core_idx);
}
return ret;
}
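/*
 * RC configuration space is accessed indirectly: the register offset is
 * written to the PAXB indirect address register and the data is then
 * written to or read from the indirect data register.
 */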
void paxb_rc_cfg_write(unsigned int core_idx, unsigned int where,
uint32_t val)
{
mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
(where & PAXB_CFG_IND_ADDR_MASK) |
PAXB_CFG_CFG_TYPE_MASK);
mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET, val);
}
unsigned int paxb_rc_cfg_read(unsigned int core_idx, unsigned int where)
{
unsigned int val;
mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_ADDR_OFFSET,
(where & PAXB_CFG_IND_ADDR_MASK) |
PAXB_CFG_CFG_TYPE_MASK);
val = mmio_read_32(PAXB_OFFSET(core_idx) + PAXB_CFG_IND_DATA_OFFSET);
return val;
}
static void paxb_cfg_mps(void)
{
uint32_t val, core_idx, mps;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
val = paxb_rc_cfg_read(core_idx, CFG_RC_DEVICE_CAP);
val &= ~CFG_RC_DEVICE_CAP_MPS_MASK;
mps = CFG_RC_DEVICE_CAP_MPS_256B;
if (core_idx == 0 || core_idx == 1 ||
core_idx == 6 || core_idx == 7) {
mps = CFG_RC_DEVICE_CAP_MPS_512B;
}
val |= mps;
paxb_rc_cfg_write(core_idx, CFG_RC_DEVICE_CAP, val);
}
}
static void paxb_cfg_dev_id(void)
{
uint32_t val, core_idx;
uint32_t device_id;
device_id = paxb->device_id;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
/* Set Core in RC mode */
mmio_setbits_32(PCIE_CORE_USER_CFG +
(core_idx * PCIE_CORE_PWR_OFFSET), 1);
/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
val = paxb_rc_cfg_read(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET);
val &= ~PCI_CLASS_BRIDGE_MASK;
val |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
paxb_rc_cfg_write(core_idx, PCI_BRIDGE_CTRL_REG_OFFSET, val);
val = (VENDOR_ID << 16) | device_id;
paxb_rc_cfg_write(core_idx, CFG_RC_DEV_ID, val);
val = (device_id << 16) | VENDOR_ID;
paxb_rc_cfg_write(core_idx, CFG_RC_DEV_SUBID, val);
}
}
static void paxb_cfg_tgt_trn(void)
{
uint32_t val, core_idx;
/*
 * Disable all memory Rd/Wr size checks so that target read/write
 * transactions may exceed the stipulated DW count. As a result, the
 * PAXB root complex will not abort read/write transactions beyond
 * the stipulated limit.
 */
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
val = paxb_rc_cfg_read(core_idx, CFG_RC_TL_CTRL_0);
val &= ~(RC_MEM_DW_CHK_MASK);
paxb_rc_cfg_write(core_idx, CFG_RC_TL_CTRL_0, val);
}
}
static void paxb_cfg_pdl_ctrl(void)
{
uint32_t val, core_idx;
uint32_t nph, ph, pd;
/* increase the credit counter to 4 for non-posted header */
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
nph = NPH_FC_INIT;
ph = PH_INIT;
pd = PD_FC_INIT;
if (core_idx == 0 || core_idx == 1 ||
core_idx == 6 || core_idx == 7) {
nph = SRP_NPH_FC_INIT;
ph = SRP_PH_INIT;
pd = SRP_PD_FC_INIT;
}
val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_4);
val &= ~NPH_FC_INIT_MASK;
val &= ~PD_FC_INIT_MASK;
val = val | (nph << NPH_FC_INIT_SHIFT);
val = val | (pd << PD_FC_INIT_SHIFT);
paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_4, val);
val = paxb_rc_cfg_read(core_idx, CFG_RC_PDL_CTRL_5);
val &= ~PH_INIT_MASK;
val = val | (ph << PH_INIT_SHIFT);
paxb_rc_cfg_write(core_idx, CFG_RC_PDL_CTRL_5, val);
		/*
		 * The ASIC team is expected to provide a more optimized
		 * value after further investigation. Until then, this
		 * setting is needed to achieve similar performance on
		 * all the slots.
		 */
paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_NP_LIMIT,
CFG_RC_TL_FCIMM_NP_VAL);
paxb_rc_cfg_write(core_idx, CFG_RC_TL_FCIMM_P_LIMIT,
CFG_RC_TL_FCIMM_P_VAL);
}
}
static void paxb_cfg_clkreq(void)
{
uint32_t val, core_idx;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
val = paxb_rc_cfg_read(core_idx, CFG_RC_CLKREQ_ENABLED);
val &= ~CFG_RC_CLKREQ_ENABLED_MASK;
paxb_rc_cfg_write(core_idx, CFG_RC_CLKREQ_ENABLED, val);
}
}
static void paxb_cfg_dl_active(bool enable)
{
uint32_t val, core_idx;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
val = paxb_rc_cfg_read(core_idx, CFG_LINK_CAP_RC);
if (enable)
val |= CFG_RC_DL_ACTIVE_MASK;
else
val &= ~CFG_RC_DL_ACTIVE_MASK;
paxb_rc_cfg_write(core_idx, CFG_LINK_CAP_RC, val);
}
}
static void paxb_cfg_LTR(int enable)
{
uint32_t val, core_idx;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
val = paxb_rc_cfg_read(core_idx, CFG_ROOT_CAP_RC);
if (enable)
val |= CFG_ROOT_CAP_LTR_MASK;
else
val &= ~CFG_ROOT_CAP_LTR_MASK;
paxb_rc_cfg_write(core_idx, CFG_ROOT_CAP_RC, val);
}
}
static void paxb_ib_regs_bypass(void)
{
unsigned int i, j;
for (i = 0; i < paxb->num_cores; i++) {
if (!pcie_core_needs_enable(i))
continue;
/* Configure Default IMAP window */
mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP,
DEFAULT_ADDR_INVALID);
mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXUSER,
IMAP_AXUSER);
mmio_write_32(PAXB_OFFSET(i) + PAXB_0_DEFAULT_IMAP_AXCACHE,
IMAP_AXCACHE);
/* Configure MSI IMAP window */
mmio_setbits_32(PAXB_OFFSET(i) +
PAXB_IMAP0_REGS_TYPE_OFFSET,
0x1);
mmio_write_32(PAXB_OFFSET(i) + PAXB_IARR0_BASE_OFFSET,
GITS_TRANSLATER | OARR_VALID);
for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
(GITS_TRANSLATER +
(j * PAXB_IMAP0_WINDOW_SIZE)) |
IMAP_VALID);
}
}
}
static void paxb_ib_regs_init(void)
{
unsigned int core_idx;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
/* initialize IARR2 to zero */
mmio_write_32(PAXB_OFFSET(core_idx) + PAXB_IARR2_LOWER_OFFSET,
0x0);
mmio_setbits_32(PAXB_OFFSET(core_idx) +
PAXB_IMAP0_REGS_TYPE_OFFSET,
0x1);
}
}
static void paxb_cfg_apb_timeout(void)
{
unsigned int core_idx;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
/* allow unlimited timeout */
mmio_write_32(PAXB_OFFSET(core_idx) +
PAXB_APB_TIMEOUT_COUNT_OFFSET,
0xFFFFFFFF);
}
}
static void paxb_smmu_cfg(void)
{
unsigned int core_idx;
uint32_t offset;
uint32_t val;
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
offset = core_idx * PCIE_CORE_PWR_OFFSET;
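		/*
		 * Added note (assumption, not from the original change):
		 * PAXB_SMMU_SID_CFG is understood to control how the SMMU
		 * stream ID is derived from the PCIe requester ID; the
		 * writes below program the function, device and bus field
		 * widths within bits [19:8] of the register.
		 */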
val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset);
val &= ~(0xFFF00);
val |= (PAXB_SMMU_SID_CFG_FUN_WIDTH |
PAXB_SMMU_SID_CFG_DEV_WIDTH |
PAXB_SMMU_SID_CFG_BUS_WIDTH);
mmio_write_32(PCIE_PAXB_SMMU_SID_CFG + offset, val);
val = mmio_read_32(PCIE_PAXB_SMMU_SID_CFG + offset);
VERBOSE("smmu cfg reg 0x%x\n", val);
}
}
static void paxb_cfg_coherency(void)
{
unsigned int i, j;
for (i = 0; i < paxb->num_cores; i++) {
if (!pcie_core_needs_enable(i))
continue;
#ifdef USE_DDR
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_OFFSET,
IMAP_ARCACHE | IMAP_AWCACHE);
#endif
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_0_AXUSER_OFFSET,
IMAP_AXUSER);
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP2_AXUSER_OFFSET,
IMAP_AXUSER);
for (j = 0; j < PAXB_MAX_IMAP_WINDOWS; j++) {
#ifdef USE_DDR
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP3_OFFSET(j),
IMAP_ARCACHE | IMAP_AWCACHE);
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP4_OFFSET(j),
IMAP_ARCACHE | IMAP_AWCACHE);
#endif
/* zero out IMAP0 mapping windows for MSI/MSI-X */
mmio_write_32(PAXB_OFFSET(i) + PAXB_IMAP0_OFFSET(j),
0x0);
mmio_write_32(PAXB_OFFSET(i) +
PAXB_IMAP3_0_AXUSER_OFFSET(j),
IMAP_AXUSER);
mmio_write_32(PAXB_OFFSET(i) +
PAXB_IMAP4_0_AXUSER_OFFSET(j),
IMAP_AXUSER);
}
}
}
/*
 * This function configures all PAXB-related blocks to allow non-secure
 * access.
 */
void paxb_ns_init(enum paxb_type type)
{
unsigned int reg;
switch (type) {
case PAXB_SR:
for (reg = 0; reg < ARRAY_SIZE(paxb_sec_reg_offset); reg++) {
mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE +
paxb_sec_reg_offset[reg], 0x1);
}
		/* Enable non-secure access to all PAXB-related IDM blocks */
mmio_setbits_32(SR_PCIE_NIC_SECURITY_BASE + PAXB_SECURITY_IDM_OFFSET,
0xffff);
break;
case PAXB_NS3Z:
mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
paxb_sec_reg_offset[0], 0x1);
mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
PAXB_SECURITY_IDM_OFFSET, 0xffff);
mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
PAXB_SECURITY_APB_OFFSET, 0x7);
mmio_setbits_32(NS3Z_PCIE_NIC_SECURITY_BASE +
PAXB_SECURITY_ECAM_OFFSET, 0x1);
break;
}
}
static int paxb_set_config(void)
{
paxb = paxb_get_config(sr);
if (paxb)
return 0;
return -ENODEV;
}
void paxb_init(void)
{
int ret;
ret = paxb_set_config();
if (ret)
return;
paxb_ns_init(paxb->type);
ret = pcie_cores_init();
if (ret)
return;
if (paxb->phy_init) {
ret = paxb->phy_init();
if (ret)
return;
}
paxb_cfg_dev_id();
paxb_cfg_tgt_trn();
paxb_cfg_pdl_ctrl();
if (paxb->type == PAXB_SR) {
paxb_ib_regs_init();
paxb_cfg_coherency();
} else
paxb_ib_regs_bypass();
paxb_cfg_apb_timeout();
paxb_smmu_cfg();
paxb_cfg_clkreq();
paxb_rc_link_init();
	/* Stingray does not support LTR */
paxb_cfg_LTR(false);
paxb_cfg_dl_active(true);
paxb_cfg_mps();
#ifdef PAXB_LINKUP
paxb_start_link_up();
#endif
INFO("PAXB init done\n");
}
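/*
 * Hedged usage sketch (not part of the original change): platform BL31
 * setup code is expected to call paxb_init() once, after the console
 * and MMU have been set up, alongside paxc_init() from the PAXC driver
 * below, e.g.:
 *
 *	paxb_init();	// PAXB root complexes (this file)
 *	paxc_init();	// PAXC root complex (below)
 *
 * The exact call site is platform-specific and is not shown here.
 */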
/*
* Copyright (c) 2017 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/debug.h>
#include <lib/mmio.h>
#include <iommu.h>
#include <platform_def.h>
#include <sr_utils.h>
#define PAXC_BASE 0x60400000
#define PAXC_AXI_CFG_PF 0x10
#define PAXC_AXI_CFG_PF_OFFSET(pf) (PAXC_AXI_CFG_PF + (pf) * 4)
#define PAXC_ARPROT_PF_CFG 0x40
#define PAXC_AWPROT_PF_CFG 0x44
#define PAXC_ARQOS_PF_CFG 0x48
#define PAXC_ARQOS_VAL 0xaaaaaaaa
#define PAXC_AWQOS_PF_CFG 0x4c
#define PAXC_AWQOS_VAL 0xeeeeeeee
#define PAXC_CFG_IND_ADDR_OFFSET 0x1f0
#define PAXC_CFG_IND_ADDR_MASK 0xffc
#define PAXC_CFG_IND_DATA_OFFSET 0x1f4
/* offsets for PAXC root complex configuration space registers */
#define PAXC_CFG_ID_OFFSET 0x434
#define PAXC_RC_VENDOR_ID 0x14e4
#define PAXC_RC_VENDOR_ID_SHIFT 16
#define PAXC_RC_DEVICE_ID 0xd750
#define PAXC_CFG_LINK_CAP_OFFSET 0x4dc
#define PAXC_RC_LINK_CAP_SPD_SHIFT 0
#define PAXC_RC_LINK_CAP_SPD_MASK (0xf << PAXC_RC_LINK_CAP_SPD_SHIFT)
#define PAXC_RC_LINK_CAP_SPD 3
#define PAXC_RC_LINK_CAP_WIDTH_SHIFT 4
#define PAXC_RC_LINK_CAP_WIDTH_MASK (0x1f << PAXC_RC_LINK_CAP_WIDTH_SHIFT)
#define PAXC_RC_LINK_CAP_WIDTH 16
/* offsets for MHB registers */
#define MHB_BASE 0x60401000
#define MHB_MEM_PWR_STATUS_PAXC (MHB_BASE + 0x1c0)
#define MHB_PWR_ARR_POWERON 0x8
#define MHB_PWR_ARR_POWEROK 0x4
#define MHB_PWR_POWERON 0x2
#define MHB_PWR_POWEROK 0x1
#define MHB_PWR_STATUS_MASK (MHB_PWR_ARR_POWERON | \
MHB_PWR_ARR_POWEROK | \
MHB_PWR_POWERON | \
MHB_PWR_POWEROK)
/* max number of PFs from Nitro that PAXC sees */
#define MAX_NR_NITRO_PF 8
#ifdef EMULATION_SETUP
static void paxc_reg_dump(void)
{
}
#else
/* total number of PAXC registers */
#define NR_PAXC_REGS 53
static void paxc_reg_dump(void)
{
uint32_t idx, offset = 0;
VERBOSE("PAXC register dump start\n");
for (idx = 0; idx < NR_PAXC_REGS; idx++, offset += 4)
VERBOSE("offset: 0x%x val: 0x%x\n", offset,
mmio_read_32(PAXC_BASE + offset));
VERBOSE("PAXC register dump end\n");
}
#endif /* EMULATION_SETUP */
#ifdef EMULATION_SETUP
static void mhb_reg_dump(void)
{
}
#else
#define NR_MHB_REGS 227
static void mhb_reg_dump(void)
{
uint32_t idx, offset = 0;
VERBOSE("MHB register dump start\n");
for (idx = 0; idx < NR_MHB_REGS; idx++, offset += 4)
VERBOSE("offset: 0x%x val: 0x%x\n", offset,
mmio_read_32(MHB_BASE + offset));
VERBOSE("MHB register dump end\n");
}
#endif /* EMULATION_SETUP */
static void paxc_rc_cfg_write(uint32_t where, uint32_t val)
{
mmio_write_32(PAXC_BASE + PAXC_CFG_IND_ADDR_OFFSET,
where & PAXC_CFG_IND_ADDR_MASK);
mmio_write_32(PAXC_BASE + PAXC_CFG_IND_DATA_OFFSET, val);
}
static uint32_t paxc_rc_cfg_read(uint32_t where)
{
mmio_write_32(PAXC_BASE + PAXC_CFG_IND_ADDR_OFFSET,
where & PAXC_CFG_IND_ADDR_MASK);
return mmio_read_32(PAXC_BASE + PAXC_CFG_IND_DATA_OFFSET);
}
/*
* Function to program PAXC root complex link capability register
*/
static void paxc_cfg_link_cap(void)
{
uint32_t val;
val = paxc_rc_cfg_read(PAXC_CFG_LINK_CAP_OFFSET);
val &= ~(PAXC_RC_LINK_CAP_SPD_MASK | PAXC_RC_LINK_CAP_WIDTH_MASK);
val |= (PAXC_RC_LINK_CAP_SPD << PAXC_RC_LINK_CAP_SPD_SHIFT) |
(PAXC_RC_LINK_CAP_WIDTH << PAXC_RC_LINK_CAP_WIDTH_SHIFT);
paxc_rc_cfg_write(PAXC_CFG_LINK_CAP_OFFSET, val);
}
/*
* Function to program PAXC root complex vendor ID and device ID
*/
static void paxc_cfg_id(void)
{
uint32_t val;
val = (PAXC_RC_VENDOR_ID << PAXC_RC_VENDOR_ID_SHIFT) |
PAXC_RC_DEVICE_ID;
paxc_rc_cfg_write(PAXC_CFG_ID_OFFSET, val);
}
void paxc_init(void)
{
unsigned int pf_index;
unsigned int val;
val = mmio_read_32(MHB_MEM_PWR_STATUS_PAXC);
if ((val & MHB_PWR_STATUS_MASK) != MHB_PWR_STATUS_MASK) {
INFO("PAXC not powered\n");
return;
}
paxc_cfg_id();
paxc_cfg_link_cap();
paxc_reg_dump();
mhb_reg_dump();
#ifdef USE_DDR
/*
* Set AWCACHE and ARCACHE to 0xff (Cacheable write-back,
* allocate on both reads and writes) per
* recommendation from the ASIC team
*/
val = 0xff;
#else
/* disable IO cache if non-DDR memory is used, e.g., external SRAM */
val = 0x0;
#endif
for (pf_index = 0; pf_index < MAX_NR_NITRO_PF; pf_index++)
mmio_write_32(PAXC_BASE + PAXC_AXI_CFG_PF_OFFSET(pf_index),
val);
/*
* Set ARPROT and AWPROT to enable non-secure access from
* PAXC to all PFs, PF0 to PF7
*/
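	/*
	 * Added note (assumption, not from the original change): each
	 * nibble of 0x22222222 appears to correspond to one PF, with the
	 * value 0x2 setting AxPROT[1] (the AXI non-secure bit) for that
	 * PF's transactions.
	 */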
mmio_write_32(PAXC_BASE + PAXC_ARPROT_PF_CFG, 0x22222222);
mmio_write_32(PAXC_BASE + PAXC_AWPROT_PF_CFG, 0x22222222);
mmio_write_32(PAXC_BASE + PAXC_ARQOS_PF_CFG, PAXC_ARQOS_VAL);
mmio_write_32(PAXC_BASE + PAXC_AWQOS_PF_CFG, PAXC_AWQOS_VAL);
INFO("PAXC init done\n");
}
/*
 * These defines do not match the regfile; they have been renamed to be
 * more readable.
 */
#define MHB_NIC_SECURITY_BASE 0x60500000
#define MHB_NIC_PAXC_AXI_NS 0x0008
#define MHB_NIC_IDM_NS 0x000c
#define MHB_NIC_MHB_APB_NS 0x0010
#define MHB_NIC_NITRO_AXI_NS 0x0014
#define MHB_NIC_PCIE_AXI_NS 0x0018
#define MHB_NIC_PAXC_APB_NS 0x001c
#define MHB_NIC_EP_APB_NS 0x0020
#define MHB_NIC_PAXC_APB_S_IDM_SHIFT 5
#define MHB_NIC_EP_APB_S_IDM_SHIFT 4
#define MHB_NIC_MHB_APB_S_IDM_SHIFT 3
#define MHB_NIC_PAXC_AXI_S_IDM_SHIFT 2
#define MHB_NIC_PCIE_AXI_S_IDM_SHIFT 1
#define MHB_NIC_NITRO_AXI_S_IDM_SHIFT 0
#define NIC400_NITRO_TOP_NIC_SECURITY_BASE 0x60d00000
#define NITRO_NIC_SECURITY_3_SHIFT 0x14
#define NITRO_NIC_SECURITY_4_SHIFT 0x18
#define NITRO_NIC_SECURITY_5_SHIFT 0x1c
#define NITRO_NIC_SECURITY_6_SHIFT 0x20
void paxc_mhb_ns_init(void)
{
unsigned int val;
uintptr_t mhb_nic_gpv = MHB_NIC_SECURITY_BASE;
#ifndef NITRO_SECURE_ACCESS
uintptr_t nic400_nitro_gpv = NIC400_NITRO_TOP_NIC_SECURITY_BASE;
#endif /* NITRO_SECURE_ACCESS */
/* set PAXC AXI to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_PAXC_AXI_NS);
val |= 0x1;
mmio_write_32(mhb_nic_gpv + MHB_NIC_PAXC_AXI_NS, val);
/* set various MHB IDM interfaces to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_IDM_NS);
val |= (0x1 << MHB_NIC_PAXC_APB_S_IDM_SHIFT);
val |= (0x1 << MHB_NIC_EP_APB_S_IDM_SHIFT);
val |= (0x1 << MHB_NIC_MHB_APB_S_IDM_SHIFT);
val |= (0x1 << MHB_NIC_PAXC_AXI_S_IDM_SHIFT);
val |= (0x1 << MHB_NIC_PCIE_AXI_S_IDM_SHIFT);
val |= (0x1 << MHB_NIC_NITRO_AXI_S_IDM_SHIFT);
mmio_write_32(mhb_nic_gpv + MHB_NIC_IDM_NS, val);
/* set MHB APB to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_MHB_APB_NS);
val |= 0x1;
mmio_write_32(mhb_nic_gpv + MHB_NIC_MHB_APB_NS, val);
/* set Nitro AXI to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_NITRO_AXI_NS);
val |= 0x1;
mmio_write_32(mhb_nic_gpv + MHB_NIC_NITRO_AXI_NS, val);
/* set PCIe AXI to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_PCIE_AXI_NS);
val |= 0x1;
mmio_write_32(mhb_nic_gpv + MHB_NIC_PCIE_AXI_NS, val);
/* set PAXC APB to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_PAXC_APB_NS);
val |= 0x1;
mmio_write_32(mhb_nic_gpv + MHB_NIC_PAXC_APB_NS, val);
/* set EP APB to allow non-secure access */
val = mmio_read_32(mhb_nic_gpv + MHB_NIC_EP_APB_NS);
val |= 0x1;
mmio_write_32(mhb_nic_gpv + MHB_NIC_EP_APB_NS, val);
#ifndef NITRO_SECURE_ACCESS
/* Set NIC400 to allow non-secure access */
mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_3_SHIFT, 0x1);
mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_4_SHIFT, 0x1);
mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_5_SHIFT, 0x1);
mmio_setbits_32(nic400_nitro_gpv + NITRO_NIC_SECURITY_6_SHIFT, 0x1);
#endif /* NITRO_SECURE_ACCESS */
}
/*
* Copyright (c) 2015 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/ccn.h>
#include <drivers/delay_timer.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#ifdef USE_PAXC
#include <chimp.h>
#endif
#include <cmn_plat_util.h>
#include <ihost_pm.h>
#include <plat_brcm.h>
#include <platform_def.h>
static uint64_t plat_sec_entrypoint;
/*******************************************************************************
* SR handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
static int brcm_pwr_domain_on(u_register_t mpidr)
{
int cpuid;
cpuid = plat_brcm_calc_core_pos(mpidr);
INFO("mpidr :%lu, cpuid:%d\n", mpidr, cpuid);
#ifdef USE_SINGLE_CLUSTER
if (cpuid > 1)
return PSCI_E_INTERN_FAIL;
#endif
ihost_power_on_cluster(mpidr);
ihost_power_on_secondary_core(mpidr, plat_sec_entrypoint);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* SR handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
assert(target_state->pwr_domain_state[MPIDR_AFFLVL0] ==
PLAT_LOCAL_STATE_OFF);
if (target_state->pwr_domain_state[MPIDR_AFFLVL1] ==
PLAT_LOCAL_STATE_OFF) {
INFO("Cluster #%lu entering to snoop/dvm domain\n", cluster_id);
ccn_enter_snoop_dvm_domain(1 << cluster_id);
}
/* Enable the gic cpu interface */
plat_brcm_gic_pcpu_init();
/* Program the gic per-cpu distributor or re-distributor interface */
plat_brcm_gic_cpuif_enable();
INFO("Gic Initialization done for this affinity instance\n");
}
static void __dead2 brcm_system_reset(void)
{
uint32_t reset_type = SOFT_SYS_RESET_L1;
#ifdef USE_PAXC
if (bcm_chimp_is_nic_mode())
reset_type = SOFT_RESET_L3;
#endif
INFO("System rebooting - L%d...\n", reset_type);
plat_soft_reset(reset_type);
	/*
	 * Prevent the function from returning, as required by the
	 * __dead2 attribute.
	 */
while (1)
;
}
static int brcm_system_reset2(int is_vendor, int reset_type,
u_register_t cookie)
{
INFO("System rebooting - L%d...\n", reset_type);
plat_soft_reset(reset_type);
/*
* plat_soft_reset cannot return (it is a __dead function),
* but brcm_system_reset2 has to return some value, even in
* this case.
*/
return 0;
}
/*******************************************************************************
* Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
* platform will take care of registering the handlers with PSCI.
******************************************************************************/
const plat_psci_ops_t plat_brcm_psci_pm_ops = {
.pwr_domain_on = brcm_pwr_domain_on,
.pwr_domain_on_finish = brcm_pwr_domain_on_finish,
.system_reset = brcm_system_reset,
.system_reset2 = brcm_system_reset2
};
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*psci_ops = &plat_brcm_psci_pm_ops;
plat_sec_entrypoint = sec_entrypoint;
return 0;
}
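/*
 * Hedged illustration (not part of the original change): the generic
 * PSCI framework calls plat_setup_psci_ops() during psci_setup() and
 * then invokes the returned handlers, roughly as follows:
 *
 *	const plat_psci_ops_t *ops;
 *
 *	plat_setup_psci_ops(sec_entrypoint, &ops);
 *	ops->pwr_domain_on(target_mpidr);	// CPU_ON
 *	ops->system_reset();			// SYSTEM_RESET
 *
 * The exact call sites live in the generic PSCI library and are not
 * shown here.
 */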