Commit 926cd70a authored by Manish Pandey, committed by TrustedFirmware Code Review

Merge changes from topic "brcm_initial_support" into integration

* changes:
  doc: brcm: Add documentation file for brcm stingray platform
  drivers: Add SPI Nor flash support
  drivers: Add iproc spi driver
  drivers: Add emmc driver for Broadcom platforms
  Add BL31 support for Broadcom stingray platform
  Add BL2 support for Broadcom stingray platform
  Add bl31 support common across Broadcom platforms
  Add bl2 setup code common across Broadcom platforms
  drivers: Add support to retrieve plat_toc_flags
parents 33f1dd9c fd1017b1
/*
* Copyright (c) 2017-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <drivers/delay_timer.h>
#include <platform_def.h>
#include <scp.h>
#include <scp_cmd.h>
#include "m0_ipc.h"
/*
* Reads a response from CRMU MAILBOX
* Assumes that access has been granted and locked.
* Note that this is just a temporary implementation until
* channels are introduced
*/
static void scp_read_response(crmu_response_t *resp)
{
uint32_t code;
code = mmio_read_32(CRMU_MAIL_BOX0);
resp->completed = code & MCU_IPC_CMD_DONE_MASK;
resp->cmd = code & SCP_CMD_MASK;
resp->ret = (code & MCU_IPC_CMD_REPLY_MASK) >> MCU_IPC_CMD_REPLY_SHIFT;
}
/*
* Send a command to the SCP and wait up to 'timeout' microseconds for a reply.
* Return: 0 on success
*         -1 if there was no proper reply from the SCP
*         >0 if the MCU responded but the command completed with an error
*/
int scp_send_cmd(uint32_t cmd, uint32_t param, uint32_t timeout)
{
int ret = -1;
mmio_write_32(CRMU_MAIL_BOX0, cmd);
mmio_write_32(CRMU_MAIL_BOX1, param);
do {
crmu_response_t scp_resp;
udelay(1);
scp_read_response(&scp_resp);
if (scp_resp.completed &&
(scp_resp.cmd == cmd)) {
/* This command has completed */
ret = scp_resp.ret;
break;
}
} while (--timeout);
return ret;
}
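/*
* Illustrative usage sketch, assuming MCU_IPC_MCU_CMD_NOP and
* SCP_CMD_DEFAULT_TIMEOUT_US as defined in m0_ipc.h / scp_cmd.h (the same
* constants used by is_crmu_alive() below); a caller checks the three-way
* return convention documented above:
*
*	int ret = scp_send_cmd(MCU_IPC_MCU_CMD_NOP, 0,
*			       SCP_CMD_DEFAULT_TIMEOUT_US);
*	if (ret < 0)
*		ERROR("no reply from SCP\n");		-- timed out
*	else if (ret > 0)
*		ERROR("SCP returned error %d\n", ret);	-- completed with error
*	else
*		INFO("SCP alive\n");			-- success
*/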
/*
* Copyright (c) 2017-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <bcm_elog_ddr.h>
#include <brcm_mhu.h>
#include <brcm_scpi.h>
#include <chimp.h>
#include <cmn_plat_util.h>
#include <ddr_init.h>
#include <scp.h>
#include <scp_cmd.h>
#include <scp_utils.h>
#include "m0_cfg.h"
#include "m0_ipc.h"
#ifdef BCM_ELOG
static void prepare_elog(void)
{
#if (CLEAN_DDR && !defined(MMU_DISABLED))
/*
* Now DDR has been initialized. We want to copy all the logs in SRAM
* into DDR so we will have much more space to store the logs in the
* next boot stage
*/
bcm_elog_copy_log((void *)BCM_ELOG_BL31_BASE,
MIN(BCM_ELOG_BL2_SIZE, BCM_ELOG_BL31_SIZE)
);
/*
* We are almost at the end of BL2, and we can stop logging here so we do
* not need to add 'bcm_elog_exit' to the standard BL2 code. The benefit
* of capturing BL2 logs beyond this point is minimal in a production
* system.
*/
bcm_elog_exit();
#endif
/*
* Notify CRMU that now it should pull logs from DDR instead of from
* FS4 SRAM.
*/
SCP_WRITE_CFG(flash_log.can_use_ddr, 1);
}
#endif
bool is_crmu_alive(void)
{
return (scp_send_cmd(MCU_IPC_MCU_CMD_NOP, 0, SCP_CMD_DEFAULT_TIMEOUT_US)
== 0);
}
bool bcm_scp_issue_sys_reset(void)
{
return (scp_send_cmd(MCU_IPC_MCU_CMD_L1_RESET, 0,
SCP_CMD_DEFAULT_TIMEOUT_US));
}
/*
* Note that this is just a temporary implementation until
* channels are introduced
*/
int plat_bcm_bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
int scp_patch_activated, scp_patch_version;
#ifndef EMULATION_SETUP
uint8_t active_ch_bitmap, i;
#endif
uint32_t reset_state = 0;
uint32_t mcu_ap_init_param = 0;
/*
* First check whether the SCP patch has already been loaded:
* send a NOP command and see if there is a valid response.
*/
scp_patch_activated =
(scp_send_cmd(MCU_IPC_MCU_CMD_NOP, 0,
SCP_CMD_DEFAULT_TIMEOUT_US) == 0);
if (scp_patch_activated) {
INFO("SCP Patch is already active.\n");
reset_state = SCP_READ_CFG(board_cfg.reset_state);
mcu_ap_init_param = SCP_READ_CFG(board_cfg.mcu_init_param);
/* Clear reset state, it's been already read */
SCP_WRITE_CFG(board_cfg.reset_state, 0);
if (mcu_ap_init_param & MCU_PATCH_LOADED_BY_NITRO) {
/*
* Reset the "MCU_PATCH_LOADED_BY_NITRO" flag, but
* preserve any other flags we do not deal with here
*/
INFO("AP booted by Nitro\n");
SCP_WRITE_CFG(
board_cfg.mcu_init_param,
mcu_ap_init_param &
~MCU_PATCH_LOADED_BY_NITRO
);
}
} else {
/*
* MCU patch not loaded, so load it.
* The MCU patch stamps critical points in REG9 (debug test-point).
* Display its last content here; this helps locate where a crash
* occurred if a CRMU watchdog kicked in.
*/
int ret;
INFO("MCU Patch Point: 0x%x\n",
mmio_read_32(CRMU_IHOST_SW_PERSISTENT_REG9));
ret = download_scp_patch((void *)scp_bl2_image_info->image_base,
scp_bl2_image_info->image_size);
if (ret != 0)
return ret;
VERBOSE("SCP Patch loaded OK.\n");
ret = scp_send_cmd(MCU_IPC_MCU_CMD_INIT,
MCU_PATCH_LOADED_BY_AP,
SCP_CMD_SCP_BOOT_TIMEOUT_US);
if (ret) {
ERROR("SCP Patch could not initialize; error %d\n",
ret);
return ret;
}
INFO("SCP Patch successfully initialized.\n");
}
scp_patch_version = scp_send_cmd(MCU_IPC_MCU_CMD_GET_FW_VERSION, 0,
SCP_CMD_DEFAULT_TIMEOUT_US);
INFO("SCP Patch version :0x%x\n", scp_patch_version);
/* Next block just reports current AVS voltages (if applicable) */
{
uint16_t vcore_mv, ihost03_mv, ihost12_mv;
vcore_mv = SCP_READ_CFG16(vcore.millivolts) +
SCP_READ_CFG8(vcore.avs_cfg.additive_margin);
ihost03_mv = SCP_READ_CFG16(ihost03.millivolts) +
SCP_READ_CFG8(ihost03.avs_cfg.additive_margin);
ihost12_mv = SCP_READ_CFG16(ihost12.millivolts) +
SCP_READ_CFG8(ihost12.avs_cfg.additive_margin);
if (vcore_mv || ihost03_mv || ihost12_mv) {
INFO("AVS voltages from cfg (including margin)\n");
if (vcore_mv > 0)
INFO("%s\tVCORE: %dmv\n",
SCP_READ_CFG8(vcore.avs_cfg.avs_set) ?
"*" : "n/a", vcore_mv);
if (ihost03_mv > 0)
INFO("%s\tIHOST03: %dmv\n",
SCP_READ_CFG8(ihost03.avs_cfg.avs_set) ?
"*" : "n/a", ihost03_mv);
if (ihost12_mv > 0)
INFO("%s\tIHOST12: %dmv\n",
SCP_READ_CFG8(ihost12.avs_cfg.avs_set) ?
"*" : "n/a", ihost12_mv);
} else {
INFO("AVS settings not applicable\n");
}
}
#if (CLEAN_DDR && !defined(MMU_DISABLED) && !defined(EMULATION_SETUP))
/* This will clean the DDR and enable ECC if set */
check_ddr_clean();
#endif
#if (WARMBOOT_DDR_S3_SUPPORT && ELOG_STORE_MEDIA_DDR)
elog_init_ddr_log();
#endif
#ifdef BCM_ELOG
/* Prepare ELOG to use DDR */
prepare_elog();
#endif
#ifndef EMULATION_SETUP
/* Ask ddr_init to save obtained DDR information into DDR */
ddr_info_save();
#endif
/*
* Configure TMON DDR address.
* This cfg is common for all cases
*/
SCP_WRITE_CFG(tmon_cfg.ddr_desc, TMON_SHARED_DDR_ADDRESS);
if (reset_state == SOFT_RESET_L3 && !mcu_ap_init_param) {
INFO("SCP configuration after L3 RESET done.\n");
return 0;
}
if (bcm_chimp_is_nic_mode())
/* Configure AP WDT to not reset the NIC interface */
SCP_WRITE_CFG(board_cfg.apwdt_reset_type, SOFT_RESET_L3);
#if (WARMBOOT_DDR_S3_SUPPORT && ELOG_STORE_MEDIA_DDR)
/* When the AP WDog triggers, perform an L3 reset if DDR error logging is enabled */
SCP_WRITE_CFG(board_cfg.apwdt_reset_type, SOFT_RESET_L3);
#endif
#ifndef EMULATION_SETUP
#ifdef DDR_SCRUB_ENA
ddr_scrub_enable();
#endif
/* Fill the Active channel information */
active_ch_bitmap = get_active_ddr_channel();
for (i = 0; i < MAX_NR_DDR_CH; i++)
SCP_WRITE_CFG(ddr_cfg.ddr_cfg[i],
(active_ch_bitmap & BIT(i)) ? 1 : 0);
#endif
return 0;
}
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <stdbool.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <sdio.h>
#include <sr_def.h>
#include <sr_utils.h>
const SDIO_CFG sr_sdio0_cfg = {
.cfg_base = SR_IPROC_SDIO0_CFG_BASE,
.sid_base = SR_IPROC_SDIO0_SID_BASE,
.io_ctrl_base = SR_IPROC_SDIO0_IOCTRL_BASE,
.pad_base = SR_IPROC_SDIO0_PAD_BASE,
};
const SDIO_CFG sr_sdio1_cfg = {
.cfg_base = SR_IPROC_SDIO1_CFG_BASE,
.sid_base = SR_IPROC_SDIO1_SID_BASE,
.io_ctrl_base = SR_IPROC_SDIO1_IOCTRL_BASE,
.pad_base = SR_IPROC_SDIO1_PAD_BASE,
};
void brcm_stingray_sdio_init(void)
{
unsigned int val;
const SDIO_CFG *sdio0_cfg, *sdio1_cfg;
sdio0_cfg = &sr_sdio0_cfg;
sdio1_cfg = &sr_sdio1_cfg;
INFO("set sdio0 caps\n");
/* SDIO0 CAPS0 */
val = SDIO0_CAP0_CFG;
INFO("caps0 0x%x\n", val);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_CAP0, val);
/* SDIO0 CAPS1 */
val = SDIO0_CAP1_CFG;
INFO("caps1 0x%x\n", val);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_CAP1, val);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_0,
SDIO_PRESETVAL0);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_1,
SDIO_PRESETVAL1);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_2,
SDIO_PRESETVAL2);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_3,
SDIO_PRESETVAL3);
mmio_write_32(sdio0_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_4,
SDIO_PRESETVAL4);
val = SR_SID_VAL(0x3, 0x0, 0x2) << SDIO_SID_SHIFT;
mmio_write_32(sdio0_cfg->sid_base + ICFG_SDIO_SID_ARADDR, val);
mmio_write_32(sdio0_cfg->sid_base + ICFG_SDIO_SID_AWADDR, val);
val = mmio_read_32(sdio0_cfg->io_ctrl_base);
val &= ~(0xff << 23); /* Clear ARCACHE and AWCACHE */
val |= (0xb7 << 23); /* Set ARCACHE and AWCACHE */
mmio_write_32(sdio0_cfg->io_ctrl_base, val);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_CLK,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA0,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA1,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA2,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA3,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA4,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA5,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA6,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_DATA7,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio0_cfg->pad_base + PAD_SDIO_CMD,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
INFO("set sdio1 caps\n");
/* SDIO1 CAPS0 */
val = SDIO1_CAP0_CFG;
INFO("caps0 0x%x\n", val);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_CAP0, val);
/* SDIO1 CAPS1 */
val = SDIO1_CAP1_CFG;
INFO("caps1 0x%x\n", val);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_CAP1, val);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_0,
SDIO_PRESETVAL0);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_1,
SDIO_PRESETVAL1);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_2,
SDIO_PRESETVAL2);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_3,
SDIO_PRESETVAL3);
mmio_write_32(sdio1_cfg->cfg_base + ICFG_SDIO_STRAPSTATUS_4,
SDIO_PRESETVAL4);
val = SR_SID_VAL(0x3, 0x0, 0x3) << SDIO_SID_SHIFT;
mmio_write_32(sdio1_cfg->sid_base + ICFG_SDIO_SID_ARADDR, val);
mmio_write_32(sdio1_cfg->sid_base + ICFG_SDIO_SID_AWADDR, val);
val = mmio_read_32(sdio1_cfg->io_ctrl_base);
val &= ~(0xff << 23); /* Clear ARCACHE and AWCACHE */
val |= (0xb7 << 23); /* Set ARCACHE and AWCACHE */
mmio_write_32(sdio1_cfg->io_ctrl_base, val);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_CLK,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA0,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA1,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA2,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA3,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA4,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA5,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA6,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_DATA7,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
mmio_clrsetbits_32(sdio1_cfg->pad_base + PAD_SDIO_CMD,
PAD_SDIO_MASK, PAD_SDIO_VALUE);
INFO("sdio init done\n");
}
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <stdbool.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <paxb.h>
#include <sr_def.h>
#include <sr_utils.h>
/* total number of PCIe Phys */
#define NUM_OF_PCIE_SERDES 8
#define CFG_RC_PMI_ADDR 0x1130
#define PMI_RX_TERM_SEQ ((0x1 << 27) | (0x1ff << 16) | (0xd090))
#define PMI_RX_TERM_VAL 0x4c00
#define PMI_PLL_CTRL_4 0xd0b4
#define PMI_SERDES_CLK_ENABLE (1 << 12)
#define WAR_PLX_PRESET_PARITY_FAIL
#define CFG_RC_REG_PHY_CTL_10 0x1838
#define PHY_CTL_10_GEN3_MATCH_PARITY (1 << 15)
#define PMI_X8_CORE0_7_PATCH_SEQ ((0x1 << 27) | (0x1ff << 16) | (0xd2a5))
#define PMI_X8_CORE0_7_PATCH_VAL 0xd864
#define PMI_ADDR_BCAST(addr) ((0x1 << 27) | (0x1ff << 16) | (addr))
#define PMI_ADDR_LANE0(addr) ((0x1 << 27) | (addr))
#define PMI_ADDR_LANE1(addr) ((0x1 << 27) | (0x1 << 16) | (addr))
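/*
* Interpreting the PMI address encoding used by the three macros above (an
* inference from the macro definitions only, not from a datasheet): bit 27
* is always set, bits [24:16] select the lane (0x1ff acts as a broadcast to
* all lanes, 0x0/0x1 select lane 0/1), and bits [15:0] carry the Serdes
* register address. For example:
*	PMI_ADDR_BCAST(PMI_PLL_CTRL_4) == (0x1 << 27) | (0x1ff << 16) | 0xd0b4
*	PMI_ADDR_LANE1(PMI_PLL_CTRL_4) == (0x1 << 27) | (0x1 << 16) | 0xd0b4
*/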
#define MERLIN16_PCIE_BLK2_PWRMGMT_7 ((0x1 << 27) | (0x1ff << 16) | 0x1208)
#define MERLIN16_PCIE_BLK2_PWRMGMT_8 ((0x1 << 27) | (0x1ff << 16) | 0x1209)
#define MERLIN16_AMS_TX_CTRL_5 ((0x1 << 27) | (0x1ff << 16) | 0xd0a5)
#define MERLIN16_AMS_TX_CTRL_5_VAL \
((1 << 13) | (1 << 12) | (1 << 11) | (1 << 10))
#define MERLIN16_PCIE_BLK2_PWRMGMT_7_VAL 0x96
#define MERLIN16_PCIE_BLK2_PWRMGMT_8_VAL 0x12c
#define CFG_RC_PMI_WDATA 0x1134
#define CFG_RC_WCMD_SHIFT 31
#define CFG_RC_WCMD_MASK ((uint32_t)1U << CFG_RC_WCMD_SHIFT)
#define CFG_RC_RCMD_SHIFT 30
#define CFG_RC_RCMD_MASK ((uint32_t)1U << CFG_RC_RCMD_SHIFT)
#define CFG_RC_RWCMD_MASK (CFG_RC_RCMD_MASK | CFG_RC_WCMD_MASK)
#define CFG_RC_PMI_RDATA 0x1138
#define CFG_RC_RACK_SHIFT 31
#define CFG_RC_RACK_MASK ((uint32_t)1U << CFG_RC_RACK_SHIFT)
/* allow up to 5 ms for PMI write to finish */
#define PMI_TIMEOUT_MS 5
/* in 2x8 RC mode, one needs to patch up Serdes 3 and 7 for the link to come up */
#define SERDES_PATCH_PIPEMUX_INDEX 0x3
#define SERDES_PATCH_INDEX 0x8
#define DSC_UC_CTRL 0xd00d
#define DSC_UC_CTRL_RDY_CMD (1 << 7)
#define LANE_DBG_RST_CTRL 0xd164
#define UC_A_CLK_CTRL0 0xd200
#define UC_A_RST_CTRL0 0xd201
#define UC_A_AHB_CTRL0 0xd202
#define UC_A_AHB_STAT0 0xd203
#define UC_A_AHB_WADDR_LSW 0xd204
#define UC_A_AHB_WADDR_MSW 0xd205
#define UC_A_AHB_WDATA_LSW 0xd206
#define UC_A_AHB_WDATA_MSW 0xd207
#define UC_A_AHB_RADDR_LSW 0xd208
#define UC_A_AHB_RADDR_MSW 0xd209
#define UC_A_AHB_RDATA_LSW 0xd20a
#define UC_A_AHB_RDATA_MSW 0xd20b
#define UC_VERSION_NUM 0xd230
#define DSC_SM_CTL22 0xd267
#define UC_DBG1 0xd251
#define LOAD_UC_CHECK 0
#define UC_RAM_INIT_TIMEOUT 100
#define UC_RAM_CONTROL 0xd225
#define UC_INIT_TIMEOUT 100
#define SIZE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define SZ_4 4
#define GET_2_BYTES(p, i) ((uint16_t)p[i] | (uint16_t)p[i+1] << 8)
/*
* List of PCIe LCPLL related registers
*
* LCPLL channel 0 provides the Serdes pad clock when running in RC mode
*/
#define PCIE_LCPLL_BASE 0x40000000
#define PCIE_LCPLL_CTRL0_OFFSET 0x00
#define PCIE_LCPLL_RESETB_SHIFT 31
#define PCIE_LCPLL_RESETB_MASK BIT(PCIE_LCPLL_RESETB_SHIFT)
#define PCIE_LCPLL_P_RESETB_SHIFT 30
#define PCIE_LCPLL_P_RESETB_MASK BIT(PCIE_LCPLL_P_RESETB_SHIFT)
#define PCIE_LCPLL_CTRL3_OFFSET 0x0c
#define PCIE_LCPLL_EN_CTRL_SHIFT 16
#define PCIE_LCPLL_CM_ENA 0x1a
#define PCIE_LCPLL_CM_BUF_ENA 0x18
#define PCIE_LCPLL_D2C2_ENA 0x2
#define PCIE_LCPLL_REF_CLK_SHIFT 1
#define PCIE_LCPLL_REF_CLK_MASK BIT(PCIE_LCPLL_REF_CLK_SHIFT)
#define PCIE_LCPLL_CTRL13_OFFSET 0x34
#define PCIE_LCPLL_D2C2_CTRL_SHIFT 16
#define PCIE_LCPLL_D2C2_TERM_DISC 0xe0
#define PCIE_LCPLL_STATUS_OFFSET 0x40
#define PCIE_LCPLL_LOCK_SHIFT 12
#define PCIE_LCPLL_LOCK_MASK BIT(PCIE_LCPLL_LOCK_SHIFT)
#define PCIE_PIPE_MUX_RC_MODE_OVERRIDE_CFG 0x114
#define PCIE_TX_CLKMASTER_CTRL_OVERRIDE_CFG 0x11c
/* wait 500 microseconds for PCIe LCPLL to power up */
#define PCIE_LCPLL_DELAY_US 500
/* allow up to 5 ms for PCIe LCPLL VCO to lock */
#define PCIE_LCPLL_TIMEOUT_MS 5
#define PCIE_PIPE_MUX_CONFIGURATION_CFG 0x4000010c
#define PCIE_PIPEMUX_SHIFT 19
#define PCIE_PIPEMUX_MASK 0xf
/* keep track of PIPEMUX index to use */
static unsigned int pipemux_idx;
/*
* PCIe PIPEMUX lookup table
*
* Each array index represents a PIPEMUX strap setting.
* Each array element is a bitmap in which a set bit means the corresponding
* PCIe core needs to be enabled as RC.
*/
static uint8_t pipemux_table[] = {
/* PIPEMUX = 0, EP 1x16 */
0x00,
/* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
0x80,
/* PIPEMUX = 2, EP 4x4 */
0x00,
/* PIPEMUX = 3, RC 2x8, cores 0, 7 */
0x81,
/* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
0xc3,
/* PIPEMUX = 5, RC 8x2, all 8 cores */
0xff,
/* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
0xcd,
/* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
0xfd,
/* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
0xf0,
/* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
0xc0,
/* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
0x42,
/* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
0x3c,
/* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
0xfc,
/* PIPEMUX = 13, RC 2x4 + RC 1x4 + 2x2, cores 2, 3, 6 */
0x4c,
};
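/*
* Worked example of the bitmap encoding (a sketch based only on the table
* above): for PIPEMUX = 3 the entry is 0x81 = 0b10000001, so bits 0 and 7
* are set and PCIe cores 0 and 7 must be brought up as RC, matching the
* "RC 2x8, cores 0, 7" comment. paxb_sr_core_needs_enable() below simply
* tests the per-core bit: (pipemux_table[idx] >> core_idx) & 0x1.
*/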
/*
* Return 1 if pipemux strap is supported
*/
static int pipemux_strap_is_valid(uint32_t pipemux)
{
if (pipemux < ARRAY_SIZE(pipemux_table))
return 1;
else
return 0;
}
/*
* Read the PCIe PIPEMUX from strap
*/
static uint32_t pipemux_strap_read(void)
{
uint32_t pipemux;
pipemux = mmio_read_32(PCIE_PIPE_MUX_CONFIGURATION_CFG);
pipemux &= PCIE_PIPEMUX_MASK;
if (pipemux == PCIE_PIPEMUX_MASK) {
/* read the PCIe PIPEMUX strap setting */
pipemux = mmio_read_32(CDRU_CHIP_STRAP_DATA_LSW);
pipemux >>= PCIE_PIPEMUX_SHIFT;
pipemux &= PCIE_PIPEMUX_MASK;
}
return pipemux;
}
/*
* Store the PIPEMUX index (set for each boot)
*/
static void pipemux_save_index(unsigned int idx)
{
pipemux_idx = idx;
}
static int paxb_sr_core_needs_enable(unsigned int core_idx)
{
return !!((pipemux_table[pipemux_idx] >> core_idx) & 0x1);
}
static int pipemux_sr_init(void)
{
uint32_t pipemux;
/* read the PCIe PIPEMUX strap setting */
pipemux = pipemux_strap_read();
if (!pipemux_strap_is_valid(pipemux)) {
ERROR("Invalid PCIe PIPEMUX strap %u\n", pipemux);
return -EIO;
}
/* no PCIe RC is needed */
if (!pipemux_table[pipemux]) {
WARN("PIPEMUX indicates no PCIe RC required\n");
return -ENODEV;
}
/* save the PIPEMUX strap */
pipemux_save_index(pipemux);
return 0;
}
/*
* PCIe RC serdes link width
*
* The array is first organized in rows as indexed by the PIPEMUX setting.
* Within each row, eight lane width entries are specified -- one entry
* per PCIe core, from 0 to 7.
*
* Note: The EP lanes/cores are not mapped in this table! EP cores are
* controlled and thus configured by Nitro.
*/
static uint8_t link_width_table[][NUM_OF_SR_PCIE_CORES] = {
/* PIPEMUX = 0, EP 1x16 */
{0, 0, 0, 0, 0, 0, 0, 0},
/* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
{0, 0, 0, 0, 0, 0, 0, 8},
/* PIPEMUX = 2, EP 4x4 */
{0, 0, 0, 0, 0, 0, 0, 0},
/* PIPEMUX = 3, RC 2x8, cores 0, 7 */
{8, 0, 0, 0, 0, 0, 0, 8},
/* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
{4, 4, 0, 0, 0, 0, 4, 4},
/* PIPEMUX = 5, RC 8x2, all 8 cores */
{2, 2, 2, 2, 2, 2, 2, 2},
/* PIPEMUX = 6, RC 3x4 (cores 0, 6, 7), RC 2x2 (cores 2, 3) */
{4, 0, 2, 2, 0, 0, 4, 4},
/* PIPEMUX = 7, RC 1x4 (core 0), RC 6x2 (cores 2, 3, 4, 5, 6, 7) */
{4, 0, 2, 2, 2, 2, 2, 2},
/* PIPEMUX = 8, EP 1x8 + RC 4x2 (cores 4, 5, 6, 7) */
{0, 0, 0, 0, 2, 2, 2, 2},
/* PIPEMUX = 9, EP 1x8 + RC 2x4 (cores 6, 7) */
{0, 0, 0, 0, 0, 0, 4, 4},
/* PIPEMUX = 10, EP 2x4 + RC 2x4 (cores 1, 6) */
{0, 4, 0, 0, 0, 0, 4, 0},
/* PIPEMUX = 11, EP 2x4 + RC 4x2 (cores 2, 3, 4, 5) */
{0, 0, 2, 2, 2, 2, 0, 0},
/* PIPEMUX = 12, EP 1x4 + RC 6x2 (cores 2, 3, 4, 5, 6, 7) */
{0, 0, 2, 2, 2, 2, 2, 2},
/* PIPEMUX = 13, EP 2x4 + RC 1x4 (core 6) + RC 2x2 (cores 2, 3) */
{0, 0, 2, 2, 0, 0, 4, 0}
};
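/*
* Worked example (a sketch based only on the table above): for PIPEMUX = 3
* the row is {8, 0, 0, 0, 0, 0, 0, 8}, i.e. cores 0 and 7 each run a x8
* link. Since each Serdes provides a x2 link, a core's Serdes count is
* link_width / 2, so a x8 core uses 4 Serdes; this is the division done
* later in paxb_serdes_gate_clock() and paxb_sr_phy_init().
*/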
/*
* function for writes to the Serdes registers through the PMI interface
*/
static int paxb_pmi_write(unsigned int core_idx, uint32_t pmi, uint32_t val)
{
uint32_t status;
unsigned int timeout = PMI_TIMEOUT_MS;
paxb_rc_cfg_write(core_idx, CFG_RC_PMI_ADDR, pmi);
val &= ~CFG_RC_RWCMD_MASK;
val |= CFG_RC_WCMD_MASK;
paxb_rc_cfg_write(core_idx, CFG_RC_PMI_WDATA, val);
do {
status = paxb_rc_cfg_read(core_idx, CFG_RC_PMI_WDATA);
/* wait for write command bit to clear */
if ((status & CFG_RC_WCMD_MASK) == 0)
return 0;
} while (--timeout);
return -EIO;
}
/*
* function for reads from the Serdes registers through the PMI interface
*/
static int paxb_pmi_read(unsigned int core_idx, uint32_t pmi, uint32_t *val)
{
uint32_t status;
unsigned int timeout = PMI_TIMEOUT_MS;
paxb_rc_cfg_write(core_idx, CFG_RC_PMI_ADDR, pmi);
paxb_rc_cfg_write(core_idx, CFG_RC_PMI_WDATA, CFG_RC_RCMD_MASK);
do {
status = paxb_rc_cfg_read(core_idx, CFG_RC_PMI_RDATA);
/* wait for read ack bit set */
if ((status & CFG_RC_RACK_MASK)) {
*val = paxb_rc_cfg_read(core_idx, CFG_RC_PMI_RDATA);
return 0;
}
} while (--timeout);
return -EIO;
}
#ifndef BOARD_PCIE_EXT_CLK
/*
* PCIe override clock lookup table
*
* Each array index represents a PIPEMUX strap setting; the value indicates
* whether the PCIe override clock has already been configured by CFW.
*/
static uint8_t pcie_override_clk_table[] = {
/* PIPEMUX = 0, EP 1x16 */
0x0,
/* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
0x1,
/* PIPEMUX = 2, EP 4x4 */
0x0,
/* PIPEMUX = 3, RC 2x8, cores 0, 7 */
0x0,
/* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
0x0,
/* PIPEMUX = 5, RC 8x2, all 8 cores */
0x0,
/* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
0x0,
/* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
0x0,
/* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
0x0,
/* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
0x0,
/* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
0x0,
/* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
0x0,
/* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
0x0,
/* PIPEMUX = 13, RC 2x4 + RC 1x4 + 2x2, cores 2, 3, 6 */
0x0,
};
/*
* Bring up LCPLL channel 0 reference clock for PCIe serdes used in RC mode
*/
static int pcie_lcpll_init(void)
{
uintptr_t reg;
unsigned int timeout = PCIE_LCPLL_TIMEOUT_MS;
uint32_t val;
if (pcie_override_clk_table[pipemux_idx]) {
/*
* Check rc_mode_override again to avoid a halt caused by an
* LCPLL left uninitialized by CFW.
*/
reg = (uintptr_t)(PCIE_LCPLL_BASE +
PCIE_PIPE_MUX_RC_MODE_OVERRIDE_CFG);
val = mmio_read_32(reg);
if (val & 0x1)
return 0;
else
return -ENODEV;
}
/* power on PCIe LCPLL and its LDO */
reg = (uintptr_t)CRMU_AON_CTRL1;
mmio_setbits_32(reg, CRMU_PCIE_LCPLL_PWR_ON_MASK |
CRMU_PCIE_LCPLL_PWRON_LDO_MASK);
udelay(PCIE_LCPLL_DELAY_US);
/* remove isolation */
mmio_clrbits_32(reg, CRMU_PCIE_LCPLL_ISO_IN_MASK);
udelay(PCIE_LCPLL_DELAY_US);
/* disconnect termination */
reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL13_OFFSET);
mmio_setbits_32(reg, PCIE_LCPLL_D2C2_TERM_DISC <<
PCIE_LCPLL_D2C2_CTRL_SHIFT);
/* enable CML buf1/2 and D2C2 */
reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL3_OFFSET);
mmio_setbits_32(reg, PCIE_LCPLL_CM_ENA << PCIE_LCPLL_EN_CTRL_SHIFT);
/* select diff clock mux out as ref clock */
mmio_clrbits_32(reg, PCIE_LCPLL_REF_CLK_MASK);
/* delay for 500 microseconds per ASIC spec for PCIe LCPLL */
udelay(PCIE_LCPLL_DELAY_US);
/* now bring PCIe LCPLL out of reset */
reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL0_OFFSET);
mmio_setbits_32(reg, PCIE_LCPLL_RESETB_MASK);
/* wait for PLL to lock */
reg = (uintptr_t)(PCIE_LCPLL_BASE + PCIE_LCPLL_STATUS_OFFSET);
do {
val = mmio_read_32(reg);
if ((val & PCIE_LCPLL_LOCK_MASK) == PCIE_LCPLL_LOCK_MASK) {
/* now bring the post divider out of reset */
reg = (uintptr_t)(PCIE_LCPLL_BASE +
PCIE_LCPLL_CTRL0_OFFSET);
mmio_setbits_32(reg, PCIE_LCPLL_P_RESETB_MASK);
VERBOSE("PCIe LCPLL locked\n");
return 0;
}
mdelay(1);
} while (--timeout);
ERROR("PCIe LCPLL failed to lock\n");
return -EIO;
}
#else
/*
* Bring up EXT CLK reference clock for PCIe serdes used in RC mode
* XTAL_BYPASS (3 << 0)
* INTR_LC_REF (5 << 0)
* PD_CML_LC_REF_OUT (1 << 4)
* PD_CML_REF_CH_OUT (1 << 8)
* CLK_MASTER_SEL (1 << 11)
* CLK_MASTER_CTRL_A (1 << 12)
* CLK_MASTER_CTRL_B (2 << 14)
*/
static const uint16_t pcie_ext_clk[][NUM_OF_PCIE_SERDES] = {
/* PIPEMUX = 0, EP 1x16 */
{0},
/* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
{0},
/* PIPEMUX = 2, EP 4x4 */
{0},
/* PIPEMUX = 3, RC 2x8, cores 0, 7 */
{0x8803, 0x9115, 0x9115, 0x1115, 0x8803, 0x9115, 0x9115, 0x1115},
/* PIPEMUX = 4, RC 4x4, cores 0, 1, 6, 7 */
{0x8803, 0x1115, 0x8915, 0x1115, 0x8803, 0x1115, 0x8915, 0x1115,},
/* PIPEMUX = 5, RC 8x2, all 8 cores */
{0x0803, 0x0915, 0x0915, 0x0915, 0x0803, 0x0915, 0x0915, 0x0915,},
/* PIPEMUX = 6, RC 3x4 + 2x2, cores 0, 2, 3, 6, 7 */
{0},
/* PIPEMUX = 7, RC 1x4 + 6x2, cores 0, 2, 3, 4, 5, 6, 7 */
{0},
/* PIPEMUX = 8, EP 1x8 + RC 4x2, cores 4, 5, 6, 7 */
{0},
/* PIPEMUX = 9, EP 1x8 + RC 2x4, cores 6, 7 */
{0},
/* PIPEMUX = 10, EP 2x4 + RC 2x4, cores 1, 6 */
{0},
/* PIPEMUX = 11, EP 2x4 + RC 4x2, cores 2, 3, 4, 5 */
{0},
/* PIPEMUX = 12, EP 1x4 + RC 6x2, cores 2, 3, 4, 5, 6, 7 */
{0},
/* PIPEMUX = 13, RC 2x4 + RC 1x4 + 2x2, cores 2, 3, 6 */
{0},
};
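/*
* Worked decode of two entries, using only the bit definitions listed in
* the comment above (a sketch, not taken from a datasheet):
*	0x8803 = CLK_MASTER_CTRL_B (2 << 14) | CLK_MASTER_SEL (1 << 11) |
*		 XTAL_BYPASS (3 << 0)
*	0x9115 = CLK_MASTER_CTRL_B (2 << 14) | CLK_MASTER_CTRL_A (1 << 12) |
*		 PD_CML_REF_CH_OUT (1 << 8) | PD_CML_LC_REF_OUT (1 << 4) |
*		 INTR_LC_REF (5 << 0)
*/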
static void pcie_ext_clk_init(void)
{
unsigned int serdes;
uint32_t val;
for (serdes = 0; serdes < NUM_OF_PCIE_SERDES; serdes++) {
val = pcie_ext_clk[pipemux_idx][serdes];
if (!val)
return;
mmio_write_32(PCIE_CORE_RESERVED_CFG +
serdes * PCIE_CORE_PWR_OFFSET, val);
}
/* disable CML buf1/2 and enable D2C2 */
mmio_clrsetbits_32((PCIE_LCPLL_BASE + PCIE_LCPLL_CTRL3_OFFSET),
PCIE_LCPLL_CM_BUF_ENA << PCIE_LCPLL_EN_CTRL_SHIFT,
PCIE_LCPLL_D2C2_ENA << PCIE_LCPLL_EN_CTRL_SHIFT);
mmio_write_32(PCIE_LCPLL_BASE + PCIE_TX_CLKMASTER_CTRL_OVERRIDE_CFG, 1);
INFO("Overriding Clocking - using REF clock from PAD...\n");
}
#endif
static int load_uc(unsigned int core_idx)
{
return 0;
}
static int paxb_serdes_gate_clock(unsigned int core_idx, int gate_clk)
{
unsigned int link_width, serdes, nr_serdes;
uintptr_t pmi_base;
unsigned int rdata;
uint32_t core_offset = core_idx * PCIE_CORE_PWR_OFFSET;
link_width = paxb->get_link_width(core_idx);
if (!link_width) {
ERROR("Unsupported PIPEMUX\n");
return -EOPNOTSUPP;
}
nr_serdes = link_width / 2;
pmi_base = (uintptr_t)(PCIE_CORE_PMI_CFG_BASE + core_offset);
for (serdes = 0; serdes < nr_serdes; serdes++) {
mmio_write_32(pmi_base, serdes);
paxb_pmi_read(core_idx, PMI_ADDR_LANE0(PMI_PLL_CTRL_4), &rdata);
if (!gate_clk)
rdata |= PMI_SERDES_CLK_ENABLE;
else
rdata &= ~PMI_SERDES_CLK_ENABLE;
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(PMI_PLL_CTRL_4), rdata);
}
return 0;
}
static int paxb_gen3_serdes_init(unsigned int core_idx, uint32_t nSerdes)
{
uint32_t rdata;
int serdes;
uintptr_t pmi_base;
unsigned int timeout;
unsigned int reg_d230, reg_d267;
pmi_base = (uintptr_t)(PCIE_CORE_PMI_CFG_BASE +
(core_idx * PCIE_CORE_PWR_OFFSET));
for (serdes = 0; serdes < nSerdes; serdes++) {
/* select the PMI interface */
mmio_write_32(pmi_base, serdes);
/* Clock enable */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_CLK_CTRL0),
0x3);
/* Release reset of master */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_RST_CTRL0),
0x1);
/* clearing PRAM memory */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_AHB_CTRL0),
0x100);
timeout = UC_RAM_INIT_TIMEOUT;
do {
paxb_pmi_read(core_idx,
PMI_ADDR_LANE0(UC_A_AHB_STAT0),
&rdata);
} while ((rdata & 0x01) == 0 && timeout--);
if (!timeout)
return -EIO;
timeout = UC_RAM_INIT_TIMEOUT;
do {
paxb_pmi_read(core_idx,
PMI_ADDR_LANE1(UC_A_AHB_STAT0),
&rdata);
} while ((rdata & 0x01) == 0 && timeout--);
if (!timeout)
return -EIO;
/* clearing PRAM memory */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_AHB_CTRL0),
0);
/* to identify 2 lane serdes */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_DBG1), 0x1);
/* De-Assert Pram & master resets */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_RST_CTRL0),
0x9);
if (load_uc(core_idx))
return -EIO;
/* UC ready for command */
paxb_pmi_read(core_idx, PMI_ADDR_LANE0(DSC_UC_CTRL),
&rdata);
rdata |= DSC_UC_CTRL_RDY_CMD;
paxb_pmi_write(core_idx, PMI_ADDR_LANE0(DSC_UC_CTRL),
rdata);
paxb_pmi_read(core_idx, PMI_ADDR_LANE1(DSC_UC_CTRL),
&rdata);
rdata |= DSC_UC_CTRL_RDY_CMD;
paxb_pmi_write(core_idx, PMI_ADDR_LANE1(DSC_UC_CTRL),
rdata);
/* Lane reset */
paxb_pmi_write(core_idx,
PMI_ADDR_BCAST(LANE_DBG_RST_CTRL), 0x3);
/* De-Assert Core and Master resets */
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(UC_A_RST_CTRL0),
0x3);
timeout = UC_INIT_TIMEOUT;
while (timeout--) {
paxb_pmi_read(core_idx,
PMI_ADDR_LANE0(UC_VERSION_NUM),
&reg_d230);
paxb_pmi_read(core_idx,
PMI_ADDR_LANE0(DSC_SM_CTL22),
&reg_d267);
if (((reg_d230 & 0xffff) != 0) &&
((reg_d267 & 0xc000) == 0xc000)) {
break;
}
mdelay(1);
}
if (!timeout)
return -EIO;
timeout = UC_INIT_TIMEOUT;
while (timeout--) {
paxb_pmi_read(core_idx,
PMI_ADDR_LANE1(UC_VERSION_NUM),
&reg_d230);
paxb_pmi_read(core_idx,
PMI_ADDR_LANE1(DSC_SM_CTL22),
&reg_d267);
if (((reg_d230 & 0xffff) != 0) &&
((reg_d267 & 0xc000) == 0xc000)) {
break;
}
mdelay(1);
}
if (!timeout)
return -EIO;
}
return 0;
}
static int pcie_serdes_requires_patch(unsigned int serdes_idx)
{
if (pipemux_idx != SERDES_PATCH_PIPEMUX_INDEX)
return 0;
return !!((SERDES_PATCH_INDEX >> serdes_idx) & 0x1);
}
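/*
* Worked example (a sketch from the constants above): the patch is only
* applied when pipemux_idx == SERDES_PATCH_PIPEMUX_INDEX (0x3, the RC 2x8
* configuration), and SERDES_PATCH_INDEX == 0x8 has only bit 3 set, so
* within each x8 core only Serdes index 3 needs the patch, i.e. overall
* Serdes 3 and 7, as noted next to SERDES_PATCH_PIPEMUX_INDEX.
*/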
static void pcie_tx_coeff_p7(unsigned int core_idx)
{
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11b), 0x00aa);
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11c), 0x1155);
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11d), 0x2449);
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd11e), 0x000f);
paxb_pmi_write(core_idx, PMI_ADDR_BCAST(0xd307), 0x0001);
}
static unsigned int paxb_sr_get_rc_link_width(unsigned int core_idx)
{
return link_width_table[pipemux_idx][core_idx];
}
static uint32_t paxb_sr_get_rc_link_speed(void)
{
return GEN3_LINK_SPEED;
}
static int paxb_serdes_init(unsigned int core_idx, unsigned int nr_serdes)
{
uint32_t core_offset = core_idx * PCIE_CORE_PWR_OFFSET;
unsigned int serdes;
uintptr_t pmi_base;
int ret;
/*
* Each serdes has a x2 link width
*
* Use PAXB to patch the serdes for proper RX termination through the
* PMI interface
*/
pmi_base = (uintptr_t)(PCIE_CORE_PMI_CFG_BASE + core_offset);
for (serdes = 0; serdes < nr_serdes; serdes++) {
/* select the PMI interface */
mmio_write_32(pmi_base, serdes);
/* patch Serdes for RX termination */
ret = paxb_pmi_write(core_idx, PMI_RX_TERM_SEQ,
PMI_RX_TERM_VAL);
if (ret)
goto err_pmi;
ret = paxb_pmi_write(core_idx, MERLIN16_PCIE_BLK2_PWRMGMT_7,
MERLIN16_PCIE_BLK2_PWRMGMT_7_VAL);
if (ret)
goto err_pmi;
ret = paxb_pmi_write(core_idx, MERLIN16_PCIE_BLK2_PWRMGMT_8,
MERLIN16_PCIE_BLK2_PWRMGMT_8_VAL);
if (ret)
goto err_pmi;
ret = paxb_pmi_write(core_idx, MERLIN16_AMS_TX_CTRL_5,
MERLIN16_AMS_TX_CTRL_5_VAL);
if (ret)
goto err_pmi;
pcie_tx_coeff_p7(core_idx);
if (pcie_serdes_requires_patch(serdes)) {
if (((core_idx == 0) || (core_idx == 7))) {
ret = paxb_pmi_write(core_idx,
PMI_X8_CORE0_7_PATCH_SEQ,
PMI_X8_CORE0_7_PATCH_VAL);
if (ret)
goto err_pmi;
}
}
}
return 0;
err_pmi:
ERROR("PCIe PMI write failed\n");
return ret;
}
static int paxb_sr_phy_init(void)
{
int ret;
unsigned int core_idx;
#ifndef BOARD_PCIE_EXT_CLK
ret = pcie_lcpll_init();
if (ret)
return ret;
#else
pcie_ext_clk_init();
#endif
for (core_idx = 0; core_idx < paxb->num_cores; core_idx++) {
if (!pcie_core_needs_enable(core_idx))
continue;
unsigned int link_width;
paxb_serdes_gate_clock(core_idx, 0);
link_width = paxb->get_link_width(core_idx);
if (!link_width) {
ERROR("Unsupported PIPEMUX\n");
return -EOPNOTSUPP;
}
ret = paxb_serdes_init(core_idx, link_width / 2);
if (ret) {
ERROR("PCIe serdes initialization failed for core %u\n",
core_idx);
return ret;
}
ret = paxb_gen3_serdes_init(core_idx, link_width / 2);
if (ret) {
ERROR("PCIe GEN3 serdes initialization failed\n");
return ret;
}
}
return 0;
}
const paxb_cfg sr_paxb_cfg = {
.type = PAXB_SR,
.device_id = SR_B0_DEVICE_ID,
.pipemux_init = pipemux_sr_init,
.phy_init = paxb_sr_phy_init,
.core_needs_enable = paxb_sr_core_needs_enable,
.num_cores = NUM_OF_SR_PCIE_CORES,
.get_link_width = paxb_sr_get_rc_link_width,
.get_link_speed = paxb_sr_get_rc_link_speed,
};
const paxb_cfg *paxb_get_sr_config(void)
{
return &sr_paxb_cfg;
}
/*
* Copyright (c) 2019-2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdint.h>
#include <plat_brcm.h>
#include <platform_def.h>
/*
* On Stingray, the system power level is the highest power level.
* The first entry in the power domain descriptor specifies the
* number of system power domains i.e. 1.
*/
#define SR_PWR_DOMAINS_AT_MAX_PWR_LVL 1
/*
* The Stingray power domain tree descriptor. The cluster power domains
* are arranged so that when the PSCI generic code creates the power
* domain tree, the indices of the CPU power domain nodes it allocates
* match the linear indices returned by plat_core_pos_by_mpidr()
* i.e. CLUSTER0 CPUs are allocated indices from 0 to 1 and the higher
* indices for other Cluster CPUs.
*/
const unsigned char sr_power_domain_tree_desc[] = {
/* No of root nodes */
SR_PWR_DOMAINS_AT_MAX_PWR_LVL,
/* No of children for the root node */
BRCM_CLUSTER_COUNT,
/* No of children for the first cluster node */
PLATFORM_CLUSTER0_CORE_COUNT,
/* No of children for the second cluster node */
PLATFORM_CLUSTER1_CORE_COUNT,
/* No of children for the third cluster node */
PLATFORM_CLUSTER2_CORE_COUNT,
/* No of children for the fourth cluster node */
PLATFORM_CLUSTER3_CORE_COUNT,
};
/*******************************************************************************
* This function returns the Stingray topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
return sr_power_domain_tree_desc;
}
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
return plat_brcm_calc_core_pos(mpidr);
}
/*
* Copyright (c) 2016 - 2020, Broadcom
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/debug.h>
#include <drivers/arm/tzc400.h>
#include <lib/mmio.h>
#include <cmn_sec.h>
#include <platform_def.h>
/*
* Trust Zone controllers
*/
#define TZC400_FS_SRAM_ROOT 0x66d84000
/*
* TZPC Master configure registers
*/
/* TZPC_TZPCDECPROT0set */
#define TZPC0_MASTER_NS_BASE 0x68b40804
#define TZPC0_SATA3_BIT 5
#define TZPC0_SATA2_BIT 4
#define TZPC0_SATA1_BIT 3
#define TZPC0_SATA0_BIT 2
#define TZPC0_USB3H1_BIT 1
#define TZPC0_USB3H0_BIT 0
#define TZPC0_MASTER_SEC_DEFAULT 0
/* TZPC_TZPCDECPROT1set */
#define TZPC1_MASTER_NS_BASE 0x68b40810
#define TZPC1_SDIO1_BIT 6
#define TZPC1_SDIO0_BIT 5
#define TZPC1_AUDIO0_BIT 4
#define TZPC1_USB2D_BIT 3
#define TZPC1_USB2H1_BIT 2
#define TZPC1_USB2H0_BIT 1
#define TZPC1_AMAC0_BIT 0
#define TZPC1_MASTER_SEC_DEFAULT 0
struct tz_sec_desc {
uintptr_t addr;
uint32_t val;
};
static const struct tz_sec_desc tz_master_defaults[] = {
{ TZPC0_MASTER_NS_BASE, TZPC0_MASTER_SEC_DEFAULT },
{ TZPC1_MASTER_NS_BASE, TZPC1_MASTER_SEC_DEFAULT }
};
/*
* Initialize the TrustZone Controller for SRAM partitioning.
*/
static void bcm_tzc_setup(void)
{
VERBOSE("Configuring SRAM TrustZone Controller\n");
/* Init the TZASC controller */
tzc400_init(TZC400_FS_SRAM_ROOT);
/*
* Close the entire SRAM space.
* Region 0 covers the entire SRAM space;
* no NS device can access it.
*/
tzc400_configure_region0(TZC_REGION_S_RDWR, 0);
/* Raise an exception if an NS device tries to access secure memory */
tzc400_set_action(TZC_ACTION_ERR);
}
/*
* Configure TZ Master as NS_MASTER or SECURE_MASTER
* To set a Master to non-secure, use *_SET registers
* To set a Master to secure, use *_CLR registers (set + 0x4 address)
*/
static void tz_master_set(uint32_t base, uint32_t value, uint32_t ns)
{
if (ns == SECURE_MASTER) {
mmio_write_32(base + 4, value);
} else {
mmio_write_32(base, value);
}
}
/*
* Initialize the secure environment for sdio.
*/
void plat_tz_sdio_ns_master_set(uint32_t ns)
{
tz_master_set(TZPC1_MASTER_NS_BASE,
1 << TZPC1_SDIO0_BIT,
ns);
}
/*
* Initialize the secure environment for usb.
*/
void plat_tz_usb_ns_master_set(uint32_t ns)
{
tz_master_set(TZPC1_MASTER_NS_BASE,
1 << TZPC1_USB2H0_BIT,
ns);
}
/*
* Set masters to default configuration.
*
* DMA security settings are programmed into the PL-330 controller and
* are not set by iProc TZPC registers.
* DMA always comes up as secure master (*NS bit is 0).
*
* Because the default reset values of TZPC are 0 (== Secure),
* ARM Verilog code makes all masters, including PCIe, come up as
* secure.
* However, SOTP has a bit called SOTP_ALLMASTER_NS that overrides
* TZPC and makes all masters non-secure for AB devices.
*
* Hence we first set all the TZPC bits to program all masters,
* including PCIe, as non-secure, then set the CLEAR_ALLMASTER_NS bit
* so that the SOTP_ALLMASTER_NS cannot override TZPC.
* Now the security setting for each master comes from TZPC
* (which makes all masters other than DMA non-secure).
*
* During boot, all masters other than DMA Ctrlr + list
* are non-secure in an AB Prod/AB Dev/AB Pending device.
*
*/
void plat_tz_master_default_cfg(void)
{
int i;
/* Configure default secure and non-secure TZ Masters */
for (i = 0; i < ARRAY_SIZE(tz_master_defaults); i++) {
tz_master_set(tz_master_defaults[i].addr,
tz_master_defaults[i].val,
SECURE_MASTER);
tz_master_set(tz_master_defaults[i].addr,
~tz_master_defaults[i].val,
NS_MASTER);
}
/* Clear all master NS */
mmio_setbits_32(SOTP_CHIP_CTRL,
1 << SOTP_CLEAR_SYSCTRL_ALL_MASTER_NS);
/* Initialize TZ controller and Set SRAM to secure */
bcm_tzc_setup();
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/bl_common.h>
#include <common/desc_image_load.h>
#include <plat/common/platform.h>
#include <platform_def.h>
/*******************************************************************************
* The following descriptor provides BL image/entry-point information that is
* used by BL2 to load the images; a subset of this information is also
* passed to the next BL image. The image loading sequence is managed by
* populating the images in required loading order. The image execution
* sequence is managed by populating the `next_handoff_image_id` with
* the next executable image id.
******************************************************************************/
static bl_mem_params_node_t bl2_mem_params_descs[] = {
#ifdef SCP_BL2_BASE
/* Fill SCP_BL2 related information if it exists */
{
.image_id = SCP_BL2_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
VERSION_2, image_info_t, 0),
.image_info.image_base = SCP_BL2_BASE,
.image_info.image_max_size = PLAT_MAX_SCP_BL2_SIZE,
.next_handoff_image_id = INVALID_IMAGE_ID,
},
#endif /* SCP_BL2_BASE */
/* Fill BL31 related information */
{
.image_id = BL31_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_2, entry_point_info_t,
SECURE | EXECUTABLE | EP_FIRST_EXE),
.ep_info.pc = BL31_BASE,
.ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS),
#if DEBUG
.ep_info.args.arg3 = BRCM_BL31_PLAT_PARAM_VAL,
#endif
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
.image_info.image_base = BL31_BASE,
.image_info.image_max_size = BL31_LIMIT - BL31_BASE,
#ifdef BL32_BASE
.next_handoff_image_id = BL32_IMAGE_ID,
#else
.next_handoff_image_id = BL33_IMAGE_ID,
#endif
},
#ifdef BL32_BASE
/* Fill BL32 related information */
{
.image_id = BL32_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
.ep_info.pc = BL32_BASE,
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, 0),
.image_info.image_base = BL32_BASE,
.image_info.image_max_size = BL32_LIMIT - BL32_BASE,
.next_handoff_image_id = BL33_IMAGE_ID,
},
#endif /* BL32_BASE */
/* Fill BL33 related information */
{
.image_id = BL33_IMAGE_ID,
SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
#ifdef PRELOADED_BL33_BASE
.ep_info.pc = PRELOADED_BL33_BASE,
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
#else
.ep_info.pc = PLAT_BRCM_NS_IMAGE_OFFSET,
SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
VERSION_2, image_info_t, 0),
.image_info.image_base = PLAT_BRCM_NS_IMAGE_OFFSET,
.image_info.image_max_size = BRCM_DRAM1_SIZE,
#endif /* PRELOADED_BL33_BASE */
.next_handoff_image_id = INVALID_IMAGE_ID,
}
};
REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <string.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/arm/sp804_delay_timer.h>
#include <lib/mmio.h>
#include <bcm_console.h>
#include <platform_def.h>
#include <plat/brcm/common/plat_brcm.h>
/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
/* Weak definitions may be overridden in specific BRCM platform */
#pragma weak plat_bcm_bl2_platform_setup
#pragma weak plat_bcm_bl2_plat_arch_setup
#pragma weak plat_bcm_security_setup
#pragma weak plat_bcm_bl2_plat_handle_scp_bl2
#pragma weak plat_bcm_bl2_early_platform_setup
void plat_bcm_bl2_early_platform_setup(void)
{
}
void plat_bcm_bl2_platform_setup(void)
{
}
void plat_bcm_bl2_plat_arch_setup(void)
{
}
void plat_bcm_security_setup(void)
{
}
void bcm_bl2_early_platform_setup(uintptr_t tb_fw_config,
meminfo_t *mem_layout)
{
/* Initialize the console to provide early debug support */
bcm_console_boot_init();
/* Setup the BL2 memory layout */
bl2_tzram_layout = *mem_layout;
/* Initialise the IO layer and register platform IO devices */
plat_brcm_io_setup();
/* Log HW reset event */
INFO("RESET: 0x%x\n",
mmio_read_32(CRMU_RESET_EVENT_LOG));
}
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
/* SoC specific setup */
plat_bcm_bl2_early_platform_setup();
/* Initialize delay timer driver using SP804 dual timer 0 */
sp804_timer_init(SP804_TIMER0_BASE,
SP804_TIMER0_CLKMULT, SP804_TIMER0_CLKDIV);
/* BRCM platforms generic setup */
bcm_bl2_early_platform_setup((uintptr_t)arg0, (meminfo_t *)arg1);
}
/*
* Perform Broadcom platform setup.
*/
void bcm_bl2_platform_setup(void)
{
/* Initialize the secure environment */
plat_bcm_security_setup();
}
void bl2_platform_setup(void)
{
bcm_bl2_platform_setup();
plat_bcm_bl2_platform_setup();
}
/*******************************************************************************
* Perform the very early platform specific architectural setup here. At the
* moment this only initializes the MMU in a quick and dirty way.
******************************************************************************/
void bcm_bl2_plat_arch_setup(void)
{
#ifndef MMU_DISABLED
if (!(read_sctlr_el1() & SCTLR_M_BIT)) {
const mmap_region_t bl_regions[] = {
MAP_REGION_FLAT(bl2_tzram_layout.total_base,
bl2_tzram_layout.total_size,
MT_MEMORY | MT_RW | MT_SECURE),
MAP_REGION_FLAT(BL_CODE_BASE,
BL_CODE_END - BL_CODE_BASE,
MT_CODE | MT_SECURE),
MAP_REGION_FLAT(BL_RO_DATA_BASE,
BL_RO_DATA_END - BL_RO_DATA_BASE,
MT_RO_DATA | MT_SECURE),
#if USE_COHERENT_MEM
MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
BL_COHERENT_RAM_END -
BL_COHERENT_RAM_BASE,
MT_DEVICE | MT_RW | MT_SECURE),
#endif
{0}
};
setup_page_tables(bl_regions, plat_brcm_get_mmap());
enable_mmu_el1(0);
}
#endif
}
void bl2_plat_arch_setup(void)
{
#ifdef ENA_MMU_BEFORE_DDR_INIT
/*
* If the MMU is enabled before DDR init, memory tests are
* affected because read/write transactions may be served from
* the caches. Do not set this flag when running memory tests.
*/
bcm_bl2_plat_arch_setup();
plat_bcm_bl2_plat_arch_setup();
#else
plat_bcm_bl2_plat_arch_setup();
bcm_bl2_plat_arch_setup();
#endif
}
int bcm_bl2_handle_post_image_load(unsigned int image_id)
{
int err = 0;
bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
assert(bl_mem_params);
switch (image_id) {
case BL32_IMAGE_ID:
bl_mem_params->ep_info.spsr = brcm_get_spsr_for_bl32_entry();
break;
case BL33_IMAGE_ID:
/* BL33 expects to receive the primary CPU MPID (through r0) */
bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
bl_mem_params->ep_info.spsr = brcm_get_spsr_for_bl33_entry();
break;
#ifdef SCP_BL2_BASE
case SCP_BL2_IMAGE_ID:
/* The subsequent handling of SCP_BL2 is platform specific */
err = bcm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
if (err)
WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
break;
#endif
default:
/* Do nothing in default case */
break;
}
return err;
}
/*******************************************************************************
* This function can be used by the platforms to update/use image
* information for given `image_id`.
******************************************************************************/
int bcm_bl2_plat_handle_post_image_load(unsigned int image_id)
{
return bcm_bl2_handle_post_image_load(image_id);
}
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
return bcm_bl2_plat_handle_post_image_load(image_id);
}
#ifdef SCP_BL2_BASE
int plat_bcm_bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
return 0;
}
int bcm_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
return plat_bcm_bl2_plat_handle_scp_bl2(scp_bl2_image_info);
}
#endif
/*
* Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/arm/sp804_delay_timer.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <bcm_console.h>
#include <plat_brcm.h>
#include <platform_def.h>
#ifdef BL33_SHARED_DDR_BASE
struct bl33_info *bl33_info = (struct bl33_info *)BL33_SHARED_DDR_BASE;
#endif
/*
* Placeholder variables for copying the arguments that have been passed to
* BL31 from BL2.
*/
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
/* Weak definitions may be overridden in specific BRCM platform */
#pragma weak plat_bcm_bl31_early_platform_setup
#pragma weak plat_brcm_pwrc_setup
#pragma weak plat_brcm_security_setup
void plat_brcm_security_setup(void)
{
}
void plat_brcm_pwrc_setup(void)
{
}
void plat_bcm_bl31_early_platform_setup(void *from_bl2,
bl_params_t *plat_params_from_bl2)
{
}
/*******************************************************************************
* Return a pointer to the 'entry_point_info' structure of the next image for
* the security state specified. BL33 corresponds to the non-secure image type
* while BL32 corresponds to the secure image type. A NULL pointer is returned
* if the image does not exist.
******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
entry_point_info_t *next_image_info;
assert(sec_state_is_valid(type));
next_image_info = (type == NON_SECURE)
? &bl33_image_ep_info : &bl32_image_ep_info;
/*
* None of the images on the ARM development platforms can have 0x0
* as the entrypoint
*/
if (next_image_info->pc)
return next_image_info;
else
return NULL;
}
/*******************************************************************************
* Perform any BL31 early platform setup common to ARM standard platforms.
* Here is an opportunity to copy parameters passed by the calling EL (S-EL1
* in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
* done before the MMU is initialized so that the memory layout can be used
* while creating page tables. BL2 has flushed this information to memory, so
* we are guaranteed to pick up good data.
******************************************************************************/
void __init brcm_bl31_early_platform_setup(void *from_bl2,
uintptr_t soc_fw_config,
uintptr_t hw_config,
void *plat_params_from_bl2)
{
/* Initialize the console to provide early debug support */
bcm_console_boot_init();
/* Initialize delay timer driver using SP804 dual timer 0 */
sp804_timer_init(SP804_TIMER0_BASE,
SP804_TIMER0_CLKMULT, SP804_TIMER0_CLKDIV);
#if RESET_TO_BL31
/* There are no parameters from BL2 if BL31 is a reset vector */
assert(from_bl2 == NULL);
assert(plat_params_from_bl2 == NULL);
# ifdef BL32_BASE
/* Populate entry point information for BL32 */
SET_PARAM_HEAD(&bl32_image_ep_info,
PARAM_EP,
VERSION_1,
0);
SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
bl32_image_ep_info.pc = BL32_BASE;
bl32_image_ep_info.spsr = brcm_get_spsr_for_bl32_entry();
# endif /* BL32_BASE */
/* Populate entry point information for BL33 */
SET_PARAM_HEAD(&bl33_image_ep_info,
PARAM_EP,
VERSION_1,
0);
/*
* Tell BL31 where the non-trusted software image
* is located and the entry state information
*/
bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
bl33_image_ep_info.spsr = brcm_get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
# if ARM_LINUX_KERNEL_AS_BL33
/*
* According to the file ``Documentation/arm64/booting.txt`` of the
* Linux kernel tree, Linux expects the physical address of the device
* tree blob (DTB) in x0, while x1-x3 are reserved for future use and
* must be 0.
*/
bl33_image_ep_info.args.arg0 = (u_register_t)PRELOADED_DTB_BASE;
bl33_image_ep_info.args.arg1 = 0U;
bl33_image_ep_info.args.arg2 = 0U;
bl33_image_ep_info.args.arg3 = 0U;
# endif
#else /* RESET_TO_BL31 */
/*
* In debug builds, we pass a special value in 'plat_params_from_bl2'
* to verify platform parameters from BL2 to BL31.
* In release builds, it's not used.
*/
assert(((unsigned long long)plat_params_from_bl2) ==
BRCM_BL31_PLAT_PARAM_VAL);
/*
* The parameters passed from BL2 must not be NULL.
*/
bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
assert(params_from_bl2 != NULL);
assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
assert(params_from_bl2->h.version >= VERSION_2);
bl_params_node_t *bl_params = params_from_bl2->head;
/*
* Copy BL33 and BL32 (if present), entry point information.
* They are stored in Secure RAM, in BL2's address space.
*/
while (bl_params != NULL) {
if (bl_params->image_id == BL32_IMAGE_ID &&
bl_params->image_info->h.attr != IMAGE_ATTRIB_SKIP_LOADING)
bl32_image_ep_info = *bl_params->ep_info;
if (bl_params->image_id == BL33_IMAGE_ID)
bl33_image_ep_info = *bl_params->ep_info;
bl_params = bl_params->next_params_info;
}
if (bl33_image_ep_info.pc == 0U)
panic();
#endif /* RESET_TO_BL31 */
#ifdef BL33_SHARED_DDR_BASE
/* Pass information to BL33 through x0 */
bl33_image_ep_info.args.arg0 = (u_register_t)BL33_SHARED_DDR_BASE;
bl33_image_ep_info.args.arg1 = 0ULL;
bl33_image_ep_info.args.arg2 = 0ULL;
bl33_image_ep_info.args.arg3 = 0ULL;
#endif
}
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
#ifdef BL31_LOG_LEVEL
SET_LOG_LEVEL(BL31_LOG_LEVEL);
#endif
brcm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
plat_bcm_bl31_early_platform_setup((void *)arg0, (void *)arg3);
#ifdef DRIVER_CC_ENABLE
/*
* Initialize Interconnect for this cluster during cold boot.
* No need for locks as no other CPU is active.
*/
plat_brcm_interconnect_init();
/*
* Enable Interconnect coherency for the primary CPU's cluster.
* Earlier bootloader stages might already do this (e.g. Trusted
* Firmware's BL1 does it) but we can't assume so. There is no harm in
* executing this code twice anyway.
* Platform specific PSCI code will enable coherency for other
* clusters.
*/
plat_brcm_interconnect_enter_coherency();
#endif
}
/*******************************************************************************
* Perform any BL31 platform setup common to ARM standard platforms
******************************************************************************/
void brcm_bl31_platform_setup(void)
{
/* Initialize the GIC driver, cpu and distributor interfaces */
plat_brcm_gic_driver_init();
plat_brcm_gic_init();
/* Initialize power controller before setting up topology */
plat_brcm_pwrc_setup();
}
/*******************************************************************************
* Perform any BL31 platform runtime setup prior to BL31 exit, common to ARM
* standard platforms.
******************************************************************************/
void brcm_bl31_plat_runtime_setup(void)
{
console_switch_state(CONSOLE_FLAG_RUNTIME);
/* Initialize the runtime console */
bcm_console_runtime_init();
}
void bl31_platform_setup(void)
{
brcm_bl31_platform_setup();
/* Initialize the secure environment */
plat_brcm_security_setup();
}
void bl31_plat_runtime_setup(void)
{
brcm_bl31_plat_runtime_setup();
}
/*******************************************************************************
* Perform the very early platform specific architectural setup shared between
* ARM standard platforms. This only does basic initialization. Later
* architectural setup (bl31_arch_setup()) does not do anything platform
* specific.
******************************************************************************/
void __init brcm_bl31_plat_arch_setup(void)
{
#ifndef MMU_DISABLED
const mmap_region_t bl_regions[] = {
MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
MT_MEMORY | MT_RW | MT_SECURE),
MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
MT_CODE | MT_SECURE),
MAP_REGION_FLAT(BL_RO_DATA_BASE,
BL_RO_DATA_END - BL_RO_DATA_BASE,
MT_RO_DATA | MT_SECURE),
#if USE_COHERENT_MEM
MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
MT_DEVICE | MT_RW | MT_SECURE),
#endif
{0}
};
setup_page_tables(bl_regions, plat_brcm_get_mmap());
enable_mmu_el3(0);
#endif
}
void __init bl31_plat_arch_setup(void)
{
brcm_bl31_plat_arch_setup();
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <drivers/arm/ccn.h>
#include <platform_def.h>
static const unsigned char master_to_rn_id_map[] = {
PLAT_BRCM_CLUSTER_TO_CCN_ID_MAP
};
static const ccn_desc_t bcm_ccn_desc = {
.periphbase = PLAT_BRCM_CCN_BASE,
.num_masters = ARRAY_SIZE(master_to_rn_id_map),
.master_to_rn_id_map = master_to_rn_id_map
};
void plat_brcm_interconnect_init(void)
{
ccn_init(&bcm_ccn_desc);
}
void plat_brcm_interconnect_enter_coherency(void)
{
ccn_enter_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
}
void plat_brcm_interconnect_exit_coherency(void)
{
ccn_exit_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <plat/common/platform.h>
#include <plat_brcm.h>
#include <platform_def.h>
/* Weak definitions may be overridden in specific BRCM platform */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_brcm_get_mmap
uintptr_t plat_get_ns_image_entrypoint(void)
{
#ifdef PRELOADED_BL33_BASE
return PRELOADED_BL33_BASE;
#else
return PLAT_BRCM_NS_IMAGE_OFFSET;
#endif
}
uint32_t brcm_get_spsr_for_bl32_entry(void)
{
/*
* The Secure Payload Dispatcher service is responsible for
* setting the SPSR prior to entry into the BL32 image.
*/
return 0;
}
uint32_t brcm_get_spsr_for_bl33_entry(void)
{
unsigned int mode;
uint32_t spsr;
/* Figure out what mode we enter the non-secure world in */
mode = el_implemented(2) ? MODE_EL2 : MODE_EL1;
/*
* TODO: Consider the possibility of specifying the SPSR in
* the FIP ToC and allowing the platform to have a say as
* well.
*/
spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
const mmap_region_t *plat_brcm_get_mmap(void)
{
return plat_brcm_mmap;
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <drivers/arm/gicv3.h>
#include <plat/common/platform.h>
#include <platform_def.h>
/* The GICv3 driver only needs to be initialized in EL3 */
static uintptr_t brcm_rdistif_base_addrs[PLATFORM_CORE_COUNT];
static const interrupt_prop_t brcm_interrupt_props[] = {
/* G1S interrupts */
PLAT_BRCM_G1S_IRQ_PROPS(INTR_GROUP1S),
/* G0 interrupts */
PLAT_BRCM_G0_IRQ_PROPS(INTR_GROUP0)
};
/*
* MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
* to core position.
*
* Calculating core position is dependent on MPIDR_EL1.MT bit. However, affinity
* values read from GICR_TYPER don't have an MT field. To reuse the same
* translation used for CPUs, we insert MT bit read from the PE's MPIDR into
* that read from GICR_TYPER.
*
* Assumptions:
*
* - All CPUs implemented in the system have MPIDR_EL1.MT bit set;
* - No CPUs implemented in the system use affinity level 3.
*/
static unsigned int brcm_gicv3_mpidr_hash(u_register_t mpidr)
{
mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
return plat_core_pos_by_mpidr(mpidr);
}
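/*
 * Illustrative worked example (not part of the driver): if GICR_TYPER
 * reports an affinity value of 0x0100 (Aff1 = 1, Aff0 = 0) and the PE's
 * own MPIDR has the MT bit (bit 24) set, the hash above passes 0x1000100
 * to plat_core_pos_by_mpidr(), i.e. the same form of MPIDR the CPU itself
 * reports, so the existing CPU translation can be reused unchanged.
 */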
static const gicv3_driver_data_t brcm_gic_data = {
.gicd_base = PLAT_BRCM_GICD_BASE,
.gicr_base = PLAT_BRCM_GICR_BASE,
.interrupt_props = brcm_interrupt_props,
.interrupt_props_num = ARRAY_SIZE(brcm_interrupt_props),
.rdistif_num = PLATFORM_CORE_COUNT,
.rdistif_base_addrs = brcm_rdistif_base_addrs,
.mpidr_to_core_pos = brcm_gicv3_mpidr_hash
};
void plat_brcm_gic_driver_init(void)
{
/*
 * TODO: Check whether this needs to be initialized here again after it
 * has already been initialized in EL3; should we re-initialize it here
 * in S-EL1?
 */
gicv3_driver_init(&brcm_gic_data);
}
void plat_brcm_gic_init(void)
{
gicv3_distif_init();
gicv3_rdistif_init(plat_my_core_pos());
gicv3_cpuif_enable(plat_my_core_pos());
}
void plat_brcm_gic_cpuif_enable(void)
{
gicv3_cpuif_enable(plat_my_core_pos());
}
void plat_brcm_gic_cpuif_disable(void)
{
gicv3_cpuif_disable(plat_my_core_pos());
}
void plat_brcm_gic_pcpu_init(void)
{
gicv3_rdistif_init(plat_my_core_pos());
}
void plat_brcm_gic_redistif_on(void)
{
gicv3_rdistif_on(plat_my_core_pos());
}
void plat_brcm_gic_redistif_off(void)
{
gicv3_rdistif_off(plat_my_core_pos());
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/bl_common.h>
#include <common/desc_image_load.h>
#include <plat/common/platform.h>
#pragma weak plat_flush_next_bl_params
#pragma weak plat_get_bl_image_load_info
#pragma weak plat_get_next_bl_params
/*******************************************************************************
* This function flushes the data structures so that they are visible
* in memory for the next BL image.
******************************************************************************/
void plat_flush_next_bl_params(void)
{
flush_bl_params_desc();
}
/*******************************************************************************
* This function returns the list of loadable images.
******************************************************************************/
struct bl_load_info *plat_get_bl_image_load_info(void)
{
return get_bl_load_info_from_mem_params_desc();
}
/*******************************************************************************
* This function returns the list of executable images.
******************************************************************************/
struct bl_params *plat_get_next_bl_params(void)
{
bl_params_t *next_bl_params = get_next_bl_params_from_mem_params_desc();
populate_next_bl_params_config(next_bl_params);
return next_bl_params;
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <string.h>
#include <common/debug.h>
#include <drivers/io/io_driver.h>
#include <drivers/io/io_fip.h>
#include <drivers/io/io_memmap.h>
#include <drivers/io/io_storage.h>
#include <tools_share/firmware_image_package.h>
#include <cmn_plat_def.h>
#include <cmn_plat_util.h>
#include <plat_brcm.h>
#include <platform_def.h>
/* IO devices */
static const io_dev_connector_t *fip_dev_con;
static uintptr_t fip_dev_handle;
static const io_dev_connector_t *memmap_dev_con;
static uintptr_t memmap_dev_handle;
static const io_block_spec_t fip_block_spec = {
.offset = PLAT_BRCM_FIP_BASE,
.length = PLAT_BRCM_FIP_MAX_SIZE
};
static const io_block_spec_t qspi_fip_block_spec = {
.offset = PLAT_BRCM_FIP_QSPI_BASE,
.length = PLAT_BRCM_FIP_MAX_SIZE
};
static const io_block_spec_t nand_fip_block_spec = {
.offset = PLAT_BRCM_FIP_NAND_BASE,
.length = PLAT_BRCM_FIP_MAX_SIZE
};
static const io_uuid_spec_t bl2_uuid_spec = {
.uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
};
static const io_uuid_spec_t scp_bl2_uuid_spec = {
.uuid = UUID_SCP_FIRMWARE_SCP_BL2,
};
static const io_uuid_spec_t bl31_uuid_spec = {
.uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
};
static const io_uuid_spec_t bl32_uuid_spec = {
.uuid = UUID_SECURE_PAYLOAD_BL32,
};
static const io_uuid_spec_t bl32_extra1_uuid_spec = {
.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
};
static const io_uuid_spec_t bl32_extra2_uuid_spec = {
.uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
};
static const io_uuid_spec_t bl33_uuid_spec = {
.uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
};
static const io_uuid_spec_t tb_fw_config_uuid_spec = {
.uuid = UUID_TB_FW_CONFIG,
};
static const io_uuid_spec_t hw_config_uuid_spec = {
.uuid = UUID_HW_CONFIG,
};
static const io_uuid_spec_t soc_fw_config_uuid_spec = {
.uuid = UUID_SOC_FW_CONFIG,
};
static const io_uuid_spec_t tos_fw_config_uuid_spec = {
.uuid = UUID_TOS_FW_CONFIG,
};
static const io_uuid_spec_t nt_fw_config_uuid_spec = {
.uuid = UUID_NT_FW_CONFIG,
};
#if TRUSTED_BOARD_BOOT
static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
.uuid = UUID_TRUSTED_BOOT_FW_CERT,
};
static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
.uuid = UUID_TRUSTED_KEY_CERT,
};
static const io_uuid_spec_t scp_fw_key_cert_uuid_spec = {
.uuid = UUID_SCP_FW_KEY_CERT,
};
static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = {
.uuid = UUID_SOC_FW_KEY_CERT,
};
static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
.uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
};
static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
.uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
};
static const io_uuid_spec_t scp_fw_cert_uuid_spec = {
.uuid = UUID_SCP_FW_CONTENT_CERT,
};
static const io_uuid_spec_t soc_fw_cert_uuid_spec = {
.uuid = UUID_SOC_FW_CONTENT_CERT,
};
static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
.uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
};
static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
.uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
};
#endif /* TRUSTED_BOARD_BOOT */
static int open_fip(const uintptr_t spec);
static int open_memmap(const uintptr_t spec);
static int open_qspi(const uintptr_t spec);
static int open_nand(const uintptr_t spec);
struct plat_io_policy {
uintptr_t *dev_handle;
uintptr_t image_spec;
int (*check)(const uintptr_t spec);
};
/* By default, BRCM platforms load images from the FIP */
static const struct plat_io_policy policies[] = {
[FIP_IMAGE_ID] = {
&memmap_dev_handle,
(uintptr_t)&fip_block_spec,
open_memmap
},
[BL2_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl2_uuid_spec,
open_fip
},
[SCP_BL2_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&scp_bl2_uuid_spec,
open_fip
},
[BL31_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl31_uuid_spec,
open_fip
},
[BL32_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl32_uuid_spec,
open_fip
},
[BL32_EXTRA1_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl32_extra1_uuid_spec,
open_fip
},
[BL32_EXTRA2_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl32_extra2_uuid_spec,
open_fip
},
[BL33_IMAGE_ID] = {
&fip_dev_handle,
(uintptr_t)&bl33_uuid_spec,
open_fip
},
[TB_FW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&tb_fw_config_uuid_spec,
open_fip
},
[HW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&hw_config_uuid_spec,
open_fip
},
[SOC_FW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&soc_fw_config_uuid_spec,
open_fip
},
[TOS_FW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&tos_fw_config_uuid_spec,
open_fip
},
[NT_FW_CONFIG_ID] = {
&fip_dev_handle,
(uintptr_t)&nt_fw_config_uuid_spec,
open_fip
},
#if TRUSTED_BOARD_BOOT
[TRUSTED_BOOT_FW_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&tb_fw_cert_uuid_spec,
open_fip
},
[TRUSTED_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&trusted_key_cert_uuid_spec,
open_fip
},
[SCP_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&scp_fw_key_cert_uuid_spec,
open_fip
},
[SOC_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&soc_fw_key_cert_uuid_spec,
open_fip
},
[TRUSTED_OS_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&tos_fw_key_cert_uuid_spec,
open_fip
},
[NON_TRUSTED_FW_KEY_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&nt_fw_key_cert_uuid_spec,
open_fip
},
[SCP_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&scp_fw_cert_uuid_spec,
open_fip
},
[SOC_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&soc_fw_cert_uuid_spec,
open_fip
},
[TRUSTED_OS_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&tos_fw_cert_uuid_spec,
open_fip
},
[NON_TRUSTED_FW_CONTENT_CERT_ID] = {
&fip_dev_handle,
(uintptr_t)&nt_fw_cert_uuid_spec,
open_fip
},
#endif /* TRUSTED_BOARD_BOOT */
};
/* Policies used to locate the FIP itself, selected by the active boot source */
static const struct plat_io_policy boot_source_policies[] = {
[BOOT_SOURCE_QSPI] = {
&memmap_dev_handle,
(uintptr_t)&qspi_fip_block_spec,
open_qspi
},
[BOOT_SOURCE_NAND] = {
&memmap_dev_handle,
(uintptr_t)&nand_fip_block_spec,
open_nand
},
};
/* Weak definitions may be overridden in a specific BRCM platform */
#pragma weak plat_brcm_io_setup
#pragma weak plat_brcm_process_flags
static int open_fip(const uintptr_t spec)
{
int result;
uintptr_t local_image_handle;
/* See if a Firmware Image Package is available */
result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
if (result == 0) {
result = io_open(fip_dev_handle, spec, &local_image_handle);
if (result == 0) {
VERBOSE("Using FIP\n");
io_close(local_image_handle);
}
}
return result;
}
static int open_memmap(const uintptr_t spec)
{
int result;
uintptr_t local_image_handle;
result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
if (result == 0) {
result = io_open(memmap_dev_handle, spec, &local_image_handle);
if (result == 0) {
VERBOSE("Using Memmap\n");
io_close(local_image_handle);
}
}
return result;
}
static int open_qspi(const uintptr_t spec)
{
return open_memmap(spec);
}
static int open_nand(const uintptr_t spec)
{
return open_memmap(spec);
}
void brcm_io_setup(void)
{
int io_result;
uint32_t boot_source;
io_result = register_io_dev_fip(&fip_dev_con);
assert(io_result == 0);
io_result = register_io_dev_memmap(&memmap_dev_con);
assert(io_result == 0);
/* Open connections to devices and cache the handles */
io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
&fip_dev_handle);
assert(io_result == 0);
boot_source = boot_source_get();
switch (boot_source) {
case BOOT_SOURCE_QSPI:
case BOOT_SOURCE_NAND:
default:
io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
&memmap_dev_handle);
break;
}
assert(io_result == 0);
/* Ignore improbable errors in release builds */
(void)io_result;
}
void plat_brcm_io_setup(void)
{
brcm_io_setup();
}
void plat_brcm_process_flags(uint16_t plat_toc_flags __unused)
{
WARN("%s not implemented\n", __func__);
}
/*
* Return an IO device handle and specification which can be used to access
* an image. Use this to enforce the platform load policy.
*/
int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
uintptr_t *image_spec)
{
int result;
const struct plat_io_policy *policy;
uint32_t boot_source;
uint16_t lcl_plat_toc_flg;
assert(image_id < ARRAY_SIZE(policies));
boot_source = boot_source_get();
if (image_id == FIP_IMAGE_ID)
policy = &boot_source_policies[boot_source];
else
policy = &policies[image_id];
result = policy->check(policy->image_spec);
if (result == 0) {
*image_spec = policy->image_spec;
*dev_handle = *(policy->dev_handle);
if (image_id == TRUSTED_BOOT_FW_CERT_ID) {
/*
* Process the header flags to perform custom
* actions such as speeding up the PLL. This
* certificate appears to be the first image
* accessed by BL1, so this is where the flags
* are processed.
*/
fip_dev_get_plat_toc_flag((io_dev_info_t *)fip_dev_handle,
&lcl_plat_toc_flg);
plat_brcm_process_flags(lcl_plat_toc_flg);
}
}
return result;
}
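/*
 * Minimal usage sketch (illustrative only, assuming the standard TF-A
 * io_storage API; image_base/image_size stand in for the destination
 * buffer chosen by the caller): the generic loader resolves the policy
 * first, then opens and reads the image through the returned handle.
 *
 *	uintptr_t dev_handle, image_spec, image_handle;
 *	size_t bytes_read;
 *
 *	if ((plat_get_image_source(BL31_IMAGE_ID, &dev_handle,
 *				   &image_spec) == 0) &&
 *	    (io_open(dev_handle, image_spec, &image_handle) == 0)) {
 *		io_read(image_handle, image_base, image_size, &bytes_read);
 *		io_close(image_handle);
 *	}
 */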
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <arch_helpers.h>
#include <drivers/delay_timer.h>
#include <lib/bakery_lock.h>
#include <brcm_mhu.h>
#include <platform_def.h>
#include "m0_ipc.h"
#define PLAT_MHU_INTR_REG AP_TO_SCP_MAILBOX1
/* SCP MHU secure channel registers */
#define SCP_INTR_S_STAT CRMU_IHOST_SW_PERSISTENT_REG11
#define SCP_INTR_S_SET CRMU_IHOST_SW_PERSISTENT_REG11
#define SCP_INTR_S_CLEAR CRMU_IHOST_SW_PERSISTENT_REG11
/* CPU MHU secure channel registers */
#define CPU_INTR_S_STAT CRMU_IHOST_SW_PERSISTENT_REG10
#define CPU_INTR_S_SET CRMU_IHOST_SW_PERSISTENT_REG10
#define CPU_INTR_S_CLEAR CRMU_IHOST_SW_PERSISTENT_REG10
static DEFINE_BAKERY_LOCK(bcm_lock);
/*
* Slot 31 is reserved because the MHU hardware uses this register bit to
* indicate a non-secure access attempt. The total number of available slots is
* therefore 31 [30:0].
*/
#define MHU_MAX_SLOT_ID 30
void mhu_secure_message_start(unsigned int slot_id)
{
int iter = 1000000;
assert(slot_id <= MHU_MAX_SLOT_ID);
bakery_lock_get(&bcm_lock);
/* Make sure any previous command has finished */
do {
if (!(mmio_read_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT) &
(1 << slot_id)))
break;
udelay(1);
} while (--iter);
assert(iter != 0);
}
void mhu_secure_message_send(unsigned int slot_id)
{
uint32_t response, iter = 1000000;
assert(slot_id <= MHU_MAX_SLOT_ID);
assert(!(mmio_read_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT) &
(1 << slot_id)));
/* Send command to SCP */
mmio_setbits_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_SET, 1 << slot_id);
mmio_write_32(CRMU_MAIL_BOX0, MCU_IPC_MCU_CMD_SCPI);
mmio_write_32(PLAT_BRCM_MHU_BASE + PLAT_MHU_INTR_REG, 0x1);
/* Wait until IPC transport acknowledges reception of SCP command */
do {
response = mmio_read_32(CRMU_MAIL_BOX0);
if ((response & ~MCU_IPC_CMD_REPLY_MASK) ==
(MCU_IPC_CMD_DONE_MASK | MCU_IPC_MCU_CMD_SCPI))
break;
udelay(1);
} while (--iter);
assert(iter != 0);
}
uint32_t mhu_secure_message_wait(void)
{
/* Wait for response from SCP */
uint32_t response, iter = 1000000;
do {
response = mmio_read_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_STAT);
if (!response)
break;
udelay(1);
} while (--iter);
assert(iter != 0);
return response;
}
void mhu_secure_message_end(unsigned int slot_id)
{
assert(slot_id <= MHU_MAX_SLOT_ID);
/*
* Clear any response we got by clearing the relevant slot bit in the
* CLEAR register (which aliases the STAT register on this platform)
*/
mmio_clrbits_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_CLEAR, 1 << slot_id);
bakery_lock_release(&bcm_lock);
}
void mhu_secure_init(void)
{
bakery_lock_init(&bcm_lock);
/*
* The STAT register resets to zero. Ensure it is in the expected state,
* as a stale or garbage value would make us think it's a message we've
* already sent.
*/
mmio_write_32(PLAT_BRCM_MHU_BASE + CPU_INTR_S_STAT, 0);
mmio_write_32(PLAT_BRCM_MHU_BASE + SCP_INTR_S_STAT, 0);
}
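/*
 * Minimal sketch of a secure-channel transaction, assuming slot 0 (the
 * slot used by the SCPI layer below): acquire the channel, stage the
 * payload in shared memory, ring the doorbell, wait for the reply and
 * release the channel.
 *
 *	mhu_secure_message_start(0);
 *	... write command and payload to the shared-memory area ...
 *	mhu_secure_message_send(0);
 *	response = mhu_secure_message_wait();
 *	... read the reply from shared memory ...
 *	mhu_secure_message_end(0);
 */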
void plat_brcm_pwrc_setup(void)
{
mhu_secure_init();
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef BRCM_MHU_H
#define BRCM_MHU_H
#include <stdint.h>
void mhu_secure_message_start(unsigned int slot_id);
void mhu_secure_message_send(unsigned int slot_id);
uint32_t mhu_secure_message_wait(void);
void mhu_secure_message_end(unsigned int slot_id);
void mhu_secure_init(void);
#endif /* BRCM_MHU_H */
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <string.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <brcm_mhu.h>
#include <brcm_scpi.h>
#include <platform_def.h>
#define SCPI_SHARED_MEM_SCP_TO_AP (PLAT_SCP_COM_SHARED_MEM_BASE)
#define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_SCP_COM_SHARED_MEM_BASE \
+ 0x100)
/* Header and payload addresses for commands from AP to SCP */
#define SCPI_CMD_HEADER_AP_TO_SCP \
((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
/* Header and payload addresses for responses from SCP to AP */
#define SCPI_RES_HEADER_SCP_TO_AP \
((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
#define SCPI_RES_PAYLOAD_SCP_TO_AP \
((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
/* ID of the MHU slot used for the SCPI protocol */
#define SCPI_MHU_SLOT_ID 0
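/*
 * Resulting shared-memory layout implied by the macros above, assuming
 * sizeof(scpi_cmd_t) == 8 (illustrative):
 *
 *	PLAT_SCP_COM_SHARED_MEM_BASE + 0x000 : SCP -> AP header (scpi_cmd_t)
 *	PLAT_SCP_COM_SHARED_MEM_BASE + 0x008 : SCP -> AP payload
 *	PLAT_SCP_COM_SHARED_MEM_BASE + 0x100 : AP -> SCP header (scpi_cmd_t)
 *	PLAT_SCP_COM_SHARED_MEM_BASE + 0x108 : AP -> SCP payload
 */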
static void scpi_secure_message_start(void)
{
mhu_secure_message_start(SCPI_MHU_SLOT_ID);
}
static void scpi_secure_message_send(size_t payload_size)
{
/*
* Ensure that any write to the SCPI payload area is seen by SCP before
* we write to the MHU register. If these 2 writes were reordered by
* the CPU then SCP would read stale payload data.
*/
dmbst();
mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
static void scpi_secure_message_receive(scpi_cmd_t *cmd)
{
uint32_t mhu_status;
assert(cmd != NULL);
mhu_status = mhu_secure_message_wait();
/* Expect an SCPI message, reject any other protocol */
if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
mhu_status);
panic();
}
/*
* Ensure that any read from the SCPI payload area is done after reading
* the MHU register. If these 2 reads were reordered then the CPU would
* read invalid payload data.
*/
dmbld();
memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}
static void scpi_secure_message_end(void)
{
mhu_secure_message_end(SCPI_MHU_SLOT_ID);
}
int scpi_wait_ready(void)
{
scpi_cmd_t scpi_cmd;
VERBOSE("Waiting for SCP_READY command...\n");
/* Get a message from the SCP */
scpi_secure_message_start();
scpi_secure_message_receive(&scpi_cmd);
scpi_secure_message_end();
/* We are expecting 'SCP Ready', produce correct error if it's not */
scpi_status_t status = SCP_OK;
if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
ERROR("Unexpected SCP command: expected #%u, received #%u\n",
SCPI_CMD_SCP_READY, scpi_cmd.id);
status = SCP_E_SUPPORT;
} else if (scpi_cmd.size != 0) {
ERROR("SCP_READY cmd has incorrect size: expected 0, got %u\n",
scpi_cmd.size);
status = SCP_E_SIZE;
}
VERBOSE("Sending response for SCP_READY command\n");
/*
* Send our response back to SCP.
* We are using the same SCPI header, just update the status field.
*/
scpi_cmd.status = status;
scpi_secure_message_start();
memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
scpi_secure_message_send(0);
scpi_secure_message_end();
return status == SCP_OK ? 0 : -1;
}
void scpi_set_brcm_power_state(unsigned int mpidr,
scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
scpi_power_state_t brcm_state)
{
scpi_cmd_t *cmd;
uint32_t state = 0;
uint32_t *payload_addr;
#if ARM_PLAT_MT
/*
* The current SCPI driver only caters for single-threaded platforms.
* Hence we ignore the thread ID (which is always 0) for such platforms.
*/
state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f; /* CPU ID */
state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4; /* Cluster ID */
#else
state |= mpidr & 0x0f; /* CPU ID */
state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
#endif /* ARM_PLAT_MT */
state |= cpu_state << 8;
state |= cluster_state << 12;
state |= brcm_state << 16;
scpi_secure_message_start();
/* Populate the command header */
cmd = SCPI_CMD_HEADER_AP_TO_SCP;
cmd->id = SCPI_CMD_SET_POWER_STATE;
cmd->set = SCPI_SET_NORMAL;
cmd->sender = 0;
cmd->size = sizeof(state);
/* Populate the command payload */
payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
*payload_addr = state;
scpi_secure_message_send(sizeof(state));
/*
* SCP does not reply to this command in order to avoid MHU interrupts
* from the sender, which could interfere with its power state request.
*/
scpi_secure_message_end();
}
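/*
 * Worked example (illustrative, non-MT case): for mpidr == 0x102
 * (cluster 1, CPU 2) with cpu_state, cluster_state and brcm_state all
 * set to scpi_power_off (3), the encoded word is:
 *
 *	CPU ID        : 0x00002
 *	Cluster ID    : 0x00010
 *	cpu_state     : 0x00300  (3 << 8)
 *	cluster_state : 0x03000  (3 << 12)
 *	brcm_state    : 0x30000  (3 << 16)
 *	state         : 0x33312
 */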
/*
* Query and obtain power state from SCP.
*
* In response to the query, SCP returns power states of all CPUs in all
* clusters of the system. The returned response is then filtered based on the
* supplied MPIDR. Power states of the requested cluster and the CPUs within it
* are updated via the supplied non-NULL pointer arguments.
*
* Returns 0 on success, or -1 on errors.
*/
int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
unsigned int *cluster_state_p)
{
scpi_cmd_t *cmd;
scpi_cmd_t response;
int power_state, cpu, cluster, rc = -1;
/*
* Extract CPU and cluster membership of the given MPIDR. SCPI caters
* for only up to 0xf clusters and 8 CPUs per cluster.
*/
cpu = mpidr & MPIDR_AFFLVL_MASK;
cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
if (cpu >= 8 || cluster >= 0xf)
return -1;
scpi_secure_message_start();
/* Populate request headers */
zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
cmd = SCPI_CMD_HEADER_AP_TO_SCP;
cmd->id = SCPI_CMD_GET_POWER_STATE;
/*
* Send message and wait for SCP's response
*/
scpi_secure_message_send(0);
scpi_secure_message_receive(&response);
if (response.status != SCP_OK)
goto exit;
/* Validate SCP response */
if (!CHECK_RESPONSE(response, cluster))
goto exit;
/* Extract power states for required cluster */
power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
if (CLUSTER_ID(power_state) != cluster)
goto exit;
/* Update power states via the supplied pointers */
if (cluster_state_p)
*cluster_state_p = CLUSTER_POWER_STATE(power_state);
if (cpu_state_p)
*cpu_state_p = CPU_POWER_STATE(power_state);
rc = 0;
exit:
scpi_secure_message_end();
return rc;
}
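/*
 * Usage sketch (illustrative only): a PSCI hook could cross-check the
 * SCP's view of a core before acting on an OS power-state request.
 *
 *	unsigned int cpu_states, cluster_state;
 *
 *	if (scpi_get_brcm_power_state(read_mpidr_el1(), &cpu_states,
 *				      &cluster_state) == 0)
 *		INFO("cpu mask 0x%x, cluster state %u\n",
 *		     cpu_states, cluster_state);
 */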
uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
{
scpi_cmd_t *cmd;
uint8_t *payload_addr;
scpi_secure_message_start();
/* Populate the command header */
cmd = SCPI_CMD_HEADER_AP_TO_SCP;
cmd->id = SCPI_CMD_SYS_POWER_STATE;
cmd->set = 0;
cmd->sender = 0;
cmd->size = sizeof(*payload_addr);
/* Populate the command payload */
payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
*payload_addr = system_state & 0xff;
scpi_secure_message_send(sizeof(*payload_addr));
scpi_secure_message_end();
return SCP_OK;
}
/*
* Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef BRCM_SCPI_H
#define BRCM_SCPI_H
#include <stddef.h>
#include <stdint.h>
/*
* An SCPI command consists of a header and a payload.
* The following structure describes the header. It is 64-bit long.
*/
typedef struct {
/* Command ID */
uint32_t id : 7;
/* Set ID. Identifies whether this is a standard or extended command. */
uint32_t set : 1;
/* Sender ID to match a reply. The value is sender specific. */
uint32_t sender : 8;
/* Size of the payload in bytes (0 - 511) */
uint32_t size : 9;
uint32_t reserved : 7;
/*
* Status indicating the success of a command.
* See the enum below.
*/
uint32_t status;
} scpi_cmd_t;
typedef enum {
SCPI_SET_NORMAL = 0, /* Normal SCPI commands */
SCPI_SET_EXTENDED /* Extended SCPI commands */
} scpi_set_t;
enum {
SCP_OK = 0, /* Success */
SCP_E_PARAM, /* Invalid parameter(s) */
SCP_E_ALIGN, /* Invalid alignment */
SCP_E_SIZE, /* Invalid size */
SCP_E_HANDLER, /* Invalid handler or callback */
SCP_E_ACCESS, /* Invalid access or permission denied */
SCP_E_RANGE, /* Value out of range */
SCP_E_TIMEOUT, /* Timeout has occurred */
SCP_E_NOMEM, /* Invalid memory area or pointer */
SCP_E_PWRSTATE, /* Invalid power state */
SCP_E_SUPPORT, /* Feature not supported or disabled */
SCPI_E_DEVICE, /* Device error */
SCPI_E_BUSY, /* Device is busy */
};
typedef uint32_t scpi_status_t;
typedef enum {
SCPI_CMD_SCP_READY = 0x01,
SCPI_CMD_SET_POWER_STATE = 0x03,
SCPI_CMD_GET_POWER_STATE = 0x04,
SCPI_CMD_SYS_POWER_STATE = 0x05
} scpi_command_t;
/*
* Macros to parse SCP response to GET_POWER_STATE command
*
* [3:0] : cluster ID
* [7:4] : cluster state: 0 = on; 3 = off; rest are reserved
* [15:8]: on/off state for individual CPUs in the cluster
*
* The payload is little-endian.
*/
#define CLUSTER_ID(_resp) ((_resp) & 0xf)
#define CLUSTER_POWER_STATE(_resp) (((_resp) >> 4) & 0xf)
/* Result is a bit mask of CPU on/off states in the cluster */
#define CPU_POWER_STATE(_resp) (((_resp) >> 8) & 0xff)
/*
* For GET_POWER_STATE, SCP returns the power states of every cluster. The
* size of the response depends on the number of clusters in the system. The
* SCP-to-AP payload contains 2 bytes per cluster. Make sure the response is
* large enough to contain the power states of the given cluster.
*/
#define CHECK_RESPONSE(_resp, _clus) (_resp.size >= (((_clus) + 1) * 2))
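/*
 * Worked example (illustrative): a 16-bit payload entry of 0x0301 decodes as
 * CLUSTER_ID == 1, CLUSTER_POWER_STATE == 0 (on) and CPU_POWER_STATE == 0x03,
 * i.e. the per-CPU bits for CPU0 and CPU1 of that cluster are set.
 */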
typedef enum {
scpi_power_on = 0,
scpi_power_retention = 1,
scpi_power_off = 3,
} scpi_power_state_t;
typedef enum {
scpi_system_shutdown = 0,
scpi_system_reboot = 1,
scpi_system_reset = 2
} scpi_system_state_t;
extern int scpi_wait_ready(void);
extern void scpi_set_brcm_power_state(unsigned int mpidr,
scpi_power_state_t cpu_state,
scpi_power_state_t cluster_state,
scpi_power_state_t css_state);
int scpi_get_brcm_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
unsigned int *cluster_state_p);
uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
#endif /* BRCM_SCPI_H */